def test_mongodb_origin_simple_with_decimal(sdc_builder, sdc_executor, mongodb):
    """
    Validate that we properly process decimal type.

    The pipeline looks like:
        mongodb_origin >> trash
    """
    ORIG_BINARY_DOCS = [{'data': decimal128.Decimal128("0.5")}]

    pipeline_builder = sdc_builder.get_pipeline_builder()
    pipeline_builder.add_error_stage('Discard')

    # Use random database/collection names so concurrent test runs don't collide.
    mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')
    mongodb_origin.set_attributes(capped_collection=False,
                                  database=get_random_string(ascii_letters, 5),
                                  collection=get_random_string(ascii_letters, 10))

    trash = pipeline_builder.add_stage('Trash')
    mongodb_origin >> trash
    pipeline = pipeline_builder.build().configure_for_environment(mongodb)

    try:
        # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database
        # when used for inserting in collection. Hence the deep copy.
        docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)

        # Create documents in MongoDB using PyMongo.
        # First a database is created. Then a collection is created inside that database.
        # Then documents are created in that collection.
        logger.info('Adding documents into %s collection using PyMongo...',
                    mongodb_origin.collection)
        mongodb_database = mongodb.engine[mongodb_origin.database]
        mongodb_collection = mongodb_database[mongodb_origin.collection]
        insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]
        assert len(insert_list) == len(docs_in_database)

        # Verify that insert was in-fact successful.
        assert docs_in_database == [
            item for item in
            mongodb.engine[mongodb_origin.database][mongodb_origin.collection].find()
        ]

        # Start pipeline and verify the documents using snapshot.
        sdc_executor.add_pipeline(pipeline)
        snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
                                                 start_pipeline=True).snapshot
        sdc_executor.stop_pipeline(pipeline)
        rows_from_snapshot = [{'data': decimal128.Decimal128(str(record.field['data']))}
                              for record in snapshot[mongodb_origin].output]

        assert rows_from_snapshot == ORIG_BINARY_DOCS
    finally:
        logger.info('Dropping %s database...', mongodb_origin.database)
        # Fix: the original only logged the drop but never performed it,
        # leaking one randomly-named database per test run.
        mongodb.engine.drop_database(mongodb_origin.database)
class MyMongo(Mongo):
    """Custom variant of `eve.io.mongo.mongo.Mongo`.

    Swaps the datetime serializer for ``parse_datetime`` and the JSON encoder
    class for ``MyMongoJSONEncoder``.  Pass it to the Eve constructor as the
    ``data`` argument: ``app = Eve(data=MyMongo)``.
    """

    # Per-type casting callables applied when deserializing request payloads.
    serializers = {
        "objectid": lambda raw: ObjectId(raw) if raw else None,
        "datetime": parse_datetime,
        "integer": lambda raw: None if raw is None else int(raw),
        "float": lambda raw: None if raw is None else float(raw),
        "number": lambda raw: None if raw is None else json.loads(raw),
        "boolean": lambda raw: {
            "1": True,
            "true": True,
            "0": False,
            "false": False,
        }[str(raw).lower()],
        "dbref": lambda raw: None if raw is None else DBRef(
            raw["$col"], raw["$id"], raw["$db"] if "$db" in raw else None),
        "decimal": lambda raw: None if raw is None else decimal128.Decimal128(
            decimal.Decimal(str(raw))),
    }

    json_encoder_class = MyMongoJSONEncoder
def _sum_operation(values):
    """Sum numeric values, transparently unwrapping BSON Decimal128 entries.

    Non-numeric entries are silently skipped.  When the sum is a
    ``decimal.Decimal`` the result is wrapped back into a ``Decimal128``;
    otherwise the plain numeric sum is returned.
    """
    if decimal_support:
        summable = []
        for item in values:
            if isinstance(item, numbers.Number):
                summable.append(item)
            elif isinstance(item, decimal128.Decimal128):
                # Unwrap to decimal.Decimal so it participates in sum().
                summable.append(item.to_decimal())
    else:
        # Without pymongo/bson available, only plain numbers are summable.
        summable = [item for item in values if isinstance(item, numbers.Number)]

    total = sum(summable)
    if isinstance(total, decimal.Decimal):
        return decimal128.Decimal128(total)
    return total
class Mongo(DataLayer):
    """ MongoDB data access layer for Eve REST API.

    .. versionchanged:: 0.5
       Properly serialize nullable float and integers. #469.
       Return 400 if unsupported query operators are used. #387.

    .. versionchanged:: 0.4
       Don't serialize to objectid if value is null. #341.

    .. versionchanged:: 0.2
       Provide the specialized json serializer class as ``json_encoder_class``.

    .. versionchanged:: 0.1.1
       'serializers' added.
    """

    # Per-type casting callables applied when deserializing payloads/queries.
    serializers = {
        "objectid": lambda value: ObjectId(value) if value else None,
        "datetime": str_to_date,
        "integer": lambda value: int(value) if value is not None else None,
        "float": lambda value: float(value) if value is not None else None,
        "number": lambda val: json.loads(val) if val is not None else None,
        "boolean": lambda v: {
            "1": True,
            "true": True,
            "0": False,
            "false": False
        }[str(v).lower()],
        # Accepts either the extended-JSON '$ref' spelling or '$col'.
        "dbref": lambda value: DBRef(
            value["$col"] if "$col" in value else value["$ref"],
            value["$id"],
            value["$db"] if "$db" in value else None,
        ) if value is not None else None,
        "decimal": lambda value: decimal128.Decimal128(
            decimal.Decimal(str(value))) if value is not None else None,
    }

    # JSON serializer is a class attribute. Allows extensions to replace it
    # with their own implementation.
    json_encoder_class = MongoJSONEncoder

    # Whitelist of MongoDB query operators accepted in client 'where' clauses;
    # anything outside this set is rejected by _sanitize() with a 400.
    operators = set(
        ["$gt", "$gte", "$in", "$lt", "$lte", "$ne", "$nin"] +
        ["$or", "$and", "$not", "$nor"] +
        ["$mod", "$regex", "$text", "$where"] +
        ["$options", "$search", "$language", "$caseSensitive"] +
        ["$diacriticSensitive", "$exists", "$type"] +
        ["$geoWithin", "$geoIntersects", "$near", "$nearSphere",
         "$centerSphere"] +
        ["$geometry", "$maxDistance", "$box"] +
        ["$all", "$elemMatch", "$size"] +
        ["$bitsAllClear", "$bitsAllSet", "$bitsAnyClear", "$bitsAnySet"] +
        ["$center", "$expr"])

    def init_app(self, app):
        """ Initialize PyMongo.

        .. versionchanged:: 0.6
           Use mongo_prefix for multidb support.

        .. versionchanged:: 0.0.9
           Support for Python 3.3.
        """
        # mongod must be running or this will raise an exception
        self.driver = PyMongos(self)
        self.mongo_prefix = None

    def find(self, resource, req, sub_resource_lookup):
        """ Retrieves a set of documents matching a given request. Queries can
        be expressed in two different formats: the mongo query syntax, and the
        python syntax. The first kind of query would look like: ::

            ?where={"name": "john doe"}

        while the second would look like: ::

            ?where=name=="john doe"

        The resultset if paginated.

        :param resource: resource name.
        :param req: a :class:`ParsedRequest` instance.
        :param sub_resource_lookup: sub-resource lookup from the endpoint url.

        .. versionchanged:: 0.6
           Support for multiple databases.
           Filter soft deleted documents by default

        .. versionchanged:: 0.5
           Support for comma delimited sort syntax. Addresses #443.
           Return the error if a blacklisted MongoDB operator is used in query.
           Abort with 400 if unsupported query operator is used. #387.
           Abort with 400 in case of invalid sort syntax. #387.

        .. versionchanged:: 0.4
           'allowed_filters' is now checked before adding 'sub_resource_lookup'
           to the query, as it is considered safe.
           Refactored to use self._client_projection since projection is now
           honored by getitem() as well.

        .. versionchanged:: 0.3
           Support for new _mongotize() signature.

        .. versionchanged:: 0.2
           Support for sub-resources.
           Support for 'default_sort'.

        .. versionchanged:: 0.1.1
           Better query handling. We're now properly casting objectid-like
           strings to ObjectIds. Also, we're casting both datetimes and
           objectids even when the query was originally in python syntax.

        .. versionchanged:: 0.0.9
           More informative error messages.

        .. versionchanged:: 0.0.7
           Abort with a 400 if the query includes blacklisted operators.

        .. versionchanged:: 0.0.6
           Only retrieve fields in the resource schema
           Support for projection queries ('?projection={"name": 1}')

        .. versionchanged:: 0.0.5
           handles the case where req.max_results is None because pagination
           has been disabled.

        .. versionchanged:: 0.0.4
           retrieves the target collection via the new config.SOURCES helper.
        """
        args = dict()
        if req and req.max_results:
            args["limit"] = req.max_results
        if req and req.page > 1:
            args["skip"] = (req.page - 1) * req.max_results

        # TODO sort syntax should probably be coherent with 'where': either
        # mongo-like or python-like. Currently accepts only mongo-like sort
        # syntax.
        # TODO should validate on unknown sort fields (mongo driver doesn't
        # return an error)
        client_sort = self._convert_sort_request_to_dict(req)
        spec = self._convert_where_request_to_dict(req)
        bad_filter = validate_filters(spec, resource)
        if bad_filter:
            abort(400, bad_filter)
        if sub_resource_lookup:
            spec = self.combine_queries(spec, sub_resource_lookup)
        if (config.DOMAIN[resource]["soft_delete"]
                and not (req and req.show_deleted)
                and not self.query_contains_field(spec, config.DELETED)):
            # Soft delete filtering applied after validate_filters call as
            # querying against the DELETED field must always be allowed when
            # soft_delete is enabled
            spec = self.combine_queries(spec, {config.DELETED: {"$ne": True}})
        spec = self._mongotize(spec, resource)
        client_projection = self._client_projection(req)
        datasource, spec, projection, sort = self._datasource_ex(
            resource, spec, client_projection, client_sort)
        if req and req.if_modified_since:
            spec[config.LAST_UPDATED] = {"$gt": req.if_modified_since}
        if len(spec) > 0:
            args["filter"] = spec
        if sort is not None:
            args["sort"] = sort
        if projection:
            args["projection"] = projection
        # Remember the target collection and filter so that
        # last_documents_count can re-run an accurate count afterwards.
        self.__last_target = self.pymongo(resource).db[datasource], spec
        try:
            self.__last_cursor = self.pymongo(resource).db[datasource].find(
                **args)
        except TypeError as e:
            # pymongo raises ValueError when invalid query paramenters are
            # included. We do our best to catch them beforehand but, especially
            # with key/value sort syntax, invalid ones might still slip in.
            self.app.logger.exception(e)
            abort(400, description=debug_error_message(str(e)))
        return self.__last_cursor

    @property
    def last_documents_count(self):
        # Count of documents matching the filter used by the most recent
        # find(); None when no find() has been issued yet.
        if not self.__last_target:
            return None
        try:
            target, spec = self.__last_target
            return target.count_documents(spec)
        except:
            # fallback to deprecated method. this might happen when the query
            # includes operators not supported by count_documents(). one
            # documented use-case is when we're running on mongo 3.4 and below,
            # which does not support $expr ($expr must replace $where in
            # count_documents()).
            # 1. Mongo 3.6+; $expr: pass
            # 2. Mongo 3.6+; $where: pass (via fallback)
            # 3. Mongo 3.4; $where: pass (via fallback)
            # 4. Mongo 3.4; $expr: fail (operator not supported by db)
            # See: http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.count
            return self.__last_cursor.count()

    def find_one(self,
                 resource,
                 req,
                 check_auth_value=True,
                 force_auth_field_projection=False,
                 **lookup):
        """ Retrieves a single document.

        :param resource: resource name.
        :param req: a :class:`ParsedRequest` instance.
        :param **lookup: lookup query.

        .. versionchanged:: 0.6
           Support for multiple databases.
           Filter soft deleted documents by default

        .. versionchanged:: 0.4
           Honor client projection requests.

        .. versionchanged:: 0.3.0
           Support for new _mongotize() signature.
           Custom ID_FIELD lookups would raise an exception. See #203.

        .. versionchanged:: 0.1.0
           ID_FIELD to ObjectID conversion is done before `_datasource_ex` is
           called.

        .. versionchanged:: 0.0.6
           Only retrieve fields in the resource schema

        .. versionchanged:: 0.0.4
           retrieves the target collection via the new config.SOURCES helper.
        """
        self._mongotize(lookup, resource)
        client_projection = self._client_projection(req)

        datasource, filter_, projection, _ = self._datasource_ex(
            resource,
            lookup,
            client_projection,
            check_auth_value=check_auth_value,
            force_auth_field_projection=force_auth_field_projection,
        )

        if ((config.DOMAIN[resource]["soft_delete"])
                and (not req or not req.show_deleted)
                and (not self.query_contains_field(lookup, config.DELETED))):
            filter_ = self.combine_queries(filter_,
                                           {config.DELETED: {
                                               "$ne": True
                                           }})
        # Here, we feed pymongo with `None` if projection is empty.
        return (self.pymongo(resource).db[datasource].find_one(
            filter_, projection or None))

    def find_one_raw(self, resource, **lookup):
        """ Retrieves a single raw document.

        :param resource: resource name.
        :param **lookup: lookup query.

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionadded:: 0.4
        """
        id_field = config.DOMAIN[resource]["id_field"]
        _id = lookup.get(id_field)
        datasource, filter_, _, _ = self._datasource_ex(
            resource, {id_field: _id}, None)

        lookup = self._mongotize(lookup, resource)

        return self.pymongo(resource).db[datasource].find_one(lookup)

    def find_list_of_ids(self, resource, ids, client_projection=None):
        """ Retrieves a list of documents from the collection given
        by `resource`, matching the given list of ids.

        This query is generated to *preserve the order* of the elements
        in the `ids` list. An alternative would be to use the `$in` operator
        and accept non-dependable ordering for a slight performance boost
        see <https://jira.mongodb.org/browse/SERVER-7528?focusedCommentId=
        181518&page=com.atlassian.jira.plugin.system.issuetabpanels:comment
        -tabpanel#comment-181518>

        To preserve order, we use a query of the form
            db.collection.find( { $or:[ { _id:ObjectId(...) },
                { _id:ObjectId(...) }...] } )

        Instead of the simpler
            {'_id': {'$in': ids}}

        -- via http://stackoverflow.com/a/13185509/1161906

        :param resource: resource name.
        :param ids: a list of ObjectIds corresponding to the documents
        to retrieve
        :param client_projection: a specific projection to use
        :return: a list of documents matching the ids in `ids` from the
        collection specified in `resource`

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionchanged:: 0.1.1
           Using config.ID_FIELD instead of hard coded '_id'.

        .. versionadded:: 0.1.0
        """
        id_field = config.DOMAIN[resource]["id_field"]
        query = {"$or": [{id_field: id_} for id_ in ids]}

        datasource, spec, projection, _ = self._datasource_ex(
            resource, query=query, client_projection=client_projection)

        # projection of {} return all fields in MongoDB, but
        # pymongo will only return `_id`. It's a design flaw upstream.
        # Here, we feed pymongo with `None` if projection is empty.
        documents = (self.pymongo(resource).db[datasource].find(
            filter=spec, projection=(projection or None)))
        return documents

    def aggregate(self, resource, pipeline, options):
        """ Runs an aggregation pipeline against the resource's collection.

        .. versionadded:: 0.7
        """
        datasource, _, _, _ = self.datasource(resource)
        # _mongotize expects a dict, so wrap the pipeline under a throwaway
        # key and unwrap the (mongotized) result.
        challenge = self._mongotize({"key": pipeline}, resource)["key"]
        return self.pymongo(resource).db[datasource].aggregate(
            challenge, **options)

    def insert(self, resource, doc_or_docs):
        """ Inserts a document into a resource collection.

        .. versionchanged:: 0.6.1
           Support for PyMongo 3.0.

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionchanged:: 0.0.9
           More informative error messages.

        .. versionchanged:: 0.0.8
           'write_concern' support.

        .. versionchanged:: 0.0.6
           projection queries ('?projection={"name": 1}')
           'document' param renamed to 'doc_or_docs', making support for bulk
           inserts apparent.

        .. versionchanged:: 0.0.4
           retrieves the target collection via the new config.SOURCES helper.
        """
        datasource, _, _, _ = self._datasource_ex(resource)
        coll = self.get_collection_with_write_concern(datasource, resource)
        if isinstance(doc_or_docs, dict):
            doc_or_docs = [doc_or_docs]
        try:
            return coll.insert_many(doc_or_docs, ordered=True).inserted_ids
        except pymongo.errors.BulkWriteError as e:
            self.app.logger.exception(e)

            # since this is an ordered bulk operation, all remaining inserts
            # are aborted. Be aware that if BULK_ENABLED is True and more than
            # one document is included with the payload, some documents might
            # have been successfully inserted, even if the operation was
            # aborted.

            # report a duplicate key error since this can probably be
            # handled by the client.
            for error in e.details["writeErrors"]:
                # amazingly enough, pymongo does not appear to be exposing
                # error codes as constants.
                if error["code"] == 11000:
                    abort(
                        409,
                        description=debug_error_message(
                            "Duplicate key error at index: %s, message: %s" %
                            (error["index"], error["errmsg"])),
                    )

            abort(
                500,
                description=debug_error_message(
                    "pymongo.errors.BulkWriteError: %s" % e),
            )

    def _change_request(self, resource, id_, changes, original, replace=False):
        """ Performs a change, be it a replace or update.

        .. versionchanged:: 0.8.2
           Return 400 if update/replace with malformed DBRef field. See #1257.

        .. versionchanged:: 0.6.1
           Support for PyMongo 3.0.

        .. versionchanged:: 0.6
           Return 400 if an attempt is made to update/replace an immutable
           field.
        """
        id_field = config.DOMAIN[resource]["id_field"]
        query = {id_field: id_}
        # Match on the original ETag as well so a concurrent change between
        # read and write is detected (OriginalChangedError below).
        if config.ETAG in original:
            query[config.ETAG] = original[config.ETAG]
        datasource, filter_, _, _ = self._datasource_ex(resource, query)
        coll = self.get_collection_with_write_concern(datasource, resource)
        try:
            result = (coll.replace_one(filter_, changes)
                      if replace else coll.update_one(filter_, changes))
            if (config.ETAG in original and result and result.acknowledged
                    and result.modified_count == 0):
                raise self.OriginalChangedError()
        except pymongo.errors.DuplicateKeyError as e:
            abort(
                400,
                description=debug_error_message(
                    "pymongo.errors.DuplicateKeyError: %s" % e),
            )
        except (pymongo.errors.WriteError,
                pymongo.errors.OperationFailure) as e:
            # server error codes and messages changed between 2.4 and 2.6/3.0.
            server_version = self.driver.db.client.server_info()["version"][:3]
            if (server_version == "2.4" and e.code in (13596, 10148)) or (
                    server_version in
                ("2.6", "3.0", "3.2", "3.4", "3.6", "4.0")
                    and e.code in (66, 16837)):
                # attempt to update an immutable field. this usually
                # happens when a PATCH or PUT includes a mismatching ID_FIELD.
                self.app.logger.warning(e)
                description = (
                    debug_error_message(
                        "pymongo.errors.OperationFailure: %s" % e)
                    or "Attempt to update an immutable field. Usually happens "
                    "when PATCH or PUT include a '%s' field, "
                    "which is immutable (PUT can include it as long as "
                    "it is unchanged)." % id_field)
                abort(400, description=description)
            else:
                # see comment in :func:`insert()`.
                self.app.logger.exception(e)
                abort(
                    500,
                    description=debug_error_message(
                        "pymongo.errors.OperationFailure: %s" % e),
                )

    def update(self, resource, id_, updates, original):
        """ Updates a collection document.

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionchanged:: 5.2
           Raise OriginalChangedError if document is changed from the
           specified original.

        .. versionchanged:: 0.4
           Return a 400 on pymongo DuplicateKeyError.

        .. versionchanged:: 0.3.0
           Custom ID_FIELD lookups would fail. See #203.

        .. versionchanged:: 0.2
           Don't explicitly convert ID_FIELD to ObjectId anymore, so we can
           also process different types (UUIDs etc).

        .. versionchanged:: 0.0.9
           More informative error messages.

        .. versionchanged:: 0.0.8
           'write_concern' support.

        .. versionchanged:: 0.0.6
           projection queries ('?projection={"name": 1}')

        .. versionchanged:: 0.0.4
           retrieves the target collection via the new config.SOURCES helper.
        """
        return self._change_request(resource, id_, {"$set": updates}, original)

    def replace(self, resource, id_, document, original):
        """ Replaces an existing document.

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionchanged:: 5.2
           Raise OriginalChangedError if document is changed from the
           specified original.

        .. versionchanged:: 0.3.0
           Custom ID_FIELD lookups would fail. See #203.

        .. versionchanged:: 0.2
           Don't explicitly convert ID_FIELD to ObjectId anymore, so we can
           also process different types (UUIDs etc).

        .. versionadded:: 0.1.0
        """
        return self._change_request(resource,
                                    id_,
                                    document,
                                    original,
                                    replace=True)

    def remove(self, resource, lookup):
        """ Removes a document or the entire set of documents from a
        collection.

        .. versionchanged:: 0.6.1
           Support for PyMongo 3.0.

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionchanged:: 0.3
           Support lookup arg, which allows to properly delete sub-resources
           (only delete documents that meet a certain constraint).

        .. versionchanged:: 0.2
           Don't explicitly converto ID_FIELD to ObjectId anymore, so we can
           also process different types (UUIDs etc).

        .. versionchanged:: 0.0.9
           More informative error messages.

        .. versionchanged:: 0.0.8
           'write_concern' support.

        .. versionchanged:: 0.0.6
           projection queries ('?projection={"name": 1}')

        .. versionchanged:: 0.0.4
           retrieves the target collection via the new config.SOURCES helper.

        .. versionadded:: 0.0.2
            Support for deletion of entire documents collection.

        :returns A document (dict) describing the effect of the remove
        or None if write acknowledgement is disabled.
        """
        # NOTE(review): despite the docstring, this implementation does not
        # return the delete result — confirm whether callers rely on a value.
        lookup = self._mongotize(lookup, resource)
        datasource, filter_, _, _ = self._datasource_ex(resource, lookup)
        coll = self.get_collection_with_write_concern(datasource, resource)
        try:
            coll.delete_many(filter_)
        except pymongo.errors.OperationFailure as e:
            # see comment in :func:`insert()`.
            self.app.logger.exception(e)
            abort(
                500,
                description=debug_error_message(
                    "pymongo.errors.OperationFailure: %s" % e),
            )

    # TODO: The next three methods could be pulled out to form the basis
    # of a separate MongoQuery class

    def combine_queries(self, query_a, query_b):
        """ Takes two db queries and applies db-specific syntax to produce
        the intersection.

        This is used because we can't just dump one set of query operators
        into another.

        Consider for example if the dataset contains a custom datasource
        pattern like --
           'filter': {'username': {'$exists': True}}

        If we simultaneously try to filter on the field `username`,
        then doing query_a.update(query_b) would lose information.

        This implementation of the function just combines everything in the
        two dicts using the `$and` operator.

        Note that this is exactly same as performing dict.update() except
        when multiple operators are operating on the /same field/.

        Example:
            combine_queries({'username': {'$exists': True}},
                            {'username': '******'})
        {'$and': [{'username': {'$exists': True}}, {'username': '******'}]}

        .. versionadded: 0.1.0
           Support for intelligent combination of db queries
        """
        # Chain the operations with the $and operator
        return {
            "$and": [{
                k: v
            } for k, v in itertools.chain(query_a.items(), query_b.items())]
        }

    def get_value_from_query(self, query, field_name):
        """ For the specified field name, parses the query and returns
        the value being assigned in the query.

        For example,
            get_value_from_query({'_id': 123}, '_id')
        123

        This mainly exists to deal with more complicated compound queries
            get_value_from_query(
                {'$and': [{'_id': 123}, {'firstname': 'mike'}],
                '_id'
            )
        123

        .. versionadded: 0.1.0
           Support for parsing values embedded in compound db queries
        """
        if field_name in query:
            return query[field_name]
        elif "$and" in query:
            for condition in query["$and"]:
                if field_name in condition:
                    return condition[field_name]
        raise KeyError

    def query_contains_field(self, query, field_name):
        """ For the specified field name, does the query contain it?
        Used know whether we need to parse a compound query.

        .. versionadded: 0.1.0
           Support for parsing values embedded in compound db queries
        """
        try:
            self.get_value_from_query(query, field_name)
        except KeyError:
            return False
        return True

    def is_empty(self, resource):
        """ Returns True if resource is empty; False otherwise. If there is
        no predefined filter on the resource we're relying on the
        db.collection.count_documents. However, if we do have a predefined
        filter we have to fallback on the find() method, which can be much
        slower.

        .. versionchanged:: 0.6
           Support for multiple databases.

        .. versionadded:: 0.3
        """
        datasource, filter_, _, _ = self.datasource(resource)
        coll = self.pymongo(resource).db[datasource]

        try:
            if not filter_:
                # faster, but we can only afford it if there's no predefined
                # filter on the datasource.
                return coll.count_documents({}) == 0
            else:
                # fallback on find() since we have a filter to apply.
                try:
                    # need to check if the whole resultset is missing, no
                    # matter the IMS header.
                    del filter_[config.LAST_UPDATED]
                except:
                    pass
                return coll.count_documents(filter_) == 0
        except pymongo.errors.OperationFailure as e:
            # see comment in :func:`insert()`.
            self.app.logger.exception(e)
            abort(
                500,
                description=debug_error_message(
                    "pymongo.errors.OperationFailure: %s" % e),
            )

    def _mongotize(self, source, resource):
        """ Recursively iterates a JSON dictionary, turning RFC-1123 strings
        into datetime values and ObjectId-link strings into ObjectIds.

        .. versionchanged:: 0.3
           'query_objectid_as_string' allows to bypass casting string types
           to objectids.

        .. versionchanged:: 0.1.1
           Renamed from _jsondatetime to _mongotize, as it now handles
           ObjectIds too.

        .. versionchanged:: 0.1.0
           Datetime conversion was failing on Py2, since 0.0.9 :P

        .. versionchanged:: 0.0.9
           support for Python 3.3.

        .. versionadded:: 0.0.4
        """
        schema = config.DOMAIN[resource]
        skip_objectid = schema.get("query_objectid_as_string", False)

        def try_cast(v):
            # Best-effort cast: datetime first, then ObjectId (unless
            # disabled), otherwise return the value untouched.
            try:
                return datetime.strptime(v, config.DATE_FORMAT)
            except:
                if not skip_objectid:
                    try:
                        # Convert to unicode because ObjectId() interprets
                        # 12-character strings (but not unicode) as binary
                        # representations of ObjectId's.  See
                        # https://github.com/pyeve/eve/issues/508
                        try:
                            r = ObjectId(unicode(v))
                        except NameError:
                            # We're on Python 3 so it's all unicode already.
                            r = ObjectId(v)
                        return r
                    except:
                        return v
                else:
                    return v

        for k, v in source.items():
            if isinstance(v, dict):
                self._mongotize(v, resource)
            elif isinstance(v, list):
                for i, v1 in enumerate(v):
                    if isinstance(v1, dict):
                        source[k][i] = self._mongotize(v1, resource)
                    else:
                        source[k][i] = try_cast(v1)
            elif isinstance(v, str_type):
                source[k] = try_cast(v)

        return source

    def _sanitize(self, spec):
        """ Makes sure that only allowed operators are included in the query,
        aborts with a 400 otherwise.

        .. versionchanged:: 0.5
           Abort with 400 if unsupported query operators are used. #387.
           DRY.

        .. versionchanged:: 0.0.9
           More informative error messages.
           Allow ``auth_username_field`` to be set to ``ID_FIELD``.

        .. versionadded:: 0.0.7
        """

        def sanitize_keys(spec):
            ops = set([op for op in spec.keys() if op[0] == "$"])
            unknown = ops - Mongo.operators
            if unknown:
                abort(
                    400,
                    description=debug_error_message(
                        "Query contains unknown or unsupported operators: %s"
                        % ", ".join(unknown)),
                )

            if set(spec.keys()) & set(config.MONGO_QUERY_BLACKLIST):
                abort(
                    400,
                    description=debug_error_message(
                        "Query contains operators banned in MONGO_QUERY_BLACKLIST"
                    ),
                )

        if isinstance(spec, dict):
            sanitize_keys(spec)
            for value in spec.values():
                self._sanitize(value)
        if isinstance(spec, list):
            for value in spec:
                self._sanitize(value)

        return spec

    def _convert_sort_request_to_dict(self, req):
        """ Converts the contents of a `ParsedRequest`'s `sort` property to
        a dict
        """
        client_sort = {}
        if req and req.sort:
            try:
                # assume it's mongo syntax (ie. ?sort=[("name", 1)])
                client_sort = ast.literal_eval(req.sort)
            except ValueError:
                # it's not mongo so let's see if it's a comma delimited string
                # instead (ie. "?sort=-age, name").
                sort = []
                for sort_arg in [s.strip() for s in req.sort.split(",")]:
                    if sort_arg[0] == "-":
                        sort.append((sort_arg[1:], -1))
                    else:
                        sort.append((sort_arg, 1))
                if len(sort) > 0:
                    client_sort = sort
            except Exception as e:
                self.app.logger.exception(e)
                abort(400, description=debug_error_message(str(e)))
        return client_sort

    def _convert_where_request_to_dict(self, req):
        """ Converts the contents of a `ParsedRequest`'s `where` property to
        a dict
        """
        query = {}
        if req and req.where:
            try:
                query = self._sanitize(json.loads(req.where))
            except HTTPException:
                # _sanitize() is raising an HTTP exception; let it fire.
                raise
            except:
                # couldn't parse as mongo query; give the python parser a
                # shot.
                try:
                    query = parse(req.where)
                except ParseError:
                    abort(
                        400,
                        description=debug_error_message(
                            "Unable to parse `where` clause"),
                    )
        return query

    def _wc(self, resource):
        """ Syntactic sugar for the current collection write_concern setting.

        .. versionadded:: 0.0.8
        """
        return config.DOMAIN[resource]["mongo_write_concern"]

    def current_mongo_prefix(self, resource=None):
        """ Returns the active mongo_prefix that should be used to retrieve
        a valid PyMongo instance from the cache. If 'self.mongo_prefix' is set
        it has precedence over both endpoint (resource) and default drivers.
        This allows Auth classes (for instance) to override default settings to
        use a user-reserved db instance.

        Even a standard Flask view can set the mongo_prefix:

            from flask import g

            g.mongo_prefix = 'MONGO2'

        :param resource: endpoint for which a mongo prefix is needed.

        ..versionchanged:: 0.7
          Allow standard Flask views (@app.route) to set the mongo_prefix on
          their own.

        ..versionadded:: 0.6
        """
        # the hack below avoids passing the resource around, which would not
        # be an issue within this module but would force an update to the
        # eve.io.media.MediaStorage interface, possibly breaking compatibility
        # for other database implementations.
        auth = None
        try:
            if resource is None and request and request.endpoint:
                resource = request.endpoint[:request.endpoint.index("|")]
            if request and request.endpoint:
                auth = resource_auth(resource)
        except ValueError:
            pass

        px = auth.get_mongo_prefix() if auth else None

        if px is None:
            px = g.get("mongo_prefix", None)

        if px is None:
            if resource:
                px = config.DOMAIN[resource].get("mongo_prefix", "MONGO")
            else:
                px = "MONGO"

        return px

    def pymongo(self, resource=None, prefix=None):
        """ Returns an active PyMongo instance. If 'prefix' is defined then
        it has precedence over the endpoint ('resource') and/or
        'self.mongo_instance'.

        :param resource: endpoint for which a PyMongo instance is requested.
        :param prefix: PyMongo instance key. This has precedence over both
                       'resource' and eventual `self.mongo_prefix'.

        .. versionadded:: 0.6
        """
        px = prefix if prefix else self.current_mongo_prefix(resource=resource)

        if px not in self.driver:
            # instantiate and add to cache
            self.driver[px] = PyMongo(self.app, px)

        # important, we don't want to preserve state between requests
        self.mongo_prefix = None

        try:
            return self.driver[px]
        except Exception as e:
            raise ConnectionException(e)

    def get_collection_with_write_concern(self, datasource, resource):
        """ Returns a pymongo Collection with the desired write_concern
        setting.

        PyMongo 3.0+ collections are immutable, yet we still want to allow the
        maintainer to change the write concern setting on the fly, hence the
        clone.

        .. versionadded:: 0.6.1
        """
        wc = WriteConcern(config.DOMAIN[resource]["mongo_write_concern"]["w"])

        return self.pymongo(resource).db[datasource].with_options(
            write_concern=wc)
def _handle_type_convertion_operator(self, operator, values):
    """Evaluate a type-conversion aggregation operator against `values`.

    Handles $toString, $toInt, $toDecimal and $arrayToObject.  `values` is
    resolved through self.parse(); a KeyError from parse() (missing field)
    yields None, matching MongoDB's behavior for absent fields.
    """
    if operator == '$toString':
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        if isinstance(parsed, bool):
            # bool must be checked before generic str(): Mongo renders
            # booleans lowercase ('true'/'false').
            return str(parsed).lower()
        if isinstance(parsed, datetime.datetime):
            # ISO-8601 with millisecond precision and a 'Z' suffix.
            return parsed.isoformat()[:-3] + 'Z'
        return str(parsed)

    if operator == '$toInt':
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        if decimal_support:
            if isinstance(parsed, decimal128.Decimal128):
                return int(parsed.to_decimal())
            return int(parsed)
        raise NotImplementedError(
            'You need to import the pymongo library to support decimal128 type.'
        )

    # Document: https://docs.mongodb.com/manual/reference/operator/aggregation/toDecimal/
    if operator == '$toDecimal':
        if not decimal_support:
            raise NotImplementedError(
                'You need to import the pymongo library to support decimal128 type.'
            )
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        # NOTE: bool is tested before int on purpose — bool is a subclass of
        # int and must map to '1'/'0', not through the int branch.
        if isinstance(parsed, bool):
            parsed = '1' if parsed is True else '0'
            decimal_value = decimal128.Decimal128(parsed)
        elif isinstance(parsed, int):
            decimal_value = decimal128.Decimal128(str(parsed))
        elif isinstance(parsed, float):
            # Quantize to 14 fractional digits to mirror Mongo's output.
            exp = decimal.Decimal('.00000000000000')
            decimal_value = decimal.Decimal(str(parsed)).quantize(exp)
            decimal_value = decimal128.Decimal128(decimal_value)
        elif isinstance(parsed, decimal128.Decimal128):
            decimal_value = parsed
        elif isinstance(parsed, str):
            try:
                decimal_value = decimal128.Decimal128(parsed)
            except decimal.InvalidOperation:
                raise OperationFailure(
                    "Failed to parse number '%s' in $convert with no onError value:"
                    'Failed to parse string to decimal' % parsed)
        elif isinstance(parsed, datetime.datetime):
            # Datetimes convert to milliseconds since the Unix epoch.
            # NOTE(review): despite the name, this value is milliseconds,
            # not microseconds.
            epoch = datetime.datetime.utcfromtimestamp(0)
            string_micro_seconds = str(
                (parsed - epoch).total_seconds() * 1000).split('.')[0]
            decimal_value = decimal128.Decimal128(string_micro_seconds)
        else:
            raise TypeError("'%s' type is not supported" % type(parsed))
        return decimal_value

    # Document: https://docs.mongodb.com/manual/reference/operator/aggregation/arrayToObject/
    if operator == '$arrayToObject':
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        if parsed is None:
            return None
        if not isinstance(parsed, (list, tuple)):
            raise OperationFailure(
                '$arrayToObject requires an array input, found: {}'.format(
                    type(parsed))
            )
        # Accept either [{'k': ..., 'v': ...}, ...] or [[k, v], ...] shapes.
        if all(isinstance(x, dict) and set(x.keys()) == {'k', 'v'}
               for x in parsed):
            return {d['k']: d['v'] for d in parsed}
        if all(isinstance(x, (list, tuple)) and len(x) == 2 for x in parsed):
            return dict(parsed)
        raise OperationFailure(
            'arrays used with $arrayToObject must contain documents '
            'with k and v fields or two-element arrays'
        )
def _handle_type_convertion_operator(self, operator, values):
    """Evaluate a type-conversion aggregation operator against `values`.

    Handles $toString, $toInt and $toDecimal.  `values` is resolved through
    self.parse(); a KeyError from parse() (missing field) yields None,
    matching MongoDB's behavior for absent fields.
    """
    if operator == '$toString':
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        if isinstance(parsed, bool):
            # bool must be checked before generic str(): Mongo renders
            # booleans lowercase ('true'/'false').
            return str(parsed).lower()
        if isinstance(parsed, datetime.datetime):
            # ISO-8601 with millisecond precision and a 'Z' suffix.
            return parsed.isoformat()[:-3] + 'Z'
        return str(parsed)

    if operator == '$toInt':
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        if decimal_support:
            if isinstance(parsed, decimal128.Decimal128):
                return int(parsed.to_decimal())
            return int(parsed)
        raise NotImplementedError(
            'You need to import the pymongo library to support decimal128 type.'
        )

    # Document: https://docs.mongodb.com/manual/reference/operator/aggregation/toDecimal/
    if operator == '$toDecimal':
        if not decimal_support:
            raise NotImplementedError(
                'You need to import the pymongo library to support decimal128 type.'
            )
        try:
            parsed = self.parse(values)
        except KeyError:
            return None
        # NOTE: bool is tested before int on purpose — bool is a subclass of
        # int and must map to '1'/'0', not through the int branch.
        if isinstance(parsed, bool):
            parsed = '1' if parsed is True else '0'
            decimal_value = decimal128.Decimal128(parsed)
        elif isinstance(parsed, int):
            decimal_value = decimal128.Decimal128(str(parsed))
        elif isinstance(parsed, float):
            # Quantize to 14 fractional digits to mirror Mongo's output.
            exp = decimal.Decimal('.00000000000000')
            decimal_value = decimal.Decimal(str(parsed)).quantize(exp)
            decimal_value = decimal128.Decimal128(decimal_value)
        elif isinstance(parsed, decimal128.Decimal128):
            decimal_value = parsed
        elif isinstance(parsed, str):
            try:
                decimal_value = decimal128.Decimal128(parsed)
            except decimal.InvalidOperation:
                raise OperationFailure(
                    "Failed to parse number '%s' in $convert with no onError value:"
                    'Failed to parse string to decimal' % parsed)
        elif isinstance(parsed, datetime.datetime):
            # Datetimes convert to milliseconds since the Unix epoch.
            # NOTE(review): despite the name, this value is milliseconds,
            # not microseconds.
            epoch = datetime.datetime.utcfromtimestamp(0)
            string_micro_seconds = str(
                (parsed - epoch).total_seconds() * 1000).split('.')[0]
            decimal_value = decimal128.Decimal128(string_micro_seconds)
        else:
            raise TypeError("'%s' type is not supported" % type(parsed))
        return decimal_value
def test_decimal_success(self):
    """A Decimal128 value validates against a field of type 'decimal'."""
    field_schema = {"decimal": {"type": "decimal"}}
    payload = {"decimal": decimal128.Decimal128("123.123")}
    validator = Validator(field_schema, None)
    self.assertTrue(validator.validate(payload))
def test_decimal_success(self):
    """A Decimal128 value validates against a field of type 'decimal'."""
    decimal_schema = {'decimal': {'type': 'decimal'}}
    document = {'decimal': decimal128.Decimal128('123.123')}
    checker = Validator(decimal_schema, None)
    self.assertTrue(checker.validate(document))