class BookSchema(ma.Schema):
    title = ma.fields.String()
    isbn = ma.fields.String()
    format = ma.fields.Integer()
    released = ma.fields.Integer()
    price = ma.fields.Float()
    page_count = ma.fields.Integer()
    read = MyBoolean(missing=False)
    authors = DelimitedList(ma.fields.Integer())
    genres = ma.fields.List(ma.fields.Integer())
    publishers = ma.fields.List(ma.fields.Integer())
    languages = ma.fields.List(ma.fields.Integer())
    series = ma.fields.Nested(SeriesSchema, many=True)

    @ma.pre_load
    def parse_series(self, data, **kwargs):
        data = dict(data)
        if "series" in data.keys():
            data["series"] = json.loads(data.get("series"))
        return data

    @ma.pre_load
    def parse_isbn(self, data, **kwargs):
        data = dict(data)
        if "isbn" in data.keys():
            data["isbn"] = data["isbn"].replace("-", "")
        return data

    class Meta:
        unknown = ma.EXCLUDE
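A minimal usage sketch for the schema above, assuming it is loaded directly from form-style data as the pre_load hooks suggest; the payload values below are purely illustrative.

# Hypothetical form-style payload; values are illustrative.
payload = {
    "title": "Example Book",
    "isbn": "978-0-306-40615-7",
    "authors": "1,2",  # DelimitedList splits on the comma -> [1, 2]
}
book = BookSchema().load(payload)
# parse_isbn stripped the dashes before deserialization:
# book["isbn"] == "9780306406157"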
class TextRes(Resource):
    GET_ARGS = {
        'fields': DelimitedList(Str()),
    }

    @staticmethod
    def get_text(text_id):
        text = Text.query.get(text_id)
        if text is None:
            abort(mk_errors(404, '{} does not exist'.format(text_id)))
        return text

    @staticmethod
    def parse_get_args(req):
        args = parser.parse(TextRes.GET_ARGS, req, locations=('querystring', ))
        return args

    @jwt_required
    def get(self, text_id):
        """
        Return text resource.

        .. :quickref: Get Text; Get text.

        **Example request**:

        .. sourcecode:: http

            GET /texts/1 HTTP/1.1
            Host: example.com
            Accept: application/json
            Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhb

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "text": {
                    "content": "Bom dia!",
                    "text_id": 1,
                    "tags": [
                        "bomdia"
                    ],
                    "updated_at": null,
                    "created_at": "2018-09-15T22:53:26+00:00"
                }
            }

        :reqheader Authorization: access token of logged-in user (required)
        :param int text_id: id of text resource.
        :resheader Content-Type: application/json
        :status 200: text found
        :returns: :class:`memedata.models.Text`
        """
        text = TextRes.get_text(text_id)
        try:
            args = TextRes.parse_get_args(request)
        except ValidationError as e:
            return mk_errors(400, fmt_validation_error_messages(e.messages))
        obj = TextSchema().dump(text)
        return filter_fields(obj, args.get('fields'))

    @jwt_required
    def put(self, text_id):
        """
        Update text resource.

        .. :quickref: Update Text; Update text.

        **Example request**:

        .. sourcecode:: http

            PUT /texts/1 HTTP/1.1
            Host: example.com
            Accept: application/json
            Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhb

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "text": {
                    "content": "Updated text",
                    "text_id": 1,
                    "tags": [
                        "bomdia"
                    ],
                    "updated_at": "2018-09-16T23:00:13+00:00",
                    "created_at": "2018-09-15T22:53:26+00:00"
                }
            }

        :reqheader Authorization: access token of logged-in user (required)
        :param int text_id: id of text resource.
        :form content: the text contents
        :form tags: comma-separated list of tags
        :resheader Content-Type: application/json
        :status 200: text updated
        :returns: :class:`memedata.models.Text`
        """
        text = TextRes.get_text(text_id)
        try:
            schema = TextSchema()
            args = TextsRes.parse_post_args(request)
            text = schema.load(args, instance=text, partial=True)
        except ValidationError as e:
            return mk_errors(400, fmt_validation_error_messages(e.messages))
        db.session.add(text)
        db.session.commit()
        return schema.dump(text)

    @jwt_required
    def delete(self, text_id):
        """
        Delete text resource.

        .. :quickref: Delete Text; Remove text.

        **Example request**:

        .. sourcecode:: http

            DELETE /texts/1 HTTP/1.1
            Host: example.com
            Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhb

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 204 NO CONTENT

        :reqheader Authorization: access token of logged-in user (required)
        :param int text_id: id of text resource.
        :status 204: text deleted
        """
        text = TextRes.get_text(text_id)
        db.session.delete(text)
        db.session.commit()
        return '', 204
class TextsRes(Resource):
    POST_MAX_N_TAGS = 16
    POST_ARGS = {
        'tags': DelimitedList(Str()),
        'content': Str(),
    }
    TAGS_BLACKLIST = {}

    GET_MAX_N_RESULTS = 1000
    GET_ARGS = {
        'any_tags': DelimitedList(Str()),
        'all_tags': DelimitedList(Str()),
        'no_tags': DelimitedList(Str()),
        'date_from': Date(),
        'date_to': Date(),
        'max_n_results': Integer(validate=lambda n: n >= 0,
                                 missing=GET_MAX_N_RESULTS),
        'offset': Integer(validate=lambda n: n >= 0, missing=0),
        'fields': DelimitedList(Str()),
    }

    @staticmethod
    def parse_post_args(req):
        args = parser.parse(TextsRes.POST_ARGS, req)
        if 'tags' in args:
            if len(args['tags']) > TextsRes.POST_MAX_N_TAGS:
                raise ValidationError('too many tags (limit={})'.format(
                    TextsRes.POST_MAX_N_TAGS))
            for tag in args['tags']:
                if tag in TextsRes.TAGS_BLACKLIST:
                    raise ValidationError('"{}" is blacklisted'.format(tag))
            args['tags'] = serialize_tags(get_tags(args['tags']))
        return args

    @staticmethod
    def filter_texts(args):
        # sorting in decreasing order by creation time
        query = Text.query.order_by(-Text.created_at)
        if 'date_to' in args:
            query = query.filter(func.DATE(Text.created_at) <= args['date_to'])
        if 'date_from' in args:
            query = query.filter(
                func.DATE(Text.created_at) >= args['date_from'])
        if 'all_tags' in args:
            tags = Tag.query.filter(Tag.content.in_(args['all_tags'])).all()
            if len(tags) < len(args['all_tags']):
                return [], None  # dirty hack TODO: get a better solution
            for t in tags:
                query = query.filter(Text.tags.contains(t))
        elif 'any_tags' in args:
            query = query.join(Tag, Text.tags).join(
                Tag.query.join(Text, Tag.texts).filter(
                    Tag.content.in_(args['any_tags'])))
        if 'no_tags' in args:
            tags = Tag.query.filter(Tag.content.in_(args['no_tags'])).all()
            # new dirty hack TODO: get a better solution
            for t in tags:
                query = query.filter(~Text.tags.contains(t))
        query = query.offset(args['offset'])
        if query.count() <= args['max_n_results']:
            offset = None
        else:
            offset = args['offset'] + args['max_n_results']
        texts = query.limit(args['max_n_results']).all()
        return texts, offset

    @staticmethod
    def parse_get_args(req):
        args = parser.parse(TextsRes.GET_ARGS, req,
                            locations=('querystring', ))
        return args

    @jwt_required
    def post(self):
        """
        Create new text resource.

        .. :quickref: Text creation; Create new text.

        **Example request**:

        .. sourcecode:: http

            POST /texts HTTP/1.1
            Host: example.com
            Accept: application/json
            Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhb

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 201 CREATED
            Content-Type: application/json

            {
                "text": {
                    "content": "Bom dia!",
                    "text_id": 1,
                    "tags": [
                        "bomdia"
                    ],
                    "updated_at": null,
                    "created_at": "2018-09-15T22:53:26+00:00"
                }
            }

        :reqheader Authorization: access token of logged-in user (required)
        :form content: the text contents (required)
        :form tags: comma-separated list of tags
        :resheader Content-Type: application/json
        :status 201: resource created
        :returns: :class:`memedata.models.Text`
        """
        try:
            args = TextsRes.parse_post_args(request)
            text = TextSchema().load(args)
        except ValidationError as e:
            return mk_errors(400, fmt_validation_error_messages(e.messages))
        db.session.add(text)
        db.session.commit()
        return TextSchema().dump(text), 201

    @jwt_required
    def get(self):
        """
        Return collection of texts.

        .. :quickref: Get Texts; Get collection of texts.

        **Example request**:

        .. sourcecode:: http

            GET /texts HTTP/1.1
            Host: example.com
            Accept: application/json
            Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhb

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "texts": [
                    {
                        "content": "Bom dia!",
                        "text_id": 1,
                        "tags": [
                            "bomdia"
                        ],
                        "updated_at": null,
                        "created_at": "2018-09-15T22:53:26+00:00"
                    },
                    {
                        "content": "Eu adoro as manhãs",
                        "text_id": 32,
                        "tags": [
                            "jesus",
                            "sexta"
                        ],
                        "updated_at": null,
                        "created_at": "2018-09-15T22:53:26+00:00"
                    }
                ],
                "offset": 2
            }

        :reqheader Authorization: access token of logged-in user (required)
        :query string fields: comma-separated list of fields to get for each text.
        :query string date_from: only texts created after the specified date (inclusive).
        :query string date_to: only texts created before the specified date.
        :query string any_tags: texts with at least one tag in the specified list.
        :query string all_tags: texts containing all of the specified tags.
        :query string no_tags: texts not containing any of the specified tags.
        :query int offset: pagination offset to start getting results.
        :query int max_n_results: maximum number of results to return.
        :resheader Content-Type: application/json
        :status 200: texts found
        :returns: :class:`memedata.models.Text`
        """
        try:
            args = TextsRes.parse_get_args(request)
        except ValidationError as e:
            return mk_errors(400, fmt_validation_error_messages(e.messages))
        texts, offset = TextsRes.filter_texts(args)
        objs = TextSchema(many=True).dump(texts)
        objs = filter_fields(objs, args.get('fields'))
        objs['offset'] = offset
        return objs
class RoutingSchema(Schema):
    """
    StationLite *routing* webservice schema definition.

    The parameters defined correspond to the definition
    `https://www.orfeus-eu.org/data/eida/webservices/routing/`
    """

    format = fields.Str(
        # NOTE(damb): formats different from 'post' are not implemented yet.
        # missing='xml'
        missing="post",
        # validate=validate.OneOf(['xml', 'json', 'get', 'post'])
        validate=validate.OneOf(["post", "get"]),
    )
    service = fields.Str(
        missing="dataselect",
        validate=validate.OneOf(
            ["availability", "dataselect", "station", "wfcatalog"]),
    )
    nodata = NoData()
    alternative = FDSNWSBool(missing="false")
    access = fields.Str(missing="any",
                        validate=validate.OneOf(["open", "closed", "any"]))
    level = fields.Str(
        missing="channel",
        validate=validate.OneOf(["network", "station", "channel", "response"]),
    )
    method = DelimitedList(
        fields.Str(validate=validate.OneOf([
            FDSNWS_QUERY_METHOD_TOKEN,
            FDSNWS_QUERYAUTH_METHOD_TOKEN,
            FDSNWS_EXTENT_METHOD_TOKEN,
            FDSNWS_EXTENTAUTH_METHOD_TOKEN,
        ])),
        missing=None,
        allow_none=True,
    )

    # geographic (rectangular spatial) options
    # XXX(damb): Default values are defined and assigned within merge_keys()
    minlatitude = Latitude(missing=-90.0)
    minlat = Latitude(load_only=True)
    maxlatitude = Latitude(missing=90.0)
    maxlat = Latitude(load_only=True)
    minlongitude = Longitude(missing=-180.0)
    minlon = Longitude(load_only=True)
    maxlongitude = Longitude(missing=180.0)
    maxlon = Longitude(load_only=True)

    @pre_load
    def merge_keys(self, data, **kwargs):
        """
        Merge alternative field parameter values and assign default values.

        .. note::
            The default :py:mod:`webargs` parser does not provide this
            feature, so the :code:`load_only` alias parameters are merged
            into their canonical keys here.

        :param dict data: data
        """
        _mappings = [
            ("minlat", "minlatitude"),
            ("maxlat", "maxlatitude"),
            ("minlon", "minlongitude"),
            ("maxlon", "maxlongitude"),
        ]

        for alt_key, key in _mappings:
            if alt_key in data and key in data:
                data.pop(alt_key)
            elif alt_key in data and key not in data:
                data[key] = data[alt_key]
                data.pop(alt_key)

        return data

    @validates_schema
    def validate_spatial(self, data, **kwargs):
        if (data["minlatitude"] >= data["maxlatitude"]
                or data["minlongitude"] >= data["maxlongitude"]):
            raise ValidationError("Bad Request: Invalid spatial constraints.")

    @validates_schema
    def validate_level(self, data, **kwargs):
        if data["level"] != "channel" and data["service"] != "station":
            raise ValidationError(
                f"Bad Request: Invalid 'level' value {data['level']!r} "
                f"for service {data['service']!r}.")

    @validates_schema
    def validate_access(self, data, **kwargs):
        if data["access"] != "any" and data["service"] not in (
                "dataselect",
                "availability",
        ):
            raise ValidationError(
                f"Bad Request: Invalid 'access' value {data['access']!r} "
                f"for service {data['service']!r}")

    class Meta:
        strict = True
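A minimal load sketch for the schema above, assuming the custom NoData, FDSNWSBool, Latitude and Longitude fields behave like ordinary marshmallow fields; the input values are illustrative.

# Hypothetical input using the short alias keys.
params = RoutingSchema().load({
    "service": "station",
    "level": "network",
    "minlat": 45.0,
    "maxlat": 48.0,
})
# merge_keys renames minlat/maxlat to minlatitude/maxlatitude before
# deserialization; the longitude bounds fall back to their defaults.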
def Merge(*valid_values, **kwargs):
    return DelimitedList(fields.Str(validate=validate.OneOf(valid_values)),
                         delimiter=FDSNWS_QUERY_LIST_SEPARATOR_CHAR,
                         **kwargs)
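A hedged usage sketch of the helper; the field values are illustrative and FDSNWS_QUERY_LIST_SEPARATOR_CHAR is assumed to be a single delimiter character such as ','.

# Hypothetical whitelisted, delimited field built with the helper.
service = Merge("dataselect", "station", missing="dataselect")
# Assuming the separator is ",", a raw value of "dataselect,station"
# deserializes to ["dataselect", "station"]; anything outside the
# whitelist fails the OneOf validation.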
from typing import TypedDict

from marshmallow import fields
from marshmallow import validate
from webargs.fields import DelimitedList

from .utils import ParamsDict

#: Base params for list view func.
PageParams = ParamsDict(
    page=fields.Int(missing=1, required=False,
                    validate=validate.Range(min=1, max=2**31)),
    page_size=fields.Int(missing=10, required=False,
                         validate=validate.Range(min=5, max=100)),
    order_by=DelimitedList(
        fields.String(validate=validate.Regexp(r'^-?[a-zA-Z_]*$')),
        required=False,
        missing=['-id']),
)
"""Base params for list view funcs, containing the ``page``, ``page_size`` and
``order_by`` params.

Example::

    @use_kwargs(PageParams)
    def list_users(page, page_size, order_by):
        pass
"""


class PaginationType(TypedDict):
    items: list
    # (tail of a pagination helper; the enclosing def is not part of this snippet)
    query = query.limit(max_n_results)
    return query, new_offset


DEF_MAX_N_RESULTS = 2048

ORDER_BY_OPTIONS = [
    'newest',
    'oldest',
    None,
]
ORDER_TOPICS_OPTIONS = ORDER_BY_OPTIONS + ['newest_last_post']

DEF_GET_COLL_ARGS = {
    'fields': DelimitedList(Str()),
    'offset': Int(validate=lambda n: n >= 0),
    'max_n_results': Int(validate=lambda n: n >= 0, missing=DEF_MAX_N_RESULTS),
    'order': Str(validate=validate.OneOf(ORDER_BY_OPTIONS), missing='newest'),
}

GET_TOPICS_ARGS = DEF_GET_COLL_ARGS.copy()
GET_TOPICS_ARGS.update({
    'order': Str(validate=validate.OneOf(ORDER_TOPICS_OPTIONS),
                 missing='newest_last_post'),
    'statuses': DelimitedList(Str()),
})
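A minimal sketch of how these argmaps might be parsed; `parser` and `request` are assumed to be the webargs parser and framework request used elsewhere in the project, and the exact `location`/`locations` keyword depends on the webargs version in use.

# Hypothetical querystring: ?statuses=open,closed&max_n_results=50
args = parser.parse(GET_TOPICS_ARGS, request, location='querystring')
# args would resemble:
# {'statuses': ['open', 'closed'], 'max_n_results': 50,
#  'order': 'newest_last_post'}  # 'offset' absent unless supplied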
class BatchSearchSchema(Schema):
    query = DelimitedList(fields.Str(), required=True)
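A minimal sketch of wiring this schema to a view with webargs; the Flask app, route and view name below are hypothetical.

from flask import Flask, jsonify
from webargs.flaskparser import use_args

app = Flask(__name__)

@app.route("/batch-search")
@use_args(BatchSearchSchema(), location="query")
def batch_search(args):
    # ?query=alpha,beta parses to {"query": ["alpha", "beta"]}
    return jsonify(queries=args["query"])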
def params(self): d = "filter by [field] existence, will return results where passed fields are null" e = ["?not_has=name", "?not_has=name,location"] des = create_params(d, e) return {self.key: DelimitedList(Str(), missing=[], description=des)}
def params(self):
    d = "A list of tag ids to filter by"
    e = ["?tags=3,2,1"]
    des = create_params(d, e)
    return {self.key: DelimitedList(Int(), description=des)}
def params(self):
    d = "A list of model ids to grab from the db."
    e = ["?ids=213,221,423"]
    des = create_params(d, e)
    return {self.key: DelimitedList(Int(), description=des)}
def params(self):
    d = "Option to filter by coordinates, pass a bounding box: minLong, minLat, maxLong, maxLat"
    e = ["?coordinates=0,0,180,90"]
    des = create_params(d, e)
    return {self.key: DelimitedList(Str(), description=des)}
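A hedged sketch of consuming the parsed bounding box; `parser` and `request` stand in for whatever webargs parser and request object the surrounding project uses, and the variable names are illustrative.

# Hypothetical parse of ?coordinates=0,0,180,90
args = parser.parse({"coordinates": DelimitedList(Str())}, request,
                    location="querystring")
min_long, min_lat, max_long, max_lat = (float(v) for v in args["coordinates"])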
def params(self):
    d = "Filter by date or date range where the format is YYYY-MM-DD"
    e = ["?date_range=2013-03-23,2013-05-01"]
    des = create_params(d, e)
    return {self.key: DelimitedList(Str(), description=des)}
class DataFileFilterByModelID(HTTPEndpoint):
    """
    A filterable datafile endpoint to find related models; creates a custom
    JSON serialization that returns the model with the id of the link.
    """

    args_schema = dict(
        sample_id=DelimitedList(Int(),
                                description="sample id to filter datafile by"),
        session_id=DelimitedList(
            Int(), description="session id to filter datafile by"),
        analysis_id=DelimitedList(
            Int(), description="analysis id to filter datafile by"),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # self.schema = get_database().interface.data_file(
        #     many=True, allow_nests=["data_file_link"]
        # )

    async def get(self, request):
        """Handler for all GET requests"""
        model_names = await parser.parse(self.args_schema,
                                         request,
                                         location="querystring")

        db = get_database()

        n_models = sum(len(v) for v in model_names.values())
        if n_models == 0:
            raise SparrowAPIError("No IDs were passed to filter by")

        # Wrap entire query infrastructure in error-handling block.
        # We should probably make this a "with" statement or something
        # to use throughout our API code.
        with db.session_scope(commit=False):
            DataFile = db.model.data_file
            DataFileLink = db.model.data_file_link

            data = []
            for model_name, model_ids in model_names.items():
                q = db.session.query(
                    DataFile.file_hash,
                    DataFile.file_mtime,
                    DataFile.basename,
                    DataFile.type_id,
                    getattr(DataFileLink, model_name),
                ).order_by(DataFile.file_mtime)
                q = q.join(DataFile.data_file_link_collection).filter(
                    getattr(DataFileLink, model_name).in_(model_ids))
                res = q.all()

                # SUPER messy way to create a custom json serialization
                for row in res:
                    row_obj = {}
                    file_hash, file_mtime, basename, type_id, model_id = row
                    row_obj["file_hash"] = file_hash
                    row_obj["file_mtime"] = file_mtime.isoformat()
                    row_obj["basename"] = basename
                    row_obj["type"] = type_id
                    row_obj["model"] = model_name[:-3]
                    row_obj["model_id"] = model_id
                    data.append(row_obj)

        return JSONResponse(dict(data=data, total_count=len(data)))
class MetabolomicsBatchRequest(Schema):
    body = DelimitedList(fields.Nested(Metabolomics(exclude=("id", ))))
class FluxomicsBatchRequest(Schema):
    body = DelimitedList(fields.Nested(Fluxomics(exclude=("id", ))))