def website_id_in_db(website_id):
    """Validation helper: raise a parameter error when the given primary
    key does not exist in the WebsiteRecycler table."""
    if not db.session.query(WebsiteRecycler).get(website_id):
        raise ParamException('无此主域名')


# webargs schema for paginated / sorted / filtered listing queries against
# the website-recycler endpoint.
query_recycler_schema = {
    'start': fields.Int(missing=0),   # pagination offset
    'length': fields.Int(missing=15, validate=length_validator),  # page size
    'order': fields.Nested(
        {
            'field': fields.Str(missing='create_time'),
            'direction': fields.Str(missing='desc', validate=OneOf(['asc', 'desc']))
        },
        missing={}),
    'fields': fields.List(fields.Str()),  # optional column projection
    'filter': fields.Nested(
        {
            'url': fields.Str(missing=""),
            'title': fields.Str(missing=""),
            'domain': fields.Str(missing=""),
            'host_dept': fields.Str(missing="")
        },
        missing={})
}
class Washcodes(Resource):
    """CRUD resource for one-off wash codes kept in the module-level
    ``washcodes`` list."""

    get_args = {
        "code": fields.String(required=False, missing=None),
        "fromDateTime": fields.DateTime(required=False, missing=None)
    }

    @use_kwargs(get_args)
    @require_headers
    def get(self, code, fromDateTime):
        """Return washcodes filtered by creation time, or by exact code.

        ``fromDateTime`` takes precedence; without it, a falsy ``code``
        returns the full list.
        """
        if fromDateTime:
            result = [
                washcode for washcode in washcodes
                if datetime.fromisoformat(washcode["CreatedTime"]) > fromDateTime
            ]
        else:
            result = [
                washcode for washcode in washcodes
                if not code or washcode["Code"] == code
            ]
        return result, 200

    post_args = {
        "Washcode": fields.Nested(
            {
                "credit": fields.Float(required=False, missing=0.0),
                "productRestriction": fields.Integer(required=False, missing=0),
                "limitCount": fields.Integer(required=False, missing=0),
                "expiry": fields.DateTime(required=False, missing=None)
            },
            location="json",
            required=True)
    }

    @use_kwargs(post_args)
    @require_headers
    def post(self, Washcode):
        """Create and store a washcode with a random 10-character code."""
        expiry = Washcode["expiry"]
        washcode = {
            "Code": ''.join(
                random.choices(string.ascii_uppercase + string.digits, k=10)),
            "Credit": Washcode["credit"],
            "LimitCount": Washcode["limitCount"],
            "ProductRestriction": Washcode["productRestriction"],
            # BUG FIX: "expiry" defaults to None (missing=None); calling
            # .isoformat() unconditionally raised AttributeError whenever the
            # client omitted the field.
            "Expiry": expiry.isoformat() if expiry is not None else None,
            "CreatedTime": datetime.utcnow().isoformat()
        }
        washcodes.append(washcode)
        return washcode, 200

    del_args = {"code": fields.String(required=True)}

    @use_kwargs(del_args)
    @require_headers
    def delete(self, code):
        """Delete the washcode with the given code; 404 when not found."""
        for washcode in washcodes:
            if washcode["Code"] == code:
                washcodes.remove(washcode)
                return "{}", 200
        # Runtime string preserved verbatim (adjacent literals joined).
        return "{ error : {errorCode : 40401, errorMessage : code not found!} }", 404
RHRoomsPermissions._jsonify_user_permissions.clear_cached(session.user) return jsonify(id=room.id) _base_args = { 'default': fields.Bool(), 'bounds': fields.Nested( { 'north_east': fields.Nested({ 'lat': fields.Float(), 'lng': fields.Float() }, required=True), 'south_west': fields.Nested({ 'lat': fields.Float(), 'lng': fields.Float() }, required=True) }, required=True) } _create_args = dict(_base_args, **{'name': fields.String(required=True)}) _update_args = { 'areas': fields.List( fields.Nested(dict(
class CustomSchema(ma.Schema):
    """Schema exposing a list of work entries, each with an author name
    and a work title."""
    # The dict argument makes webargs/marshmallow build an implicit
    # nested Schema from the field mapping.
    works = fields.List(
        fields.Nested({
            "author": fields.Str(),
            "workname": fields.Str()
        }))
url = "http://router.project-osrm.org/table/v1/driving/{}".format( points_param) print("URL: ", url, file=sys.stderr) response = requests.get(url, data={'generate_hints': 'false'}) print("Response: " + str(response), file=sys.stderr) return response.json()['durations'] @app.route("/routes", methods=['post']) @cross_origin(origin='*') @use_kwargs({ 'vehicles_number': fields.Int(required=True), 'target_points': fields.List(fields.Nested({ 'latitude': fields.Float(required=True), 'longitude': fields.Float(required=True), }), required=True) }) def calculate_route(vehicles_number, target_points): if len(target_points) < 1: return jsonify({'iterations': 0, 'cost': 0, 'routes': []}) target_points = [(point['longitude'], point['latitude']) for point in target_points] nodes_to_points = {} for index, target_point in enumerate(target_points): nodes_to_points[index] = target_point
class LocationSchema(ModelSchema):
    """Serialization schema for Location rows; embeds a single City."""
    # One-to-one nested city (many=False made explicit for clarity).
    city = fields.Nested(CitySchema, many=False)

    class Meta:
        model = Location
def get_covariance():
    """
    This endpoint returns covariance and score statistics within a given region.
    """
    # Only JSON or form-encoded request bodies are accepted.
    if request.content_type not in ("application/x-www-form-urlencoded", "application/json"):
        raise FlaskException("Content-Type must be application/json or application/x-www-form-urlencoded", 415)

    # webargs request schema; validation failures surface via error_messages.
    args_defined = {
        'chrom': fields.Str(required=True, validate=lambda x: len(x) > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'start': fields.Int(required=True, validate=lambda x: x >= 0, error_messages={'validator_failed': 'Value must be greater than or equal to 0.'}),
        'stop': fields.Int(required=True, validate=lambda x: x > 0, error_messages={'validator_failed': 'Value must be greater than 0.'}),
        # NOTE(review): the three dataset IDs are ints, yet their error
        # message says "non-empty string" — message/type mismatch, confirm.
        'genotypeDataset': fields.Int(required=False, validate=lambda x: x > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'phenotypeDataset': fields.Int(required=False, validate=lambda x: x > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'summaryStatisticDataset': fields.Int(required=False, validate=lambda x: x > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'phenotype': fields.Str(required=False, validate=lambda x: len(x) > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'samples': fields.Str(required=False, validate=lambda x: len(x) > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'masks': fields.List(fields.Int(), validate=lambda x: len(x) > 0, error_messages={'validator_failed': "Must provide at least 1 mask ID"}),
        'maskDefinitions': fields.Nested(MaskSchema, many=True),
        'genomeBuild': fields.Str(required=True, validate=lambda x: len(x) > 0, error_messages={'validator_failed': 'Value must be a non-empty string.'}),
        'variantFormat': fields.Str(required=False, default="EPACTS", validate=OneOf(["EPACTS", "COLONS"]))
    }

    args = parser.parse(
        args_defined,
        validate = partial(
            validate_query,
            all_fields = ['chrom', 'start', 'stop']
        ),
        location = "json_or_form"
    )

    # Exactly one of 'masks' / 'maskDefinitions' must be supplied (XOR).
    if not (bool(args.get("masks")) ^ bool(args.get("maskDefinitions"))):
        raise FlaskException("Must provide either 'masks' or 'maskDefinitions' in request, and not both.", 400)

    # Two mutually exclusive modes: compute from genotype+phenotype data,
    # or serve precomputed summary statistics.
    calc_mode = (args.get("genotypeDataset") is not None) and (args.get("phenotypeDataset") is not None)
    precalc_mode = args.get("summaryStatisticDataset") is not None

    if calc_mode and precalc_mode:
        raise FlaskException("Must give either genotypeDataset and phenotypeDataset, or summaryStatisticDataset by itself", 400)

    if not calc_mode and not precalc_mode:
        raise FlaskException("No genotypeDataset, phenotypeDataset, or summaryStatisticDataset was provided in the request", 400)

    config = ScoreCovarianceConfig()
    config.segment_size = current_app.config["SEGMENT_SIZE_BP"]

    chrom = str(args["chrom"])
    start = args["start"]
    stop = args["stop"]
    build = args["genomeBuild"]
    genotype_dataset_id = args.get("genotypeDataset")
    phenotype_dataset_id = args.get("phenotypeDataset")
    summary_stat_dataset_id = args.get("summaryStatisticDataset")
    # NOTE(review): str() turns an absent value into the literal "None";
    # downstream model lookups / the 'ALL' comparison see that string —
    # confirm this is intended.
    sample_subset = str(args.get("samples"))
    phenotype = str(args.get("phenotype"))
    masks = args.get("masks")
    mask_definitions = args.get("maskDefinitions")
    variant_format = args.get("variantFormat", "EPACTS")

    config.chrom = chrom
    config.start = start
    config.stop = stop
    config.sample_subset = str(args.get("samples"))

    if variant_format == "EPACTS":
        config.variant_format = VariantFormat.EPACTS
    elif variant_format == "COLONS":
        config.variant_format = VariantFormat.COLONS
    else:
        # Defensive: OneOf validation above should already prevent this.
        raise FlaskException("Invalid variant format given by variantFormat: {}".format(variant_format), 400)

    if (stop - start) > current_app.config["API_MAX_REGION_SIZE"]:
        raise FlaskException("Region requested for analysis exceeds maximum width of {}".format(current_app.config["API_MAX_REGION_SIZE"]), 400)

    # Either of the two calc-mode IDs implies all three fields are needed.
    if genotype_dataset_id or phenotype_dataset_id:
        if not (genotype_dataset_id and phenotype_dataset_id and phenotype):
            raise FlaskException("Must specify genotype dataset ID, phenotype dataset ID, and phenotype together")

    if genotype_dataset_id and phenotype_dataset_id:
        # Validate that every requested resource exists for this build.
        if not model.has_genome_build(build):
            raise FlaskException('Genome build \'{}\' was not found.'.format(build), 400)

        if not model.has_genotype_dataset(genotype_dataset_id):
            raise FlaskException('No genotype dataset \'{}\' available for genome build {}.'.format(genotype_dataset_id, build), 400)

        if not model.has_phenotype_dataset(phenotype_dataset_id):
            raise FlaskException('No phenotype dataset \'{}\' available for genome build {}.'.format(phenotype_dataset_id, build), 400)

        if not model.has_samples(genotype_dataset_id, sample_subset):
            raise FlaskException('Sample subset \'{}\' was not found in genotype dataset {}.'.format(sample_subset, genotype_dataset_id), 400)

        if not model.has_phenotype(phenotype_dataset_id, phenotype):
            raise FlaskException("Phenotype '{}' does not exist in phenotype dataset {}".format(phenotype, phenotype_dataset_id), 400)

        genotype_files = StringVec()
        genotype_files.extend([model.find_file(x) for x in model.get_genotype_files(genotype_dataset_id)])
        config.genotype_files = genotype_files
        config.genotype_dataset_id = genotype_dataset_id

        # Restrict to a named sample subset unless the special 'ALL' subset.
        if sample_subset != 'ALL':
            s = StringVec()
            s.extend(model.get_samples(genotype_dataset_id, sample_subset))
            config.samples = s

        config.phenotype_file = model.find_file(model.get_phenotype_file(phenotype_dataset_id))
        config.phenotype_column_types = model.get_column_types(phenotype_dataset_id)
        config.phenotype_nrows = model.get_phenotype_nrows(phenotype_dataset_id)
        config.phenotype_sample_column = str(model.get_phenotype_sample_column(phenotype_dataset_id))
        config.phenotype_delim = str(model.get_phenotype_delim(phenotype_dataset_id))
        config.phenotype_dataset_id = phenotype_dataset_id
        config.phenotype = phenotype

        analysis_cols = model.get_analysis_columns(phenotype_dataset_id)
        config.phenotype_analysis_columns = makeStringVec(analysis_cols)
    elif summary_stat_dataset_id:
        # Precomputed mode: wire up score/covariance file paths.
        config.summary_stat_dataset_id = summary_stat_dataset_id
        score_files = model.get_score_files(summary_stat_dataset_id)
        cov_files = model.get_cov_files(summary_stat_dataset_id)
        score_files = [model.find_file(f) for f in score_files]
        cov_files = [model.find_file(f) for f in cov_files]

        score_vec = StringVec()
        cov_vec = StringVec()
        score_vec.extend(score_files)
        cov_vec.extend(cov_files)
        config.summary_stat_score_files = score_vec
        config.summary_stat_cov_files = cov_vec

    if current_app.config["CACHE_ENABLED"]:
        config.redis_hostname = current_app.config["CACHE_REDIS_HOSTNAME"]
        config.redis_port = current_app.config["CACHE_REDIS_PORT"]

    # Determine regions in which to compute LD/scores.
    # This is determined by the mask file, and the overall window requested.
    mask_vec = MaskVec()
    if masks:
        # Server-side masks referenced by ID.
        for mask_id in masks:
            mask = model.get_mask_by_id(mask_id)
            mask_path = model.find_file(mask["filepath"])

            if not os.path.isfile(mask_path):
                raise FlaskException("Could not find mask file on server for mask ID {}".format(mask_id), 400)

            if mask["genome_build"] != build:
                raise FlaskException("Mask ID {} is invalid for genome build {}".format(mask_id, build), 400)

            if genotype_dataset_id and (genotype_dataset_id not in [g.id for g in mask["genotypes"]]):
                raise FlaskException("Mask ID {} is invalid for genotype dataset ID {}".format(mask_id, genotype_dataset_id), 400)

            if summary_stat_dataset_id and (summary_stat_dataset_id not in [s.id for s in mask["sumstats"]]):
                raise FlaskException("Mask ID {} is invalid for summary statistic dataset ID {}".format(mask_id, summary_stat_dataset_id), 400)

            tb = Mask(str(mask_path), mask_id, mask["group_type"], mask["identifier_type"], chrom, start, stop)
            mask_vec.append(tb)
    elif mask_definitions:
        # Inline mask definitions supplied in the request body.
        for mask in mask_definitions:
            vg_vec = VariantGroupVector()
            for group_name, variants in mask["groups"].items():
                vg = VariantGroup()
                vg.name = str(group_name)
                for v in variants:
                    if variant_format == "COLONS":
                        # Internally we still use EPACTS format but translate on read/write. Later we will move to using a
                        # native variant object type internally.
                        chrom, pos, ref, alt = v.split(":")
                        v = "{}:{}_{}/{}".format(chrom, pos, ref, alt)
                    vg.add_variant(str(v))
                vg_vec.append(vg)

            tb = Mask(
                mask["id"],
                VariantGroupType.names.get(mask["group_type"]),
                GroupIdentifierType.names.get(mask["identifier_type"]),
                vg_vec
            )
            mask_vec.append(tb)

    config.masks = mask_vec

    runner = ScoreCovarianceRunner(config)
    runner.run()

    json = runner.getJSON()

    resp = make_response(json, 200)
    resp.mimetype = 'application/json'
    return resp
def echo_nested_many_with_load_from():
    """Parse a list of {'id': int} objects whose external key is the
    header-style name 'X-Field', and echo the result."""
    nested = fields.Nested({'id': fields.Int()}, load_from='X-Field', many=True)
    parsed = parser.parse({'x_field': nested})
    return J(parsed)
return login(identification, password) @api.route('/login/refresh/', methods=['POST']) @jwt_refresh_token_required def route_refresh_token(): return login_refresh() @api.route('/users/me/', methods=['PATCH']) @use_kwargs({ 'update_fields': fields.Nested( { 'name': fields.Str(), 'address': fields.Str(), 'medical_services': fields.List(fields.Str()), 'date_of_birth': fields.Date(), }, required=True) }) @jwt_required def route_user_patch(update_fields): return user_patch(update_fields) @api.route('/users/doctor/', methods=['POST']) @use_kwargs({ 'email': fields.Email(required=True), 'phone': fields.Str(required=True), 'identification': fields.Str(required=True), 'password': fields.Str(required=True),
class RHUpdateRoomAttributes(RHRoomAdminBase):
    """Request handler that replaces a room's custom attributes."""

    @use_kwargs({'attributes': fields.Nested({'value': fields.Str(), 'name': fields.Str()}, many=True)})
    def _process(self, attributes):
        # Persistence is delegated to the service layer; 204 = updated,
        # no response body.
        update_room_attributes(self.room, attributes)
        return '', 204
def param(func):
    """Build a filter-parameter schema (operator/field/value) whose
    "value" field type is supplied by the caller."""
    schema = {
        "operator": flds.Str(required=True),
        "field": flds.Str(required=True),
        "value": func,
    }
    return flds.Nested(schema)
@use_kwargs({'location_id': fields.Int(required=True)}) @use_args(RoomUpdateArgsSchema) def _process_POST(self, args, location_id): room = Room() args['location_id'] = location_id update_room(room, args) db.session.add(room) db.session.flush() RHRoomsPermissions._jsonify_user_permissions.clear_cached(session.user) return jsonify(id=room.id) _base_args = { 'default': fields.Bool(), 'bounds': fields.Nested({ 'north_east': fields.Nested({'lat': fields.Float(), 'lng': fields.Float()}, required=True), 'south_west': fields.Nested({'lat': fields.Float(), 'lng': fields.Float()}, required=True) }, required=True) } _create_args = { **_base_args, 'name': fields.String(required=True) } _update_args = { 'areas': fields.List( fields.Nested({ **_base_args, 'id': fields.Int(required=True), 'name': fields.String() }, required=True)
validate=partial(check_valid_date, date_format=config['birth_date_format']), required=True ), "gender": fields.Str( validate=validate.OneOf(config['valid_genders']), required=True ), "relatives": fields.List( fields.Int(), required=True ) } recieve_import_data_args = { "citizens": fields.List( fields.Nested(citizen_info, required=True), required=True ) } update_citizen_info_args = { "town": fields.Str(), "street": fields.Str(), "building": fields.Str(), "apartment": fields.Int(), "name": fields.Str(), "birth_date": fields.Str( validate=partial(check_valid_date, date_format=config['birth_date_format'] )
from datetime import datetime from webargs import fields from app.exceptions import ParamError from app.engines import db from app.models import Infomation from app.schemas import length_validator, OneOf query_threat_ip_schema = { 'start': fields.Int(missing=0), 'length': fields.Int(missing=15, validate=length_validator), 'order': fields.Nested({ 'field': fields.Str(missing='crawl_time'), 'direction': fields.Str(missing='desc', validate=OneOf(['asc', 'desc'])) }, missing={}), 'filter': fields.Nested({ 'ip': fields.Str(), 'source': fields.Str(), 'asn': fields.Str(), 'category': fields.Str() # 'cnvd_id': fields.Str(), # 'affect_product': fields.Str(), # 'level': fields.Str(), # 'cve_id': fields.Str(), # 'validation_info': fields.Str(), }, missing={}) } query_threat_domain_schema = { 'start': fields.Int(missing=0),
def on_post(self, req, resp):
    """Echo back the parsed list of {'id', 'name'} user objects as JSON."""
    user_schema = {'id': fields.Int(), 'name': fields.Str()}
    parsed = parser.parse({'users': fields.Nested(user_schema, many=True)}, req)
    resp.body = json.dumps(parsed)
"branch": fields.Str(required=False), "commit": fields.Str(required=False), } job_script_args = { "source": fields.Str(required=True, allow_none=True), "destination": fields.Str(required=True), "patch": fields.Boolean(required=False), "action": fields.Str(required=False, allow_none=True), } job_start_args = { "username": fields.Str(required=True, strict=True), "repository": fields.Nested(job_repository_args, required=True, allow_none=True), "fields_to_patch": fields.List(fields.Nested(job_field_args)), "scripts": fields.List(fields.Nested(job_script_args)), } job_stop_args = {"scripts": fields.List(fields.Nested(job_script_args))} job_status_args = {"status": fields.Str(required=True, strict=True)} job_output_args = { "destination": fields.Str(required=True, strict=True), "type": fields.Str(required=True, strict=True), "name": fields.Str(required=True, strict=True), "label": fields.Str(required=True, strict=True),
class CityListSchema(SuccessListSchema):
    """List-response schema wrapping CitySchema items under ``results``."""
    results = fields.List(fields.Nested(CitySchema()))
def echo_nested():
    """Parse and echo a nested {"first", "last"} name object."""
    name_schema = {"first": fields.Str(), "last": fields.Str()}
    parsed = parser.parse({"name": fields.Nested(name_schema)})
    return J(parsed)
class LocationListSchema(SuccessListSchema):
    """List-response schema wrapping LocationSchema items under ``results``."""
    results = fields.List(fields.Nested(LocationSchema()))
def echo_nested_many():
    """Parse and echo a list of {"id", "name"} user objects."""
    users_field = fields.Nested({"id": fields.Int(), "name": fields.Str()}, many=True)
    return J(parser.parse({"users": users_field}))
from webargs import fields as web_fields app = bottle.Bottle() @app.post('/') @use_args({ 'dataContext': fields.URL(required=True), 'parameterContext': fields.URL(required=True), 'productCode': fields.String(required=True), 'name': fields.String(required=True), 'translatorUrl': fields.URL(required=True), 'organizationPublicKeys': web_fields.Nested( { 'type': fields.String(required=True), 'url': fields.URL(required=True), }, required=True, many=True, only=('type', 'url') ), 'imageUrl': fields.URL(allow_none=True), 'description': fields.String(allow_none=True), }) def create(args): return { '@context': '<context URL>', '@type': 'Product', '@id': '<URL to the product resource>', 'product_code': 'XXX', 'dataContext': '<data context URL>', 'parameterContext': '<parameter context URL>', 'translatorUrl': '<translator URL>',
def echo_nested_many_with_data_key():
    """Parse a list keyed externally as 'X-Field', picking the keyword
    spelling appropriate to the installed marshmallow major version."""
    if MARSHMALLOW_VERSION_INFO[0] < 3:
        key_name = "load_from"  # marshmallow 2 spelling
    else:
        key_name = "data_key"   # marshmallow 3 spelling
    nested = fields.Nested({"id": fields.Int()}, many=True, **{key_name: "X-Field"})
    return J(parser.parse({"x_field": nested}))
def on_post(self, req, resp):
    """Respond with the parsed nested {"first", "last"} name as JSON."""
    name_field = fields.Nested({"first": fields.Str(), "last": fields.Str()})
    payload = parser.parse({"name": name_field}, req)
    resp.body = json.dumps(payload)
class MetricsCategorizationApiElement(Resource):
    """REST endpoint computing categorization quality metrics from ground
    truth labels and predicted labels."""

    @doc(description=dedent("""
          Compute categorization metrics to assess the quality of categorization.

          In the case of binary categrorization, category labels are sorted alphabetically
          and the second one is expected to be the positive one.

          **Parameters**
           - y_true: [required] ground truth categorization data
           - y_pred: [required] predicted categorization results
           - metrics: [required] list of str. Metrics to compute, any combination of "precision", "recall", "f1", "roc_auc"
          """))
    @use_args({
        'y_true': wfields.Nested(_CategorizationIndex, many=True, required=True),
        'y_pred': wfields.Nested(_CategorizationPredictSchemaElement, many=True, required=True),
        'metrics': wfields.List(wfields.Str())
    })
    @marshal_with(MetricsCategorizationSchema())
    def post(self, **args):
        from sklearn.preprocessing import LabelEncoder
        from ..metrics import recall_at_k_score

        output_metrics = {}
        y_true = pd.DataFrame(args['y_true'])
        # Flatten predictions: keep only the top-scoring category per document.
        y_pred_b = []
        for row in args['y_pred']:
            nrow = {
                'document_id': row['document_id'],
                'category': row['scores'][0]['category'],
                'score': row['scores'][0]['score']
            }
            y_pred_b.append(nrow)
        y_pred_b = pd.DataFrame(y_pred_b)

        # Align truth and predictions on their shared index columns;
        # verify_integrity guards against duplicate document ids.
        index_cols = _check_mutual_index(y_true.columns, y_pred_b.columns)
        y_true = y_true.set_index(index_cols, verify_integrity=True)
        y_pred_b = y_pred_b.set_index(index_cols, verify_integrity=True)

        le = LabelEncoder()
        # this also sorts label by arithmetic order
        y_true['category_id'] = le.fit_transform(y_true.category.values)
        y_pred_b['category_id'] = le.transform(y_pred_b.category.values)

        # Inner join: only documents present in both truth and prediction.
        y = y_true[['category_id']].merge(y_pred_b[['category_id', 'score']],
                                          how='inner',
                                          left_index=True, right_index=True,
                                          suffixes=('_true', '_pred'))

        if 'metrics' in args:
            metrics = args['metrics']
        else:
            # Default: compute everything.
            metrics = [
                'precision', 'recall', 'roc_auc', 'f1',
                'average_precision', 'recall_at_k'
            ]

        _binary_metrics = ['precision', 'recall', 'f1']

        cy_true = y.category_id_true.values
        cy_pred = y.category_id_pred.values

        n_classes = len(le.classes_)

        if n_classes == 2:
            # Signed score: negated when the predicted class is label 0,
            # so ranking metrics see a single real-valued score.
            cy_pred_score = y.score.values
            cy_pred_score[cy_pred == 0] *= -1

        # wrapping metrics calculations, as for example F1 score can frequently print warnings
        # "F-score is ill defined and being set to 0.0 due to no predicted samples"
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UndefinedMetricWarning)

            for func in [
                    precision_score, recall_score, f1_score, roc_auc_score,
                    average_precision_score, recall_at_k_score
            ]:
                # Metric name derived from the function name, e.g.
                # precision_score -> "precision".
                name = func.__name__.replace('_score', '')
                opts = {}
                if name in ['roc_auc', 'average_precision', 'recall_at_k'
                            ] and n_classes == 2:
                    # Ranking metrics consume the signed score in the
                    # binary case.
                    y_targ = cy_pred_score
                    if name == 'recall_at_k':
                        opts = {'k': 0.2}  # top 20% — see rename below
                else:
                    y_targ = cy_pred
                if name in _binary_metrics and n_classes != 2:
                    opts['average'] = 'micro'
                if name in metrics:
                    if n_classes == 2 or name in _binary_metrics:
                        output_metrics[name] = func(cy_true, y_targ, **opts)
                    else:
                        # Ranking metrics are undefined for multiclass here.
                        output_metrics[name] = np.nan

        if "recall_at_k" in output_metrics:
            # Expose under a name that reflects the fixed k=0.2 cutoff.
            output_metrics['recall_at_20p'] = output_metrics.pop(
                'recall_at_k')
        return output_metrics
class Waskeys(Resource):
    """CRUD resource for wash keys kept in the module-level ``washkeys``
    list."""

    get_args = {
        "id": fields.String(required=False, missing=None),
        "fromDateTime": fields.DateTime(required=False, missing=None),
        "active": fields.Boolean(required=False, missing=None),
        "name": fields.String(required=False, missing=None)
    }

    @use_kwargs(get_args)
    @require_headers
    def get(self, id, fromDateTime, active, name):
        """Return washkeys filtered by the first matching criterion, in
        precedence order: fromDateTime, active, name, id."""
        if fromDateTime:
            result = [
                washkey for washkey in washkeys
                if datetime.fromisoformat(washkey["CreatedTime"]) > fromDateTime
            ]
        elif active is not None:
            # BUG FIX: the previous `elif active:` ignored an explicit
            # active=false filter; comparing against None honours False.
            result = [
                washkey for washkey in washkeys
                if washkey["Active"] == active
            ]
        elif name:
            result = [
                washkey for washkey in washkeys
                if name.lower() in washkey["Name"].lower()
            ]
        else:
            result = [
                washkey for washkey in washkeys
                if not id or washkey["Id"] == id
            ]
        return result, 200

    post_args = {
        "washKey": fields.Nested(
            {
                "id": fields.String(required=True),
                "credit": fields.Integer(required=True),
                "productrestriction": fields.Integer(required=False, missing=0),
                "name": fields.String(required=False, missing=""),
                "active": fields.Boolean(required=False, missing=True),
                "expiry": fields.Date(required=False, missing=None)
            },
            location="json",
            required=True),
        "paymentType": fields.String(required=False, missing="", location="json")
    }

    @use_kwargs(post_args)
    @require_headers
    def post(self, washKey, paymentType):
        """Create a washkey; respond 409 if the id already exists.

        ``paymentType`` is accepted but currently unused.
        """
        for washkey in washkeys:
            if washkey["Id"] == washKey["id"]:
                # Runtime string preserved verbatim (adjacent literals joined).
                return "{ error : {errorCode : 40901, errorMessage : washkey already exists}", 409
        expiry = washKey["expiry"]
        washkey = {
            "Id": washKey["id"],
            "Name": washKey["name"],
            "Credit": washKey["credit"],
            "ProductRestriction": washKey["productrestriction"],
            "Active": washKey["active"],
            # BUG FIX: "expiry" defaults to None (missing=None); calling
            # .isoformat() unconditionally raised AttributeError whenever
            # the client omitted the field.
            "Expiry": expiry.isoformat() if expiry is not None else None,
            "CreatedTime": datetime.utcnow().isoformat()
        }
        washkeys.append(washkey)
        return "{}", 200
from datetime import datetime
from webargs import fields

from app.exceptions import ParamError
from app.engines import db
from app.models import Job
from app.schemas import length_validator, OneOf

# webargs schema for paginated / sorted / filtered queries of Zhilian
# job postings.
query_zhilian_schema = {
    'start': fields.Int(missing=0),   # pagination offset
    'length': fields.Int(missing=15, validate=length_validator),  # page size
    'order': fields.Nested(
        {
            'field': fields.Str(missing='crawl_time'),
            'direction': fields.Str(missing='desc', validate=OneOf(['asc', 'desc']))
        },
        missing={}),
    'filter': fields.Nested({
        'positionName': fields.Str(),
    }, missing={})
}
from app.exceptions import ParamError
from app.engines import db
from app.models import Infomation
from app.schemas import length_validator, OneOf

# webargs schema for paginated / sorted / filtered queries of information
# articles.
query_infomations_schema = {
    'start': fields.Int(missing=0),   # pagination offset
    'length': fields.Int(missing=15, validate=length_validator),  # page size
    'order': fields.Nested(
        {
            'field': fields.Str(missing='posted_time'),
            'direction': fields.Str(missing='desc', validate=OneOf(['asc', 'desc']))
        },
        missing={}),
    'filter': fields.Nested(
        {
            'author': fields.Str(),
            'source': fields.Str(),
            'title': fields.Str(),
            'summary': fields.Str(),
            'keys': fields.Str()
        },
        missing={})
}
def on_post(self, req, resp):
    """Parse a nested {'first', 'last'} name from the request and write
    it back to the response as JSON."""
    schema = fields.Nested({'first': fields.Str(), 'last': fields.Str()})
    result = parser.parse({'name': schema}, req)
    resp.body = json.dumps(result)
def id_in_db(id): if not db.session.query(WebsiteNews).filter_by(id=id).first(): raise RecordNotFound('无此数据') query_args = { 'page': fields.Int(missing=0), 'size': fields.Int(missing=25, validate=length_validator), 'order': fields.Nested( { 'field': fields.Str(missing='create_time'), 'direction': fields.Str(missing='desc', validate=OneOf(['asc', 'desc'])) }, missing={}), 'fields': fields.List(fields.Str()), 'filter': fields.Nested( { 'url': fields.Str(), 'domain': fields.Str(), 'ip_area': fields.Str(), 'title': fields.Str(), 'http_status': fields.Str() }, missing={})
def echo_nested_many(request):
    """Parse and return a list of {'id': int, 'name': str} user dicts
    from the incoming request."""
    user_fields = {'id': fields.Int(), 'name': fields.Str()}
    return parser.parse({'users': fields.Nested(user_fields, many=True)}, request)