class EventNoteRecordSchema(RecordSchema, EventNoteSchema):
    class Meta:
        model = EventNote
        non_indexable = ('note_id', 'type', 'event_id', 'contribution_id',
                         'subcontribution_id', 'category_id', 'category_path',
                         'url', 'modified_dt')
        fields = RecordSchema.Meta.fields + non_indexable

    _data = fields.Function(lambda note: _EventNoteDataSchema().dump(note))
    category_path = fields.Function(lambda note, ctx: _get_category_chain(
        note.event, ctx.get('categories')))
    url = mm.Function(
        lambda note: url_for('event_notes.view', note, _external=True))
class AdminCustomersView(AdminBaseView):
    """Admin - Customers - Customer list."""

    pagination_class = StandardResultsSetPagination

    @AdminBaseView.permission_required(
        [AdminBaseView.staff_permissions.ADMIN_CUSTOMER])
    @use_args(
        {
            "sort_prop": fields.String(
                required=False,
                missing="",
                validate=[
                    validate.OneOf(["", "consume_amount", "consume_count"])
                ],
                comment="Field to sort by",
            ),
            "sort": fields.Function(
                # rstrip("ending") strips the trailing characters e/n/d/i/n/g,
                # turning "ascending" into "asc" and "descending" into "desc".
                deserialize=lambda x: x.rstrip("ending"),
                required=False,
                missing="",
                validate=[validate.OneOf(["", "asc", "desc"])],
                comment="Sort order: asc or desc",
            ),
            "keyword": fields.String(
                required=False, missing="",
                comment="Search keyword: nickname or phone number"),
        },
        location="query")
    def get(self, request, args):
        shop = self.current_shop
        customer_list = list_customer_by_shop_id(shop.id, **args)
        customer_list = self._get_paginated_data(customer_list,
                                                 AdminCustomerSerializer)
        return self.send_success(data_list=customer_list)
class RetreatModelSchema(Schema):
    id = fields.Int(dump_only=True)
    company_id = fields.Int(dump_only=True)
    name = fields.Str()
    num_employees = fields.Int(required=True)
    num_nights = fields.Int(required=True)
    flok_note = fields.String()
    employee_location_submission = fields.Function(
        serialize=lambda obj: RetreatEmployeeLocationSubmissionApiSchema.dump(
            obj=obj.employee_location_submissions[0])
        if obj.employee_location_submissions else None)
    proposals = fields.Function(
        serialize=lambda obj: RetreatProposalApiSchema.dump(
            obj=obj.proposals, many=True))
    selected_proposal_id = fields.Int()
    paid = fields.Function(serialize=lambda obj: len(obj.payments) != 0)
class SubContributionRecordSchema(RecordSchema, SubContributionSchema):
    class Meta:
        model = SubContribution
        indexable = ('title', 'description', 'persons', 'location')
        non_indexable = ('subcontribution_id', 'type', 'event_id',
                         'contribution_id', 'category_id', 'category_path',
                         'url', 'start_dt', 'end_dt', 'duration')
        fields = RecordSchema.Meta.fields + non_indexable

    _data = fields.Function(lambda subc: SubContributionSchema(
        only=SubContributionRecordSchema.Meta.indexable).dump(subc))
    category_path = fields.Function(lambda subc, ctx: _get_category_chain(
        subc.event, ctx.get('categories')))
    url = mm.Function(lambda subc: url_for(
        'contributions.display_subcontribution', subc, _external=True))

    @post_dump
    def _transform(self, data, **kwargs):
        if desc := data['_data'].get('description'):
            data['_data']['description'] = strip_tags(desc).strip()
        return data
class EventRecordSchema(RecordSchema, EventSchema):
    class Meta:
        model = Event
        indexable = ('title', 'description', 'keywords', 'location', 'persons')
        non_indexable = ('type', 'event_type', 'event_id', 'url',
                         'category_id', 'category_path', 'start_dt', 'end_dt')
        fields = RecordSchema.Meta.fields + non_indexable

    _data = fields.Function(lambda event: EventSchema(
        only=EventRecordSchema.Meta.indexable).dump(event))
    category_path = fields.Function(
        lambda e, ctx: _get_category_chain(e, ctx.get('categories')))
    # By default, CERN's global indexing requires external URLs
    url = mm.String(attribute='external_url')

    @post_dump
    def _transform(self, data, **kwargs):
        data['type_format'] = data.pop('event_type')
        if desc := data['_data'].get('description'):
            data['_data']['description'] = strip_tags(desc).strip()
        return data
class ContributionRecordSchema(RecordSchema, ContributionSchema):
    class Meta:
        model = Contribution
        indexable = ('title', 'description', 'location', 'persons')
        non_indexable = ('contribution_id', 'type', 'contribution_type',
                         'event_id', 'url', 'category_id', 'category_path',
                         'start_dt', 'end_dt', 'duration')
        fields = RecordSchema.Meta.fields + non_indexable

    _data = fields.Function(lambda contrib: ContributionSchema(
        only=ContributionRecordSchema.Meta.indexable).dump(contrib))
    category_path = fields.Function(
        lambda c, ctx: _get_category_chain(c.event, ctx.get('categories')))
    url = mm.Function(lambda contrib: url_for(
        'contributions.display_contribution', contrib, _external=True))

    @post_dump
    def _transform(self, data, **kwargs):
        if contribution_type := data.pop('contribution_type', None):
            data['type_format'] = contribution_type
        if desc := data['_data'].get('description'):
            data['_data']['description'] = strip_tags(desc).strip()
        # A @post_dump hook must return the (possibly modified) data.
        return data
class AttachmentRecordSchema(RecordSchema, AttachmentSchema):
    class Meta:
        model = Attachment
        indexable = ('title', 'filename', 'user')
        non_indexable = ('attachment_id', 'folder_id', 'type',
                         'attachment_type', 'event_id', 'contribution_id',
                         'category_id', 'category_path', 'url',
                         'subcontribution_id', 'modified_dt')
        fields = RecordSchema.Meta.fields + non_indexable

    _data = fields.Function(lambda at: AttachmentSchema(
        only=AttachmentRecordSchema.Meta.indexable).dump(at))
    category_path = fields.Function(lambda a, ctx: _get_category_chain(
        a.folder.event, ctx.get('categories')))
    url = mm.String(attribute='absolute_download_url')

    @post_dump
    def _translate_keys(self, data, **kwargs):
        data['type_format'] = data.pop('attachment_type')
        if user := data['_data'].pop('user', None):
            data['_data']['persons'] = user
        return data
def create_args():
    """Defines and validates params for create."""
    return {
        "type": fields.String(missing="image"),
        "file": fields.Function(deserialize=validate_image,
                                required=True,
                                location="files",
                                load_from="image"),
        "team_id": fields.UUID(missing=None),
        "user_tags": fields.List(fields.String(), load_from="tags"),
    }
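# A minimal usage sketch for create_args() above, assuming the webargs 5.x /
# marshmallow 2.x API that the per-field `location` and `load_from` imply.
# The Flask app, route name, and the validate_image stub are hypothetical,
# not part of the original project.
from flask import Flask, jsonify
from webargs.flaskparser import use_args

app = Flask(__name__)

def validate_image(value):
    # Stub for illustration: the real project would validate the upload here.
    return value

@app.route("/uploads", methods=["POST"])
@use_args(create_args())
def create_upload(args):
    # args["file"] has already been passed through validate_image
    return jsonify(type=args["type"])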
class RecordSchema(ACLSchema):
    class Meta:
        fields = ('_data', '_access', 'schema')

    schema = fields.Function(lambda _, ctx: ctx.get('schema'),
                             data_key='$schema')

    @post_dump
    def remove_none_fields(self, data, **kwargs):
        """Remove fields that are None to avoid JSON schema validation errors."""
        return remove_none_entries(data)

    @post_dump
    def site(self, data, **kwargs):
        if data['_data']:
            data['_data']['site'] = config.BASE_URL
        return data
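# Standalone sketch (not the Indico code itself) of how a two-argument
# fields.Function serializer receives the schema's context dict at dump time;
# this is the mechanism that fills '$schema' and 'category_path' above.
# The class name and schema URL are illustrative.
import marshmallow as mm
from marshmallow import fields

class DemoRecordSchema(mm.Schema):
    # When the callable takes two parameters, marshmallow passes
    # (obj, context) instead of just obj.
    schema = fields.Function(lambda _, ctx: ctx.get('schema'),
                             data_key='$schema')
    title = fields.Str()

demo = DemoRecordSchema()
demo.context = {'schema': 'https://example.org/record-v1.json'}
print(demo.dump({'title': 'My event'}))
# {'$schema': 'https://example.org/record-v1.json', 'title': 'My event'}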
class AuthSignupController(Resource):
    post_args = {
        "email": fields.Email(required=True),
        "password": fields.String(required=True),
        "login_provider": fields.Function(
            data_key="loginProvider",
            deserialize=lambda lp: UserLoginProviderType[lp],
            required=True,
        ),
        "first_name": fields.String(data_key="firstName"),
        "last_name": fields.String(data_key="lastName"),
    }

    @use_args(post_args, location="json")
    def post(self, args: Dict[str, Any]):
        new_user = auth_manager.signup_user(
            args["email"],
            login_provider=args["login_provider"],
            login_provider_uid=args["email"],
            login_provider_data=FlokLoginData(password=args["password"]),
            first_name=args.get("first_name"),
            last_name=args.get("last_name"),
        )
        user_login_id = auth_manager.user_login_id(new_user)

        ### Auto-creating a company and retreat v1 upon signup, for now.
        company_name = None
        if args.get("first_name"):
            company_name = f"{args.get('first_name')}'s company"
        new_company = company_manager.create_company(name=company_name,
                                                     admins=[new_user])
        retreat_manager.create_retreat(new_company)
        auth_manager.commit_changes()
        ###

        return responses.success(
            {"user": UserApiSchema.dump(obj=new_user)},
            extra_headers=web.login_cookie_header(jwt, user_login_id.login_id),
        )
class AuthSigninController(Resource):
    post_args = {
        "email": fields.Email(required=True),
        "password": fields.String(required=True),
        "login_provider": fields.Function(
            data_key="loginProvider",
            deserialize=lambda lp: UserLoginProviderType[lp],
            required=True,
        ),
    }

    @use_args(post_args, location="json")
    def post(self, args: Dict[str, Any]):
        logged_in_user = auth_manager.signin_user(
            login_provider=args["login_provider"],
            login_provider_uid=args["email"],
            login_provider_data=FlokLoginData(password=args["password"]),
        )
        user_login_id = auth_manager.user_login_id(logged_in_user)
        auth_manager.commit_changes()
        return responses.success(
            {"user": UserApiSchema.dump(obj=logged_in_user)},
            extra_headers=web.login_cookie_header(jwt, user_login_id.login_id),
        )

    @jwt.requires_auth
    def delete(self):
        # Log out by expiring the JWT cookie (Expires date in the past).
        return responses.success(
            {"message": "Successfully logged out"},
            extra_headers={
                "Set-Cookie": (
                    f"{jwt.jwt_cookie_name}=logged; Path=/; "
                    "Expires=Mon, 01 Jan 2000 00:00:00 GMT; HttpOnly"
                )
            },
        )
def get_transcript():
    arguments = {
        'transcript_id': fields.Str(required=True,
                                    validate=lambda x: len(x) > 0),
        'allele_count': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, int))),
        'allele_freq': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, float))),
        'allele_num': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, int))),
        'site_quality': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, float))),
        'filter': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, str))),
        'annotations.lof': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, str))),
        'annotations.consequence': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, str))),
        'sort': fields.Function(deserialize=deserialize_query_sort),
        'vcf': fields.Bool(required=False, missing=False),
        'limit': fields.Int(required=False, validate=lambda x: x > 0,
                            missing=pageSize),
        'last': fields.Function(deserialize=deserialize_query_last)
    }
    args = parser.parse(arguments, validate=validate_query)

    db = get_db()
    transcript = db.transcripts.find_one(
        {'transcript_id': args['transcript_id']}, projection={'_id': False})
    if not transcript:
        raise UserError(
            'Transcript with identifier equal to {} was not found.'.format(
                args['transcript_id']))

    response = {
        'transcript': {
            'transcript_id': transcript['transcript_id'],
            'gene_id': transcript['gene_id'],
            'chrom': transcript['chrom'],
            'start': transcript['start'],
            'stop': transcript['stop'],
            'strand': transcript['strand']
        }
    }

    mongo_filter, mongo_sort = build_region_query(args, transcript['xstart'],
                                                  transcript['xstop'])

    annotations_filter = [{'Feature': transcript['transcript_id']}]
    annotations = args.get('annotations', None)
    if annotations is not None:
        filters = annotations.get('lof', None)
        if filters is not None:
            if len(filters) == 1:
                annotations_filter.append({'LoF': filters[0]})
            else:
                annotations_filter.append(
                    {'$or': [{'LoF': v} for v in filters]})
        filters = annotations.get('consequence', None)
        if filters is not None:
            # Each filter is a single-entry dict mapping an operator
            # ('$eq' or '$ne') to a consequence pattern.
            annotations_filter.append({
                '$or': [
                    {'Consequence': re.compile(list(v.values())[0])}
                    if list(v.keys())[0] == '$eq' else
                    {'Consequence': {'$not': re.compile(list(v.values())[0])}}
                    for v in filters
                ]
            })
    mongo_filter['$and'].append(
        {'vep_annotations': {'$elemMatch': {'$and': annotations_filter}}})

    data = []
    last_variant = None
    last_object_id = None
    collection = db[api_collection_name]
    # This could be replaced with collection.aggregate; however, in Mongo 3.4
    # collection.aggregate produced a different (non-optimal) query plan than
    # collection.find.
    cursor = collection.find(mongo_filter, projection).sort(
        mongo_sort + [('_id', ASCENDING)]).limit(args['limit'])
    if not args['vcf']:
        response['format'] = 'json'
        for r in cursor:
            last_object_id = r.pop('_id')
            r['annotations'] = {
                k: a[k]
                for k in annotations_ordered
                for a in r['vep_annotations']
                if a['Feature'] == transcript['transcript_id']
            }
            r.pop('xpos', None)
            r.pop('vep_annotations', None)
            data.append(r)
            last_variant = r
    else:
        response['format'] = 'vcf'
        response['header'] = vcf_header
        response['meta'] = vcf_meta
        for r in cursor:
            last_object_id = r.pop('_id')
            r.pop('xpos', None)
            data.append(
                '{}\t{}\t{}\t{}\t{}\t{}\t{}\t'
                'AN={};AC={};AF={};AVGDP={};AVGDP_ALT={};'
                'AVGGQ={};AVGGQ_ALT={};CSQ={}'.format(
                    r['chrom'], r['pos'],
                    ';'.join(r['rsids']) if r['rsids'] else '.',
                    r['ref'], r['alt'], r['site_quality'], r['filter'],
                    r['allele_num'], r['allele_count'], r['allele_freq'],
                    r['avgdp'], r['avgdp_alt'], r['avggq'], r['avggq_alt'],
                    ','.join(
                        '|'.join(a[k] for k in annotations_ordered)
                        for a in r['vep_annotations']
                        if a['Feature'] == transcript['transcript_id'])))
            last_variant = r
    response['data'] = data
    response['next'] = build_link_next(
        args, last_object_id, last_variant,
        mongo_sort) if len(data) == args['limit'] else None
    response = jsonify(response)
    response.status_code = 200
    return response
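# The endpoints above rely on a deserialize_query_filter(value, type) helper
# whose definition is not shown. Below is a hypothetical sketch assuming an
# "operator:value" query grammar; the real grammar and name may differ.
_OPERATORS = {'eq': '$eq', 'ne': '$ne', 'lt': '$lt',
              'lte': '$lte', 'gt': '$gt', 'gte': '$gte'}

def deserialize_query_filter_sketch(raw, value_type):
    """Turn e.g. 'lte:0.001' into {'$lte': 0.001} for a Mongo query."""
    op, _, value = raw.partition(':')
    if not value:  # a bare value means equality
        op, value = 'eq', raw
    if op not in _OPERATORS:
        raise ValueError('Unsupported operator: {}'.format(op))
    return {_OPERATORS[op]: value_type(value)}

# e.g. deserialize_query_filter_sketch('lte:0.001', float) -> {'$lte': 0.001}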
def get_region():
    arguments = {
        'chrom': fields.Str(required=True, validate=lambda x: len(x) > 0),
        'start': fields.Int(required=True, validate=lambda x: x >= 0),
        'end': fields.Int(required=True, validate=lambda x: x > 0),
        'allele_count': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, int))),
        'allele_freq': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, float))),
        'allele_num': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, int))),
        'site_quality': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, float))),
        'filter': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, str))),
        'annotations.lof': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, str))),
        'annotations.consequence': fields.List(
            fields.Function(
                deserialize=lambda x: deserialize_query_filter(x, str))),
        'sort': fields.Function(deserialize=deserialize_query_sort),
        'vcf': fields.Bool(required=False, missing=False),
        'limit': fields.Int(required=False, validate=lambda x: x > 0,
                            missing=pageSize),
        'last': fields.Function(deserialize=deserialize_query_last)
    }
    args = parser.parse(arguments, validate=validate_query)

    # Start must be strictly less than end.
    if args['start'] >= args['end']:
        raise UserError('Start position must be less than end position.')
    if args['end'] - args['start'] > maxRegion:
        raise UserError(
            'Regions larger than {} base-pairs are not allowed.'.format(
                maxRegion))

    # Transform start and end to xpos.
    if not Xpos.check_chrom(args['chrom']):
        raise UserError('Invalid chromosome name.')
    xstart = Xpos.from_chrom_pos(args['chrom'], args['start'])
    xend = Xpos.from_chrom_pos(args['chrom'], args['end'])

    mongo_filter, mongo_sort = build_region_query(args, xstart, xend)

    annotations_filter = []
    annotations = args.get('annotations', None)
    if annotations is not None:
        filters = annotations.get('lof', None)
        if filters is not None:
            if len(filters) == 1:
                annotations_filter.append({'LoF': filters[0]})
            else:
                annotations_filter.append(
                    {'$or': [{'LoF': v} for v in filters]})
        filters = annotations.get('consequence', None)
        if filters is not None:
            annotations_filter.append({
                '$or': [
                    {'Consequence': re.compile(list(v.values())[0])}
                    if list(v.keys())[0] == '$eq' else
                    {'Consequence': {'$not': re.compile(list(v.values())[0])}}
                    for v in filters
                ]
            })
    if annotations_filter:
        mongo_filter['$and'].append(
            {'vep_annotations': {'$elemMatch': {'$and': annotations_filter}}})

    data = []
    response = {}
    last_variant = None
    last_object_id = None
    db = get_db()
    collection = db[api_collection_name]
    # This could be replaced with collection.aggregate; however, in Mongo 3.4
    # collection.aggregate produced a different (non-optimal) query plan than
    # collection.find.
    cursor = collection.find(mongo_filter, projection).sort(
        mongo_sort + [('_id', ASCENDING)]).limit(args['limit'])
    if not args['vcf']:
        response['format'] = 'json'
        for r in cursor:
            last_object_id = r.pop('_id')
            r['annotations'] = [{k: a[k] for k in annotations_ordered}
                                for a in r['vep_annotations']]
            r.pop('vep_annotations')
            r.pop('xpos', None)
            data.append(r)
            last_variant = r
    else:
        response['format'] = 'vcf'
        response['header'] = vcf_header
        response['meta'] = vcf_meta
        for r in cursor:
            last_object_id = r.pop('_id')
            r.pop('xpos', None)
            data.append(
                '{}\t{}\t{}\t{}\t{}\t{}\t{}\t'
                'AN={};AC={};AF={};AVGDP={};AVGDP_ALT={};'
                'AVGGQ={};AVGGQ_ALT={};CSQ={}'.format(
                    r['chrom'], r['pos'],
                    ';'.join(r['rsids']) if r['rsids'] else '.',
                    r['ref'], r['alt'], r['site_quality'], r['filter'],
                    r['allele_num'], r['allele_count'], r['allele_freq'],
                    r['avgdp'], r['avgdp_alt'], r['avggq'], r['avggq_alt'],
                    ','.join('|'.join(a[k] for k in annotations_ordered)
                             for a in r['vep_annotations'])))
            last_variant = r
    response['data'] = data
    response['next'] = build_link_next(
        args, last_object_id, last_variant,
        mongo_sort) if len(data) == args['limit'] else None
    response = jsonify(response)
    response.status_code = 200
    return response
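# get_region converts chromosome coordinates to a single sortable integer via
# Xpos, which is not shown. A hypothetical sketch follows, assuming the
# conventional chrom * 10**9 + pos packing used by ExAC-style browsers; the
# project's actual Xpos may differ.
_CHROM_ORDER = {str(i): i for i in range(1, 23)}
_CHROM_ORDER.update({'X': 23, 'Y': 24, 'M': 25})

class XposSketch:
    """Hypothetical stand-in for the Xpos helper used above."""

    @staticmethod
    def check_chrom(chrom):
        return chrom in _CHROM_ORDER

    @staticmethod
    def from_chrom_pos(chrom, pos):
        # Pack (chrom, pos) into one integer so genomic ranges sort linearly.
        return _CHROM_ORDER[chrom] * 10**9 + pos

# e.g. XposSketch.from_chrom_pos('2', 179_400_000) -> 2_179_400_000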
class AdminLogsView(AdminBaseView):
    """Admin - Staff - Operation logs."""

    @AdminBaseView.permission_required(
        [AdminBaseView.staff_permissions.ADMIN_STAFF])
    @use_args(
        {
            "operator_ids": fields.Function(
                deserialize=lambda x: x.replace(" ", "").split(","),
                missing=[],
                comment="Operator IDs (comma-separated)",
            ),
            "operate_module_ids": StrToList(
                missing=[],
                validate=[
                    validate.ContainsOnly(list(all_module_dict.values()))
                ],
                comment="Module IDs",
            ),
        },
        location="query")
    def get(self, request, args):
        args["operate_module_ids"] = [
            int(_) for _ in args.get("operate_module_ids")
        ]
        # Make the dates timezone-aware for Django.
        args["end_date"] = make_aware(datetime.datetime.today() +
                                      datetime.timedelta(1))
        args["from_date"] = make_aware(datetime.datetime.today() -
                                       datetime.timedelta(90))
        shop_id = self.current_shop.id
        # Query a single module or several.
        operate_module_ids = args.pop("operate_module_ids")
        if len(operate_module_ids) == 1:
            # Single module.
            module_id = operate_module_ids[0]
            log_list = list_one_module_log_by_filter(shop_id, module_id,
                                                     **args)
            module_id_2_log_list = {module_id: log_list}
        else:
            # Multiple modules; the result maps module id to its log list:
            # {1: [log, log, ...], 2: [log, log, ...], ...}
            module_id_2_log_list = dict_more_modules_log_by_filter(
                shop_id, operate_module_ids, **args)
        # Assemble the response data.
        module_id_2_name = {v: k.lower() for k, v in all_module_dict.items()}
        all_log_list = []
        for module_id, log_list_query in module_id_2_log_list.items():
            def_name = "format_{}_data".format(module_id_2_name.get(module_id))
            log_list = getattr(self, def_name)(log_list_query)
            all_log_list.extend(log_list)
        all_log_list = sorted(all_log_list,
                              key=lambda x: x["operate_time"],
                              reverse=True)
        return self.send_success(data_list=all_log_list)

    def format_order_data(self, log_list_query):
        """Format order log data."""
        for log in log_list_query:
            if log.operate_type in [
                    OrderLogType.HOME_DELIVERY_AMOUNT,
                    OrderLogType.HOME_MINIMUM_FREE_AMOUNT,
                    OrderLogType.HOME_MINIMUM_ORDER_AMOUNT,
                    OrderLogType.PICK_MINIMUM_FREE_AMOUNT,
                    OrderLogType.PICK_SERVICE_AMOUNT,
            ]:
                log.old_value = log.operate_content.split("|")[0]
                log.new_value = log.operate_content.split("|")[1]
                log.operate_content = ""
        order_log_serializer = OrderLogSerializer(log_list_query, many=True)
        log_list = order_log_serializer.data
        return log_list

    def format_config_data(self, log_list_query):
        """Format config log data."""
        log_list = ConfigLogSerializer(log_list_query, many=True).data
        return log_list

    def format_product_data(self, log_list_query):
        """Format product log data."""
        log_list = ProductLogSerializer(log_list_query, many=True).data
        return log_list

    def format_promotion_data(self, log_list_query):
        """Format promotion log data."""
        log_list = PromotionLogSerializer(log_list_query, many=True).data
        return log_list
        return False
    return True


pagination_args = {
    "page": fields.Int(required=False, validate=validate_page, missing=1),
    "per_page": fields.Int(required=False, validate=validate_per_page,
                           missing=10),
}

clear_search = lambda txt: slugify(txt, separator=" ")

search_args = {
    "search": fields.Function(deserialize=clear_search, required=False,
                              missing=None),
}

tag_args = {
    "tag": fields.Function(deserialize=slugify, required=False, missing=None)
}


def get_or_404(model_or_query: Any, ident: Any) -> Any:
    """Only supports single primary keys."""
    query = (model_or_query if isinstance(model_or_query, Query)
             else model_or_query.query)
    rv = query.get(ident)
    if rv is None:
        abort(404)
    return rv
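# Hypothetical usage of the arg dicts above in a Flask view, assuming
# webargs' Flask parser with the newer location= keyword. The app, route,
# and in-memory POSTS list are illustrative stand-ins for the project's
# real models (the validate_page/validate_per_page definitions are
# truncated above).
from flask import Flask, jsonify
from webargs.flaskparser import use_args

app = Flask(__name__)
POSTS = [{"id": 1, "title": "intro to flask", "tag": "flask"},
         {"id": 2, "title": "webargs deep dive", "tag": "webargs"}]

@app.route("/posts")
@use_args({**pagination_args, **search_args, **tag_args}, location="query")
def list_posts(args):
    items = POSTS
    if args["search"]:
        items = [p for p in items if args["search"] in p["title"]]
    if args["tag"]:
        items = [p for p in items if p["tag"] == args["tag"]]
    start = (args["page"] - 1) * args["per_page"]
    return jsonify(items[start:start + args["per_page"]])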
class ArgsUserPut(DefaultSchema):
    user = fields.Function(required=True,
                           deserialize=parse.user,
                           load_from='user_id',
                           location='view_args')
    username = fields.String(location='json')
    email = fields.String(location='json', validate=validate.Email())
    notify = fields.Boolean(location='json')