class EntityDocRelationListResource(Resource):
    @parse({
        "doc_relation_ids": fields.List(fields.Integer(), missing=[]),
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
    })
    def get(self, args: typing.Dict, doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """List relations of a doc type, paged via offset/limit and optionally
        filtered by doc_relation_ids."""
        service = DocTypeService()
        result, count = service.get_relation_list(
            doc_type_id,
            args.get("offset"),
            args.get("limit"),
            doc_relation_ids=args.get("doc_relation_ids"),
        )
        payload = {
            "message": "请求成功",
            "result": result,
            "count": count,
        }
        return payload, 200

    @parse({
        "doc_relation_name": fields.String(required=True),
        "doc_term_ids": fields.List(fields.Integer(), required=True)
    })
    def post(self, args: typing.Dict, doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """Create one relation under the given doc type."""
        created = DocTypeService().create_relation(
            doc_type_id, args.get("doc_term_ids"), args.get("doc_relation_name"))
        return {"message": "创建成功", "result": created}, 201
class WordsegMarkJobListResource(Resource):
    @parse({
        "is_superuser": fields.Boolean(missing=False),
        "query": fields.String(missing=''),
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "doc_type_id": fields.Integer(missing=None),
        'order_by': fields.String(missing='-created_time'),
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Paged list of word-segmentation mark jobs matching the filters."""
        count, result = MarkJobService().get_mark_job_list_by_nlp_task(
            args, nlp_task=NlpTaskEnum.wordseg)
        payload = {
            "message": "请求成功",
            "result": result,
            "count": count,
        }
        return payload, 200

    @parse(
        {
            "mark_job_name": fields.String(required=True),
            "mark_job_type": fields.String(required=True),
            "mark_job_desc": fields.String(),
            "doc_type_id": fields.Integer(required=True),
            "files": fields.List(fields.File(), required=True),
            "assign_mode": fields.String(required=True,
                                         validate=lambda x: x in ['average', 'together']),
            "assessor_id": fields.Integer(missing=0),
            "labeler_ids": fields.List(fields.Integer(), required=True),
        },
        locations=('form', 'files'))
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Create a word-segmentation mark job from uploaded plain-text files.

        Rejects the "together" assign mode and any non-text upload; maps a
        TypeError from the service to a 400 response.
        """
        uploaded = args["files"]
        if args["assign_mode"] == AssignModeEnum.together:
            abort(400, message="不支持共同标注")
        detected_type = Common().check_job_type_by_files(uploaded)
        # abort() raises, so reaching this point means detected_type == "text".
        if detected_type != "text":
            abort(400, message="请上传纯文本文档(txt/csv)")
        args['mark_job_type'] = detected_type
        try:
            created = MarkJobService().create_mark_job(
                uploaded, NlpTaskEnum.wordseg, args)
        except TypeError:
            abort(400, message="上传文件类型错误")
        return {"message": "创建成功", "result": created}, 201
class RelationMarkJobListResource(Resource):
    @parse({
        "is_superuser": fields.Boolean(missing=False),
        "query": fields.String(missing=''),
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "doc_type_id": fields.Integer(missing=None),
        'order_by': fields.String(missing='-created_time'),
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Paged list of relation mark jobs matching the filters."""
        count, result = MarkJobService().get_mark_job_list_by_nlp_task(
            args, NlpTaskEnum.relation)
        payload = {
            "message": "请求成功",
            "result": result,
            "count": count,
        }
        return payload, 200

    @parse(
        {
            "mark_job_name": fields.String(required=True),
            "mark_job_type": fields.String(required=True),
            "mark_job_desc": fields.String(),
            "doc_type_id": fields.Integer(required=True),
            "files": fields.List(fields.File(), required=True),
            "assign_mode": fields.String(required=True,
                                         validate=lambda x: x in ['average', 'together']),
            "assessor_id": fields.Integer(),
            "labeler_ids": fields.List(fields.Integer(), required=True),
            "use_rule": fields.Integer(missing=1)  # rules enabled by default
        },
        locations=('form', 'files'))
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Create a relation mark job from uploaded files; all files must be
        of a single recognizable kind (all text or all electronic docs)."""
        uploaded = args['files']
        detected_type = Common().check_job_type_by_files(uploaded)
        if not detected_type:
            abort(400, message='请上传全部纯文本文档(txt/csv)或者全部电子文档(pdf/word文档)')
        # abort() raises, so detected_type is valid here.
        args['mark_job_type'] = detected_type
        created = MarkJobService().create_mark_job(
            uploaded, NlpTaskEnum.relation, args)
        return {"message": "创建成功", "result": created}, 201
class TrainTaskSchema(Schema):  # type: ignore
    """Serializes a train task, mapping internal attribute names to the
    "model_*" field names used by the API."""

    model_train_id = fields.Integer(attribute="train_task_id")
    model_train_config = fields.Dict(attribute="train_config")
    # Numeric status rendered as its StatusEnum member name.
    model_train_state = fields.Function(
        lambda obj: StatusEnum(obj.train_status).name)
    model_id = fields.Integer(attribute="train_job_id")
    mark_job_ids = fields.List(fields.Integer())
    train_terms = fields.List(fields.Nested(TrainTermTaskSchema))
    created_time = fields.DateTime()
    last_updated_time = fields.DateTime(attribute="updated_time")
    model_version = fields.String()
class GetExtractMarkJobDataResource(Resource):
    @parse({
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
        "doc_term_ids": fields.List(fields.Integer(), missing=[]),
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Return NER mark-job data for the requested mark job ids."""
        rows = MarkJobService().get_mark_job_data_by_ids(
            args["mark_job_ids"], args, prefix="NER")
        payload = {
            "message": "请求成功",
            "result": rows,
            "count": len(rows),
        }
        return payload, 200
class GetRelationMarkJobDataResource(Resource):
    @parse({
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
        "doc_relation_ids": fields.List(fields.Integer(), missing=[]),
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Return relation-extraction mark-job data for the requested ids."""
        rows = MarkJobService().get_mark_job_data_by_ids(
            args["mark_job_ids"], args,
            doc_type_key="entity_doc_type", prefix='RE')
        payload = {
            "message": "请求成功",
            "result": rows,
            "count": len(rows),
        }
        return payload, 200
class MarkTaskSchema(Schema):  # type: ignore
    """Serializes a mark task with its doc, doc type and per-user tasks."""

    task_id = fields.Integer(attribute="mark_task_id")
    doc = fields.Nested(DocSchema)
    doc_type = fields.Nested({
        "doc_type_id": fields.Integer(),
        "doc_type_name": fields.String(),
        "doc_type_desc": fields.String(),
    })
    user_task_list = fields.List(fields.Nested(UserTaskSchema))
    # Numeric status rendered as its StatusEnum member name.
    task_state = fields.Function(
        lambda obj: StatusEnum(obj.mark_task_status).name)
    # True while the record is not soft-deleted.
    status = fields.Function(lambda obj: not obj.is_deleted)
    created_time = fields.String()
    task_result = fields.List(fields.Dict, attribute="mark_task_result")
class DocTypeSchema(Schema):
    """Serializes a doc type with its terms, relations and lexicon rules."""

    doc_terms = fields.List(fields.Integer())
    doc_term_list = fields.List(fields.Nested(DocTermSchema))
    doc_relation_list = fields.List(fields.Nested(EntityDocRelationSchema))
    # Lexicon rules live on the model as "doc_rules".
    doc_lexicon_list = fields.List(fields.Nested(WordsegDocLexiconSchema),
                                   attribute='doc_rules')
    doc_type_id = fields.Integer()
    doc_type_name = fields.String()
    doc_type_desc = fields.String()
    # API name "is_top" maps to the model's "is_favorite" flag.
    is_top = fields.Boolean(attribute="is_favorite")
    created_time = fields.DateTime()
    group_id = fields.Integer()
    # True while the record is not soft-deleted.
    status = fields.Function(lambda obj: not obj.is_deleted)
class DocTypeListResource(Resource, CurrentUserMixin):
    @parse({
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
        "is_online": fields.Integer()
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """List doc types visible to the current user for the routed NLP task."""
        args['nlp_task_id'] = Common().get_nlp_task_id_by_route()
        result, count = DocTypeService().get_doc_type(
            self.get_current_user(), args)
        payload = {
            "message": "请求成功",
            "result": result,
            "count": count,
        }
        return payload, 200

    @parse({
        "doc_type_name": fields.String(required=True),
        "doc_type_desc": fields.String(),
        "group_id": fields.Integer(default=-1),
        "doc_term_list": fields.List(fields.Nested({
            "doc_term_name": fields.String(required=True),
            "doc_term_alias": fields.String(default=""),
            "doc_term_shortcut": fields.String(default="",
                                               validate=lambda x: len(x) < 2),
            "doc_term_color": fields.String(required=True),
            "doc_term_index": fields.Integer(required=False),
            "doc_term_desc": fields.String(required=False, allow_none=True),
            "doc_term_data_type": fields.String(default=""),
        }), missing=[])
    })
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Create a doc type together with its term list."""
        args['nlp_task_id'] = Common().get_nlp_task_id_by_route()
        # The creator's first user group owns the new doc type.
        args['group_id'] = self.get_current_user().user_groups[0]
        created = DocTypeService().create_doc_type(self.get_current_user(), args)
        payload = {
            "message": "创建成功",
            "result": created,
        }
        return payload, 201
class EntityDocRelationItemResource(Resource):
    @parse({
        "doc_relation_name": fields.String(),
        "doc_term_ids": fields.List(fields.Integer(), required=True)
    })
    def patch(self: Resource, args: typing.Dict, doc_type_id: int,
              doc_relation_id: int) -> typing.Tuple[typing.Dict, int]:
        """Update one relation's name and its pair of doc terms.

        A relation must connect exactly two distinct doc terms; anything
        else is rejected with a 400.
        """
        doc_term_ids = list(set(args.pop("doc_term_ids", [])))
        if len(doc_term_ids) != 2:
            abort(400, message="文档条款不正确,请确保填写了正确的文档条款")
        # BUG fix: the original passed args.get("doc_term_ids", []) here, which
        # was always [] because the key was popped above — the validated ids
        # never reached the service. Pass the deduplicated list instead.
        result = DocTypeService().update_relation(
            doc_type_id, args.get("doc_relation_name"), doc_term_ids)
        return {
            "message": "更新成功",
            "result": result,
        }, 201

    def delete(self: Resource, doc_type_id: int,
               doc_relation_id: int) -> typing.Tuple[typing.Dict, int]:
        """Delete one relation by id (doc_type_id is part of the route only)."""
        DocTypeService().delete_relation(doc_relation_id)
        return {
            "message": "删除成功",
        }, 204
class ClassifyMarkJobImportResource(Resource):
    @parse({
        "mark_job_name": fields.String(required=True),
        "mark_job_type": fields.String(required=True),
        "mark_job_desc": fields.String(),
        "doc_type_id": fields.Integer(required=True),
        "files": fields.List(fields.File(), required=True),
    }, locations=('form', 'files'))
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Import pre-labeled classification data; csv uploads only."""
        uploaded = args['files']
        for item in uploaded:
            if get_ext(item.filename) not in ["csv"]:
                abort(400, message="已标注分类数据仅支持csv格式。")
        try:
            result = MarkJobService().import_mark_job(
                uploaded, args, nlp_task=NlpTaskEnum.classify)
        except UnicodeDecodeError:
            abort(400, message="文件编码错误 请上传utf-8编码文件")
        except KeyError:
            abort(400, message="文件格式不合规 请查看csv文件模版")
        return {"message": "创建成功", "result": result}, 201
class RelationMarkJobMultiDelete(Resource):
    @parse({"job_ids": fields.List(fields.Integer(), missing=[])})
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Bulk-delete relation mark jobs by id."""
        job_ids = args["job_ids"]
        MarkJobService().delete_mark_jobs(job_ids)
        return {"message": "批量删除成功", "result": args}, 200
class WordsegMarkJobImportResource(Resource):
    @parse(
        {
            "mark_job_name": fields.String(required=True),
            "mark_job_type": fields.String(required=True),
            "mark_job_desc": fields.String(),
            "doc_type_id": fields.Integer(required=True),
            "files": fields.List(fields.File(), required=True),
            "task_type": fields.String(required=True,
                                       validate=lambda x: x in ['machine', 'manual']),
        },
        locations=('form', 'files'))
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Import pre-labeled word-segmentation data; txt uploads only."""
        uploaded = args['files']
        # Reject the upload as soon as one file has the wrong extension.
        if any(get_ext(item.filename) not in ["txt"] for item in uploaded):
            abort(400, message="上传已标注分词数据仅支持txt格式。")
        result = MarkJobService().import_mark_job(
            uploaded, args, nlp_task=NlpTaskEnum.wordseg)
        return {"message": "创建成功", "result": result}, 201
class ExportJobSchema(Schema):
    """Serializes an export job, renaming model attributes for the API."""

    export_id = fields.Integer(attribute="export_job_id")
    file_path = fields.String(attribute="export_file_path")
    # nlp_task_id rendered as its NlpTaskEnum member name.
    mark_type = fields.Function(lambda obj: NlpTaskEnum(obj.nlp_task_id).name)  # nlp_task_id
    # Numeric status rendered as its StatusEnum member name.
    export_state = fields.Function(lambda obj: StatusEnum(obj.export_job_status).name)
    project_name = fields.String(attribute="doc_type_name")
    created_time = fields.String()
    mark_job_ids = fields.List(fields.Integer(), attribute="export_mark_job_ids")
class WordsegMarkJobMultiDelete(Resource):
    @parse({"job_ids": fields.List(fields.Integer(), missing=[])})
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Bulk-delete word-segmentation mark jobs by id."""
        # Consistency fix: delete_mark_jobs was invoked on the class
        # (MarkJobService.delete_mark_jobs), while the sibling resource
        # RelationMarkJobMultiDelete calls it on an instance. The instance
        # call works regardless of whether the method is static, class or
        # plain, and fixes a TypeError in the plain-method case.
        MarkJobService().delete_mark_jobs(args['job_ids'])
        return {
            "message": "删除成功",
        }, 200
class PredictJobSchema(Schema):
    """Serializes a predict job under the API's "extract_job_*" naming."""

    doc_type = fields.Nested(DocTypeSchema)
    task_list = fields.List(fields.Nested(PredictTaskSchema))
    extract_job_id = fields.Integer(attribute="predict_job_id")
    extract_job_name = fields.String(attribute="predict_job_name")
    # Dotted attribute path: the .value of the predict_job_type enum.
    extract_job_type = fields.String(attribute="predict_job_type.value")
    # Numeric status rendered as its StatusEnum member name.
    extract_job_state = fields.Function(lambda obj: StatusEnum(obj.predict_job_status).name)
    extract_job_desc = fields.String(attribute="predict_job_desc")
    is_batch = fields.Boolean()
    created_time = fields.DateTime()
class TrainJobSchema(Schema):  # type: ignore
    """Serializes a train job ("model" in the API) with its train tasks
    and latest evaluation."""

    model_id = fields.Integer(attribute='train_job_id')
    model_name = fields.String(attribute="train_job_name")
    model_desc = fields.String(attribute="train_job_desc")
    # True while the record is not soft-deleted.
    status = fields.Function(lambda obj: not obj.is_deleted)
    doc_type = fields.Nested(DocTypeSchema)
    created_time = fields.String()
    model_version = fields.String()
    train_list = fields.List(fields.Nested(TrainTaskSchema))
    model_evaluate = fields.Nested(EvaluateTaskSchema)
    preprocess = fields.Dict()
class DocTermSchema(Schema):
    """Serializes a doc term together with its lexicon rules."""

    # Lexicon rules live on the model as "doc_rules".
    doc_lexicon_list = fields.List(fields.Nested(WordsegDocLexiconSchema),
                                   attribute='doc_rules')
    doc_term_id = fields.Integer()
    doc_term_name = fields.String()
    doc_term_alias = fields.String()
    doc_term_index = fields.Integer()
    doc_term_color = fields.String()
    doc_term_desc = fields.String()
    doc_term_data_type = fields.String()
    doc_term_shortcut = fields.String()
class EvaluateTaskSchema(Schema):
    """Serializes an evaluate task under the API's "model_evaluate_*" naming."""

    model_evaluate_id = fields.Integer(attribute="evaluate_task_id")
    model_evaluate_name = fields.String(attribute="evaluate_task_name")
    model_evaluate_desc = fields.String(attribute="evaluate_task_desc")
    # Numeric status rendered as its StatusEnum member name.
    model_evaluate_state = fields.Function(
        lambda obj: StatusEnum(obj.evaluate_task_status).name)
    model_evaluate_result = fields.Dict(attribute="evaluate_task_result")
    model_id = fields.Integer(attribute="train_job_id")
    mark_job_ids = fields.List(fields.Integer())
    created_time = fields.DateTime()
    last_updated_time = fields.DateTime(attribute="updated_time")
class ModelListResource(Resource, CurrentUserMixin):
    @parse({
        "query": fields.String(missing=''),
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "doc_type_id": fields.Integer(missing=0),
        'order_by': fields.String(missing='-created_time'),
    })
    def get(self: Resource, args: Dict[str, Any]) -> Tuple[Dict[str, Any], int]:
        """Paged list of train jobs ("models") for the routed NLP task."""
        # Consistency fix: the rest of the file calls
        # get_nlp_task_id_by_route on a Common() instance; the class-level
        # call here fails if the method is a plain instance method. The
        # instance call is correct in every case.
        nlp_task_id = Common().get_nlp_task_id_by_route()
        count, train_job_list = ModelService(
        ).get_train_job_list_by_nlp_task_id(
            nlp_task_id=nlp_task_id,
            doc_type_id=args['doc_type_id'],
            search=args['query'],
            offset=args['offset'],
            limit=args['limit'],
            current_user=self.get_current_user())
        result = TrainJobSchema().dump(train_job_list, many=True)
        return {
            "message": "请求成功",
            "result": result,
            "count": count,
        }, 200

    @parse({
        "model_name": fields.String(required=True),
        "model_desc": fields.String(missing=""),
        "doc_type_id": fields.Integer(required=True),
        "model_train_config": fields.Raw(required=True
                                         ),  # algorithm_type = ('extract', 'ner', 'seg', 'pos')
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
    })
    def post(self: Resource,
             args: Dict[str, Any]) -> Tuple[Dict[str, Any], int]:
        """Create a train job ("model") for the given doc type."""
        train_job = ModelService().create_train_job_by_doc_type_id(
            doc_type_id=args["doc_type_id"],
            train_job_name=args["model_name"],
            train_job_desc=args["model_desc"],
            train_config=args["model_train_config"],
            mark_job_ids=args["mark_job_ids"])
        result = TrainJobSchema().dump(train_job)
        return {"message": "创建成功", "result": result}, 201
class MarkJobSchema(Schema):
    """Serializes a mark job with assignment info and progress stats."""

    mark_job_id = fields.Integer()
    mark_job_name = fields.String()
    mark_job_type = fields.Function(lambda obj: obj.mark_job_type.value)
    assign_mode = fields.Function(lambda obj: obj.assign_mode.value)
    # Numeric status rendered as its StatusEnum member name.
    mark_job_state = fields.Function(
        lambda obj: StatusEnum(obj.mark_job_status).name)
    mark_job_desc = fields.String()
    task_list = fields.List(fields.Integer())
    created_time = fields.DateTime()
    # First reviewer id, or 0 when no reviewer is assigned.
    assessor_id = fields.Function(lambda obj: obj.reviewer_ids[0]
                                  if len(obj.reviewer_ids) > 0 else 0)
    # Term list excluded to keep the payload small.
    doc_type = fields.Nested(DocTypeSchema, exclude=('doc_term_list', ))
    labeler_ids = fields.List(fields.Integer(), attribute='annotator_ids')
    stats = fields.Nested({
        "all": fields.Integer(),
        "labeled": fields.Integer(),
        "audited": fields.Integer()
    })
class DocListResource(Resource):
    @parse({
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "doc_ids": fields.List(fields.Integer(), missing=[]),  # ...?doc_ids=1&doc_ids=2
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
        "doc_term_ids": fields.List(fields.Integer(), missing=[]),
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Paged list of docs, filterable by doc/mark-job/doc-term ids."""
        docs, count = DocService().get_docs(args)
        serialized = DocSchema(many=True).dump(docs)
        payload = {
            "message": "请求成功",
            "result": serialized,
            "count": count,
        }
        return payload, 200
class UserTaskSchema(Schema):
    """Serializes a single annotator's task within a mark task."""

    doc = fields.Nested(DocSchema)
    doc_type = fields.Nested({
        "doc_type_id": fields.Integer(),
        "doc_type_name": fields.String(),
        "doc_type_desc": fields.String(),
    })
    task_id = fields.Integer(attribute="user_task_id")
    labeler_id = fields.Integer(attribute="annotator_id")
    # Parent mark task id, exposed as "manual_task_id".
    manual_task_id = fields.Integer(attribute="mark_task_id")
    task_result = fields.List(fields.Dict, attribute="user_task_result")
    # Numeric status rendered as its StatusEnum member name.
    task_state = fields.Function(
        lambda obj: StatusEnum(obj.user_task_status).name)
    # True while the record is not soft-deleted.
    status = fields.Function(lambda obj: not obj.is_deleted)
    created_time = fields.String()
class ClassifyModelListResource(Resource, CurrentUserMixin):
    @parse({
        "query": fields.String(missing=''),
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "doc_type_id": fields.Integer(missing=0),
        'order_by': fields.String(missing='-created_time'),
    })
    def get(self: Resource, args: Dict[str, Any]) -> Tuple[Dict[str, Any], int]:
        """Paged list of classification train jobs ("models")."""
        count, jobs = ModelService().get_train_job_list_by_nlp_task_id(
            nlp_task_id=int(NlpTaskEnum.classify),
            doc_type_id=args['doc_type_id'],
            search=args['query'],
            offset=args['offset'],
            limit=args['limit'],
            current_user=self.get_current_user())
        serialized = TrainJobSchema().dump(jobs, many=True)
        payload = {
            "message": "请求成功",
            "result": serialized,
            "count": count,
        }
        return payload, 200

    @parse({
        "model_name": fields.String(required=True),
        "model_desc": fields.String(missing=""),
        "doc_type_id": fields.Integer(required=True),
        "model_train_config": fields.Dict(required=True),
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
        "custom_id": fields.Integer(missing=0),
    })
    def post(self: Resource, args: Dict[str, Any]) -> Tuple[Dict[str, Any], int]:
        """Create a classification train job for the given doc type."""
        created = ModelService().create_classify_train_job_by_doc_type_id(
            doc_type_id=args["doc_type_id"],
            train_job_name=args["model_name"],
            train_job_desc=args["model_desc"],
            train_config=args["model_train_config"],
            mark_job_ids=args["mark_job_ids"],
            custom_id=args['custom_id'])
        serialized = TrainJobSchema().dump(created)
        return {"message": "创建成功", "result": serialized}, 201
class ClassifyMarkJobMultiExportResource(Resource):
    @parse({
        "job_ids": fields.List(fields.Integer(), missing=[])
    })
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Export several classification mark jobs into a single file and
        return its path; service validation errors become 400 responses."""
        try:
            exported_path = MarkJobService().export_multi_mark_file(
                nlp_task_id=int(NlpTaskEnum.classify),
                mark_job_id_list=args["job_ids"])
        except ValueError as e:
            abort(400, message=e.args[0])
        return {
            "message": "请求成功",
            "file_path": exported_path
        }, 200
class DocTypeItemResource(Resource, CurrentUserMixin):
    def get(self: Resource, doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """Fetch one doc type with its items."""
        result = DocTypeService().get_doc_type_items(doc_type_id)
        payload = {
            "message": "请求成功",
            "result": result,
        }
        return payload, 200

    @parse({
        "doc_type_name": fields.String(),
        "doc_type_desc": fields.String(),
        "doc_term_list": fields.List(fields.Nested({
            "doc_term_name": fields.String(required=True),
            "doc_term_alias": fields.String(default=""),
            "doc_term_color": fields.String(required=True),
            "doc_term_index": fields.Integer(required=False, allow_none=True),
            "doc_term_shortcut": fields.String(default=""),
            "doc_term_id": fields.Integer(required=False),
            "doc_term_desc": fields.String(required=False, allow_none=True),
            "doc_term_data_type": fields.String(default="String"),
        }))
    })
    def patch(self: Resource, args: typing.Dict,
              doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """Update one doc type (name/desc/term list)."""
        updated = DocTypeService().update_doc_type(args, doc_type_id)
        payload = {
            "message": "更新成功",
            "result": updated,
        }
        return payload, 201

    def delete(self: Resource, doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """Delete one doc type by id."""
        DocTypeService().delete_doc_type(doc_type_id)
        return {
            "message": "删除成功",
        }, 204
class GetDocTermListResource(Resource, CurrentUserMixin):
    @parse({
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "exclude_terms_ids": fields.List(fields.Integer(), missing=[]),
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Paged doc-term list for the routed NLP task, optionally excluding
        the ids in exclude_terms_ids."""
        args['nlp_task_id'] = Common().get_nlp_task_id_by_route()
        result, count = DocTermService().get_doc_term_list(args)
        payload = {
            "message": "请求成功",
            "result": result,
            "count": count,
        }
        return payload, 200
class ExtractMarkJobImportResource(Resource):
    @parse({
        "mark_job_name": fields.String(required=True),
        "mark_job_type": fields.String(required=True),
        "mark_job_desc": fields.String(),
        "doc_type_id": fields.Integer(required=True),
        "files": fields.List(fields.File(), required=True),
    }, locations=('form', 'files'))
    def post(self: Resource, args: typing.Dict):
        """Import pre-labeled sequence-labeling (NER) data; txt uploads only.

        The job is always created as a manual task.
        """
        uploaded = args['files']
        args['task_type'] = 'manual'
        # Reject the upload as soon as one file has the wrong extension.
        if any(get_ext(item.filename) not in ["txt"] for item in uploaded):
            abort(400, message="导入已标注序列标注数据仅支持txt格式。")
        result = MarkJobService().import_mark_job(
            uploaded, args, nlp_task=NlpTaskEnum.extract)
        return {"message": "创建成功", "result": result}, 201
class RelationDocTypeListResource(Resource, CurrentUserMixin):
    @parse({
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
        "mark_job_ids": fields.List(fields.Integer(), missing=[]),
        "is_online": fields.Integer()
    })
    def get(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """List doc types visible to the current user for the routed NLP task."""
        nlp_task_id = Common().get_nlp_task_id_by_route()
        args.update({
            'nlp_task_id': nlp_task_id
        })
        result, count = DocTypeService().get_doc_type(self.get_current_user(), args)
        return {
            "message": "请求成功",
            "result": result,
            "count": count,
        }, 200

    @parse({
        "doc_type_name": fields.String(required=True),
        "doc_type_desc": fields.String(),
    })
    def post(self: Resource, args: typing.Dict) -> typing.Tuple[typing.Dict, int]:
        """Create a relation doc type owned by the creator's first group."""
        args.update({"nlp_task_id": NlpTaskEnum.relation.value})
        args.update({"group_id": self.get_current_user().user_groups[0]})
        # Consistency fix: every other call site uses a DocTypeService()
        # instance; the class-level call here fails if
        # create_relation_doc_type is a plain instance method. The instance
        # call works in every case (plain, static or class method).
        result = DocTypeService().create_relation_doc_type(args)
        return {
            "message": "创建成功",
            "result": result,
        }, 201
class ClassifyDocTermListResource(Resource, CurrentUserMixin):
    @parse({
        "doc_term_ids": fields.List(fields.Integer(), missing=[]),
        "offset": fields.Integer(missing=0),
        "limit": fields.Integer(missing=10),
    })
    def get(self, args: typing.Dict, doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """Paged list of classify doc terms for a doc type, optionally
        filtered by doc_term_ids."""
        # BUG fix: the branches were inverted — when doc_term_ids was
        # supplied the filter was dropped, and when it was empty an empty
        # filter was passed. Pass the filter exactly when it is non-empty.
        if args.get('doc_term_ids'):
            result, count = DocTermService().get_doc_term_by_doctype(
                doc_type_id, args['offset'], args['limit'],
                doc_term_ids=args['doc_term_ids'])
        else:
            result, count = DocTermService().get_doc_term_by_doctype(
                doc_type_id, args['offset'], args['limit'])
        return {
            "message": "请求成功",
            "result": result,
            "count": count,
        }, 200

    @parse({
        "doc_term_name": fields.String(required=True),
        "doc_term_color": fields.String(required=True),
        "doc_term_index": fields.Integer(required=True),
        "doc_term_desc": fields.String(default=""),
        "doc_term_data_type": fields.String(required=True),
    })
    def post(self, args: typing.Dict, doc_type_id: int) -> typing.Tuple[typing.Dict, int]:
        """Create a classify doc term and refresh the rule cache in redis."""
        # BUG fix: 'doc_rule_list' is never produced by the parse schema
        # above, so the original bare args.pop('doc_rule_list') raised
        # KeyError on every request; default to an empty rule list.
        doc_rule_list = args.pop('doc_rule_list', [])
        result = DocTermService().create_classify_doc_term(args, doc_type_id,
                                                           doc_rule_list)
        DocTermService().update_rule_to_redis(doc_type_id)
        return {
            "message": "创建成功",
            "result": result,
        }, 201