Example #1
def patch(self: Resource, args: typing.Dict, model_id: int,
          model_evaluate_id: int) -> typing.Tuple[typing.Dict, int]:
    """
    Update a single evaluation record.
    """
    # Build the update payload from only the fields present in the request.
    update_params = {}
    if args.get("model_evaluate_state"):
        update_params.update(evaluate_task_status=status_str2int_mapper()[
            args["model_evaluate_state"]])
    if args.get("model_evaluate_result"):
        update_params.update(
            evaluate_task_result=args["model_evaluate_result"])
    if args.get("model_evaluate_name"):
        update_params.update(
            evaluate_task_name=args["model_evaluate_name"])
    if args.get("model_evaluate_desc"):
        update_params.update(
            evaluate_task_desc=args["model_evaluate_desc"])
    evaluate_task = ModelEvaluateService().update_evaluate_task_by_id(
        evaluate_task_id=model_evaluate_id, args=update_params)
    result = EvaluateTaskSchema().dump(evaluate_task)
    return {
        "message": "update succeeded",
        "result": result,
    }, 200
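status_str2int_mapper() is a project helper that these snippets call but do not define. From its usage it evidently returns a dict mapping the status strings accepted by the API onto the integer codes stored on the task row. A minimal sketch under that assumption (the status names and codes below are hypothetical, not the project's real mapping):

import typing

def status_str2int_mapper() -> typing.Dict[str, int]:
    # Hypothetical mapping: the real status names and codes live elsewhere
    # in the project; only the dict-returning shape is inferred from usage.
    return {
        "queueing": 1,
        "training": 2,
        "success": 3,
        "failed": 4,
    }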
Example #2
def get(self: Resource, args: typing.Dict, model_id: int,
        model_evaluate_id: int) -> typing.Tuple[typing.Dict, int]:
    """
    Get a single model evaluation record.
    """
    evaluate_task = ModelEvaluateService().get_evaluate_task_by_id(
        model_evaluate_id)
    result = EvaluateTaskSchema().dump(evaluate_task)
    return {
        "message": "request succeeded",
        "result": result,
    }, 200
Example #3
def put(self, args):
    """
    Update an evaluation record.
    """
    update_params = {}
    if args.get("model_evaluate_state"):
        update_params.update(evaluate_task_status=status_str2int_mapper()[
            args["model_evaluate_state"]])
    if args.get("model_evaluate_result"):
        update_params.update(
            evaluate_task_result=args["model_evaluate_result"])

    evaluate_task = ModelEvaluateService().update_evaluate_task_by_id(
        evaluate_task_id=args["model_evaluate_id"], args=update_params)
    result = EvaluateTaskSchema().dump(evaluate_task)
    return {
        "message": "update succeeded",
        "result": result,
    }, 201
Example #4
def post(
    self: Resource,
    args: typing.Dict,
    model_id: int,
) -> typing.Tuple[typing.Dict, int]:
    """
    Create an evaluation record.
    """
    evaluate_task = ModelEvaluateService(
    ).create_evaluate_task_by_train_job_id(
        train_job_id=model_id,
        evaluate_task_name=args["model_evaluate_name"],
        evaluate_task_desc=args["model_evaluate_desc"],
        mark_job_ids=args["mark_job_ids"],
        doc_term_ids=args["doc_term_ids"],
        doc_relation_ids=args["doc_relation_ids"],
        use_rule=args["use_rule"])
    result = EvaluateTaskSchema().dump(evaluate_task)
    return {"message": "created successfully", "result": result}, 201
Example #5
def get(self: Resource, args: typing.Dict,
        model_id: int) -> typing.Tuple[typing.Dict, int]:
    """
    Get model evaluation records, paginated.
    """
    # The first character of order_by encodes the sort direction
    # ("-" for descending); the field name itself follows it.
    order_by = args["order_by"][1:]
    order_by_desc = args["order_by"][0] == "-"
    count, evaluate_task_list = ModelEvaluateService(
    ).get_evaluate_task_list_by_train_job_id(train_job_id=model_id,
                                             order_by=order_by,
                                             order_by_desc=order_by_desc,
                                             offset=args["offset"],
                                             limit=args["limit"])
    result = EvaluateTaskSchema(many=True).dump(evaluate_task_list)
    return {
        "message": "request succeeded",
        "result": result,
        "count": count,
    }, 200
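Note the parsing convention above: the handler always strips the first character of order_by and treats it as a direction flag, so the client is expected to send the sort field with a sign prefix. A quick trace (the field name is hypothetical):

raw = "-created_time"
order_by = raw[1:]               # -> "created_time"
order_by_desc = raw[0] == "-"    # -> True
# An unprefixed value such as "created_time" would be truncated to
# "reated_time", so the prefix is effectively mandatory.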
Example #6
def get(self, args):
    """
    Look up the latest online model info for the given extraction doc type.
    """
    data = ModelService().get_online_model_info_by_doc_type_id(
        doc_type_id=args["doc_type_id"],
        current_user=self.get_current_user())
    if not data:
        abort(400, message="no data found")
    # unpack the query result
    train_task, evaluate_task, train_job, doc_type = data
    result = {
        "doc_type": DocTypeSchema().dump(doc_type),
        "model": TrainJobSchema().dump(train_job),
        "train": TrainTaskSchema().dump(train_task),
        "evaluate": EvaluateTaskSchema().dump(evaluate_task),
    }
    return {
        "message": "request succeeded",
        "result": result,
    }, 200
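The handlers above are methods on flask_restful Resource classes; the extra args parameter suggests a request-parsing decorator (e.g. webargs' use_args) is applied in the project, though that wiring is not shown here. A minimal sketch of how such resources are typically registered (the class names, stub bodies, and routes are assumptions):

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class ModelEvaluateListResource(Resource):  # hypothetical name
    def get(self, model_id):
        ...  # list/paginate handler, as in Example #5

class ModelEvaluateItemResource(Resource):  # hypothetical name
    def get(self, model_id, model_evaluate_id):
        ...  # single-record handler, as in Example #2

api.add_resource(ModelEvaluateListResource,
                 "/models/<int:model_id>/evaluates")
api.add_resource(ModelEvaluateItemResource,
                 "/models/<int:model_id>/evaluates/<int:model_evaluate_id>")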
Example #7
def get_doc_type_info_by_nlp_task_by_user(nlp_task_id, current_user: CurrentUser):
    """
    Get the doc_type info for the management-hall home page.
    """
    result = []
    # get doc_type list by user
    _, doc_type_list = DocTypeModel().get_by_nlp_task_id_by_user(
        nlp_task_id=nlp_task_id, current_user=current_user)
    for doc_type, terms in doc_type_list:
        doc_type.doc_terms = [int(t) for t in terms.split(",")] if terms is not None else []
    doc_type_list = [d[0] for d in doc_type_list]
    doc_type_list = [{"doc_type": DocTypeSchema().dump(doc_type)} for doc_type in doc_type_list]

    # get all job count and approved job count
    all_status, all_marked_status = MarkTaskModel().count_status_by_user(
        nlp_task_id=nlp_task_id, current_user=current_user)

    # calculate marked mark_job count and total mark_job count for each doc_type
    all_status_dict = Common().tuple_list2dict(all_status)
    all_marked_status_dict = Common().tuple_list2dict(all_marked_status)

    for doc_type in doc_type_list:
        doc_type_id = doc_type["doc_type"]["doc_type_id"]
        mark_job_count = len(all_status_dict.get(doc_type_id, {}))
        # a mark job counts as labeled when all of its tasks are approved
        marked_mark_job_count = 0
        for _mark_job_id, _count_sum in all_status_dict.get(doc_type_id, {}).items():
            if _count_sum == all_marked_status_dict.get(doc_type_id, {}).get(_mark_job_id, 0):
                marked_mark_job_count += 1
        doc_type.update(progress_state={"job_num": mark_job_count,
                                        "labeled_job_number": marked_mark_job_count,
                                        "progress_rate": round(marked_mark_job_count / mark_job_count, 2) if mark_job_count > 0 else 0})

        # get latest evaluation result if exists
        latest_evaluate = EvaluateTaskModel().get_latest_evaluate_by_doc_type_id(
            nlp_task_id=nlp_task_id, doc_type_id=doc_type_id)
        if latest_evaluate:
            doc_type.update(evaluate=EvaluateTaskSchema().dump(latest_evaluate))
        result.append(doc_type)
    return result
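Common().tuple_list2dict is another project helper that is not shown. Judging from how its result is consumed above (two levels of .get(), yielding a per-mark-job count), it appears to fold query rows into a nested dict keyed first by doc_type, then by mark_job. A plausible sketch, with the exact tuple layout being an assumption:

def tuple_list2dict(rows):
    # Hypothetical shape: [(doc_type_id, mark_job_id, count), ...]
    # becomes {doc_type_id: {mark_job_id: count}}.
    nested = {}
    for doc_type_id, mark_job_id, count in rows:
        nested.setdefault(doc_type_id, {})[mark_job_id] = count
    return nested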