Example #1
def nlp_generate_actionname():
    if request.method == 'POST':
        global server_ip_req
        global flag_server1_train
        global flag_server2_train
        global flag_timer
        global R1
        global R2
        if server_ip_req == server_ip_arr[1]:
            rep = server_process.server_jump(request, request.method,
                                             server_ip_req, port[0])
            rep = jsonify(rep)
            return rep
        elif server_ip_req == server_ip_arr[0]:
            req_d = request.json
            bId = req_d["botId"]
            action_flag = req_d["action_flag"]
            if not bId == botId:
                return rep_body.rep_common(201, {})
            if action_flag == "n":
                return rep_body.rep_common(200, {})
            elif action_flag == "y":
                # Path to the domain file
                basePath = os.path.abspath(
                    os.path.dirname(os.path.dirname(__file__)))
                domainPath = basePath + slash + "training" + slash + "domain.yml"
                # Generate the custom action names
                dic_actionname = action_name_generate.action_name_generate(
                    domainPath)

                return rep_body.rep_common(200, dic_actionname)

    return rep_body.rep_common(201, {})
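
Every handler in these examples wraps its result with rep_body.rep_common(code, data), a helper that is not shown. A minimal sketch of what it might look like, assuming it simply packs a status code and a payload into a JSON body (the "code" field matches how the training handlers later read rasa_rep["code"]; the "data" field name is an assumption):

# Hypothetical sketch of rep_body.rep_common -- the "data" field name is an assumption.
from flask import jsonify


def rep_common(code, data):
    # Wrap a status code and an endpoint-specific payload into one JSON body.
    # In the handlers above, 200 marks success and 201 marks failure.
    return jsonify({"code": code, "data": data})
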
Example #2
def task_core_server_start():
    if request.method == 'POST' and 'timestr' in request.json:
        timestr = request.json['timestr']
        # Absolute path to the .sh script
        path = "/rasa/zndhjqr_nlp/rasa_shell/startTaskCoreServer.sh"
        os.popen(path + " " + timestr)
        return rep_body.rep_common(200, {})
    return rep_body.rep_common(201, {})
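
os.popen fires the script and returns immediately without exposing its exit status. If the caller also needs to wait for the script and check whether it succeeded, a subprocess-based variant could be used instead; this is a sketch under that assumption, not the original code, reusing the script path from the handler above:

# Sketch: run the start script via subprocess so the exit status is visible.
import subprocess


def start_task_core_server(timestr):
    proc = subprocess.run(
        ["/rasa/zndhjqr_nlp/rasa_shell/startTaskCoreServer.sh", timestr],
        capture_output=True, text=True)
    # A non-zero return code means the script reported a failure.
    return proc.returncode == 0
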
Example #3
def axiosT():
    if request.method == 'GET':
        # Response body
        dictData = {
            "msg": "并发1成功"  # "concurrency test 1 succeeded"
        }
        return rep_body.rep_common(200, dictData)

    return rep_body.rep_common(201, {})
Example #4
def sd():
    if request.method == 'POST':
        if request.json["sd"] == 1:
            # Response body
            dictData = {
                "msg": "post请求成功"  # "POST request succeeded"
            }
            return rep_body.rep_common(200, dictData)

    return rep_body.rep_common(201, {})
Example #5
def po():
    if request.method == 'POST':
        if request.json["a"] == 1:
            # Response body
            dictData = {
                "msg": "并发2成功"  # "concurrency test 2 succeeded"
            }
            return rep_body.rep_common(200, dictData)

    return rep_body.rep_common(201, {})
Example #6
def rasa_server_stop():
    if request.method == 'GET':
        # Absolute path to the .sh script
        path = "/rasa/zndhjqr_nlp/rasa_shell/stopRasaServer.sh"
        try:
            os.popen(path)
        except Exception as e:
            print(e)
            return rep_body.rep_common(201, {})
        return rep_body.rep_common(200, {})
    return rep_body.rep_common(201, {})
Example #7
def iT():
    if request.method == 'GET':
        # Response body
        dictData = {
            "token": "abc"
        }
        return jsonify(dictData)

    return rep_body.rep_common(201, {})
Example #8
def nlp_text_anyq():
    if request.method == "POST":
        try:
            if request.form:
                params = request.form.to_dict()
                visitor_id = params["visitorId"]
                bot_id = int(params["botId"])
                user_type = params["userType"]
            else:
                params = request.json
                visitor_id = params["visitorId"]
                bot_id = int(params["botId"])
                user_type = params["userType"]
            if type(visitor_id) == str:
                visitor_id = int(visitor_id.replace("voice", ""))

            input_text = params["inputText"]
            api_key = params["apiKey"]

            if api_key == apiKey and bot_id == 1:
                return "中间件之间测试成功了!"  # "the middleware-to-middleware test succeeded!"
                # # AnyQ single-turn QA result
                # anyq_url = anyq_request_url + "?question=" + input_text
                # # print("anyq_url:" + anyq_url)
                # anyq_response = requests.get(anyq_url)
                # if anyq_response.content:
                #     anyq_service_response = anyq_response.json()
                #     # print("anyq_service_response:" % anyq_service_response)
                #
                #     # QA bot confidence
                #     if len(anyq_service_response) != 0:
                #         bot_confidence1 = anyq_service_response[0]["confidence"]
                #     else:
                #         bot_confidence1 = 0
                #
                #     # QA bot reply
                #     # round keeps the given number of decimal places, e.g. round(3.1415, 2) -> 3.14
                #     anyq_answer = anyq_service_response[0]["answer"]
                #     # print("ananyq_answer:" % anyq_answer)
                #     bot_type = 1
                #     askfor_data = {
                #         "botId": bot_id,
                #         "botType": bot_type,
                #         "replyText": [anyq_answer],
                #         "confidence": bot_confidence1,
                #         "notHeard": "N",
                #         "unexpectCount": unexpect_count
                #     }
                #     return rep_body.rep_common(200, askfor_data)
        except Exception as e:
            return rep_body.rep_common(201, {"err_msg": str(e)})
    return rep_body.rep_common(201, {})
Example #9
def file_process(fileList):
    # Get the parent directory of the directory containing this file
    basePath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    counter = 0
    try:
        while counter <= 2:
            f = fileList["file" + "[" + str(counter) + "]"]
            # Restrict to POST requests with a non-empty file list of the QA type
            if allowed_file(f.filename):
                # Build the file storage path
                filePath = basePath + slash + "training" + slash + f.filename
                # Print the path
                print(filePath)
                # Save the file to that path
                f.save(filePath)
            counter += 1
    except Exception:
        return rep_body.rep_common(201, {})
    # Return the project root directory
    return basePath + slash
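
file_process depends on an allowed_file helper that is not shown in these examples. A minimal sketch, assuming the usual Flask upload pattern of whitelisting file extensions (the extension set below is an assumption, not taken from the original project):

# Hypothetical allowed_file helper -- the extension whitelist is an assumption.
ALLOWED_EXTENSIONS = {"yml", "yaml", "md", "json"}


def allowed_file(filename):
    # Accept only files whose extension appears in the whitelist.
    return "." in filename and \
        filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
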
Example #10
def nlp_task_ed():
    if request.method == 'POST' and request.files and request.form["d"]:
        global server_ip_req
        global flag_server1_train
        global flag_server2_train
        global flag_timer
        global R1
        global R2
        if flag_timer:
            return rep_body.rep_common(200, {"status": "正在训练中"})  # "training in progress"
        else:
            if server_ip_req == server_ip_arr[1]:
                flag_server2_train = "y"
                rep = server_process.server_jump(request, request.method,
                                                 server_ip_req, port[0])
                flag_server2_train = "n"
                rep = jsonify(rep)
                flag_timer = True
                time.sleep(3600)
                R1.clear()
                flag_timer = False
                return rep
            elif server_ip_req == server_ip_arr[0]:
                flag_server1_train = "y"
                fqList = request.files
                d = json.loads(request.form["d"])
                bot_id = d["botId"]
                # Validate the botId value
                if not bot_id == botId:
                    return rep_body.rep_common(201, {})
                # Process the training data
                path_prefix = file_process(fqList)
                # Generate a timestamp
                time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
                # Directory paths for the nlu and core models
                nlu_path = path_prefix + "now_models" + slash + "nlu"
                core_path = path_prefix + "now_models" + slash + "core"
                # Train on the training server
                # 1. Stop the rasa services
                req_host = server_ip_req + ":" + port[1]
                req_url1 = req_host + "/rasa_server_stop"
                rasa_rep = requests.get(req_url1).json()
                if rasa_rep["code"] == 200:
                    # Delete the nlu and core models under the running-model directory
                    if os.path.exists(nlu_path) and os.path.exists(core_path):
                        shutil.rmtree(nlu_path)
                        shutil.rmtree(core_path)
                        # Recreate the running-model directories
                        os.mkdir(nlu_path)
                        os.mkdir(nlu_path + slash + "qa")
                        os.mkdir(nlu_path + slash + "task")
                        os.mkdir(core_path)
                # Train the nlu and core models
                task_model_path = bot_train.train_task_nlu(
                    path_prefix, slash, time_str)
                core_model_path = bot_train.train_core(path_prefix, slash,
                                                       time_str)
                # Swap 'old' for 'now' in the model paths
                now_task_path = task_model_path.replace('old', 'now')
                now_core_path = core_model_path.replace('old', 'now')
                # Copy the trained models (nlu and core) into the running-model directory
                shutil.copytree(task_model_path, now_task_path)
                shutil.copytree(core_model_path, now_core_path)

                # If a custom action is defined
                if any(d["action"]):
                    # Get the custom action dict
                    dic_action = d["action"]
                    # The following fields all describe the custom action
                    method = dic_action["method"]  # request method
                    action_name = dic_action["action_name"]  # action name
                    action_url = dic_action["url"]  # request URL
                    action_data = dic_action["data"]  # request data
                    action_rule = dic_action["rule"]  # response template

                    # Parameters supplied by the backend
                    headers = {'Content-Type': 'application/json'}  # headers
                    mid_url = "http://" + ""  # middleware URL
                    path = "../action.py"  # path of the action.py file to write

                    generate_action_script.new_action(method, action_name,
                                                      action_url, action_data,
                                                      action_rule, headers,
                                                      mid_url, path)
                # 3. Start the rasa-related services
                # nlu service
                req_url2 = req_host + "/nlu_server_start"
                rasa_nlu_rep = requests.get(req_url2).json()
                # qa service
                rasa_qa_data = {"timestr": time_str}
                h = {"Content-Type": "application/json"}
                req_url3 = req_host + "/qa_core_server_start"
                rasa_qa_rep = requests.post(req_url3,
                                            data=json.dumps(rasa_qa_data),
                                            headers=h).json()
                # task service
                rasa_qa_data = {"timestr": time_str}
                h = {"Content-Type": "application/json"}
                req_url4 = req_host + "/task_core_server_start"
                rasa_task_rep = requests.post(req_url4,
                                              data=json.dumps(rasa_qa_data),
                                              headers=h).json()
                # action service
                req_url5 = req_host + "/rasa_server_sdk"
                rasa_action_rep = requests.get(req_url5).json()
                if rasa_nlu_rep["code"] == 200 and rasa_qa_rep["code"] == 200 and rasa_task_rep["code"] == 200 and \
                        rasa_action_rep["code"] == 200:
                    # Load the new models into memory
                    nlu_service_params1 = {
                        "q": "",
                        "project": "qa",
                        "model": "qa_" + time_str
                    }
                    requests.post(nlu_service_url1,
                                  data=json.dumps(nlu_service_params1)).json()
                    nlu_service_params2 = {
                        "q": "",
                        "project": "task",
                        "model": "task_" + time_str
                    }
                    requests.post(nlu_service_url1,
                                  data=json.dumps(nlu_service_params2)).json()
                    flag_server1_train = "n"
                    flag_timer = True
                    time.sleep(3600)
                    R2.clear()
                    flag_timer = False
                    return rep_body.rep_common(200, {})

    return rep_body.rep_common(201, {})
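
For reference, the handler above expects a multipart request that carries the training files under the keys file[0] through file[2] (see file_process) plus a JSON string in the d form field. A hedged client-side sketch; the host, route, and file names are placeholders, only the field names come from the handler:

# Sketch of a client call to the training endpoint. Host, route, and file
# names are placeholders; only the field names come from the handler above.
import json
import requests

files = {
    "file[0]": open("nlu.md", "rb"),
    "file[1]": open("stories.md", "rb"),
    "file[2]": open("domain.yml", "rb"),
}
d = {"botId": 1, "action": {}}  # "action" may carry a custom-action definition
resp = requests.post("http://127.0.0.1:5000/nlp_task_ed",
                     files=files, data={"d": json.dumps(d)})
print(resp.json())
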
Example #11
def nlp_qa_ed():
    if request.method == 'POST' and request.files and request.form["d"]:
        global server_ip_req
        global flag_server1_train
        global flag_server2_train
        global flag_timer
        global R1
        global R2
        if flag_timer:
            return rep_body.rep_common(200, {"status": "正在训练中"})  # "training in progress"
        else:
            if server_ip_req == server_ip_arr[1]:
                flag_server2_train = "y"
                rep = server_process.server_jump(request, request.method,
                                                 server_ip_req, port[0])
                flag_server2_train = "n"
                rep = jsonify(rep)
                flag_timer = True
                time.sleep(3600)
                R1.clear()
                flag_timer = False
                return rep
            elif server_ip_req == server_ip_arr[0]:
                # Set the training flag to "y"
                flag_server1_train = "y"
                fqList = request.files
                d = json.loads(request.form["d"])
                bot_id = d["botId"]
                # Validate the botId value
                if not bot_id == botId:
                    return rep_body.rep_common(201, {})
                # Process the training data
                path_prefix = file_process(fqList)
                # Generate a timestamp
                time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
                # Directory paths for the nlu and core models
                nlu_path = path_prefix + "now_models" + slash + "nlu"
                core_path = path_prefix + "now_models" + slash + "core"
                # Train on the training server
                # 1. Stop the rasa services
                req_host = server_ip_req + ":" + port[1]
                req_url1 = req_host + "/rasa_server_stop"
                rasa_rep = requests.get(req_url1).json()
                if rasa_rep["code"] == 200:
                    # Delete the nlu and core models under the running-model directory
                    if os.path.exists(nlu_path) and os.path.exists(core_path):
                        shutil.rmtree(nlu_path)
                        shutil.rmtree(core_path)
                        # Recreate the running-model directories
                        os.mkdir(nlu_path)
                        os.mkdir(nlu_path + slash + "qa")
                        os.mkdir(nlu_path + slash + "task")
                        os.mkdir(core_path)
                    # 2. Train the nlu and core models
                    qa_model_path = bot_train.train_qa_nlu(
                        path_prefix, slash, time_str)
                    core_model_path = bot_train.train_core(
                        path_prefix, slash, time_str)
                    # Swap 'old' for 'now' in the model paths
                    now_qa_path = qa_model_path.replace('old', 'now')
                    now_core_path = core_model_path.replace('old', 'now')
                    # Copy the trained models (nlu and core) into the running-model directory
                    shutil.copytree(qa_model_path, now_qa_path)
                    shutil.copytree(core_model_path, now_core_path)
                    # 3. Start the rasa-related services
                    # nlu service
                    req_url2 = req_host + "/nlu_server_start"
                    rasa_nlu_rep = requests.get(req_url2).json()
                    # qa service
                    rasa_qa_data = {"timestr": time_str}
                    h = {"Content-Type": "application/json"}
                    req_url3 = req_host + "/qa_core_server_start"
                    rasa_qa_rep = requests.post(req_url3,
                                                data=json.dumps(rasa_qa_data),
                                                headers=h).json()
                    # task service
                    rasa_qa_data = {"timestr": time_str}
                    h = {"Content-Type": "application/json"}
                    req_url4 = req_host + "/task_core_server_start"
                    rasa_task_rep = requests.post(
                        req_url4, data=json.dumps(rasa_qa_data),
                        headers=h).json()
                    # action service
                    req_url5 = req_host + "/rasa_server_sdk"
                    rasa_action_rep = requests.get(req_url5).json()
                    if rasa_nlu_rep["code"] == 200 and rasa_qa_rep["code"] == 200 and rasa_task_rep["code"] == 200 and \
                            rasa_action_rep["code"] == 200:
                        # Load the new models into memory
                        nlu_service_params1 = {
                            "q": "",
                            "project": "qa",
                            "model": "qa_" + time_str
                        }
                        requests.post(
                            nlu_service_url1,
                            data=json.dumps(nlu_service_params1)).json()
                        nlu_service_params2 = {
                            "q": "",
                            "project": "task",
                            "model": "task_" + time_str
                        }
                        requests.post(
                            nlu_service_url1,
                            data=json.dumps(nlu_service_params2)).json()
                        flag_server1_train = "n"
                        flag_timer = True
                        time.sleep(3600)
                        R2.clear()
                        flag_timer = False
                        return rep_body.rep_common(200, {})
    return rep_body.rep_common(201, {})
Example #12
def nlp_text_task():
    if request.method == 'POST':
        try:
            # Variable declarations
            local_useType = "0"
            botType = 0
            core_service_url = ''
            global unexpect_count
            if request.form:
                print("form对象")
                params = request.form.to_dict()
                print(params)
                visitorId = params['visitorId']
                botId = int(params['botId'])
                useType = params['useType']
                local_useType = useType
            else:
                print("json对象")
                params = request.json
                visitorId = params['visitorId']
                botId = params['botId']
                useType = params['useType']
                local_useType = useType

            print(params)
            inputText = params['inputText']
            print(inputText)
            api_key = params['apiKey']
            # Validate the parameters
            if api_key == apiKey and botId == 1:
                # Get the latest qa and task model names
                basePath = os.path.abspath(
                    os.path.dirname(os.path.dirname(__file__)))
                qa_path = basePath + slash + "now_models" + slash + "nlu" + slash + "qa"
                qa_model_num = 0
                for index, filename in enumerate(os.listdir(qa_path)):
                    if index != 0:
                        temp_filename_num = int(filename.replace("qa_", ""))
                        if temp_filename_num >= qa_model_num:
                            qa_model_num = temp_filename_num
                    else:
                        qa_model_num = int(filename.replace("qa_", ""))

                task_path = basePath + slash + "now_models" + slash + "nlu" + slash + "task"
                task_model_num = 0
                for index, filename in enumerate(os.listdir(task_path)):
                    if index != 0:
                        temp_filename_num = int(filename.replace("task_", ""))
                        if temp_filename_num >= task_model_num:
                            task_model_num = temp_filename_num
                    else:
                        task_model_num = int(filename.replace("task_", ""))

                # Note: uncomment after the qa service is added
                qa_model_name = "qa_" + str(qa_model_num)
                nlu_service_params1 = {
                    "q": inputText,
                    "project": "qa",
                    "model": qa_model_name
                }
                # nlu_service_response1 = requests.post(nlu_service_url1, data=json.dumps(nlu_service_params1)).json()
                nlu_service_response1 = requests.post(
                    test_server_ip,
                    data=json.dumps(nlu_service_params1)).json()
                task_model_name = "task_" + str(task_model_num)
                nlu_service_params2 = {
                    "q": inputText,
                    "project": "task",
                    "model": task_model_name
                }
                # nlu_service_response2 = requests.post(nlu_service_url1, data=json.dumps(nlu_service_params2)).json()
                nlu_service_response2 = requests.post(
                    test_server_ip,
                    data=json.dumps(nlu_service_params2)).json()

                print('nlu_service_response1: %s' % nlu_service_response1)
                print('nlu_service_response2: %s' % nlu_service_response2)

                # Confidence scores of the QA bot and the task bot
                bot_confidence1 = nlu_service_response1["intent"]["confidence"]
                bot_confidence2 = nlu_service_response2["intent"]["confidence"]

                print("qa_intent %s" % nlu_service_response1["intent"])
                print('task_intent %s' % nlu_service_response2["intent"])
                if (bot_confidence1 > bot_confidence2 and
                    len(nlu_service_response1["entities"]) == 0 and
                    nlu_service_response2["intent"]['name'] == "task_askfor_again" and
                    local_useType == "2") or \
                        (bot_confidence1 < bot_confidence2 and
                         nlu_service_response2["intent"]['name'] == "task_askfor_again" and
                         local_useType == "2"):
                    askfor_data = {
                        "botId": botId,
                        "botType": 2,
                        "replyText": [],
                        "confidence": 0,
                        "notHeard": "T",
                        "unexpectCount": unexpect_count
                    }
                    return rep_body.rep_common(200, askfor_data)

                # Use the QA bot's confidence
                # qa confidence > task confidence, and the qa result has at least one entity
                elif bot_confidence1 > bot_confidence2 and \
                        len(nlu_service_response1["entities"]) != 0:
                    # core_service_url = qa_service_url
                    core_service_url = test_qa_core_ip
                    bot_confidence = bot_confidence1
                    botType = 1

                # Use the task bot's confidence
                # 1. qa confidence > task confidence, the qa result has no entities, and the intent is not task_askfor_again
                # 2. qa confidence <= task confidence and the intent is not task_askfor_again
                elif (bot_confidence1 > bot_confidence2 and
                      len(nlu_service_response1["entities"]) == 0 and
                      nlu_service_response2["intent"]['name'] != "task_askfor_again") or \
                        (bot_confidence1 <= bot_confidence2 and
                         nlu_service_response2["intent"]['name'] != "task_askfor_again"):
                    # core_service_url = task_service_url
                    core_service_url = test_task_core_ip
                    bot_confidence = bot_confidence2
                    botType = 2

                # If the test window detects that the user did not hear clearly, treat it as unrecognized
                elif (bot_confidence1 > bot_confidence2 and
                      len(nlu_service_response1["entities"]) == 0 and
                      nlu_service_response2["intent"]['name'] == "task_askfor_again" and
                      local_useType == "1") or \
                        (bot_confidence1 < bot_confidence2 and
                         nlu_service_response2["intent"]['name'] == "task_askfor_again" and
                         local_useType == "1"):
                    unexpect_count += 1
                    test_list = []
                    test_list.append(
                        random_reply_generate.unrecognized_reply())
                    askfor_data = {
                        "botId": botId,
                        "botType": botType,
                        "replyText": test_list,
                        "confidence": random_pro_generate.random_generate(),
                        "notHeard": "N",
                        "unexpectCount": unexpect_count
                    }
                    if unexpect_count == 2:
                        unexpect_count = 0
                    return rep_body.rep_common(200, askfor_data)

                # 2. Send the utterance to the core service matching the chosen nlu
                core_service_params = {
                    "sender": visitorId,
                    "message": inputText
                }
                core_service_headers = {'content-type': 'application/json'}
                core_service_response = requests.post(
                    core_service_url,
                    data=json.dumps(core_service_params),
                    headers=core_service_headers).json()
                print(core_service_response)

                # 3. Record fallback-action triggers
                # Iterate over the core_service_response list and collect the reply strings
                linklist = []
                if len(core_service_response) == 0:
                    unexpect_count += 1
                    # Response body
                    dictData = {
                        "botId": botId,
                        "botType": botType,
                        "replyText":
                        random_reply_generate.unrecognized_reply(),
                        "confidence": random_pro_generate.random_generate(),
                        "notHeard": "N",
                        "unexpectCount": unexpect_count
                    }
                    if unexpect_count == 2:
                        unexpect_count = 0
                    return rep_body.rep_common(200, dictData)

                for j in core_service_response:
                    linklist.append(j["text"])

                replyText = linklist
                print(replyText)
                print(local_useType)
                # Test window
                if local_useType == "1":
                    if replyText[0] == '我还比较小' or replyText[0] == '':
                        unexpect_count += 1
                        bot_confidence = random_pro_generate.random_generate()
                        # Response body
                        dictData = {
                            "botId": botId,
                            "botType": botType,
                            "replyText": [],
                            "confidence": bot_confidence,
                            "notHeard": "N",
                            "unexpectCount": unexpect_count
                        }
                        if unexpect_count == 2:
                            unexpect_count = 0
                        return rep_body.rep_common(200, dictData)
                    else:
                        # Response body
                        dictData = {
                            "botId": botId,
                            "botType": botType,
                            "replyText": replyText,
                            "confidence": bot_confidence,
                            "notHeard": "N",
                            "unexpectCount": unexpect_count
                        }
                        return rep_body.rep_common(200, dictData)
                # ccone
                elif local_useType == "2":
                    if replyText[0] == '我还比较小' or replyText[0] == '':
                        unexpect_count += 1
                        replyText = []
                        replyText.append(
                            random_reply_generate.unrecognized_reply())
                        replyText.append(
                            random_reply_generate.morehelp_reply())
                        bot_confidence = random_pro_generate.random_generate()
                        # Response body
                        dictData = {
                            "botId": botId,
                            "botType": botType,
                            "replyText": replyText,
                            "confidence": bot_confidence,
                            "notHeard": "F",
                            "unexpectCount": unexpect_count
                        }

                        if unexpect_count == 2:
                            unexpect_count = 0
                        return rep_body.rep_common(200, dictData)
                    else:
                        # Response body
                        dictData = {
                            "botId": botId,
                            "botType": botType,
                            "replyText": replyText,
                            "confidence": bot_confidence,
                            "notHeard": "F",
                            "unexpectCount": unexpect_count
                        }
                        return rep_body.rep_common(200, dictData)
        except Exception as e:
            print(e)
        return rep_body.rep_common(201, {})
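
The two text endpoints (nlp_text_task above and nlp_text_re below) accept either a form or a JSON body with the same fields: visitorId, botId, useType, inputText, and apiKey. A hedged client-side sketch; the host, route, and apiKey value are placeholders, only the field names come from the handlers:

# Sketch of a client call to the text endpoint. Host, route, and the apiKey
# value are placeholders; the field names are taken from the handler above.
import requests

payload = {
    "visitorId": "voice123",  # nlp_text_re strips the "voice" prefix
    "botId": 1,
    "useType": "1",           # "1" = test window, "2" = voice channel
    "inputText": "你好",
    "apiKey": "your-api-key"
}
resp = requests.post("http://127.0.0.1:5000/nlp_text_task", json=payload)
print(resp.json())
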
Example #13
def nlp_text_re():
    if request.method == 'POST':
        try:
            # Variable declarations
            bot_type = 0
            core_service_url = ''
            global unexpect_count
            if request.form:
                # print("form对象")
                params = request.form.to_dict()
                # print("form对象的params:" % params)
                visitor_id = params['visitorId']
                bot_id = int(params['botId'])
                use_type = params['useType']
            else:
                # print("json对象")
                params = request.json
                # print("json对象的params:" % params)
                visitor_id = params['visitorId']
                bot_id = params['botId']
                use_type = params['useType']
            if type(visitor_id) == str:
                visitor_id = int(visitor_id.replace("voice", ""))
            # print("解析出的对象:" % params)
            input_text = params['inputText']
            # print("input_text" + input_text)
            api_key = params['apiKey']
            # Validate the parameters
            if api_key == apiKey and bot_id == 1:
                # Get the latest qa and task model names
                base_path = os.path.abspath(
                    os.path.dirname(os.path.dirname(__file__)))
                task_path = base_path + slash + "now_models" + slash + "nlu" + slash + "task"
                task_model_num = 0
                for index, filename in enumerate(os.listdir(task_path)):
                    if index != 0:
                        temp_filename_num = int(filename.replace("task_", ""))
                        if temp_filename_num >= task_model_num:
                            task_model_num = temp_filename_num
                    else:
                        task_model_num = int(filename.replace("task_", ""))

                # AnyQ single-turn QA result
                anyq_url = anyq_request_url + "?question=" + input_text
                # print("anyq_url:" + anyq_url)
                anyq_response = requests.get(anyq_url)
                if anyq_response.content:
                    anyq_service_response = anyq_response.json()
                    # global anyq_service_response  # zhang: declare anyq_service_response as a global variable, added 2019/10/23 10:00
                    # print("anyq_service_response:" % anyq_service_response)

                task_model_name = "task_" + str(task_model_num)
                nlu_service_params2 = {
                    "q": input_text,
                    "project": "task",
                    "model": task_model_name
                }
                # print("nlu_request_url:" + nlu_request_url)
                # print("nlu_service_params2:" % nlu_service_params2)
                nlu_service_response2 = requests.post(
                    nlu_request_url,
                    data=json.dumps(nlu_service_params2)).json()

                # print('nlu_service_response2: %s' % nlu_service_response2)

                # QA bot confidence
                if anyq_response.content:

                    if len(anyq_service_response) != 0:
                        bot_confidence1 = anyq_service_response[0][
                            "confidence"]
                    else:
                        bot_confidence1 = 0
                else:
                    bot_confidence1 = 0
                # Task bot confidence
                if any(nlu_service_response2):
                    bot_confidence2 = nlu_service_response2["intent"][
                        "confidence"]
                else:
                    return rep_body.rep_common(201, {})

                # # print("qa_intent %s" % nlu_service_response1["intent"])
                # print('task_intent %s' % nlu_service_response2["intent"])

                # QA bot reply
                # round keeps the given number of decimal places, e.g. round(3.1415, 2) -> 3.14
                if ((bot_confidence1 > bot_confidence2
                     or round(bot_confidence1, 1) == round(bot_confidence2, 1))
                        and anyq_response.content):
                    anyq_answer = anyq_service_response[0]["answer"]
                    # print("ananyq_answer:" % anyq_answer)
                    bot_type = 1
                    askfor_data = {
                        "botId": bot_id,
                        "botType": bot_type,
                        "replyText": [anyq_answer],
                        "confidence": bot_confidence1,
                        "notHeard": "N",
                        "unexpectCount": unexpect_count
                    }
                    return rep_body.rep_common(200, askfor_data)

                # Task bot reply
                else:
                    bot_type = 2
                    # Test window
                    if use_type == "1":
                        not_heard = "N"
                        # 1. Exclude the test window's outbound-call intent, transfer-to-agent intent, and did-not-hear cases
                        if (nlu_service_response2['intent']['name']
                                == 'task_kuandai_huifang'
                                or nlu_service_response2['intent']['name']
                                == 'task_to_people'
                                or nlu_service_response2['intent']['name']
                                == 'task_askfor_again'):
                            unexpect_count += 1
                            reply_text = []
                            reply_text.append(
                                random_reply_generate.unrecognized_reply())
                            reply_text.append(
                                random_reply_generate.morehelp_reply())
                            bot_confidence = random_pro_generate.random_generate(
                            )
                            # Response body
                            dict_data = {
                                "botId": bot_id,
                                "botType": use_type,
                                "replyText": reply_text,
                                "confidence": bot_confidence,
                                "notHeard": not_heard,
                                "unexpectCount": unexpect_count
                            }
                            if unexpect_count == 2:
                                unexpect_count = 0
                            return rep_body.rep_common(200, dict_data)
                        # 2. Normal handling in the test window
                        else:
                            core_service_url = task_request_url
                            bot_confidence = bot_confidence2
                            # Call the task core service
                            core_service_params = {
                                "sender": visitor_id,
                                "message": input_text
                            }
                            # print("测试窗的core_service_params:" % core_service_params)
                            core_service_headers = {
                                'content-type': 'application/json'
                            }
                            core_service_response = requests.post(
                                core_service_url,
                                data=json.dumps(core_service_params),
                                headers=core_service_headers).json()
                            # print("core_service_response" % core_service_response)
                            # Process the bot response
                            linklist = []
                            if len(core_service_response) == 0:
                                unexpect_count += 1
                                # Response body
                                dict_data = {
                                    "botId":
                                    bot_id,
                                    "botType":
                                    bot_type,
                                    "replyText":
                                    random_reply_generate.unrecognized_reply(),
                                    "confidence":
                                    random_pro_generate.random_generate(),
                                    "notHeard":
                                    not_heard,
                                    "unexpectCount":
                                    unexpect_count
                                }
                                if unexpect_count == 2:
                                    unexpect_count = 0
                                return rep_body.rep_common(200, dict_data)

                            for j in core_service_response:
                                linklist.append(j["text"])

                            reply_text = linklist
                            # print("测试窗的reply_text:" % reply_text)
                            if reply_text[0] == '我还比较小' or reply_text[0] == '':
                                unexpect_count += 1
                                reply_text = []
                                reply_text.append(
                                    random_reply_generate.unrecognized_reply())
                                reply_text.append(
                                    random_reply_generate.morehelp_reply())
                                bot_confidence = random_pro_generate.random_generate(
                                )
                                # Response body
                                dict_data = {
                                    "botId": bot_id,
                                    "botType": bot_type,
                                    "replyText": reply_text,
                                    "confidence": bot_confidence,
                                    "notHeard": not_heard,
                                    "unexpectCount": unexpect_count
                                }
                                if unexpect_count == 2:
                                    unexpect_count = 0
                                return rep_body.rep_common(200, dict_data)
                            else:
                                # Response body
                                dict_data = {
                                    "botId": bot_id,
                                    "botType": bot_type,
                                    "replyText": reply_text,
                                    "confidence": bot_confidence,
                                    "notHeard": not_heard,
                                    "unexpectCount": unexpect_count
                                }
                                return rep_body.rep_common(200, dict_data)
                    # Voice channel
                    elif use_type == "2":
                        not_heard = "F"
                        # 1. Handle the transfer-to-agent intent
                        if nlu_service_response2['intent'][
                                'name'] == 'task_to_people':
                            reply_text = []
                            not_heard = "P"
                            reply_text.append(
                                random_reply_generate.morehelp_reply())
                            bot_confidence = nlu_service_response2['intent'][
                                'confidence']
                            # Response body
                            dict_data = {
                                "botId": bot_id,
                                "botType": use_type,
                                "replyText": reply_text,
                                "confidence": bot_confidence,
                                "notHeard": not_heard,
                                "unexpectCount": unexpect_count
                            }
                            return rep_body.rep_common(200, dict_data)

                        # 2. Voice case: the user did not hear clearly
                        elif nlu_service_response2["intent"][
                                'name'] == "task_askfor_again":
                            not_heard = "T"
                            # The user did not hear clearly
                            askfor_data = {
                                "botId": bot_id,
                                "botType": bot_type,
                                "replyText": [],
                                "confidence": 0,
                                "notHeard": not_heard,
                                "unexpectCount": unexpect_count
                            }
                            return rep_body.rep_common(200, askfor_data)

                        # 3. The user heard the bot's reply
                        elif nlu_service_response2["intent"][
                                'name'] != "task_askfor_again":
                            # Handle the user's goodbye intent
                            if nlu_service_response2['intent'][
                                    'name'] == 'goodbye':
                                not_heard = "G"
                            core_service_url = task_request_url
                            bot_confidence = bot_confidence2
                            # Call the task core service
                            core_service_params = {
                                "sender": visitor_id,
                                "message": input_text
                            }
                            # print("语音的core_service_params:" % core_service_params)
                            core_service_headers = {
                                'content-type': 'application/json'
                            }
                            core_service_response = requests.post(
                                core_service_url,
                                data=json.dumps(core_service_params),
                                headers=core_service_headers).json()
                            # print("语音的core_service_response:" % core_service_response)

                            # Process the bot response
                            linklist = []
                            if len(core_service_response) == 0:
                                unexpect_count += 1
                                # Response body
                                dict_data = {
                                    "botId":
                                    bot_id,
                                    "botType":
                                    bot_type,
                                    "replyText":
                                    random_reply_generate.unrecognized_reply(),
                                    "confidence":
                                    random_pro_generate.random_generate(),
                                    "notHeard":
                                    not_heard,
                                    "unexpectCount":
                                    unexpect_count
                                }
                                if unexpect_count == 2:
                                    unexpect_count = 0
                                return rep_body.rep_common(200, dict_data)

                            for j in core_service_response:
                                linklist.append(j["text"])

                            reply_text = linklist
                            # print("语音的reply_text:" % reply_text)
                            if reply_text[0] == '我还比较小' or reply_text[0] == '':
                                unexpect_count += 1
                                reply_text = []
                                reply_text.append(
                                    random_reply_generate.unrecognized_reply())
                                reply_text.append(
                                    random_reply_generate.morehelp_reply())
                                bot_confidence = random_pro_generate.random_generate(
                                )
                                # Response body
                                dict_data = {
                                    "botId": bot_id,
                                    "botType": bot_type,
                                    "replyText": reply_text,
                                    "confidence": bot_confidence,
                                    "notHeard": not_heard,
                                    "unexpectCount": unexpect_count
                                }

                                if unexpect_count == 2:
                                    unexpect_count = 0
                                return rep_body.rep_common(200, dict_data)
                            else:
                                # Response body
                                dict_data = {
                                    "botId": bot_id,
                                    "botType": bot_type,
                                    "replyText": reply_text,
                                    "confidence": bot_confidence,
                                    "notHeard": not_heard,
                                    "unexpectCount": unexpect_count
                                }
                                return rep_body.rep_common(200, dict_data)
        except Exception as e:
            # print(type(e))
            # print(e)
            # print('error')
            return rep_body.rep_common(201, {"err_msg": str(e)})
    return rep_body.rep_common(201, {})