Example #1
def predict():
    # Get the data to be predicted; three sample rows are hard-coded below
    # and one of them is picked at random for the demo.
    result_k = []
    result_line = []
    data = [[
        0, 1.57, 0, 0, 1.58, 0, 0, 99.92, 0, 0, 99.92, 0, 477.79, 348.23, 6.24,
        0, 12.4545, -5.2992
    ],
            [
                0, 1.23, 0, 0, 1.31, 0, 0, 99.92, 0, 0, 99.92, 0, 475.82,
                348.45, 6.19, 0, -0.3895, 0.576
            ],
            [
                60.57, 60.49, 60.5, 60.5, 60.49, 60.49, 99.9, 99.86, 99.9,
                99.9, 99.86, 99.9, 453.07, 351.83, 6.07, 111.4, -1.254, 1.536
            ]]
    # Pick one of the three sample rows at random.
    l = random.randint(0, 2)
    line = data[l]
    lines = [float(i) for i in line]
    # Keep the last two features, then pad with a zero, because the full
    # dataset has 31 columns and the model expects a third column.
    line_train = []
    line_train.append(lines[-2])
    line_train.append(lines[-1])
    line_train.append(0)
    COLUMNS = ['29', '30', '31']

    # Convert the list into a single-row DataFrame.

    lines = pd.DataFrame([line_train], columns=COLUMNS)

    # Run the prediction with the loaded session and model.
    predict_result = execute.predict(sess, lines, model)
    if predict_result[0] == 0:
        k = "正常"
        key = 0
    else:
        k = "有漏油"
        key = 1
    result_k.append(k)
    result_line.append(line)

    return render_template('predict.html',
                           result_k=result_k,
                           result_line=result_line,
                           key=key)
Example #2
def cnn_predict():
    # Load the class-name list for the dataset (CIFAR-10 style batches.meta).
    file = g_config["dataset_path"] + "batches.meta"
    patch_bin_file = open(file, "rb")
    label_names_dict = pickle.load(patch_bin_file)["label_names"]
    # Read the image the user uploaded.
    global secure_filename
    img = Image.open(os.path.join(app.root_path, secure_filename))
    # Split the RGB channels, stack them in channel-planar order and reshape
    # to the (1, 32, 32, 3) input the model expects, normalising to [0, 1].
    r, g, b = img.split()
    r_arr = np.array(r)
    g_arr = np.array(g)
    b_arr = np.array(b)
    image = np.concatenate((r_arr, g_arr, b_arr)).reshape([1, 32, 32, 3]) / 255
    # Map the numeric prediction to its class name and render the result page.
    predicted_class = label_names_dict[execute.predict(image)[0]]
    return flask.render_template(
        template_name_or_list="prediction_result.html",
        predicted_class=predicted_class)
Example #3
def cnn_predict():
    global secure_filename
    # Get the name of each image class.
    filename = config['dataset_path'] + 'batches.meta'
    fp = open(filename, 'rb')
    label_name_dict = pickle.load(fp)['label_names']
    # Read the image the user uploaded.
    img = Image.open(os.path.join(app.root_path, secure_filename))
    r, g, b = img.split()
    r_arr = np.array(r)
    g_arr = np.array(g)
    b_arr = np.array(b)
    image = np.concatenate((r_arr, g_arr, b_arr)).reshape((1, 32, 32, 3)) / 255
    predicted_class = label_name_dict[execute.predict(image)[0]]
    # Render the result with the page template.
    return flask.render_template('prediction_result.html', predicted_class=predicted_class)
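Both routes above read a module-level secure_filename that is set somewhere else in the application; that part is not included in these snippets. As a rough sketch only, a companion upload view could set it along the following lines, assuming a plain Flask app and Werkzeug's secure_filename helper (the route name, the 'image' form field and the redirect target are assumptions, not taken from the snippets):

import os

import flask
from werkzeug.utils import secure_filename as sanitize_name

app = flask.Flask(__name__)
secure_filename = None  # module-level name that cnn_predict() reads


@app.route('/upload', methods=['POST'])
def upload():
    # Hypothetical companion route: save the uploaded file under a sanitized
    # name and remember that name so a later cnn_predict() call can open it.
    global secure_filename
    uploaded = flask.request.files['image']
    secure_filename = sanitize_name(uploaded.filename)
    uploaded.save(os.path.join(app.root_path, secure_filename))
    return flask.redirect('/predict')  # assumed URL of the prediction page

The Werkzeug helper is imported under a different name here so it does not shadow the module-level secure_filename variable that the prediction views rely on.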
Example #4
def reply():
    # Get the message parameter from the request.
    req_msg = request.form['msg']
    # Segment the sentence with jieba.
    req_msg = " ".join(jieba.cut(req_msg))
    # Call the prediction routine to generate the reply.
    res_msg = execute.predict(req_msg)
    # Replace _UNK tokens with a smiley.
    res_msg = res_msg.replace('_UNK', '^_^')
    res_msg = res_msg.strip()

    # If the generated reply is empty, fall back to a default prompt.
    if res_msg == '':
        res_msg = '请与我聊聊天吧'  # "Come and chat with me"

    return jsonify({'text': res_msg})
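Assuming the reply() view above is registered under a URL rule such as /message (the routing decorator is not part of the snippet), the endpoint could be exercised with a short client like this; only the form field 'msg' and the JSON key 'text' come from the code above, while the host, port and path are placeholders:

import requests

# Hypothetical client call against a locally running instance of the app.
resp = requests.post('http://127.0.0.1:5000/message', data={'msg': '你好'})
print(resp.json()['text'])  # prints the chatbot's reply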
Example #5
def CNN_predict():
    global secure_filename

    # Open the uploaded image and make sure it is a 32x32 RGB image before
    # reshaping (the reshape below requires exactly 32 * 32 * 3 values).
    img = Image.open(
        os.path.join(app.root_path, 'predict_img/' + secure_filename))
    img = img.resize([32, 32])
    img = img.convert("RGB")

    # Split the channels and stack them in channel-planar order.
    r, g, b = img.split()
    r_arr = np.array(r)
    g_arr = np.array(g)
    b_arr = np.array(b)
    img = np.concatenate((r_arr, g_arr, b_arr))

    # Reshape to the model's (1, 32, 32, 3) input and normalise to [0, 1].
    image = img.reshape([1, 32, 32, 3]) / 255

    predicted_class = execute.predict(image)
    print(predicted_class)

    return flask.render_template(
        template_name_or_list="prediction_result.html",
        predicted_class=predicted_class)
Example #6
def CNN_predict():
    global secure_filename
    # Open the image file with PIL's Image and read its contents.
    img = Image.open(
        os.path.join(app.root_path, 'predict_img/' + secure_filename))
    img = img.resize([32, 32])
    # Convert the image to RGB.
    img = img.convert("RGB")
    # Get the pixel data for the r, g, b channels and concatenate them.
    r, g, b = img.split()
    r_arr = np.array(r)
    g_arr = np.array(g)
    b_arr = np.array(b)
    img = np.concatenate((r_arr, g_arr, b_arr))
    # Reshape the concatenated data to the (1, 32, 32, 3) shape the model
    # expects and normalise the pixel values.
    image = img.reshape([1, 32, 32, 3]) / 255
    # Call the predict method in execute to run the prediction.
    predicted_class = execute.predict(image)
    print(predicted_class)
    # Return the prediction and render it with the page template.
    return flask.render_template(
        template_name_or_list="prediction_result.html",
        predicted_class=predicted_class)
Example #7
def CNN_predict():
    # Load the class-name list for the dataset.
    file = gConfig['dataset_path'] + 'batches.meta'
    patch_bin_file = open(file, "rb")
    label_name_dict = pickle.load(patch_bin_file)['label_names']

    # Read the image the user uploaded.
    global secure_filename
    img = Image.open(os.path.join(app.root_path, secure_filename))

    # Split the RGB channels and stack them in channel-planar order.
    r, g, b = img.split()
    img = np.concatenate((
        np.array(r),
        np.array(g),
        np.array(b)
    ))

    # Reshape to the model's (1, 32, 32, 3) input and normalise to [0, 1].
    image = img.reshape([1, 32, 32, 3]) / 255

    predicted_class = execute.predict(image)

    return flask.render_template(
        template_name_or_list='prediction_result.html',
        predicted_class=predicted_class)
Example #8
def CNN_predict():
    # Path where the image class names are stored.
    file = getConfig['dataset_path'] + 'batches.meta'

    # Read the class names and keep them in a dictionary.
    patch_bin_file = open(file, 'rb')

    label_names_dict = pickle.load(patch_bin_file)['label_names']

    # Use the globally declared filename.
    global secure_filename

    # Read the image to classify from the local directory.
    img = Image.open(os.path.join(app.root_path, secure_filename))

    # Split the image into its R, G and B channels.
    r, g, b = img.split()

    # Put the pixel data of each channel into an array.
    r_arr = np.array(r)
    g_arr = np.array(g)
    b_arr = np.array(b)

    # Concatenate the three arrays.
    img = np.concatenate((r_arr, g_arr, b_arr))

    # Reshape the concatenated data and normalise it.
    image = img.reshape([1, 32, 32, 3]) / 255

    # Call the executor's predict function on the image data.
    predicted_class = execute.predict(image)

    # Render the returned result with the page template.
    return flask.render_template(
        template_name_or_list='prediction_result.html',
        predicted_class=predicted_class)
Example #9
    while count < 1000:
        # Read one record from the dataset.
        f.readline()              # skip the "E" separator line
        question = f.readline()   # read the question
        question = question[2:]   # drop the line prefix
        _answer = f.readline()    # read the reference answer
        _answer = _answer[2:]     # drop the line prefix

        # Word segmentation.
        question_fenci = ' '.join(jieba.cut(question))
        _answer_fenci = ' '.join(jieba.cut(_answer))

        # Ask the chatbot.
        print('--------------------------------')
        print('question_fenci: ' + str(question_fenci))
        answer = execute.predict(question_fenci)

        # Segment the generated answer.
        answer_fenci = ' '.join(jieba.cut(answer))
        print('_answer_fenci: ' + str(_answer_fenci))
        print('answer_fenci: ' + str(answer_fenci))

        # Compute BLEU against the reference answer.
        reference.append(_answer_fenci.split())
        candidate = answer_fenci.split()
        score1 = sentence_bleu(reference, candidate, weights=(1, 0, 0, 0))
        score2 = sentence_bleu(reference, candidate, weights=(0.5, 0.5, 0, 0))
        score3 = sentence_bleu(reference, candidate, weights=(0.33, 0.33, 0.33, 0))
        score4 = sentence_bleu(reference, candidate, weights=(0.25, 0.25, 0.25, 0.25))
        reference.clear()
        print('Cumulate 1-gram :%f' % score1)