##### confusion matrix #####
    confmat = confusion_matrix(y_true=test, y_pred=pre)
    print(confmat)
    ##### save csv #####
    pre_dict = {'Recording': [], 'Result': []}
    test_dict = {'Recording': [], 'First_label': []}
    count = 0
    for i in range(len(pre)):
        pre_dict['Recording'].append(count)
        pre_dict['Result'].append(pre[i])

        test_dict['Recording'].append(count)
        test_dict['First_label'].append(test[i])
        count += 1

    pre = pd.DataFrame(pre_dict)
    test = pd.DataFrame(test_dict)
    test['Second_label'] = ''
    test['Third_label'] = ''
    pre.to_csv('./Result/1.csv', index=False)
    test.to_csv('./Result/2.csv', index=False)
    score_py3.score('./Result/1.csv', './Result/2.csv')
    ##### save process #####
    log = pd.DataFrame([Accuracy, F1], index=['Accuracy', 'F1'])
    log.to_csv('./Result/log.csv')
    ##### save figure #####
    if Args.show_plot:
        plt.ioff()
        plt.savefig("./Result/result.jpg")
        plt.show()
# ===== Example #2 =====
            # Periodic progress report during training.
            # BUGFIX: corrected the misspelled log label 'accuray' -> 'accuracy'.
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data,
                  '| train accuracy: %.2f' % accuracy_train,
                  '| test accuracy: %.2f' % accuracy_test)
print('End Training')
# Move predictions / ground truth off the GPU; +1 maps the 0-based class
# indices to the 1-based label scheme the scorer expects.
pre_labels = pred_test.data.cpu().numpy() + 1
test_labels = test_y.data.cpu().numpy() + 1

# One row per recording, keyed by sequential index.  Built directly from the
# arrays instead of the old append-in-a-loop with a redundant `count` counter
# that merely duplicated the loop index (assumes len(pre) == len(test)).
pre = pd.DataFrame({'Recording': range(len(pre_labels)),
                    'Result': pre_labels})
test = pd.DataFrame({'Recording': range(len(test_labels)),
                     'First_label': test_labels})
# The scorer expects (possibly empty) secondary-label columns to exist.
test['Second_label'] = ''
test['Third_label'] = ''
pre.to_csv('1.csv', index=False)
test.to_csv('2.csv', index=False)
score('1.csv', '2.csv')
    model = load_model("best_model.109-0.87.h5")
    pre_lists = pd.read_csv(path_test_ref)
    print(pre_lists.head())
    pre_lists = np.array(pre_lists)
    # Column 0 holds the recording ids; extract one feature tensor per id.
    pre_datas = np.array([get_feature(item, path_test) for item in pre_lists[:, 0]])

    # model.predict (rather than Sequential-only predict_classes) keeps this
    # working for functional models as well.
    pre_r = model.predict(pre_datas)
    # BUGFIX: reuse pre_r — the original ran a second, redundant predict pass.
    pre_result = np.argmax(pre_r, axis=1)

    print(pre_result.shape)
    # argmax yields 0-based classes; submission labels are 1-based.
    result_label = [x + 1 for x in pre_result]

    # Two column vectors (ids, labels) glued side by side.
    df1 = np.array([pre_lists[:, 0]]).T
    df2 = np.array([result_label]).T
    df = np.hstack((df1, df2))

    # The header is deliberately prepended as a data row: to_csv(header=False)
    # then emits it as the first line (the index column is still written,
    # matching the format score() consumes).
    header = np.array(['Recording', 'Result'])
    answer = np.vstack((header, df))

    dataframe = pd.DataFrame(answer)
    dataframe.to_csv(path_test_answer, header=False)

    print("predict finish")

    # Performance evaluation against the reference labels.
    score(path_test_answer, path_test_ref)