# --- Beispiel #1 (Example #1) — extracted snippet, rating: 0 ---
    # --- 3D ResNeXt: per-iteration evaluation and bookkeeping ---
    # NOTE(review): this fragment starts mid-loop; tic6/toc6, tic7/toc7,
    # loss_and_metrics, model_ResNeXt, x_test, gt, test_indices, VAL_SIZE,
    # index_iter and the result accumulators are defined outside this excerpt.
    print('3D ResNeXt Test time:', toc7 - tic7)

    print('3D ResNeXt Test score:', loss_and_metrics[0])
    print('3D ResNeXt Test accuracy:', loss_and_metrics[1])

    # Predict class labels: add a trailing channel axis for the 3-D CNN,
    # then take the argmax over the class-probability axis.
    pred_test = model_ResNeXt.predict(
        x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                       x_test.shape[3], 1)).argmax(axis=1)

    # NOTE(review): the Counter() result is discarded — this statement has no
    # effect; it looks like a leftover from interactive/notebook use.
    collections.Counter(pred_test)

    # Ground-truth labels in `gt` appear to be 1-based; shift to 0-based so
    # they are comparable with argmax output.
    gt_test = gt[test_indices] - 1

    # NOTE(review): sklearn metrics take (y_true, y_pred); predictions are
    # passed first here. Accuracy and kappa are symmetric so their values are
    # unaffected, but the confusion matrix is transposed relative to the
    # sklearn convention — confirm AA_andEachClassAccuracy expects that
    # orientation.
    overall_acc = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])
    confusion_matrix = metrics.confusion_matrix(pred_test, gt_test[:-VAL_SIZE])
    each_acc, average_acc = averageAccuracy.AA_andEachClassAccuracy(
        confusion_matrix)
    kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])
    # Accumulate this iteration's statistics for the final report.
    KAPPA_3D_ResNeXt.append(kappa)
    OA_3D_ResNeXt.append(overall_acc)
    AA_3D_ResNeXt.append(average_acc)
    TRAINING_TIME_3D_ResNeXt.append(toc6 - tic6)
    TESTING_TIME_3D_ResNeXt.append(toc7 - tic7)
    ELEMENT_ACC_3D_ResNeXt[index_iter, :] = each_acc

    print("3D ResNeXt finished.")
    print("# %d Iteration" % (index_iter + 1))

# save records
# NOTE(review): this call is truncated in the excerpt — the remaining
# arguments and the closing parenthesis lie beyond the visible lines.
modelStatsRecord.outputStats(
    KAPPA_3D_ResNeXt, OA_3D_ResNeXt, AA_3D_ResNeXt, ELEMENT_ACC_3D_ResNeXt,
    TRAINING_TIME_3D_ResNeXt, TESTING_TIME_3D_ResNeXt, history_3d_ResNeXt,
# --- Beispiel #2 (Example #2) — extracted snippet, rating: 0 ---
        # --- Evaluation pass for one run (`num`) of the current iteration ---
        # NOTE(review): fragment starts mid-loop; model, x_test, x_test1, gt1,
        # test_indices, VAL_SIZE, num, index_iter and oa_all/aa_all/kappa_all
        # are defined outside this excerpt.
        # tic7 = time.clock()
        # model.save("models/houston.h5")

        # Restore the checkpointed weights before predicting.
        model.load_weights('ckpt/houston.h5')
        pred_test = model.predict([x_test, x_test1]).argmax(axis=1)
        # NOTE(review): time.clock() was removed in Python 3.8 — this code
        # requires an older interpreter (or migration to time.perf_counter()).
        toc7 = time.clock()

        # NOTE(review): Counter() result is discarded — no-op leftover.
        collections.Counter(pred_test)
        # gt_test = gt[test_indices] - 1
        # Labels in gt1 appear to be 1-based; shift to 0-based to match argmax.
        gt_test = gt1[test_indices] - 1
        # NOTE(review): sklearn metrics take (y_true, y_pred); predictions are
        # passed first here — harmless for accuracy/kappa (symmetric), but the
        # confusion matrix is transposed vs. the sklearn convention.
        overall_acc_mss = metrics.accuracy_score(pred_test,
                                                 gt_test[:-VAL_SIZE])
        oa_all[0][num] = overall_acc_mss
        confusion_matrix_mss = metrics.confusion_matrix(
            pred_test, gt_test[:-VAL_SIZE])
        each_acc_mss, average_acc_mss = averageAccuracy.AA_andEachClassAccuracy(
            confusion_matrix_mss)
        aa_all[0][num] = average_acc_mss
        kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])
        kappa_all[0][num] = kappa

    # NOTE(review): only the metrics from the FINAL inner run survive into
    # these per-iteration lists — confirm that is intentional.
    KAPPA_RES_SS4.append(kappa)
    OA_RES_SS4.append(overall_acc_mss)
    AA_RES_SS4.append(average_acc_mss)

    print("training finished.")
    # print('Training Time: ', toc6 - tic6)
    # print('Test time:', toc7 - tic7)
    print("# %d Iteration" % (index_iter + 1))
    print('each_acc', each_acc_mss)
    print("oa", overall_acc_mss)
    print("aa", average_acc_mss)
# --- Beispiel #3 (Example #3) — extracted snippet, rating: 0 ---
# --- Final test-set evaluation (top-level script section) ---
# NOTE(review): model, x_test, x_test1, gt, test_indices, train_indices,
# VAL_SIZE, metrics, averageAccuracy and Kappa come from earlier in the file.
model.save_weights('model_weigh/test3_best.h5')

print("^-^-------------testing-------------------^-^")
# Evaluate the best checkpoint, timing only the prediction step.
model.load_weights('ckpt/best.h5')
# NOTE(review): time.clock() was removed in Python 3.8 — requires an older
# interpreter (or migration to time.perf_counter()).
tic7 = time.clock()
pred_test = model.predict([x_test, x_test1]).argmax(axis=1)
toc7 = time.clock()

# NOTE(review): Counter() result is discarded — no-op leftover.
collections.Counter(pred_test)
# Labels in gt appear to be 1-based; shift to 0-based to match argmax output.
gt_test = gt[test_indices] - 1
gt_train = gt[train_indices] - 1
# NOTE(review): sklearn metrics take (y_true, y_pred); predictions are passed
# first here — symmetric metrics unaffected, confusion matrix transposed.
overall_acc = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])

confusion_matrix_res4 = metrics.confusion_matrix(pred_test,
                                                 gt_test[:-VAL_SIZE])
each_acc_res4, average_acc_res4 = averageAccuracy.AA_andEachClassAccuracy(
    confusion_matrix_res4)

kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])

# NOTE(review): the statements below duplicate the evaluation above, with the
# confusion-matrix argument order flipped to (y_true, y_pred) — likely
# redundant; confirm which orientation the downstream helpers expect.
collections.Counter(pred_test)
gt_test = gt[test_indices] - 1
gt_train = gt[train_indices] - 1

overaccy = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])
confusion_matrix_mss = metrics.confusion_matrix(gt_test[:-VAL_SIZE], pred_test)
print(confusion_matrix_mss)

# NOTE(review): AA_andEachClassAccuracy is unpacked into TWO values elsewhere
# in this file; assigning to a single name here makes average_acc a tuple —
# verify intent.
average_acc = averageAccuracy.AA_andEachClassAccuracy(confusion_matrix_mss)
kappa_value = Kappa.kappa(confusion_matrix_mss)

print("training finished.")
# --- Beispiel #4 (Example #4) — extracted snippet, rating: 0 ---
    # --- 3D CONV: per-iteration evaluation and bookkeeping ---
    # NOTE(review): fragment starts mid-loop; tic2/toc2, tic3/toc3,
    # loss_and_metrics, history_conv, model, x_test, gt, test_indices,
    # index_iter and the *_CONV accumulators are defined outside this excerpt.
    print('3D CONV Training Time: ', toc2 - tic2)
    print('3D CONV Test time:', toc3 - tic3)

    print('3D CONV Test score:', loss_and_metrics[0])
    print('3D CONV Test accuracy:', loss_and_metrics[1])

    print(history_conv.history.keys())

    # Predict class labels via argmax over the class-probability axis.
    # (Original comment: "预测" = "predict".)
    pred_test_conv = model.predict(x_test).argmax(axis=1)
    # NOTE(review): Counter() result is discarded — no-op leftover.
    collections.Counter(pred_test_conv)

    # Labels in gt appear to be 1-based; shift to 0-based to match argmax.
    gt_test = gt[test_indices] - 1
    # NOTE(review): sklearn metrics take (y_true, y_pred); predictions are
    # passed first — symmetric metrics unaffected, confusion matrix
    # transposed. Also, unlike the other snippets in this file, gt_test is
    # NOT truncated with [:-VAL_SIZE] here — confirm that is intentional.
    overall_acc_conv = metrics.accuracy_score(pred_test_conv, gt_test)
    confusion_matrix_conv = metrics.confusion_matrix(pred_test_conv, gt_test)
    each_acc_conv, average_acc_conv = averageAccuracy.AA_andEachClassAccuracy(
        confusion_matrix_conv)
    kappa = metrics.cohen_kappa_score(pred_test_conv, gt_test)
    # Accumulate this iteration's statistics for the final report.
    KAPPA_CONV.append(kappa)
    OA_CONV.append(overall_acc_conv)
    AA_CONV.append(average_acc_conv)
    TRAINING_TIME_CONV.append(toc2 - tic2)
    TESTING_TIME_CONV.append(toc3 - tic3)
    ELEMENT_ACC_CONV[index_iter, :] = each_acc_conv

    print("Overall Accuracy:", overall_acc_conv)
    print("Confusion matrix:", confusion_matrix_conv)
    print("Average Accuracy:", average_acc_conv)

    print("Each Class Accuracies are listed as follows:")
    for idx, acc in enumerate(each_acc_conv):
        print("Class %d : %.3e" % (idx + 1, acc))