Example #1
def __scores(clf, testset):
    """
    """
    accuracy_ = accuracy(clf, testset)
    precision_, recall_ = precision_recall(clf, testset)
    f1score_ = f1score(precision_, recall_)
    return accuracy_, precision_, recall_, f1score_
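
# For illustration only: __scores above relies on the project's own accuracy,
# precision_recall and f1score helpers, which are not shown on this page. The
# stand-ins below are a minimal sketch assuming plain lists of gold and
# predicted labels; the *_sketch names are hypothetical, not the project's API.
def precision_recall_sketch(gold, predicted, positive=1):
    tp = sum(1 for g, p in zip(gold, predicted) if g == positive and p == positive)
    fp = sum(1 for g, p in zip(gold, predicted) if g != positive and p == positive)
    fn = sum(1 for g, p in zip(gold, predicted) if g == positive and p != positive)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall

def f1score_sketch(precision, recall):
    # F1 is the harmonic mean of precision and recall
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0
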
def draw_f1(input_file, screenlog_file):
    """
    Draw F1 curve based on information logged to screen.
    
    Args:
        input_file -- str, path to input file
        screenlog_file -- str, path to logged screen file
        
    Returns: None
    """
    size_list = []
    # recover the network name and the per-round training-set sizes from the screen log
    with open(screenlog_file, "r") as source:
        get_size = False
        for line in source:
            if get_size:
                size_list.append(int(line.strip()))
                get_size = False
            elif line.startswith("Saving network checkpoints to "):
                network_name = line.strip().rsplit("/", 1)[1]
                print(network_name)
            elif line.startswith("Training loss"):
                get_size = True

    # get validation measure
    confusion_lists = parse_txt_confusion(input_file)
    c_lists = []
    # the logged confusion counts are cumulative, so convert them back to per-round counts
    for i in reversed(range(1, len(confusion_lists))):
        clist = [
            confusion_lists[i][c] - confusion_lists[i - 1][c]
            for c in range(len(confusion_lists[i]))
        ]
        c_lists.append(clist)
    c_lists.append(confusion_lists[0])
    confusion_lists = list(reversed(c_lists))

    precision_recall_lists = [
        metrics.precision_recall(cl[0], cl[1], cl[3]) for cl in confusion_lists
    ]
    n_list = [cl[0] + cl[3] for cl in confusion_lists]
    N_list = [sum(cl) for cl in confusion_lists]
    f1_list = [
        metrics.weighted_f1(precision_recall_lists[prl][0],
                            precision_recall_lists[prl][1], n_list[prl],
                            N_list[prl])
        for prl in range(len(precision_recall_lists))
    ]

    # draw plot
    draw_F1_plot(f1_list, size_list, network_name)

    print("Finished drawing plot.")
def main():
    train_path = sys.argv[1] + '\\train\\'
    test_path = sys.argv[1] + '\\test\\'

    # load training data
    print(f'[INFO] - Loading training data from {train_path}')
    res = read_data(train_path)
    train_data = res[0]
    train_target = res[1]
    print(f'[INFO] - Total train data: {len(train_data)}')

    print(f'[INFO] - Loading testing data from {test_path}')
    res = read_data(test_path)
    test_data = res[0]
    test_target = res[1]
    print(f'[INFO] - Total test data: {len(test_data)}')

    # 10% of training data will go to developer data set
    print(f'[INFO] - Splitting training data into training and developer sets (10% goes to developer data)')
    res = train_test_split(train_data, train_target, test_size=0.1)
    train_data = res[0]
    train_target = res[2]
    print(f'[INFO] - Total training data after split {len(train_data)}')
    dev_data = res[1]
    dev_target = res[3]
    print(f'[INFO] - Total developer data {len(dev_data)}')

    rf = RandomForest(100, 10)

    accuracy_train = []
    accuracy_test = []

    counter = 1
    for train_size in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
        print(f'\n[INFO] - Iteration No.{counter} (using {int(train_size*100)}% of 90% of train data).')

        if train_size != 1.0:
            res = train_test_split(train_data, train_target, train_size=train_size, shuffle=False)
            fold_data = res[0]
            fold_target = res[2]
        else:
            fold_data = train_data
            fold_target = train_target

        feature_size = 100
        vocabulary = frequent_features(train_data, feature_size)
        print(f'[INFO] - Fitting Random Forest classifier using {feature_size} features...')
        rf.fit(fold_data, fold_target, vocabulary)

        print(f'[INFO] - Predicting with Random Forest classifier using train data...')
        rf_targets, _ = rf.predict(fold_data, vocabulary)
        accuracy_score = accuracy(fold_target, rf_targets)
        accuracy_train.append(accuracy_score)
        print(f'[INFO] - Accuracy: {accuracy_score}')

        print(f'[INFO] - Predicting with Random Forest classifier using developer data...')
        rf_targets, _ = rf.predict(dev_data, vocabulary)
        accuracy_score = accuracy(dev_target, rf_targets)
        print(f'[INFO] - Accuracy: {accuracy_score}')

        print(f'[INFO] - Predicting with Random Forest classifier using test data...')
        rf_targets, probabilities = rf.predict(test_data, vocabulary)
        accuracy_score = accuracy(test_target, rf_targets)
        accuracy_test.append(accuracy_score)
        print(f'[INFO] - Accuracy: {accuracy_score}')

        counter += 1

    learning_curves_plot = plt.figure(1)
    plt.plot([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], accuracy_train, label='train')
    plt.plot([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], accuracy_test, label='test')
    plt.title('Learning Curves (Random Forest)')
    plt.legend(loc='lower right')
    plt.xlabel('Fraction of Train Data')
    plt.ylabel('Accuracy')

    precision_recall_plot = plt.figure(2)
    average_precision, average_recall, thresholds = precision_recall(probabilities, test_target, 10)
    plt.step(average_recall, average_precision, where='post')
    plt.title('Precision-Recall Curve (Random Forest)')
    plt.xlabel('Recall')
    plt.ylabel('Precision')

    f1_plot = plt.figure(3)
    f1_score = f1(average_precision, average_recall)
    plt.plot(thresholds, f1_score)
    plt.title('F1 Curve (Random Forest)')
    plt.xlabel('Thresholds')
    plt.ylabel('F1 Measure')

    plt.show()
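
# A rough, hypothetical sketch of what the precision_recall(probabilities,
# test_target, 10) helper used in main() might do for a binary task: sweep ten
# equally spaced thresholds over the positive-class probability and compute
# precision and recall at each one. The project's actual helper (and how it
# averages over classes) may differ.
import numpy as np

def precision_recall_curve_sketch(probabilities, targets, n_thresholds=10):
    probabilities = np.asarray(probabilities, dtype=float)
    targets = np.asarray(targets)
    thresholds = np.linspace(0.0, 1.0, n_thresholds, endpoint=False)
    precisions, recalls = [], []
    for t in thresholds:
        predicted = probabilities >= t
        tp = np.sum(predicted & (targets == 1))
        fp = np.sum(predicted & (targets != 1))
        fn = np.sum(~predicted & (targets == 1))
        precisions.append(tp / (tp + fp) if tp + fp else 1.0)
        recalls.append(tp / (tp + fn) if tp + fn else 0.0)
    return np.array(precisions), np.array(recalls), thresholds
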
Example #4
def auc(modelpath, idxlist, datasetpath, for_all=False):
    """Sweep the decision threshold over the given dataset, report per-threshold
    precision/recall plus IoU, Cohen's kappa and accuracy, and print the area
    under the resulting precision-recall curve."""
    model = torch.load(modelpath).cuda().eval()

    dataset = dataset_class.Loading_dataset(idxlist, datasetpath, False, False)
    my_dataset_loader = torch.utils.data.DataLoader(dataset=dataset,
                                                    batch_size=1,
                                                    shuffle=False,
                                                    num_workers=1)

    # sweep the foreground-probability threshold from 0.5 to 0.95 in steps of 0.05;
    # the precision-recall curve is anchored at (recall=1, precision=0) here and
    # closed at (recall=0, precision=1) after the sweep
    eth = 0.5
    print("testing...")
    precisions = [0.0]
    recalls = [1.0]
    while (eth < 0.95):
        idfix = 0
        ious = 0
        kappas = 0
        accs = 0
        tps = 0
        fps = 0
        precs = 0
        recs = 0
        for sample in my_dataset_loader:
            pts_tensor = sample['pts'].float().permute(0, 3, 1, 2)
            normals_tensor = sample['normals'].float().permute(0, 3, 1, 2)
            colors_tensor = sample['colors'].float().permute(0, 3, 1, 2)
            target_tensor = sample['gt']
            input_tensor = torch.cat([pts_tensor, normals_tensor], 1).cuda()
            # input_tensor = torch.cat([pts_tensor,normals_tensor,colors_tensor],1).cuda()
            # input_tensor  = colors_tensor.cuda()
            # input_tensor = pts_tensor.cuda()
            target_tensor = target_tensor.to(device="cuda", dtype=torch.long)

            with torch.no_grad():
                out = model(input_tensor)
            softmax_mx = out.detach().permute(0, 2, 3, 1).cpu().numpy()[0]

            # binarise the foreground channel at the current threshold
            softmax_mx[softmax_mx[:, :, 1] < eth] = 0.0
            softmax_mx[softmax_mx[:, :, 1] != 0.0] = 1.0

            # mask out padded points, i.e. pixels whose 3D coordinates are all zero
            pts = pts_tensor.detach().permute(0, 2, 3, 1).numpy()[0]
            pts2 = pts.reshape((pts.shape[0] * pts.shape[1], 3))
            idxs = np.any(pts2 != [0.0, 0.0, 0.0], axis=-1)

            pred = softmax_mx[:, :, 1]
            gta = target_tensor.cpu().numpy()[0]
            pred = pred.reshape((pred.shape[0] * pred.shape[1]))
            gta = gta.reshape((gta.shape[0] * gta.shape[1]))
            pred = pred[idxs]
            gta = gta[idxs]

            # per-image IoU of the binarised prediction against the ground truth
            union = np.logical_or(pred, gta)
            intersection = np.logical_and(pred, gta)
            iou = intersection.sum() / union.sum()

            ious = ious + iou
            kappa = metrics.Kappa_cohen(pred, gta)
            acc = metrics.Accuracy(pred, gta)
            # precision_recall apparently also returns the raw confusion counts,
            # which are used here to accumulate true- and false-positive rates
            pr_stats = metrics.precision_recall(pred, gta)
            tps = pr_stats[2] / (pr_stats[2] + pr_stats[5]) + tps
            fps = pr_stats[3] / (pr_stats[4] + pr_stats[3]) + fps
            precs = precs + pr_stats[0]
            recs = recs + pr_stats[1]
            kappas = kappas + kappa
            accs = accs + acc
            idfix = idfix + 1

        precisions.append(precs / idfix)
        recalls.append(recs / idfix)
        # mean accuracy, kappa and IoU at the current threshold
        print(accs / idfix, kappas / idfix, ious / idfix)
        eth = eth + 0.05
    # close the precision-recall curve at (recall=0, precision=1) and integrate it
    precisions.append(1.0)
    recalls.append(0.0)
    precisions = np.array(precisions)
    recalls = np.array(recalls)
    print(precisions, recalls)
    print(m.auc(recalls, precisions))
    print("TPS", tps / idfix)
    print("FPS", fps / idfix)
    print("MIoU", ious / idfix)
    print("kappas", kappas / idfix)
    print("accuracy", accs / idfix)
    print("precision", precs / idfix)
    print("recall", recs / idfix)