Example #1
def test(model, train, validation, test_data):
    # Evaluate loss/accuracy on the validation and training generators.
    loss, accuracy = model.evaluate_generator(
        validation, steps=math.ceil(validation.n / config.BATCH_SIZE))
    train_loss, train_accuracy = model.evaluate_generator(
        train, steps=math.ceil(train.n / config.BATCH_SIZE))

    # Collect per-sample predicted probabilities and ground-truth labels
    # for the validation split.
    results = evaluate.get_results(model, validation)
    probabilities = list(evaluate.transform_binary_probabilities(results))
    labels = list(evaluate.get_labels(validation))

    # The test split may be empty; only evaluate it when it contains samples.
    test_loss = 0
    test_accuracy = 0
    test_probabilities = []
    test_labels = []
    if len(test_data.classes) != 0:
        test_loss, test_accuracy = model.evaluate_generator(
            test_data, steps=math.ceil(test_data.n / config.BATCH_SIZE))
        test_results = evaluate.get_results(model, test_data)
        test_probabilities = list(evaluate.transform_binary_probabilities(test_results))
        test_labels = list(evaluate.get_labels(test_data))

    return {
        "train_accuracy": float(train_accuracy),
        "train_loss": float(train_loss),
        "accuracy": float(accuracy),
        "loss": float(loss),
        "test_accuracy": float(test_accuracy),
        "test_loss": float(test_loss),
        "probabilities": probabilities,
        "labels": labels,
        "test_probabilities": test_probabilities,
        "test_labels": test_labels,
    }
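
For reference, a minimal call sketch (not part of the original code): it assumes the generators come from Keras' ImageDataGenerator.flow_from_directory, which provides the .n and .classes attributes used above; config.BATCH_SIZE and the directory paths are placeholders.

# hypothetical usage sketch for test(); directory names are placeholders
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)
train_gen = datagen.flow_from_directory(
    'data/train', target_size=(224, 224),
    batch_size=config.BATCH_SIZE, class_mode='binary')
val_gen = datagen.flow_from_directory(
    'data/validation', target_size=(224, 224),
    batch_size=config.BATCH_SIZE, class_mode='binary')
test_gen = datagen.flow_from_directory(
    'data/test', target_size=(224, 224),
    batch_size=config.BATCH_SIZE, class_mode='binary', shuffle=False)

metrics = test(model, train_gen, val_gen, test_gen)
print(metrics['accuracy'], metrics['test_accuracy'])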
Example #2
def predict(df):
    # Build one 800-dimensional vector of slice-level probabilities per case
    # name, together with a case-level truth label.
    basepath = '/media/user1/preprocessed'
    features = []
    truth_label = []
    df.sort()  # df is assumed to be a list of case names; sort for a deterministic order
    print(df)

    # Names listed in the ID column of Book1.csv are treated as positive cases;
    # read the sheet once instead of on every loop iteration.
    labels_df = pd.read_csv('./Book1.csv')
    path = labels_df['ID'].tolist()

    label = np.zeros((1,))
    for name in df:
        print(name)
        label[0] = 0
        vector = np.zeros(800)  # one probability per slice, up to 800 slices
        if 'COR' in name or 'SUB' in name:
            label[0] = 1

        if name in path:
            label[0] = 1

        basedir = os.path.normpath(basepath)
        print(basedir)
        files = glob(basedir + '/' + name + '*.npy')
        files.sort()
        num_files = len(files)

        if num_files == 0:
            continue  # no preprocessed slices for this name; skip it rather than stopping

        for i in range(num_files):
            img = np.load(files[i])
            s = img.shape

            # Resize to 224x224 if needed, replicate the single channel to
            # three channels, and add a batch dimension for the generator.
            if s[0] != 224:
                img = misc.imresize(img, (224, 224), 'bilinear')
            img = np.stack((img, img, img), axis=2)
            img = img[np.newaxis, :, :, :]

            test_generator = Dataset(
                img,
                label,
                augment=False,
                shuffle=False,
                input_form='t1',
                seed=seed,
            )

            test_generator.reset()
            test_results = evaluate.get_results(model, test_generator)
            probabilities = list(
                evaluate.transform_binary_probabilities(test_results))
            vector[i] = probabilities[0]  # store this slice's positive-class probability

        features.append(vector)
        truth_label.append(label[0])
    return features, truth_label
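
A possible usage sketch (hypothetical, but consistent with the files loaded in Example #5): convert the returned lists to arrays and cache them; test_set is assumed to be a list of case names.

# hypothetical usage of predict()
test_features, test_labels = predict(test_set)
test_features = np.asarray(test_features)   # shape: (num_cases, 800)
test_labels = np.asarray(test_labels)
np.save('./COR_test.npy', test_features)
np.save('./COR_test_label.npy', test_labels)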
Example #3
def test_model(model, train, validation, test):

    loss, accuracy = model.evaluate_generator(
        validation, steps=math.ceil(len(validation) / config.BATCH_SIZE))
    train_loss, train_accuracy = model.evaluate_generator(
        train, steps=math.ceil(len(train) / config.BATCH_SIZE))
    test_loss, test_accuracy = model.evaluate_generator(
        test, steps=math.ceil(len(test) / config.BATCH_SIZE))

    # Reset the generators so that prediction starts from the first batch.
    train.reset()
    validation.reset()
    test.reset()

    results = evaluate.get_results(model, validation)
    probabilities = list(evaluate.transform_binary_probabilities(results))
    labels = list(evaluate.get_labels(validation))

    test_results = evaluate.get_results(model, test)
    test_probabilities = list(
        evaluate.transform_binary_probabilities(test_results))
    test_labels = list(evaluate.get_labels(test))

    # Reset the generators again so they can be reused after this function.
    train.reset()
    validation.reset()
    test.reset()

    return {
        "train_accuracy": float(train_accuracy),
        "train_loss": float(train_loss),
        "accuracy": float(accuracy),
        "loss": float(loss),
        "test_accuracy": float(test_accuracy),
        "test_loss": float(test_loss),
        "probabilities": probabilities,
        "labels": labels,
        "test_probabilities": test_probabilities,
        "test_labels": test_labels,
    }
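
An illustrative follow-up (not part of the original): the probabilities and labels returned by test_model are in the right form for threshold-free metrics such as ROC AUC, assuming the probabilities refer to the positive class.

# hypothetical: ROC AUC from the returned validation/test probabilities
from sklearn.metrics import roc_auc_score

metrics = test_model(model, train, validation, test)
val_auc = roc_auc_score(metrics['labels'], metrics['probabilities'])
test_auc = roc_auc_score(metrics['test_labels'], metrics['test_probabilities'])
print('validation AUC:', val_auc, 'test AUC:', test_auc)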
Example #4
def test_model(model, train, validation, test):  # , holdout_test):

    train_loss, train_accuracy = model.evaluate_generator(
        train, steps=math.ceil(len(train) / config.BATCH_SIZE))
    loss, accuracy = model.evaluate_generator(
        validation, steps=math.ceil(len(validation) / config.BATCH_SIZE))
    test_loss, test_accuracy = model.evaluate_generator(
        test, steps=math.ceil(len(test) / config.BATCH_SIZE))
    # holdout_test_loss, holdout_test_accuracy = model.evaluate_generator(holdout_test, steps=math.ceil(len(holdout_test)/config.BATCH_SIZE))

    train.reset()
    validation.reset()
    test.reset()
    # holdout_test.reset()

    # labels  - ground-truth classes
    # results - raw predictions from the model
    results = evaluate.get_results(model, validation)
    probabilities = list(evaluate.transform_binary_probabilities(results))
    labels = list(evaluate.get_labels(validation))

    test_results = evaluate.get_results(model, test)
    test_probabilities = list(
        evaluate.transform_binary_probabilities(test_results))
    test_labels = list(evaluate.get_labels(test))

    # holdout_test_results = evaluate.get_results(model, holdout_test)
    # holdout_test_probabilities = list(evaluate.transform_binary_probabilities(holdout_test_results))
    # holdout_test_labels = list(evaluate.get_labels(holdout_test))

    train.reset()
    validation.reset()
    test.reset()
    # holdout_test.reset()

    # get binary predictions
    # holdout_binary_predictions = list(evaluate.transform_binary_predictions(holdout_test_results))
    test_binary_predictions = list(
        evaluate.transform_binary_predictions(test_results))
    # get f1 score
    test_f1_result = f1_score(test_labels, test_binary_predictions)
    # holdout_f1_result = f1_score(holdout_test_labels, holdout_binary_predictions)

    return {
        "train_accuracy": float(train_accuracy),
        "train_loss": float(train_loss),
        "accuracy": float(accuracy),
        "loss": float(loss),
        "test_accuracy": float(test_accuracy),
        "test_loss": float(test_loss),
        # "holdout_test_accuracy": float(holdout_test_accuracy),
        # "holdout_test_loss": float(holdout_test_loss),
        "holdout_test_accuracy": float(0),
        "holdout_test_loss": float(0),
        "probabilities": probabilities,
        "labels": labels,
        "test_probabilities": test_probabilities,
        "test_labels": test_labels,
        # "holdout_test_probabilities": holdout_test_probabilities,
        # "holdout_test_labels": holdout_test_labels,
        "holdout_test_probabilities": 'na',
        "holdout_test_labels": 'na',
        "test_f1_result": test_f1_result,
        # "holdout_f1_result": holdout_f1_result,
        "holdout_f1_result": float(0),
    }
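
A hypothetical extension of the F1 computation above: with the returned labels and probabilities, other sklearn metrics can be derived the same way, here using the same 0.5 cutoff applied elsewhere in this code.

# hypothetical: precision/recall/confusion matrix from the returned dict
from sklearn.metrics import precision_score, recall_score, confusion_matrix

results = test_model(model, train, validation, test)
preds = [1 if p >= 0.5 else 0 for p in results['test_probabilities']]
print('precision:', precision_score(results['test_labels'], preds))
print('recall:', recall_score(results['test_labels'], preds))
print(confusion_matrix(results['test_labels'], preds))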
Example #5
test_features, test_label = relist(predict(test_set))
#test_features = np.load('./COR_test.npy')
#test_label = np.load('./COR_test_label.npy')

test_generator = Dataset(
    test_features,
    test_label,
    augment=False,
    shuffle=False,
    input_form='t1',
    seed=seed,
)

test_generator.reset()
test_results = evaluate.get_results(model, test_generator)
probabilities = list(evaluate.transform_binary_probabilities(test_results))
np.save('./test_slice_pro.npy', probabilities)
# split sizes: train 34585, validation 4593, test 5628
# Threshold the slice probabilities at 0.5 to get binary predictions.
lg_pred = np.zeros((len(probabilities)))
for i in range(len(probabilities)):
    if probabilities[i] < 0.5:
        lg_pred[i] = 0
    else:
        lg_pred[i] = 1

print("Accuracy: " + repr(accuracy_score(test_label, lg_pred)))
print("Average Precision Score: " +
      repr(average_precision_score(test_label, lg_pred)))
print("Kappa: " + repr(cohen_kappa_score(test_label, lg_pred)))