Example #1
import math

import config  # project-local settings module (provides BATCH_SIZE)
import evaluate  # project-local helpers used below


def test(model, train, validation, test_data):

    # loss/accuracy on the validation and training splits
    loss, accuracy = model.evaluate_generator(validation, steps=math.ceil(validation.n/config.BATCH_SIZE))
    train_loss, train_accuracy = model.evaluate_generator(train, steps=math.ceil(train.n/config.BATCH_SIZE))

    results = evaluate.get_results(model, validation)
    probabilities = list(evaluate.transform_binary_probabilities(results))
    labels = list(evaluate.get_labels(validation))

    # defaults returned when no separate test split is provided
    test_loss = 0
    test_accuracy = 0
    test_probabilities = []
    test_labels = []
    # only evaluate the test split if it actually contains samples
    if len(test_data.classes) != 0:
        test_loss, test_accuracy = model.evaluate_generator(test_data, steps=math.ceil(test_data.n/config.BATCH_SIZE))
        test_results = evaluate.get_results(model, test_data)
        test_probabilities = list(evaluate.transform_binary_probabilities(test_results))
        test_labels = list(evaluate.get_labels(test_data))

    return {
        "train_accuracy": float(train_accuracy),
        "train_loss": float(train_loss),
        "accuracy": float(accuracy),
        "loss": float(loss),
        "test_accuracy": float(test_accuracy),
        "test_loss": float(test_loss),
        "probabilities": probabilities,
        "labels": labels,
        "test_probabilities": test_probabilities,
        "test_labels": test_labels,
    }
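A minimal sketch of what the project-specific evaluate helpers called above might look like. The real module is not shown on this page, so the function bodies below are assumptions inferred only from how they are used; the names get_results, transform_binary_probabilities, and get_labels come from the examples, everything else is guessed.

import math

import numpy as np


def get_results(model, generator):
    # Run the model over every batch and return the raw predicted
    # probabilities (shape (n_samples, 1) for a single sigmoid output).
    generator.reset()
    return model.predict_generator(
        generator, steps=math.ceil(generator.n / generator.batch_size))


def transform_binary_probabilities(results):
    # Flatten the (n_samples, 1) output into a 1-D array of
    # positive-class probabilities.
    return np.asarray(results).ravel()


def get_labels(generator):
    # Ground-truth class indices in generator order; these only line up with
    # the predictions when the generator was built with shuffle=False.
    return generator.classes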
Example #2
import math

import config  # project-local settings module (provides BATCH_SIZE)
import evaluate  # project-local helpers used below


def test_model(model, train, validation, test):

    loss, accuracy = model.evaluate_generator(
        validation, steps=math.ceil(len(validation) / config.BATCH_SIZE))
    train_loss, train_accuracy = model.evaluate_generator(
        train, steps=math.ceil(len(train) / config.BATCH_SIZE))
    test_loss, test_accuracy = model.evaluate_generator(
        test, steps=math.ceil(len(test) / config.BATCH_SIZE))

    # reset the generators so the predictions below start from the first batch
    train.reset()
    validation.reset()
    test.reset()

    results = evaluate.get_results(model, validation)
    probabilities = list(evaluate.transform_binary_probabilities(results))
    labels = list(evaluate.get_labels(validation))

    test_results = evaluate.get_results(model, test)
    test_probabilities = list(
        evaluate.transform_binary_probabilities(test_results))
    test_labels = list(evaluate.get_labels(test))

    # reset the generators again so the caller gets them back at their start
    train.reset()
    validation.reset()
    test.reset()

    return {
        "train_accuracy": float(train_accuracy),
        "train_loss": float(train_loss),
        "accuracy": float(accuracy),
        "loss": float(loss),
        "test_accuracy": float(test_accuracy),
        "test_loss": float(test_loss),
        "probabilities": probabilities,
        "labels": labels,
        "test_probabilities": test_probabilities,
        "test_labels": test_labels,
    }
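A hypothetical usage sketch for Example #2: the directory paths, image size, and the trained model are placeholders rather than values from the original project; only config.BATCH_SIZE and test_model come from the code above.

import json

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0 / 255)

# shuffle=False keeps predictions aligned with the generators' class labels,
# which the evaluate helpers rely on.
train_gen = datagen.flow_from_directory(
    "data/train", target_size=(224, 224), class_mode="binary",
    batch_size=config.BATCH_SIZE, shuffle=False)
validation_gen = datagen.flow_from_directory(
    "data/validation", target_size=(224, 224), class_mode="binary",
    batch_size=config.BATCH_SIZE, shuffle=False)
test_gen = datagen.flow_from_directory(
    "data/test", target_size=(224, 224), class_mode="binary",
    batch_size=config.BATCH_SIZE, shuffle=False)

metrics = test_model(model, train_gen, validation_gen, test_gen)

# Print just the scalar metrics; the probability and label lists are omitted.
print(json.dumps({k: v for k, v in metrics.items()
                  if not isinstance(v, list)}, indent=2))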
Example #3
import math

from sklearn.metrics import f1_score

import config  # project-local settings module (provides BATCH_SIZE)
import evaluate  # project-local helpers used below


def test_model(model, train, validation, test):  # , holdout_test):

    train_loss, train_accuracy = model.evaluate_generator(
        train, steps=math.ceil(len(train) / config.BATCH_SIZE))
    loss, accuracy = model.evaluate_generator(
        validation, steps=math.ceil(len(validation) / config.BATCH_SIZE))
    test_loss, test_accuracy = model.evaluate_generator(
        test, steps=math.ceil(len(test) / config.BATCH_SIZE))
    # holdout_test_loss, holdout_test_accuracy = model.evaluate_generator(holdout_test, steps=math.ceil(len(holdout_test)/config.BATCH_SIZE))

    train.reset()
    validation.reset()
    test.reset()
    # holdout_test.reset()

    # labels - ground truths
    # results - predicted results from model
    results = evaluate.get_results(model, validation)
    probabilities = list(evaluate.transform_binary_probabilities(results))
    labels = list(evaluate.get_labels(validation))

    test_results = evaluate.get_results(model, test)
    test_probabilities = list(
        evaluate.transform_binary_probabilities(test_results))
    test_labels = list(evaluate.get_labels(test))

    # holdout_test_results = evaluate.get_results(model, holdout_test)
    # holdout_test_probabilities = list(evaluate.transform_binary_probabilities(holdout_test_results))
    # holdout_test_labels = list(evaluate.get_labels(holdout_test))

    train.reset()
    validation.reset()
    test.reset()
    # holdout_test.reset()

    # get binary predictions
    # holdout_binary_predictions = list(evaluate.transform_binary_predictions(holdout_test_results))
    test_binary_predictions = list(
        evaluate.transform_binary_predictions(test_results))
    # get f1 score
    test_f1_result = f1_score(test_labels, test_binary_predictions)
    # holdout_f1_result = f1_score(holdout_test_labels, holdout_binary_predictions)

    return {
        "train_accuracy": float(train_accuracy),
        "train_loss": float(train_loss),
        "accuracy": float(accuracy),
        "loss": float(loss),
        "test_accuracy": float(test_accuracy),
        "test_loss": float(test_loss),
        # "holdout_test_accuracy": float(holdout_test_accuracy),
        # "holdout_test_loss": float(holdout_test_loss),
        "holdout_test_accuracy": float(0),
        "holdout_test_loss": float(0),
        "probabilities": probabilities,
        "labels": labels,
        "test_probabilities": test_probabilities,
        "test_labels": test_labels,
        # "holdout_test_probabilities": holdout_test_probabilities,
        # "holdout_test_labels": holdout_test_labels,
        "holdout_test_probabilities": 'na',
        "holdout_test_labels": 'na',
        "test_f1_result": test_f1_result,
        # "holdout_f1_result": holdout_f1_result,
        "holdout_f1_result": float(0),
    }
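Example #3 additionally relies on an evaluate.transform_binary_predictions helper and scikit-learn's f1_score. A minimal sketch of that helper, assuming a 0.5 decision threshold (the original implementation is not shown):

import numpy as np
from sklearn.metrics import f1_score


def transform_binary_predictions(results, threshold=0.5):
    # Turn raw sigmoid probabilities into hard 0/1 class predictions so they
    # can be scored against the ground-truth labels.
    return (np.asarray(results).ravel() >= threshold).astype(int)


# f1_score expects the ground-truth labels first, then the predictions:
# test_f1_result = f1_score(test_labels, transform_binary_predictions(test_results))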