"Post-process: Ensemble + Z-Filtering - Test Foreground IoU (merge into complete image): {}"
    .format(ens_zfil_iou_per_image))
print(
    "Post-process: Ensemble + Z-Filtering - Test Overall IoU (merge into complete image): {}"
    .format(ens_zfil_ov_iou_per_image))

print("Test Foreground IoU (merge with 50% overlap): {}".format(iou_50ov))
print("Test Overall IoU (merge with 50% overlap): {}".format(ov_iou_50ov))
print(
    "Post-process: Ensemble - Test Foreground IoU (merge with 50% overlap): {}"
    .format(ens_iou_50ov))
print("Post-process: Ensemble - Test Overall IoU (merge with 50% overlap): {}".
      format(ens_ov_iou_50ov))
print(
    "Post-process: Ensemble + Z-Filtering - Test Foreground IoU (merge with 50% overlap): {}"
    .format(ens_zfil_iou_50ov))
print(
    "Post-process: Ensemble + Z-Filtering - Test Overall IoU (merge with 50% overlap): {}"
    .format(ens_zfil_ov_iou_50ov))

if not load_previous_weights:
    # Gather every metric variable defined above (per-crop, 50%-overlap,
    # per-image and full-image scores) into a single dictionary.
    scores = {}
    for name in dir():
        if not name.startswith('__') and ("_per_crop" in name or "_50ov" in name
                                          or "_per_image" in name or "_full" in name):
            scores[name] = eval(name)

    create_plots(results, job_identifier, char_dir, metric=metric)

print("FINISHED JOB {} !!".format(job_identifier))
Example No. 2
print("Post-process: Spurious Detection - Test Foreground IoU (full): {}".format(
    spu_score_full))
print("Post-process: Spurious Detection - Overall IoU (full): {}".format(
    spu_ov_iou_full))
print("Post-process: Spurious Detection - DET (full): {}".format(spu_det_full))
print("Post-process: Watershed - Test Foreground IoU (full): {}".format(
    wa_score_full))
print(
    "Post-process: Watershed - Overall IoU (full): {}".format(wa_ov_iou_full))
print("Post-process: Watershed - DET (full): {}".format(wa_det_full))
print(
    "Post-process: Spurious + Watershed + Z-Filtering - Test Foreground IoU (full): {}"
    .format(spu_wa_zfil_score_full))
print(
    "Post-process: Spurious + Watershed + Z-Filtering - Test Overall IoU (full): {}"
    .format(spu_wa_zfil_ov_iou_full))
print("Post-process: Spurious + Watershed + Z-Filtering - Test DET (full): {}".
      format(spu_wa_zfil_det_full))

if not load_previous_weights:
    # Gather every metric variable defined above (per-crop, 50%-overlap,
    # per-image and full-image scores) into a single dictionary.
    scores = {}
    for name in dir():
        if not name.startswith('__') and ("_per_crop" in name or "_50ov" in name
                                          or "_per_image" in name or "_full" in name):
            scores[name] = eval(name)

    store_history(results, scores, time_callback, args.result_dir,
                  job_identifier)
    create_plots(results, job_identifier, char_dir)

print("FINISHED JOB {} !!".format(job_identifier))
Example No. 3
def main():
    # TRAINING HYPERPARAMETERS
    # Modify the following lines to change the training hyperparameters.

    # Regularisation strength
    reg_lambda = 0.0

    # Learning rate
    learning_rate = 0.07

    # Number of training iterations
    niterations = 100

    # Loss function to use (select one and comment out the other)
    loss_function = LogisticLoss()
    # loss_function = HingeLoss()

    # Type of regularisation to use (select one and comment out the other)
    # regulariser = L1Regulariser()
    regulariser = L2Regulariser()

    # This should only be enabled once you've decided on a final set of hyperparameters
    enable_test_set_scoring = True

    # Whether to use the perceptron learning algorithm instead of loss-based training
    enable_perceptron = True

    # Type of features to use. This can be set to 'bigram' or 'unigram+bigram' to use
    # bigram features instead of, or in addition to, unigram features.
    # Not required for the assignment.
    feature_type = 'unigram'

    # END OF HYPERPARAMETERS

    # First test the parts to be implemented and warn if something's wrong.
    print('=============')
    print('SANITY CHECKS')
    print('=============')
    print()

    util.run_tests()

    # Load the data.

    print()
    print('===================')
    print('CLASSIFIER TRAINING')
    print('===================')
    print()
    print('Loading data sets...')

    data_dir = './poldata/poldata.zip'
    data = util.load_movie_data(data_dir)

    data.select_feature_type(feature_type)

    # Split the data set randomly into training, validation and test sets.
    training_data, val_data, test_data = data.train_val_test_split()

    # Train the classifier.
    print('Starting training.')
    if enable_perceptron:
        weights, bias, training_log = train_perceptron(training_data, val_data,
                                                       niterations)
    else:
        weights, bias, training_log = train(training_data, val_data,
                                            loss_function, regulariser,
                                            reg_lambda, learning_rate,
                                            niterations)
    print('Training completed.')

    print()
    print('=====================')
    print('MODEL CHARACTERISTICS')
    print('=====================')
    print()

    # Display some useful statistics about the model and the training process.
    title = 'Data set: %s - Regulariser: %g - Learning rate: %g' % (
        data.name, reg_lambda, learning_rate)

    print()
    util.show_stats(title,
                    training_log,
                    weights,
                    bias,
                    data.vocabulary,
                    top_n=20)
    util.create_plots(title,
                      training_log,
                      weights,
                      log_keys=['training_loss_reg', 'val_loss'])

    if enable_test_set_scoring:
        # Check the performance on the test set.
        test_loss = loss_function.unregularised_loss(weights, bias, test_data)
        test_predictions = predict(weights, bias, test_data)
        test_accuracy = accuracy(test_data.labels, test_predictions)

        print()
        print('====================')
        print('TEST SET PERFORMANCE')
        print('====================')
        print()
        print('Test loss: %g' % test_loss)
        print('Test accuracy: %g' % test_accuracy)
def main(reg_lambda,
         learning_rate,
         loss_function,
         regulariser,
         niterations=10,
         enable_test_set_scoring=False,
         **kwargs):
    global data

    # Type of features to use. This can be set to 'bigram' or 'unigram+bigram' to use
    # bigram features instead of, or in addition to, unigram features.
    # Not required for the assignment.
    feature_type = 'unigram'

    # First test the parts to be implemented and warn if something's wrong.
    print('=============')
    print('SANITY CHECKS')
    print('=============')
    print()

    util.run_tests()

    # Load the data.
    training_data, val_data, test_data, data = load_data(feature_type)

    # Train the classifier.
    print('Starting training.')
    weights, bias, training_log = train(training_data, val_data, loss_function,
                                        regulariser, reg_lambda, learning_rate,
                                        niterations)
    print('Training completed.')

    print()
    print('=====================')
    print('MODEL CHARACTERISTICS')
    print('=====================')
    print()

    # Display some useful statistics about the model and the training process.
    title = 'Data set: %s - Regulariser(%s): %g - Learning rate: %g ' \
            '- Loss Function: %s' % (
                data.name, regulariser, reg_lambda, learning_rate,
                loss_function)

    print()

    # Get final accuracy
    val_predictions = predict(weights, bias, val_data)
    val_accuracy = accuracy(val_data.labels, val_predictions)

    print('Accuracy: %g' % val_accuracy)

    util.show_stats(title,
                    training_log,
                    weights,
                    bias,
                    data.vocabulary,
                    top_n=1,
                    write_to_file="results.csv",
                    configuration={
                        'reg_lambda': reg_lambda,
                        'learning_rate': learning_rate,
                        'loss_function': loss_function,
                        'regulariser': regulariser,
                        'niterations': niterations,
                        'val_accuracy': val_accuracy
                    })

    util.create_plots(title,
                      training_log,
                      weights,
                      log_keys=['training_loss_reg', 'val_loss'])

    if enable_test_set_scoring:
        # Check the performance on the test set.
        test_loss = loss_function.unregularised_loss(weights, bias, test_data)
        test_predictions = predict(weights, bias, test_data)
        test_accuracy = accuracy(test_data.labels, test_predictions)

        print()
        print('====================')
        print('TEST SET PERFORMANCE')
        print('====================')
        print()
        print('Test loss: %g' % test_loss)
        print('Test accuracy: %g' % test_accuracy)
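Because this variant of main() takes the hyperparameters as arguments, it lends itself to a small grid search driven from outside. A hypothetical driver is sketched below; the loss and regulariser classes are the ones used in the earlier example, while the grid values and the loop itself are illustrative and not part of the original code:

# Hypothetical grid-search driver for the parameterised main() above.
# The grid values below are illustrative only.
for loss_function in (LogisticLoss(), HingeLoss()):
    for regulariser in (L1Regulariser(), L2Regulariser()):
        for reg_lambda in (0.0, 0.01, 0.1):
            for learning_rate in (0.01, 0.07):
                main(reg_lambda=reg_lambda,
                     learning_rate=learning_rate,
                     loss_function=loss_function,
                     regulariser=regulariser,
                     niterations=10)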
Example No. 5
import util
import numpy as np

ws1 = util.get_data("data/ws1.dat")
ws2 = util.get_data("data/ws2.dat")
ws3 = util.get_data("data/ws3.dat")

util.hist(ws1, "ws1 hist")
util.hist(ws2, "ws2 hist")
util.hist(ws3, "ws3 hist")

util.create_plots(ws1, "weibull_min", "ws1")
util.create_plots(ws2, "weibull_min", "ws2")
util.create_plots(ws3, "weibull_min", "ws3")

# Q-Q plots against a skew-normal distribution, using each sample mean as the
# distribution parameter. Note that "(m)" is simply m, not a one-element tuple;
# pass "(m,)" instead if util.qqplot expects a tuple of parameters.
m = np.mean(ws1)
util.qqplot(ws1, "skewnorm", (m), "ws1_skewnorm")
m = np.mean(ws2)
util.qqplot(ws2, "skewnorm", (m), "ws2_skewnorm")
m = np.mean(ws3)
util.qqplot(ws3, "skewnorm", (m), "ws3_skewnorm")