Example #1
def train_model_and_plot_stats(model,
                               error,
                               learning_rule,
                               train_data,
                               valid_data,
                               num_epochs,
                               stats_interval,
                               notebook=True):

    # As well as monitoring the error over training, also monitor the classification
    # accuracy, i.e. the proportion of most-probable predicted classes that match the targets.
    data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}

    # Use the created objects to initialise a new Optimiser instance.
    optimiser = Optimiser(model,
                          error,
                          learning_rule,
                          train_data,
                          valid_data,
                          data_monitors,
                          notebook=notebook)

    # Run the optimiser for num_epochs epochs (full passes through the training
    # set), printing statistics every stats_interval epochs.
    stats, keys, run_time = optimiser.train(num_epochs=num_epochs,
                                            stats_interval=stats_interval)

    return stats, keys
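This helper only wires pre-built objects together, so the caller has to construct the model, error, learning rule and data providers first. Below is a minimal sketch of such a call, assuming the MLPractical-style mlp package; the module paths and the MNISTDataProvider, GlorotUniformInit, ConstantInit and GradientDescentLearningRule names are assumptions, not part of the example above.

import numpy as np
from mlp.data_providers import MNISTDataProvider
from mlp.models import MultipleLayerModel
from mlp.layers import AffineLayer, ReluLayer
from mlp.errors import CrossEntropySoftmaxError
from mlp.learning_rules import GradientDescentLearningRule
from mlp.initialisers import GlorotUniformInit, ConstantInit

# seeded RNG so runs are repeatable (assumed convention, as in Example #3)
rng = np.random.RandomState(31102016)
train_data = MNISTDataProvider('train', batch_size=100, rng=rng)
valid_data = MNISTDataProvider('valid', batch_size=100, rng=rng)

# single-hidden-layer classifier: 784 inputs -> 100 hidden units -> 10 classes
weights_init = GlorotUniformInit(rng=rng)
biases_init = ConstantInit(0.)
model = MultipleLayerModel([
    AffineLayer(784, 100, weights_init, biases_init),
    ReluLayer(),
    AffineLayer(100, 10, weights_init, biases_init)
])
error = CrossEntropySoftmaxError()
learning_rule = GradientDescentLearningRule(learning_rate=0.1)

stats, keys = train_model_and_plot_stats(
    model, error, learning_rule, train_data, valid_data,
    num_epochs=100, stats_interval=1, notebook=True)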
Example #2
def train_model_and_plot_stats(
        model, error, learning_rule, train_data, valid_data,
        num_epochs, stats_interval, notebook=True):
    
    # As well as monitoring the error over training, also monitor the classification
    # accuracy, i.e. the proportion of most-probable predicted classes that match the targets.
    data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}

    # Use the created objects to initialise a new Optimiser instance.
    optimiser = Optimiser(
        model, error, learning_rule, train_data, valid_data, data_monitors, notebook=notebook)

    # Run the optimiser for up to num_epochs epochs (full passes through the
    # training set) with early stopping (patience=10), printing statistics
    # every stats_interval epochs.
    stats, keys, run_time = optimiser.train(patience=10,
                                            max_num_epochs=num_epochs,
                                            stats_interval=stats_interval)

    # # Plot the change in the validation and training set error over training.
    # fig_1 = plt.figure(figsize=(8, 4))
    # ax_1 = fig_1.add_subplot(111)
    # for k in ['error(train)', 'error(valid)']:
    #     ax_1.plot(np.arange(1, stats.shape[0]) * stats_interval, 
    #               stats[1:, keys[k]], label=k)
    # ax_1.legend(loc=0)
    # ax_1.set_xlabel('Epoch number')

    # # Plot the change in the validation and training set accuracy over training.
    # fig_2 = plt.figure(figsize=(8, 4))
    # ax_2 = fig_2.add_subplot(111)
    # for k in ['acc(train)', 'acc(valid)']:
    #     ax_2.plot(np.arange(1, stats.shape[0]) * stats_interval, 
    #               stats[1:, keys[k]], label=k)
    # ax_2.legend(loc=0)
    # ax_2.set_xlabel('Epoch number')
    
    return stats, keys  # run_time, fig_1, ax_1, fig_2, ax_2 not returned while plotting is disabled
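With the plotting code commented out, the returned stats array and keys mapping can still be inspected directly. A short sketch, assuming the same 'error(valid)' and 'acc(valid)' monitor names that appear in the commented plotting code:

stats, keys = train_model_and_plot_stats(
    model, error, learning_rule, train_data, valid_data,
    num_epochs=100, stats_interval=1)

# keys maps each monitored statistic's name to its column in stats,
# so the last row holds the values from the final epoch
print('final error(valid) = {0:.4f}'.format(stats[-1, keys['error(valid)']]))
print('final acc(valid)   = {0:.4f}'.format(stats[-1, keys['acc(valid)']]))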
Example #3
    # Seed the random number generator and reset the data providers
    # to ensure reproducibility of results.
    rng.seed(seed)
    train_data.reset()
    valid_data.reset()
    print('Regularisation: {0}'.format(weights_penalty))
    model = MultipleLayerModel([
        AffineLayer(input_dim, hidden_dim, weights_init, biases_init,
                    weights_penalty),
        ReluLayer(),
        AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init,
                    weights_penalty),
        ReluLayer(),
        AffineLayer(hidden_dim, output_dim, weights_init, biases_init,
                    weights_penalty)
    ])
    optimiser = Optimiser(model, error, learning_rule, train_data, valid_data,
                          data_monitors)
    run_info[weights_penalty] = optimiser.train(num_epochs, stats_interval)
    models[weights_penalty] = model

# In[5]:

import matplotlib.pyplot as plt

get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('ggplot')

fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
for weight_penalty, run in run_info.items():
    stats, keys, run_time = run
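The snippet breaks off inside this loop. A plausible completion of the loop body, assuming numpy is available and that stats and keys follow the same layout and 'error(valid)'/'acc(valid)' monitor names as the earlier examples:

import numpy as np

for weight_penalty, run in run_info.items():
    stats, keys, run_time = run
    # one validation-error and one validation-accuracy curve per penalty setting
    epochs = np.arange(1, stats.shape[0]) * stats_interval
    ax1.plot(epochs, stats[1:, keys['error(valid)']],
             label='penalty={0}'.format(weight_penalty))
    ax2.plot(epochs, stats[1:, keys['acc(valid)']],
             label='penalty={0}'.format(weight_penalty))
ax1.set_xlabel('Epoch number')
ax1.set_ylabel('error(valid)')
ax2.set_xlabel('Epoch number')
ax2.set_ylabel('acc(valid)')
ax1.legend(loc=0)
ax2.legend(loc=0)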
Example #4
from mlp.optimisers import SGDOptimiser
from support import train_dp_flat, valid_dp_flat, create_one_hid_model, rng
from mlp.costs import CECost
from mlp.layers import MLP, Softmax, Sigmoid
from mlp.schedulers import LearningRateFixed
from noise_tries import show_mnist_images
import numpy as np
from mlp.dataset import MNISTDataProvider
from mlp.optimisers import Optimiser

tsk3_2_optimiser = Optimiser()
tsk3_2_model = create_one_hid_model()


def tsk_3_2_draw_handler(cur_layer_id, cur_model, get_inputs):
    if cur_layer_id != 0:
        return
    mnist_dp = MNISTDataProvider(dset='valid',
                                 batch_size=4,
                                 max_num_examples=4,
                                 randomize=False)
    for batch in mnist_dp:
        features, targets = batch

        inputs, pure = get_inputs(features)
        output_dc = cur_model.fprop(inputs)
        num_imgs = features.shape[0]
        imgs = features.reshape(num_imgs, 28, 28)
        # noisy versions of the input images
        imgs_ns = inputs.reshape(num_imgs, 28, 28)
        # decoded (reconstructed) images
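The example is cut off right after the decoded-images comment. A hedged guess at the continuation; imgs_dc and the show_mnist_images call are hypothetical, since show_mnist_images comes from the author's own noise_tries module and its signature is not shown:

        # hypothetical continuation -- not part of the original snippet
        imgs_dc = output_dc.reshape(num_imgs, 28, 28)
        # display originals, noisy inputs and reconstructions together
        # (assumes show_mnist_images accepts a stacked array of 28x28 images)
        show_mnist_images(np.vstack((imgs, imgs_ns, imgs_dc)))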