def train_model_and_plot_stats(model, error, learning_rule, train_data,
                               valid_data, num_epochs, stats_interval,
                               notebook=True):
    """Train ``model`` while monitoring error and classification accuracy.

    Args:
        model: Model instance to be trained.
        error: Error (loss) function object.
        learning_rule: Learning rule used to update model parameters.
        train_data: Training-set data provider.
        valid_data: Validation-set data provider.
        num_epochs: Number of full passes through the training set.
        stats_interval: Interval, in epochs, between statistics reports.
        notebook: Whether to use notebook-style progress output.

    Returns:
        Tuple ``(stats, keys)``: the recorded statistics array and a mapping
        from statistic names to column indices in ``stats``.
    """
    # As well as monitoring the error over training, also monitor
    # classification accuracy, i.e. the proportion of most-probable
    # predicted classes that equal the targets.
    data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}

    # Use the created objects to initialise a new Optimiser instance.
    optimiser = Optimiser(model, error, learning_rule, train_data,
                          valid_data, data_monitors, notebook=notebook)

    # Run the optimiser for `num_epochs` full passes through the training
    # set, printing statistics every `stats_interval` epochs.
    # (The previous comment hard-coded "5 epochs", which was stale.)
    stats, keys, run_time = optimiser.train(num_epochs=num_epochs,
                                            stats_interval=stats_interval)

    # run_time is deliberately discarded: callers only use stats and keys.
    return stats, keys
def train_model_and_plot_stats(model, error, learning_rule, train_data,
                               valid_data, num_epochs, stats_interval,
                               notebook=True):
    """Train ``model`` with bounded-patience training, monitoring accuracy.

    NOTE(review): this redefines ``train_model_and_plot_stats`` and shadows
    any earlier definition of the same name in this file. Unlike the earlier
    version, it passes ``patience=10`` and ``max_num_epochs`` to
    ``Optimiser.train`` — presumably early stopping on validation error;
    confirm against the ``Optimiser.train`` signature.

    Args:
        model: Model instance to be trained.
        error: Error (loss) function object.
        learning_rule: Learning rule used to update model parameters.
        train_data: Training-set data provider.
        valid_data: Validation-set data provider.
        num_epochs: Maximum number of full passes through the training set.
        stats_interval: Interval, in epochs, between statistics reports.
        notebook: Whether to use notebook-style progress output.

    Returns:
        Tuple ``(stats, keys)``: the recorded statistics array and a mapping
        from statistic names to column indices in ``stats``.
    """
    # As well as monitoring the error over training, also monitor
    # classification accuracy, i.e. the proportion of most-probable
    # predicted classes that equal the targets.
    data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}

    # Use the created objects to initialise a new Optimiser instance.
    optimiser = Optimiser(model, error, learning_rule, train_data,
                          valid_data, data_monitors, notebook=notebook)

    # Train for up to `num_epochs` epochs with patience 10, printing
    # statistics every `stats_interval` epochs.
    stats, keys, run_time = optimiser.train(patience=10,
                                            max_num_epochs=num_epochs,
                                            stats_interval=stats_interval)

    # Removed a large body of commented-out matplotlib plotting code that
    # built error/accuracy figures (fig_1/ax_1, fig_2/ax_2); it was dead
    # and its outputs were already excluded from the return value.
    # run_time is deliberately discarded: callers only use stats and keys.
    return stats, keys
train_data.reset() valid_data.reset() print('Regularisation: {0}'.format(weights_penalty)) model = MultipleLayerModel([ AffineLayer(input_dim, hidden_dim, weights_init, biases_init, weights_penalty), ReluLayer(), AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init, weights_penalty), ReluLayer(), AffineLayer(hidden_dim, output_dim, weights_init, biases_init, weights_penalty) ]) optimiser = Optimiser(model, error, learning_rule, train_data, valid_data, data_monitors) run_info[weights_penalty] = optimiser.train(num_epochs, stats_interval) models[weights_penalty] = model # In[5]: import matplotlib.pyplot as plt get_ipython().magic(u'matplotlib inline') plt.style.use('ggplot') fig = plt.figure(figsize=(12, 6)) ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) for weight_penalty, run in run_info.items(): stats, keys, run_time = run ax1.plot(np.arange(1, stats.shape[0]) * stats_interval,