Example #1
 def run(self, user_range, mode='-s'):
     assert mode in ('-s', '-p', '-e')
     ids = select_experiments(user_range, self.exp_record_dict)
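     # With '-p' and more than one experiment, fan the ids out over all CPU cores;
     # otherwise run serially, re-raising exceptions only when mode is '-e'.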
     if len(ids) > 1 and mode == '-p':
         import multiprocessing
         p = multiprocessing.Pool(processes=multiprocessing.cpu_count())
         p.map(partial(run_experiment_ignoring_errors, **self.run_args),
               ids)
     else:
         for experiment_identifier in ids:
             load_experiment(experiment_identifier).run(
                 raise_exceptions=mode == '-e', **self.run_args)
     if self.close_after_run:
         return self.QUIT
     else:
         _warn_with_prompt('Finished running {} experiment{}.'.format(
             len(ids), '' if len(ids) == 1 else 's'))
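
Example #1's '-p' branch relies on functools.partial (imported at module level, not shown in the snippet) to bind self.run_args before multiprocessing.Pool.map fans the experiment ids out to worker processes. Below is a minimal, self-contained sketch of that pattern; run_one and the ids list are stand-ins for run_experiment_ignoring_errors and the selected ids, not code from the snippet:

import multiprocessing
from functools import partial

def run_one(exp_id, verbose=False):
    # Stand-in for run_experiment_ignoring_errors: just echoes its arguments.
    return exp_id, verbose

if __name__ == '__main__':
    ids = ['exp_a', 'exp_b', 'exp_c']
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    # partial binds the keyword arguments up front; map then feeds each id in as the first positional argument.
    results = pool.map(partial(run_one, verbose=True), ids)
    pool.close()
    pool.join()
    print(results)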
Example #2
def compare_results(results, names=None):
    """Plot training/test score against epoch, operation count and estimated energy for each experiment."""
    if names is None:  # fall back to labelling curves with the raw experiment names
        names = {exp_name: exp_name for exp_name in results}

    e_ops = {}
    e_args = {}
    e_energies = {}
    training_scores = {}
    test_scores = {}
    e_epochs = {}
    for exp_name, (learning_curves, op_count_info) in results.iteritems():
        infos, ops = zip(*op_count_info)
        e_epochs[exp_name] = [info.epoch for info in infos]
        ops = np.array(ops)
        ops[:, 1, :] = 0  # Because we don't actually have to do the backward pass of the first layer.
        ops = ops.sum(axis=2).sum(axis=1)
        e_ops[exp_name] = ops
        arg = load_experiment(exp_name).get_args()
        e_args[exp_name] = arg
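        # 'swap_mlp' appears to select a conventional MLP, so its cost is counted in multiply-adds;
        # otherwise only additions are counted and the 'herded' predictions are scored.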
        if arg['swap_mlp']:
            e_energies[exp_name] = estimate_energy_cost(n_ops=ops,
                                                        op='mult-add',
                                                        dtype='int')
            training_scores[exp_name] = learning_curves.get_values(
                subset='train', prediction_function=None, score_measure=None)
            test_scores[exp_name] = learning_curves.get_values(
                subset='test', prediction_function=None, score_measure=None)
        else:
            e_energies[exp_name] = estimate_energy_cost(n_ops=ops,
                                                        op='add',
                                                        dtype='int')
            training_scores[exp_name] = learning_curves.get_values(
                subset='train',
                prediction_function='herded',
                score_measure=None)
            test_scores[exp_name] = learning_curves.get_values(
                subset='test',
                prediction_function='herded',
                score_measure=None)

    with hstack_plots(ylabel='Temporal MNIST Score', grid=True,
                      ylim=(85, 102)):
        ax = add_subplot()
        for exp_name in results:
            colour = next(plt.gca()._get_lines.prop_cycler)['color']  # one colour shared by the training and test curves
            plt.plot(e_epochs[exp_name],
                     training_scores[exp_name],
                     color=colour,
                     label=names[exp_name] + ':Training')
            plt.plot(e_epochs[exp_name],
                     test_scores[exp_name],
                     color=colour,
                     label=names[exp_name] + ':Test')
            plt.xlabel('Epoch')
            plt.legend()
            plt.xlim(0, 50)

        ax = add_subplot()
        for exp_name in results:
            colour = next(plt.gca()._get_lines.prop_cycler)['color']
            plt.plot(e_ops[exp_name] / 1e9,
                     training_scores[exp_name],
                     color=colour,
                     label=names[exp_name] + ':Training')
            plt.plot(e_ops[exp_name] / 1e9,
                     test_scores[exp_name],
                     color=colour,
                     label=names[exp_name] + ':Test')
            plt.xlabel('GOps')
            plt.legend()

        ax = add_subplot()
        for exp_name in results:
            colour = next(plt.gca()._get_lines.prop_cycler)['color']
            plt.plot(e_energies[exp_name] / 1e9,
                     training_scores[exp_name],
                     color=colour,
                     label=names[exp_name] + ':Training')
            plt.plot(e_energies[exp_name] / 1e9,
                     test_scores[exp_name],
                     color=colour,
                     label=names[exp_name] + ':Test')
            plt.xlabel('Energies (mJ)')
            plt.legend()

    plt.show()
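
estimate_energy_cost is project-specific and not shown here; all that the branches above rely on is that a multiply-add is charged more energy than a plain add. A toy stand-in under that assumption, with placeholder per-operation figures chosen purely for illustration:

# Hypothetical stand-in for estimate_energy_cost; the picojoule values below are
# illustrative placeholders, not the constants the real function uses.
_ENERGY_PJ_PER_OP = {('add', 'int'): 0.1, ('mult-add', 'int'): 3.2}

def rough_energy_cost(n_ops, op='add', dtype='int'):
    # n_ops may be a scalar or a numpy array of per-epoch operation counts.
    return n_ops * _ENERGY_PJ_PER_OP[(op, dtype)]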
Example #3
def do_the_figure(mnist_results,
                  temporal_mnist_results,
                  names=None,
                  predict_on='herded',
                  remove_prefix=True,
                  title=None):
    """Plot MNIST (top row) and Temporal MNIST (bottom row) scores against epoch, GOps and estimated energy."""
    if names is None:  # fall back to labelling curves with the raw experiment names
        names = {name: name for name in list(mnist_results) + list(temporal_mnist_results)}

    plots_shape = (2, 3)
    plt.figure(figsize=(6, 4))
    plt.subplots_adjust(wspace=0, hspace=0, top=0.92, right=0.95)

    for row, dataset in enumerate(['MNIST', 'Temporal MNIST']):

        results = mnist_results if dataset == 'MNIST' else temporal_mnist_results

        e_ops = {}
        e_args = {}
        e_energies = {}
        training_scores = {}
        test_scores = {}
        e_epochs = {}
        for exp_name, (learning_curves, op_count_info) in results.iteritems():
            infos, ops = zip(*op_count_info)
            e_epochs[exp_name] = [info.epoch for info in infos]
            ops = np.array(ops)
            ops[:, 1, :] = 0  # Because we don't actually have to do the backward pass of the first layer.
            ops = ops.sum(axis=2).sum(axis=1)
            e_ops[exp_name] = ops
            arg = load_experiment(exp_name).get_args()
            e_args[exp_name] = arg
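            # As in compare_results above: 'swap_mlp' experiments are costed in multiply-adds,
            # the rest in additions, with their predictions taken from predict_on.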
            if arg['swap_mlp']:
                e_energies[exp_name] = estimate_energy_cost(n_ops=ops,
                                                            op='mult-add',
                                                            dtype='int')
                training_scores[exp_name] = learning_curves.get_values(
                    subset='train',
                    prediction_function=None,
                    score_measure=None)
                test_scores[exp_name] = learning_curves.get_values(
                    subset='test',
                    prediction_function=None,
                    score_measure=None)
            else:
                e_energies[exp_name] = estimate_energy_cost(n_ops=ops,
                                                            op='add',
                                                            dtype='int')
                training_scores[exp_name] = learning_curves.get_values(
                    subset='train',
                    prediction_function=predict_on,
                    score_measure=None)
                test_scores[exp_name] = learning_curves.get_values(
                    subset='test',
                    prediction_function=predict_on,
                    score_measure=None)
            print 'Scores for {}\n  Training: {}\n  Test: {}\n  GOps: {}'.format(
                exp_name, np.max(training_scores[exp_name]),
                np.max(test_scores[exp_name]),
                ops.sum() / 1e9)

        ylim_args = (91, 101)
        ax = plt.subplot2grid(plots_shape, (row, 0))
        for i, exp_name in enumerate(results):
            plt.semilogx(e_epochs[exp_name],
                         training_scores[exp_name],
                         color=get_line_color(i),
                         label=names[exp_name] + ':Training',
                         linestyle='--')
            plt.semilogx(e_epochs[exp_name],
                         test_scores[exp_name],
                         color=get_line_color(i, 'dark'),
                         label=names[exp_name] + ':Test')

            ax.grid(True)
            # plt.legend()
            if row == 0:
                plt.tick_params('x', labelbottom='off')
            else:
                plt.xlabel('Epoch')
            plt.ylabel('% Score on \n{}'.format(dataset))
            plt.ylim(*ylim_args)
            plt.xlim(0.5, 50)

        plt.subplot2grid(plots_shape, (row, 1))
        for i, exp_name in enumerate(results):
            plt.semilogx(e_ops[exp_name] / 1e9,
                         training_scores[exp_name],
                         color=get_line_color(i),
                         label=names[exp_name] + ':Training',
                         linestyle='--')
            plt.semilogx(e_ops[exp_name] / 1e9,
                         test_scores[exp_name],
                         color=get_line_color(i, 'dark'),
                         label=names[exp_name] + ':Test')
            # plt.legend()
            if row == 0:
                plt.tick_params('x', labelbottom='off')
            else:
                plt.xlabel('GOps')
            plt.grid(True)
            plt.tick_params('y', labelleft='off')
            plt.ylim(*ylim_args)
            plt.xlim(5, 1000)

        plt.subplot2grid(plots_shape, (row, 2))
        for i, exp_name in enumerate(results):
            plt.semilogx(e_energies[exp_name] / 1e9,
                         training_scores[exp_name],
                         color=get_line_color(i),
                         label=names[exp_name] + ':Training',
                         linestyle='--')
            plt.semilogx(e_energies[exp_name] / 1e9,
                         test_scores[exp_name],
                         color=get_line_color(i, 'dark'),
                         label=names[exp_name] + ':Test')
            # plt.legend()
            if row == 0:
                plt.tick_params('x', labelbottom='off')
            else:
                plt.xlabel('Energies (mJ)')
            plt.grid(True)
            plt.tick_params('y', labelleft='off')
            plt.ylim(*ylim_args)
            plt.xlim(.2, 2000)

    handles, labels = plt.gca().get_legend_handles_labels()
    plt.legend(handles,
               labels,
               bbox_to_anchor=(1, 1),
               bbox_transform=plt.gcf().transFigure,
               ncol=len(handles),
               loc='upper right')
    plt.show()
Example #4
 def call(self, user_range):
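     # Call each selected experiment directly, then return QUIT if close_after_run is set.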
     ids = select_experiments(user_range, self.exp_record_dict)
     for experiment_identifier in ids:
         load_experiment(experiment_identifier)()
     if self.close_after_run:
         return self.QUIT
Example #5
 def test(self, user_range):
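     # Run the test routine of each selected experiment.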
     ids = select_experiments(user_range, self.exp_record_dict)
     for experiment_identifier in ids:
         load_experiment(experiment_identifier).test()