Example #1
def test_experiment_interface():

    register_experiment(name='my_test_experiment',
                        function=_run_experiment,
                        description="See if this thing works",
                        conclusion="It does.")

    exp_rec = run_experiment('my_test_experiment', keep_record=True)
    print(get_experiment_info('my_test_experiment'))
    assert exp_rec.get_log() == 'aaa\nbbb\n'
    same_exp_rec = load_experiment(
        get_latest_experiment_identifier(name='my_test_experiment'))
    assert same_exp_rec.get_log() == 'aaa\nbbb\n'
    same_exp_rec.delete()
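
The assertions above pin down what `_run_experiment` has to do: print 'aaa', draw some figures, then print 'bbb', so the captured log is exactly 'aaa\nbbb\n'. A minimal sketch consistent with that (Example #7 below shows the fuller version this test actually uses; this one is only illustrative):

import matplotlib.pyplot as plt
import numpy as np

def _run_experiment():
    # Print, plot, print: the experiment record captures the two printed
    # lines as the log and stores the produced figure.
    print('aaa')
    plt.figure()
    plt.plot(np.random.randn(10))
    plt.show()
    print('bbb')
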
Example #2
    # Train and periodically report the test score.
    results = assess_online_predictor(
        dataset=dataset,
        predictor=predictor,
        evaluation_function='percent_argmax_correct',
        test_epochs=sqrtspace(0, n_epochs, n_test_points),
        minibatch_size=minibatch_size
    )

    plot_learning_curves(results)
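
sqrtspace(0, n_epochs, n_test_points) is presumably a square-root-spaced schedule, so the predictor is tested more densely early in training, when the score changes fastest. A hypothetical stand-in (the exact spacing rule is an assumption, not the library's definition):

import numpy as np

def sqrtspace(start, stop, n_points):
    # n_points values whose square roots are evenly spaced between
    # sqrt(start) and sqrt(stop): dense near the start, sparse near the end.
    return np.linspace(np.sqrt(start), np.sqrt(stop), n_points) ** 2

print(sqrtspace(0, 10, 5))  # approximately [0., 0.625, 2.5, 5.625, 10.]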


register_experiment(
    name = 'mnist-multinomial-regression',
    function = lambda: demo_mnist_online_regression(regressor_type='multinomial'),
    description = 'Simple multinomial regression (a.k.a. One-layer neural network) on MNIST',
    conclusion = 'Gets to about 92.5'
    )

register_experiment(
    name = 'mnist-multinomial-regression-nobias',
    function = lambda: demo_mnist_online_regression(regressor_type='multinomial', include_biases=False),
    description = 'Simple multinomial regression (a.k.a. One-layer neural network) on MNIST',
    conclusion = "Also gets to about 92.5.  So at least for MNIST you don't really need a bias term."
    )

register_experiment(
    name = 'mnist-linear-regression',
    function = lambda: demo_mnist_online_regression(regressor_type='linear', learning_rate=0.01),
    description = 'Simple multinomial regression (a.k.a. One-layer neural network) on MNIST',
    conclusion = 'Requires a lower learning rate for stability, and then only makes it to around 86%'
Example #3
    results = assess_online_predictor(
        dataset=dataset,
        predictor=predictor,
        evaluation_function='percent_argmax_correct',
        test_epochs=sqrtspace(0, n_epochs, n_test_points),
        minibatch_size=minibatch_size,
        test_callback=vis_callback if visualize_params else None
    )

    if plot:
        plot_learning_curves(results)


register_experiment(
    name = 'MNIST-tanh-MLP[300,10]',
    function = lambda: demo_mnist_mlp(hidden_sizes=[300], hidden_activation='tanh', learning_rate=0.03),
    description='Baseline.  Gets 97.45% test score within 10 epochs.'
    )

register_experiment(
    name = 'MNIST1000_onelayer_minibatch-20',
    function = lambda: demo_mnist_mlp(learning_rate= 0.03, hidden_sizes = [], minibatch_size=20, max_training_samples=1000, n_epochs=1000),
    description='How does a single-layer (logistic-regression) net do on MNIST-1000?',
    conclusion='Gets to about 87.5% before overfitting its way down to 86.7.  100% on training.'
    )

register_experiment(
    name = 'MNIST1000_MLP[300,10]_minibatch-20',
    function = lambda: demo_mnist_mlp(learning_rate= 0.03, minibatch_size=20, max_training_samples=1000, n_epochs=1000)
    )
Example #4
                                lin_dtp,
                                initial_mag=2):
    return DifferenceTargetMLP(
        layers=[
            PerceptronLayer.from_initializer(n_in, n_out, initial_mag=initial_mag, lin_dtp=lin_dtp)
            for n_in, n_out in zip([input_size] + hidden_sizes, hidden_sizes + [output_size])
            ],
        output_cost_function=None
        ).compile()
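
The zip above pairs consecutive layer sizes, so each PerceptronLayer receives its fan-in and fan-out. A quick illustration with hypothetical sizes:

# With input_size=784, hidden_sizes=[400], output_size=10, the pairing is:
input_size, hidden_sizes, output_size = 784, [400], 10
pairs = list(zip([input_size] + hidden_sizes, hidden_sizes + [output_size]))
print(pairs)  # [(784, 400), (400, 10)] -- one (n_in, n_out) pair per layer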


register_experiment(
    name='single-level-perceptron-DTP',
    function=lambda: demo_perceptron_dtp(hidden_sizes=[], n_epochs=60),
    description="Just to verify we're not crazy.  This should be equiv to a single layer perceptron (without biases though)",
    conclusion="Hovers kind of noisily just below 85%, as expected.")

register_experiment(
    name='multi-level-perceptron-DTP',
    function=lambda: demo_perceptron_dtp(
        hidden_sizes=[400], n_epochs=60, lin_dtp=False),
    description="Try DTP with one hidden layer using sign-activation units and the perceptron learning rule",
    conclusion="Doesn't work at all at all.")

register_experiment(
    name='multi-level-perceptron-LinDTP',
    function=lambda: demo_perceptron_dtp(
        hidden_sizes=[400], n_epochs=60, lin_dtp=True),
    )
Example #5
                manifold_means, _ = decoder_mean_fcn()
            dbplot(
                manifold_means.reshape(manifold_grid_size, manifold_grid_size,
                                       28, 28),
                'First 2-dimensions of manifold.')
        if i % calculation_interval == 0:
            training_lower_bound = lower_bound_fcn(training_data)
            test_lower_bound = lower_bound_fcn(test_data)
            print('Epoch: %s, Training Lower Bound: %s, Test Lower bound: %s' %
                  (i*minibatch_size/float(len(training_data)), training_lower_bound, test_lower_bound))
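
The reported 'Epoch' is simply the fraction of the training set seen so far: minibatches processed, times minibatch size, divided by the dataset size. With hypothetical numbers:

# Hypothetical numbers: 50000 training samples, minibatches of 100.
minibatch_size, n_training = 100, 50000
i = 2500                                        # minibatches processed so far
print(i * minibatch_size / float(n_training))   # 5.0 -> five full passes (epochs)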


register_experiment(
    name='mnist-vae-20d-binary_in',
    function=lambda: demo_simple_vae_on_mnist(
        z_dim=20, hidden_sizes=[200], binary_x=True),
    description="Try encoding MNIST with a variational autoencoder.",
    conclusion="Looks good.  Within about 20 epochs we're getting reasonablish samples, lower bound of -107."
)

register_experiment(
    name='mnist-vae-20d-continuous_in',
    function=lambda: demo_simple_vae_on_mnist(
        z_dim=20, hidden_sizes=[200], binary_x=False, gaussian_min_var=0.01),
    description="Try encoding MNIST with a variational autoencoder, this time treating the input as a continuous variable",
    conclusion="Need to set minimum variance.  Recognisable digits come out, but then instabilities."
)

register_experiment(
Example #6

def run_and_plot(training_scheme):
    learning_curves = training_scheme()
    plot_learning_curves(learning_curves)


register_experiment(
    'standard_dtp',
    function=partial(demo_dtp_varieties, predictors=['MLP', 'DTP']),
    description="Train Difference Target Propagation on MNIST using standard settings, compare to backprop.  This will "
                "be used as a baseline against other experiments.",
    versions={
        '10_epoch': dict(n_epochs=10),
        '20_epoch': dict(n_epochs=20)
    },
    current_version='10_epoch',
    conclusion="""
        After 10 epochs:
            MLP: 97.32
            DTP: 96.61

        """)

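Assuming that versions supplies keyword-argument overrides for the registered function and that current_version picks which set runs by default (an assumption about the framework, not something shown in these excerpts), the '10_epoch' version of 'standard_dtp' would be roughly equivalent to:

from functools import partial

# Hypothetical plain-Python equivalent of the '10_epoch' version:
# the version's kwargs are merged into the registered callable.
run_10_epoch = partial(demo_dtp_varieties, predictors=['MLP', 'DTP'], n_epochs=10)
# learning_curves = run_10_epoch(); plot_learning_curves(learning_curves)
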
register_experiment(
    'hidden_types',
    function=partial(demo_dtp_varieties,
                     predictors=['MLP', 'DTP'],
                     n_epochs=20),
    description=
Example #7
        print('aaa')
        plt.figure('sensible defaults')
        dat = np.random.randn(4, 5)
        plt.subplot(211)
        plt.imshow(dat)
        plt.subplot(212)
        plt.imshow(dat, interpolation='nearest', cmap='gray')
        plt.show()
        print('bbb')
        plt.figure()
        plt.plot(np.random.randn(10))
        plt.show()


register_experiment(name='test_experiment',
                    description="Testing the experiment framework",
                    function=_run_experiment,
                    conclusion="Nothing to mention")


def test_experiment_with():

    delete_experiment_with_id('test_exp')

    with record_experiment(identifier='test_exp',
                           print_to_console=True) as exp_rec:
        _run_experiment()

    assert exp_rec.get_log() == 'aaa\nbbb\n'
    figs = exp_rec.show_figures()
    assert len(exp_rec.get_figure_locs()) == 2
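
The 'test_experiment' registered above could presumably also be driven through the name-based interface from Example #1 instead of the record_experiment context manager; a hedged sketch using only calls shown in these examples:

def test_experiment_via_registry():
    # Same checks as above, but going through the registry
    # (interface as in Example #1).
    exp_rec = run_experiment('test_experiment', keep_record=True)
    assert exp_rec.get_log() == 'aaa\nbbb\n'
    assert len(exp_rec.get_figure_locs()) == 2
    exp_rec.delete()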