Example #1
def test_get_latest():
    experiment_1 = run_experiment('test_experiment', keep_record=True)
    time.sleep(0.01)  # Small delay so the second run is unambiguously the more recent one.
    experiment_2 = run_experiment('test_experiment', keep_record=True)
    identifier = get_latest_experiment_identifier('test_experiment')
    assert identifier == experiment_2.get_identifier()

    atexit.register(lambda: shutil.rmtree(experiment_1.get_dir()))
    atexit.register(lambda: shutil.rmtree(experiment_2.get_dir()))
Example #2
def test_run_and_show():
    """
    This is nice because it is no longer required that an experiment be run and shown in a
    single session - each experiment just has a unique identifier that can be used to show
    its results whenever.
    """
    experiment_record = run_experiment('test_experiment', keep_record=True)
    show_experiment(experiment_record.get_identifier())

    # Delay cleanup otherwise the show complains that file does not exist due to race condition.
    atexit.register(lambda: shutil.rmtree(experiment_record.get_dir()))
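
The docstring above relies on the identifier outliving the session. A minimal sketch of the later-session half of that workflow, using only functions that already appear in these examples (the name 'test_experiment' is simply the one from the test above):

    # In a later session: look up the most recent record for this experiment
    # by name and display it, without re-running anything.
    identifier = get_latest_experiment_identifier('test_experiment')
    if identifier is not None:
        show_experiment(identifier)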
Example #3
def test_experiment_interface():

    register_experiment(name='my_test_experiment',
                        function=_run_experiment,
                        description="See if this thing works",
                        conclusion="It does.")

    exp_rec = run_experiment('my_test_experiment', keep_record=True)
    print(get_experiment_info('my_test_experiment'))
    assert exp_rec.get_log() == 'aaa\nbbb\n'
    same_exp_rec = load_experiment(
        get_latest_experiment_identifier(name='my_test_experiment'))
    assert same_exp_rec.get_log() == 'aaa\nbbb\n'
    same_exp_rec.delete()
Example #4
register_experiment(
    name = 'mnist-multinomial-regression',
    function = lambda: demo_mnist_online_regression(regressor_type='multinomial'),
    description = 'Simple multinomial regression (a.k.a. One-layer neural network) on MNIST',
    conclusion = 'Gets to about 92.5%.'
    )

register_experiment(
    name = 'mnist-multinomial-regression-nobias',
    function = lambda: demo_mnist_online_regression(regressor_type='multinomial', include_biases=False),
    description = 'Simple multinomial regression (a.k.a. One-layer neural network) on MNIST',
    conclusion = "Also gets to about 92.5.  So at least for MNIST you don't really need a bias term."
    )

register_experiment(
    name = 'mnist-linear-regression',
    function = lambda: demo_mnist_online_regression(regressor_type='linear', learning_rate=0.01),
    description = 'Simple linear regression (a.k.a. one-layer linear network) on MNIST',
    conclusion = 'Requires a lower learning rate for stability, and then only makes it to around 86%'
    )

register_experiment(
    name = 'mnist-logistic-regression',
    function = lambda: demo_mnist_online_regression(regressor_type='logistic'),
    description = 'Simple logistic regression on MNIST',
    conclusion = 'Gets just over 92%'
    )

if __name__ == '__main__':

    run_experiment('mnist-linear-regression')
Example #5
    versions = dict(
        bent = dict(alpha = 0.5),
        abs = dict(alpha = -1),
        relu = dict(alpha = 0),
        small = dict(alpha = 0.05),
        deep_relu = dict(alpha = 0.05, hidden_sizes = [300, 300]),
        deep_abs = dict(alpha = -1, hidden_sizes = [300, 300]),

    ),
    current_version = 'deep_abs',
    conclusion="""
        relu: 97.78
        bent: 96.04
        abs: 98.16
        small: 97.67

        deep_relu: 97.76
        deep_abs: 97.91

    """
    )


if __name__ == '__main__':

    which_experiment = 'mnist_mlp_leaky_relu'
    set_test_mode(False)

    logging.getLogger().setLevel(logging.INFO)
    run_experiment(which_experiment)
Example #6
    ),
    description="Now try with normalized-relu units",
    conclusion=
    "Works, kind of, gets to like 93.5%.  Most hidden units seem to die.  At least it doesn't explode."
)

register_experiment(
    name='all-softplus-dtp',
    function=lambda: demo_run_dtp_on_mnist(
        input_activation='softplus',
        hidden_activation='softplus',
        output_activation='softplus',
        optimizer_constructor=lambda: SimpleGradientDescent(eta=0.01),
    ),
    description=
    "DTP with an entirely softplus network.  It's known that RELUs have some problems as autoencoders, so we try softplus",
    conclusion=
    "Works badly for a while, and then explodes and doesn't work at all.")
"""
Other experiments done by changing code temporarily (and so not available here)

all-relu-dtp-nobias
We try removing biases from Difference Target Propagation with ReLU units.  This
causes the explosion to happen every time, after a score of about 91% is reached.  We can
compensate by reducing the learning rate to 0.001, but then it takes forever to converge.
There's basically no middle ground - if you want a bearable learning rate, you get explosions.
"""

if __name__ == '__main__':
    run_experiment('all-softplus-dtp')
Example #7

register_experiment(
    name='mnist-vae-20d-binary_in',
    function=lambda: demo_simple_vae_on_mnist(
        z_dim=20, hidden_sizes=[200], binary_x=True),
    description="Try encoding MNIST with a variational autoencoder.",
    conclusion=
    "Looks good.  Within about 20 epochs we're getting reasonablish samples, lower bound of -107."
)

register_experiment(
    name='mnist-vae-20d-continuous_in',
    function=lambda: demo_simple_vae_on_mnist(
        z_dim=20, hidden_sizes=[200], binary_x=False, gaussian_min_var=0.01),
    description=
    "Try encoding MNIST with a variational autoencoder, this time treating the input as a continuous variable",
    conclusion=
    "Need to set minimum variance.  Recognieseable digits come out, but then instabilities."
)

register_experiment(
    name='mnist-vae-2latent',
    function=lambda: demo_simple_vae_on_mnist(
        z_dim=2, hidden_sizes=[400, 200], binary_x=True),
    description='Try a deeper network with just a 2-dimensional latent space.')

if __name__ == '__main__':

    run_experiment('mnist-vae-2latent')
Example #8
    )


def run_and_plot(training_scheme):
    learning_curves = training_scheme()
    plot_learning_curves(learning_curves)


def get_experiments():
    training_schemes = {
        'adamax-showdown': mnist_adamax_showdown,
        'mlp-normalization': mlp_normalization,
    }
    experiments = {
        # Bind each scheme via a default argument so every lambda keeps its own scheme.
        name: lambda sc=scheme: run_and_plot(sc)
        for name, scheme in training_schemes.items()
    }
    return experiments


if __name__ == '__main__':

    test_mode = False
    experiment = 'adamax-showdown'

    set_test_mode(test_mode)
    run_experiment(experiment,
                   exp_dict=get_experiments(),
                   show_figs=None,
                   print_to_console=True)