Example #1
                noise = 1,
            ).compile()
            },
        minibatch_size = minibatch_size,
        test_epochs = sqrtspace(0, n_epochs, n_tests),
        evaluation_function = percent_argmax_correct,
        )


def run_and_plot(training_scheme):
    learning_curves = training_scheme()
    plot_learning_curves(learning_curves)


def get_experiments():
    training_schemes = {
        'adamax-showdown': mnist_adamax_showdown,
        'mlp-normalization': mlp_normalization,
        }
    experiments = {name: lambda sc=scheme: run_and_plot(sc) for name, scheme in training_schemes.iteritems()}
    return experiments


if __name__ == '__main__':

    test_mode = False
    experiment = 'adamax-showdown'

    set_test_mode(test_mode)
    run_experiment(experiment, exp_dict=get_experiments(), show_figs = None, print_to_console=True)
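A note on the lambda sc=scheme: run_and_plot(sc) line in get_experiments() above: the default argument is what pins each dictionary entry to its own training scheme. A bare lambda: run_and_plot(scheme) would close over the loop variable, so every experiment would end up running whichever scheme was iterated last. A minimal, self-contained illustration (not part of the library code):

# Late binding: every lambda sees the final value of i.
late_bound = [lambda: i for i in range(3)]
assert [f() for f in late_bound] == [2, 2, 2]

# Default argument: each lambda captures i at definition time.
early_bound = [lambda i=i: i for i in range(3)]
assert [f() for f in early_bound] == [0, 1, 2]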
Example #2
    for exp_name, exp in get_experiments().iteritems():
        print 'Running %s' % exp_name
        exp()


def test_demo_mnist_mlp():
    demo_mnist_mlp(test_mode = True)


def test_demo_dbn_mnist():
    demo_dbn_mnist(plot = True, test_mode = True)


def test_demo_rbm_mnist():
    demo_rbm_mnist(plot = True, test_mode = True)


def test_demo_prediction_example():
    compare_example_predictors(test_mode = True)


if __name__ == '__main__':
    set_test_mode(True)

    test_demo_compare_optimizers()
    test_demo_prediction_example()
    test_demo_mnist_mlp()
    test_demo_rbm_mnist()
    test_demo_dbn_mnist()
Example #3
                function = MultiLayerPerceptron.from_init(
                    layer_sizes=[dataset.input_size, 500, dataset.n_categories],
                    hidden_activation='sig',  # Sigmoidal hidden units
                    output_activation='softmax',  # Softmax output unit, since we're doing multinomial classification
                    w_init = 0.01,
                    rng = 5
                ),
                cost_function = negative_log_likelihood_dangerous,  # "Dangerous" because it doesn't check to see that output is normalized, but we know it is because it comes from softmax.
                optimizer = SimpleGradientDescent(eta = 0.1),
                ).compile(),  # .compile() returns an IPredictor
            },
        offline_predictors={
            'RF': RandomForestClassifier(n_estimators = 40)
            },
        minibatch_size = minibatch_size,
        test_epochs = sqrtspace(0, n_epochs, n_tests),
        evaluation_function = percent_argmax_correct  # Compares argmax of the output to the one-hot target
        )
    # The result is a LearningCurveData object
    return learning_curve_data


if __name__ == '__main__':

    set_test_mode(False)
    records = compare_example_predictors(
        n_epochs=30,
        minibatch_size=20,
        )
    plot_learning_curves(records)
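The evaluation_function = percent_argmax_correct used in this example compares the argmax of the predictor's output to the one-hot targets and reports the percentage of matches. The real function ships with the library; the sketch below only illustrates that assumed behaviour:

import numpy as np

def percent_argmax_correct_sketch(output, target):
    # output: (n_samples, n_categories) scores; target: one-hot array or integer labels.
    predicted = np.argmax(output, axis=1)
    actual = np.argmax(target, axis=1) if np.ndim(target) > 1 else np.asarray(target)
    return 100.0 * np.mean(predicted == actual)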
Example #4
            'normalize': make_mlp(normalize=True, scale = False),
            'normalize and scale': make_mlp(normalize=True, scale = True),
            },
        minibatch_size = minibatch_size,
        test_epochs = sqrtspace(0, n_epochs, n_tests),
        evaluation_function = percent_argmax_correct
        )


def run_and_plot(training_scheme):
    learning_curves = training_scheme()
    plot_learning_curves(learning_curves)


def get_experiments():
    training_schemes = {
        'adamax-showdown': mnist_adamax_showdown,
        'mlp-normalization': mlp_normalization
        }
    experiments = {name: lambda sc=scheme: run_and_plot(sc) for name, scheme in training_schemes.iteritems()}  # sc=scheme captures each scheme at definition time
    return experiments


if __name__ == '__main__':

    test_mode = False
    experiment = 'mlp-normalization'

    set_test_mode(test_mode)
    run_experiment(experiment, exp_dict=get_experiments(), show_figs = True, print_to_console=True)
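All of the training demos above space their evaluation checkpoints with test_epochs = sqrtspace(0, n_epochs, n_tests). The function comes from the library; a plausible reading (an assumption, not the library's source) is square-root spacing, which samples the learning curve more densely early in training where it changes fastest:

import numpy as np

def sqrtspace_sketch(a, b, n_points):
    # Points whose square roots are evenly spaced between sqrt(a) and sqrt(b),
    # so the resulting epochs cluster toward the start of training.
    return np.linspace(np.sqrt(a), np.sqrt(b), n_points) ** 2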
Example #5
def test_mnist_relu_vs_spiking():
    set_test_mode(True)
    ExperimentLibrary.mnist_relu_vs_spiking.run()
Example #6
def test_try_hyperparams():
    set_test_mode(True)
    ExperimentLibrary.try_hyperparams.run()
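Examples #5 and #6 call set_test_mode(True) before running, so each experiment executes a small, fast variant of itself. The real switch lives in the library; a hypothetical stand-in that captures the idea is just a module-level flag the demos consult:

# Hypothetical stand-in, not the library's implementation.
_test_mode = False

def set_test_mode(value):
    global _test_mode
    _test_mode = bool(value)

def is_test_mode():
    return _test_mode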
Example #7
    for exp_name, exp in get_experiments().iteritems():
        print 'Running %s' % exp_name
        exp()


def test_demo_mnist_mlp():
    demo_mnist_mlp(test_mode=True)


def test_demo_dbn_mnist():
    demo_dbn_mnist(plot=True, test_mode=True)


def test_demo_rbm_mnist():
    demo_rbm_mnist(plot=True, test_mode=True)


def test_demo_prediction_example():
    compare_example_predictors(test_mode=True)


if __name__ == '__main__':
    set_test_mode(True)

    test_demo_compare_optimizers()
    test_demo_prediction_example()
    test_demo_mnist_mlp()
    test_demo_rbm_mnist()
    test_demo_dbn_mnist()
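Because the functions above follow the test_* naming convention, a test runner such as pytest can collect them instead of the manual __main__ block; for example (the module name test_demos.py is hypothetical):

import pytest

pytest.main(['-q', 'test_demos.py'])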