コード例 #1
0
def fit_autoencoder(image_size=(28, 28),
                    n_image_channels=1,
                    datasets='../data/mnist.pkl.gz',
                    outpath='../output/mnist_autoencoder.params',
                    n_visible=784,
                    n_hidden=500,
                    learning_rate=0.01,
                    corruption_level=0.0,
                    n_epochs=1000,
                    batch_size=20,
                    patience=10000,
                    patience_increase=2,
                    improvement_threshold=0.995):
    """Train a single (denoising) autoencoder with minibatch SGD.

    Builds the symbolic graph, wraps it in an UnsupervisedMSGD trainer,
    runs early-stopping training, prints a summary, and returns the
    fitted trainer object.
    """
    # Symbolic minibatch index and input batch for the computation graph.
    minibatch_index = T.lscalar()
    input_batch = T.dmatrix(name='input')

    model = AutoEncoder(np_rng=rng.RandomState(SEED),
                        input=input_batch,
                        th_rng=None,
                        n_visible=n_visible,
                        n_hidden=n_hidden,
                        corruption_level=corruption_level)

    trainer = UnsupervisedMSGD(minibatch_index, input_batch, batch_size,
                               learning_rate, load_data(datasets), outpath,
                               model, model.cost)

    # fit() returns (best_validation_error, best_iter, epoch, elapsed_time);
    # best_iter is not needed for the summary.
    best_validation_error, _best_iter, epoch, elapsed_time = trainer.fit(
        n_epochs=n_epochs,
        patience=patience,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold)
    display_results(best_validation_error, elapsed_time, epoch)

    return trainer
コード例 #2
0
ファイル: autoencoders.py プロジェクト: 121onto/theano-demos
def fit_autoencoder(image_size=(28, 28), n_image_channels=1,
            datasets='../data/mnist.pkl.gz', outpath='../output/mnist_autoencoder.params',
            n_visible=784, n_hidden=500,
            learning_rate=0.01, corruption_level=0.0,
            n_epochs=1000, batch_size=20, patience=10000,
            patience_increase=2, improvement_threshold=0.995):
    """Fit an autoencoder on the given dataset using minibatch SGD.

    Constructs the symbolic variables, the AutoEncoder model and an
    UnsupervisedMSGD learner, trains with early stopping, displays the
    results, and returns the learner.
    """
    # Symbolic scalar index into the minibatches and the input matrix.
    idx = T.lscalar()
    x_sym = T.dmatrix(name='input')

    ae = AutoEncoder(np_rng=rng.RandomState(SEED), input=x_sym, th_rng=None,
                     n_visible=n_visible, n_hidden=n_hidden,
                     corruption_level=corruption_level)

    sgd = UnsupervisedMSGD(idx, x_sym, batch_size, learning_rate,
                           load_data(datasets), outpath, ae, ae.cost)

    # Unpack training statistics; the best-iteration counter is unused here.
    stats = sgd.fit(n_epochs=n_epochs,
                    patience=patience,
                    patience_increase=patience_increase,
                    improvement_threshold=improvement_threshold)
    best_validation_error, _best_iter, epoch, elapsed_time = stats
    display_results(best_validation_error, elapsed_time, epoch)

    return sgd
コード例 #3
0
def fit_stacked_autoencoder(image_size=(28, 28), n_out=10,
            datasets='../data/mnist.pkl.gz', outpath='../output/mnist_autoencoder.params',
            hidden_layer_sizes=None, corruption_levels=None,
            learning_rate_encoder=0.001, learning_rate_full=0.1,
            n_epochs_encoder=15, n_epochs_full=1000,
            batch_size_encoder=1, batch_size_full=20,
            patience_encoder=5000, patience_full=5000,
            patience_increase=2, improvement_threshold=0.995):
    """Greedily pre-train each layer of a stacked autoencoder, then fine-tune.

    Phase 1 pre-trains every encoder layer unsupervised (no checkpoint is
    written: outpath=None). Phase 2 fine-tunes the whole stack with
    supervised minibatch SGD and saves parameters to `outpath`.

    hidden_layer_sizes defaults to [500, 500, 500] and corruption_levels
    to [0.1, 0.2, 0.3]; None sentinels avoid the shared-mutable-default
    pitfall while keeping the call-site behavior identical.
    """
    if hidden_layer_sizes is None:
        hidden_layer_sizes = [500, 500, 500]
    if corruption_levels is None:
        corruption_levels = [0.1, 0.2, 0.3]

    # Flatten the image dimensions into an input-unit count. np.prod
    # replaces reduce(np.multiply, ...): `reduce` is not a builtin on
    # Python 3 and no functools import is visible in this file.
    n_inputs = int(np.prod(image_size))
    # NOTE(review): the index variable is named 'input' in the original —
    # kept as-is since it only affects the symbolic variable's debug name.
    index = T.lscalar(name='input')
    x = T.matrix(name='x')
    y = T.ivector(name='y')
    datasets = load_data(datasets)

    # NOTE(review): np_rng is passed the `rng` module here, whereas the
    # single-autoencoder examples pass rng.RandomState(SEED) — confirm
    # which StackedAutoEncoder expects.
    stacked_encoder = StackedAutoEncoder(
        x=x,
        y=y,
        np_rng=rng,
        th_rng=None,
        n_inputs=n_inputs,
        hidden_layer_sizes=hidden_layer_sizes,
        corruption_levels=corruption_levels,
        n_out=n_out
    )
    cost = stacked_encoder.cost(y)

    # Phase 1: layer-wise unsupervised pre-training.
    for i, encoder in enumerate(stacked_encoder.encoder_layers):
        print("Pre-training encoder layer %i" % i)
        learner = UnsupervisedMSGD(
            index,
            x,
            batch_size_encoder,
            learning_rate_encoder,
            datasets,
            None,  # no checkpoint path during pre-training
            encoder,
            encoder.cost
        )
        best_validation_error, best_iter, epoch, elapsed_time = learner.fit(
            n_epochs=n_epochs_encoder,
            patience=patience_encoder,
            patience_increase=patience_increase,
            improvement_threshold=improvement_threshold
        )
        print("results for pre-training encoder %i" % i)  # fixed typo: "resuts"
        display_results(best_validation_error, elapsed_time, epoch)

    # Phase 2: supervised fine-tuning of the full stack.
    print("Fitting full model")
    learner = SupervisedMSGD(
        index,
        x,
        y,
        batch_size_full,
        learning_rate_full,
        datasets,
        outpath,
        stacked_encoder,
        cost
    )
    best_validation_error, best_iter, epoch, elapsed_time = learner.fit(
        n_epochs=n_epochs_full,
        patience=patience_full,
        patience_increase=patience_increase,
        improvement_threshold=improvement_threshold
    )
    display_results(best_validation_error, elapsed_time, epoch)