Example #1
def run_mlp():
    # # define the model layers
    # layer1 = Dense(input_size=784, output_size=1000, activation='rectifier')
    # layer2 = Dense(inputs_hook=(1000, layer1.get_outputs()), output_size=1000, activation='rectifier')
    # classlayer3 = SoftmaxLayer(inputs_hook=(1000, layer2.get_outputs()), output_size=10, out_as_probs=False)
    # # add the layers to the prototype
    # mlp = Prototype(layers=[layer1, layer2, classlayer3])

    # test the new way to automatically fill in inputs_hook for models
    mlp = Prototype()
    mlp.add(Dense(input_size=784, output_size=1000, activation='rectifier', noise='dropout'))
    mlp.add(Dense(output_size=1500, activation='tanh', noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, epochs=10)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    yhat = mlp.run(test_data)
    print('-------')
    print('Prediction: %s' % str(yhat))
    print('Actual:     %s' % str(test_labels.astype('int32')))
Example #2
def run_mlp():
    # # define the model layers
    # layer1 = BasicLayer(input_size=784, output_size=1000, activation='rectifier')
    # layer2 = BasicLayer(inputs_hook=(1000, layer1.get_outputs()), output_size=1000, activation='rectifier')
    # classlayer3 = SoftmaxLayer(inputs_hook=(1000, layer2.get_outputs()), output_size=10, out_as_probs=False)
    # # add the layers to the prototype
    # mlp = Prototype(layers=[layer1, layer2, classlayer3])

    # test the new way to automatically fill in inputs_hook for models
    mlp = Prototype()
    mlp.add(BasicLayer(input_size=784, output_size=1000, activation="rectifier", noise="dropout"))
    mlp.add(BasicLayer(output_size=1500, activation="tanh", noise="dropout"))
    mlp.add(SoftmaxLayer(output_size=10))

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=10)
    optimizer.train()

    test_data, test_labels = mnist.getSubset(subset=TEST)
    test_data = test_data[:25].eval()
    test_labels = test_labels[:25].eval()
    # use the run function!
    yhat = mlp.run(test_data)
    print("-------")
    print("Prediction: %s" % str(yhat))
    print("Actual:     %s" % str(test_labels.astype("int32")))
Example #3
def build_model():
    # add layers one-by-one to a Prototype container to build neural net
    # inputs_hook created automatically by Prototype; thus, no need to specify
    mlp = Prototype()
    mlp.add(BasicLayer(input_size=28*28, output_size=512, activation='rectifier', noise='dropout'))
    mlp.add(BasicLayer(output_size=512, activation='rectifier', noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    return mlp
Example #4
def sequential_add_layers():
    # This method is to demonstrate adding layers one-by-one to a Prototype container.
    # As you can see, inputs_hook is created automatically by Prototype, so we don't need to specify it!
    mlp = Prototype()
    mlp.add(BasicLayer(input_size=28*28, output_size=1000, activation='rectifier', noise='dropout', noise_level=0.5))
    mlp.add(BasicLayer(output_size=512, activation='rectifier', noise='dropout', noise_level=0.5))
    mlp.add(SoftmaxLayer(output_size=10))

    return mlp
Example #5
def main():
    # First, let's create a simple feedforward MLP with one hidden layer as a Prototype.
    mlp = Prototype()
    mlp.add(
        BasicLayer(input_size=28 * 28,
                   output_size=1000,
                   activation='rectifier',
                   noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    # Now, we get to choose what values we want to monitor, and what datasets we would like to monitor on!
    # Each Model (in our case, the Prototype), has a get_monitors method that will return a useful
    # dictionary of {string_name: monitor_theano_expression} for various computations of the model we might
    # care about. By default, this method returns an empty dictionary - it is the model creator's job to
    # include potential monitor values. (A sketch of a model defining its own monitors follows this example.)
    mlp_monitors = mlp.get_monitors()
    mlp_channel = MonitorsChannel(name="error")
    for name, expression in mlp_monitors.items():
        mlp_channel.add(
            Monitor(name=name,
                    expression=expression,
                    train=True,
                    valid=True,
                    test=True))

    # create some monitors for statistics about the hidden and output weights!
    # let's look at the mean, variance, and standard deviation of the weights matrices.
    weights_channel = MonitorsChannel(name="weights")
    hiddens_1 = mlp[0].get_params()[0]
    hiddens1_mean = T.mean(hiddens_1)
    weights_channel.add(
        Monitor(name="hiddens_mean", expression=hiddens1_mean, train=True))

    hiddens_2 = mlp[1].get_params()[0]
    hiddens2_mean = T.mean(hiddens_2)
    weights_channel.add(
        Monitor(name="out_mean", expression=hiddens2_mean, train=True))

    # create our plot object to do live plotting!
    plot = Plot(bokeh_doc_name="Monitor Tutorial",
                monitor_channels=[mlp_channel, weights_channel],
                open_browser=True)

    # use SGD optimizer
    optimizer = SGD(model=mlp,
                    dataset=MNIST(concat_train_valid=False),
                    n_epoch=500,
                    save_frequency=100,
                    batch_size=600,
                    learning_rate=.01,
                    lr_decay=False,
                    momentum=.9,
                    nesterov_momentum=True)

    # train, with the plot!
    optimizer.train(plot=plot)
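
The get_monitors usage above is the consumer side; defining monitors is the model's job. A minimal sketch of the producer side (CustomModel and its hiddens attribute are illustrative assumptions, not OpenDeep code, and the Model import path may vary by version):

import theano.tensor as T
from opendeep.models.model import Model

class CustomModel(Model):
    # ... layers and self.hiddens would be built in __init__ (elided) ...
    def get_monitors(self):
        # return {string_name: theano_expression} - the dictionary shape
        # that the MonitorsChannel loop above iterates over
        return {"mean_activation": T.mean(self.hiddens)}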
Example #6
def create_mlp():
    # define the model layers
    relu_layer1 = BasicLayer(input_size=784, output_size=1000, activation='rectifier')
    relu_layer2 = BasicLayer(inputs_hook=(1000, relu_layer1.get_outputs()), output_size=1000, activation='rectifier')
    class_layer3 = SoftmaxLayer(inputs_hook=(1000, relu_layer2.get_outputs()), output_size=10, out_as_probs=False)
    # add the layers as a Prototype
    mlp = Prototype(layers=[relu_layer1, relu_layer2, class_layer3])

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer.train()

    test_data, test_labels = mnist.getSubset(TEST)
    test_data = test_data[:25].eval()
    test_labels = test_labels[:25].eval()

    # use the run function!
    preds = mlp.run(test_data)
    log.info('-------')
    log.info("predicted: %s",str(preds))
    log.info("actual:    %s",str(test_labels.astype('int32')))
Example #7
def sequential_add_layers():
    # This method is to demonstrate adding layers one-by-one to a Prototype container.
    # As you can see, inputs_hook is created automatically by Prototype, so we don't need to specify it!
    mlp = Prototype()
    mlp.add(
        BasicLayer(input_size=28 * 28,
                   output_size=1000,
                   activation='rectifier',
                   noise='dropout',
                   noise_level=0.5))
    mlp.add(
        BasicLayer(output_size=512,
                   activation='rectifier',
                   noise='dropout',
                   noise_level=0.5))
    mlp.add(SoftmaxLayer(output_size=10))

    return mlp
Example #8
def add_list_layers():
    # You can also add a whole list of layers at once (or pass them at initialization) to a Prototype!
    # This lets you specify more complex interactions between layers! (A sketch of the list-add form
    # follows this example.)
    hidden1 = BasicLayer(input_size=28 * 28,
                         output_size=512,
                         activation='rectifier',
                         noise='dropout')

    hidden2 = BasicLayer(inputs_hook=(512, hidden1.get_outputs()),
                         output_size=512,
                         activation='rectifier',
                         noise='dropout')

    class_layer = SoftmaxLayer(inputs_hook=(512, hidden2.get_outputs()),
                               output_size=10)

    mlp = Prototype([hidden1, hidden2, class_layer])
    return mlp
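
Per the comment at the top of this example, the same layers could instead be appended as a list after construction. A minimal equivalent sketch (assuming Prototype.add accepts a list, as that comment states):

mlp = Prototype()
mlp.add([hidden1, hidden2, class_layer])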
Example #9
def main():
    # First, let's create a simple feedforward MLP with one hidden layer as a Prototype.
    mlp = Prototype()
    mlp.add(BasicLayer(input_size=28*28, output_size=1000, activation='rectifier', noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    # Now, we get to choose what values we want to monitor, and what datasets we would like to monitor on!
    # Each Model (in our case, the Prototype), has a get_monitors method that will return a useful
    # dictionary of {string_name: monitor_theano_expression} for various computations of the model we might
    # care about. By default, this method returns an empty dictionary - it is the model creator's job to
    # include potential monitor values.
    mlp_monitors = mlp.get_monitors()
    mlp_channel = MonitorsChannel(name="error")
    for name, expression in mlp_monitors.items():
        mlp_channel.add(Monitor(name=name, expression=expression, train=True, valid=True, test=True))

    # create some monitors for statistics about the hidden and output weights!
    # let's look at the mean, variance, and standard deviation of the weights matrices.
    weights_channel = MonitorsChannel(name="weights")
    hiddens_1 = mlp[0].get_params()[0]
    hiddens1_mean = T.mean(hiddens_1)
    weights_channel.add(Monitor(name="hiddens_mean", expression=hiddens1_mean, train=True))

    hiddens_2 = mlp[1].get_params()[0]
    hiddens2_mean = T.mean(hiddens_2)
    weights_channel.add(Monitor(name="out_mean", expression=hiddens2_mean, train=True))

    # create our plot object to do live plotting!
    plot = Plot(bokeh_doc_name="Monitor Tutorial", monitor_channels=[mlp_channel, weights_channel], open_browser=True)

    # use SGD optimizer
    optimizer = SGD(model=mlp,
                    dataset=MNIST(concat_train_valid=False),
                    n_epoch=500,
                    save_frequency=100,
                    batch_size=600,
                    learning_rate=.01,
                    lr_decay=False,
                    momentum=.9,
                    nesterov_momentum=True)

    # train, with the plot!
    optimizer.train(plot=plot)
Example #10
    from opendeep.log import config_root_logger
    config_root_logger()

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = Dense(inputs=((None, 28 * 28), matrix("x")),
                   outputs=1000,
                   activation='linear')
    layer1_act = Activation(inputs=((None, 1000), layer1.get_outputs()),
                            activation='relu')
    # create the softmax classifier
    layer2 = Softmax(inputs=((None, 1000), layer1_act.get_outputs()),
                     outputs=10,
                     out_as_probs=True)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer1_act, layer2])
    # define the loss function
    loss = Neg_LL(inputs=mlp.get_outputs(),
                  targets=vector("y", dtype="int64"),
                  one_hot=False)
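    # with one_hot=False, the "y" targets are integer class indices
    # (hence dtype="int64" above) rather than one-hot vectors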

    # make an optimizer to train it (AdaDelta is a good default)
    # optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer = AdaDelta(dataset=mnist, loss=loss, epochs=20)
    # perform training!
    # optimizer.train()
    mlp.train(optimizer)

    # test it on some images!
    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    preds = mlp.run(test_data)
    print('-------')
    print(preds)
Example #11
    from opendeep.log import config_root_logger
    config_root_logger()

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = Dense(inputs=((None, 28*28), matrix("x")),
                   outputs=1000,
                   activation='linear')
    layer1_act = Activation(inputs=((None, 1000), layer1.get_outputs()), activation='relu')
    # create the softmax classifier
    layer2 = Softmax(inputs=((None, 1000), layer1_act.get_outputs()),
                     outputs=10,
                     out_as_probs=True)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer1_act, layer2])
    # define the loss function
    loss = Neg_LL(inputs=mlp.get_outputs(), targets=vector("y", dtype="int64"), one_hot=False)

    # plot the loss
    if BOKEH_AVAILABLE:
        plot = Plot("mlp_mnist", monitor_channels=Monitor("loss", loss.get_loss()), open_browser=True)
    else:
        plot = None

    # make an optimizer to train it (AdaDelta is a good default)
    # optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer = AdaDelta(dataset=mnist, loss=loss, epochs=20)
    # perform training!
    # optimizer.train()
    mlp.train(optimizer, plot=plot)
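
This example ends once training finishes; spot-checking predictions would follow the pattern of the neighboring examples. A short sketch reusing this snippet's mlp and mnist (not part of the original):

test_data = mnist.test_inputs[:25]
preds = mlp.run(test_data)
print('Prediction: %s' % str(preds))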
Example #12
    import logging
    import opendeep.log.logger as logger
    logger.config_root_logger()
    log = logging.getLogger(__name__)
    log.info("Creating MLP!")

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = BasicLayer(input_size=28 * 28,
                        output_size=1000,
                        activation='relu')
    # create the softmax classifier
    layer2 = SoftmaxLayer(inputs_hook=(1000, layer1.get_outputs()),
                          output_size=10,
                          out_as_probs=False)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer2])
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    # perform training!
    optimizer.train()
    # test it on some images!
    test_data, test_labels = mnist.getSubset(subset=TEST)
    test_data = test_data[:25].eval()
    test_labels = test_labels[:25].eval()
    # use the run function!
    preds = mlp.run(test_data)
    print('-------')
    print(preds)
    print(test_labels.astype('int32'))
    print()
    print()
Example #13
    import logging
    from opendeep.log import config_root_logger
    config_root_logger()
    log = logging.getLogger(__name__)
    log.info("Creating MLP!")

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = Dense(input_size=28 * 28, output_size=1000, activation='relu')
    # create the softmax classifier
    layer2 = SoftmaxLayer(inputs_hook=(1000, layer1.get_outputs()),
                          output_size=10,
                          out_as_probs=False)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer2])
    # make an optimizer to train it (AdaDelta is a good default)
    # optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer = AdaDelta(dataset=mnist, epochs=20)
    # perform training!
    # optimizer.train()
    mlp.train(optimizer)

    # test it on some images!
    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    preds = mlp.run(test_data)
    print('-------')
    print(preds)
Example #14
    # set up the logging environment to display outputs (optional)
    # although using logging is recommended over print statements everywhere
    import logging
    import opendeep.log.logger as logger
    logger.config_root_logger()
    log = logging.getLogger(__name__)
    log.info("Creating MLP!")

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = BasicLayer(input_size=28*28, output_size=1000, activation='relu')
    # create the softmax classifier
    layer2 = SoftmaxLayer(inputs_hook=(1000, layer1.get_outputs()), output_size=10, out_as_probs=False)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer2])
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    # perform training!
    optimizer.train()
    # test it on some images!
    test_data, test_labels = mnist.getSubset(subset=TEST)
    test_data = test_data[:25].eval()
    test_labels = test_labels[:25].eval()
    # use the run function!
    preds = mlp.run(test_data)
    print('-------')
    print(preds)
    print(test_labels.astype('int32'))
    print()
    print()
Example #15
    # set up the logging environment to display outputs (optional)
    # although using logging is recommended over print statements everywhere
    import logging
    import opendeep.log.logger as logger
    logger.config_root_logger()
    log = logging.getLogger(__name__)
    log.info("Creating MLP!")

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = BasicLayer(input_size=28*28, output_size=1000, activation='relu')
    # create the softmax classifier
    layer2 = SoftmaxLayer(inputs_hook=(1000, layer1.get_outputs()), output_size=10, out_as_probs=False)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer2])
    # make an optimizer to train it (AdaDelta is a good default)
    optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    # perform training!
    optimizer.train()
    # test it on some images!
    test_data = mnist.getDataByIndices(indices=range(25), subset=TEST)
    # use the predict function!
    preds = mlp.predict(test_data)
    print('-------')
    print(preds)
    print(mnist.getLabelsByIndices(indices=range(25), subset=TEST).astype('int32'))
    print()
    print()
    del mnist
    del mlp
Example #16
    # set up the logging environment to display outputs (optional)
    # although using logging is recommended over print statements everywhere
    import logging
    from opendeep.log import config_root_logger
    config_root_logger()
    log = logging.getLogger(__name__)
    log.info("Creating MLP!")

    # grab the MNIST dataset
    mnist = MNIST()
    # create the basic layer
    layer1 = Dense(input_size=28*28, output_size=1000, activation='relu')
    # create the softmax classifier
    layer2 = SoftmaxLayer(inputs_hook=(1000, layer1.get_outputs()), output_size=10, out_as_probs=False)
    # create the mlp from the two layers
    mlp = Prototype(layers=[layer1, layer2])
    # make an optimizer to train it (AdaDelta is a good default)
    # optimizer = AdaDelta(model=mlp, dataset=mnist, n_epoch=20)
    optimizer = AdaDelta(dataset=mnist, epochs=20)
    # perform training!
    # optimizer.train()
    mlp.train(optimizer)

    # test it on some images!
    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    preds = mlp.run(test_data)
    print('-------')
    print(preds)