Example #1
def create_mlp():
    # define the model layers
    relu_layer1 = Dense(input_size=784,
                        output_size=1000,
                        activation='rectifier')
    relu_layer2 = Dense(inputs_hook=(1000, relu_layer1.get_outputs()),
                        output_size=1000,
                        activation='rectifier')
    class_layer3 = SoftmaxLayer(inputs_hook=(1000, relu_layer2.get_outputs()),
                                output_size=10,
                                out_as_probs=False)
    # add the layers as a Prototype
    mlp = Prototype(layers=[relu_layer1, relu_layer2, class_layer3])

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, epochs=20)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs[:25], mnist.test_targets[:25]

    # use the run function!
    preds = mlp.run(test_data)
    log.info('-------')
    log.info("predicted: %s", str(preds))
    log.info("actual:    %s", str(test_labels.astype('int32')))
Example #2
def main(size_key):

    out_vector = True

    # grab the data for this step size, window, and max frequency
    heartbeats = HeartSound(basedir, size_key, one_hot=out_vector)

    # define our model! we are using an LSTM with mean-pooling over time and a softmax classification layer
    hidden_size = int(max(math.floor(sizes[size_key]*.4), 1))
    n_classes = 5

    lstm_layer = LSTM(input_size=sizes[size_key],
                      hidden_size=hidden_size,
                      output_size=1,  # don't care about output size
                      hidden_activation='tanh',
                      inner_hidden_activation='sigmoid',
                      weights_init='uniform',
                      weights_interval='montreal',
                      r_weights_init='orthogonal',
                      clip_recurrent_grads=5.,
                      noise='dropout',
                      noise_level=0.6,
                      direction='bidirectional')

    # mean of the hiddens across timesteps (reduces ndim by 1)
    mean_pooling = T.mean(lstm_layer.get_hiddens(), axis=0)

    # last hiddens as an alternative to mean pooling over time
    last_hiddens = lstm_layer.get_hiddens()[-1]

    # now the classification layer
    softmax_layer = SoftmaxLayer(inputs_hook=(hidden_size, mean_pooling),
                                 output_size=n_classes,
                                 out_as_probs=out_vector)

    # make it into a prototype!
    model = Prototype(layers=[lstm_layer, softmax_layer], outdir='outputs/prototype%s' % size_key)

    # optimizer
    optimizer = RMSProp(dataset=heartbeats,
                        model=model,
                        n_epoch=500,
                        batch_size=10,
                        save_frequency=10,
                        learning_rate=1e-4, #1e-6
                        grad_clip=None,
                        hard_clip=False
                        )

    # monitors
    errors = Monitor(name='error', expression=model.get_monitors()['softmax_error'], train=True, valid=True, test=True)

    # plot the monitor
    plot = Plot('heartbeat %s' % size_key, monitor_channels=[errors], open_browser=True)

    optimizer.train(plot=plot)
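Note: last_hiddens above is computed but never used. If pooling the final timestep were preferred over mean pooling, the classification layer could be hooked up as sketched below; this is only an illustrative variant under the same assumptions as the example (same hidden_size hook and SoftmaxLayer signature as above), not part of the original script.

    # hypothetical alternative: classify from the final hidden state instead of the mean over time
    softmax_layer = SoftmaxLayer(inputs_hook=(hidden_size, last_hiddens),
                                 output_size=n_classes,
                                 out_as_probs=out_vector)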
Example #3
def run_mlp():
    # test the new way to automatically fill in inputs for models
    mlp = Prototype()
    x = ((None, 784), matrix("x"))
    mlp.add(Dense(inputs=x, outputs=1000, activation='rectifier'))
    mlp.add(Dense, outputs=1500, activation='tanh')
    mlp.add(Softmax, outputs=10, out_as_probs=False)

    # define our loss to optimize for the model (and the target variable)
    # targets from MNIST are int64 numbers 0-9
    y = lvector('y')
    loss = Neg_LL(inputs=mlp.models[-1].p_y_given_x, targets=y, one_hot=False)

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, loss=loss, dataset=mnist, epochs=10)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    yhat = mlp.run(test_data)
    print('-------')
    print('Prediction: %s' % str(yhat))
    print('Actual:     %s' % str(test_labels.astype('int32')))
Example #4
    def testAutoEncoder(self):
        try:
            s = (None, 3)
            x = matrix('xs')
            e = Dense(inputs=(s, x), outputs=int(s[1]*2), activation='sigmoid')
            W = e.get_param("W")
            d = Dense(inputs=e, outputs=s[1], params={'W': W.T}, activation='sigmoid')
            ae = Prototype([e, d])

            x2 = matrix('xs1')
            W2 = d.get_param("W")
            e2 = Dense(inputs=(s, x2), outputs=int(s[1]*2), params={"W": W2.T, "b": e.get_param('b')}, activation='sigmoid')
            W3 = e2.get_param("W")
            d2 = Dense(inputs=e2, outputs=s[1], params={"W": W3.T, 'b': d.get_param('b')}, activation='sigmoid')
            ae2 = Prototype([e2, d2])

            aerun1 = ae.run(np.array([[.1,.5,.9]], dtype='float32'))
            ae2run1 = ae2.run(np.array([[.1,.5,.9]], dtype='float32'))
            self.assertTrue(np.array_equal(aerun1, ae2run1))

            # build a 100x3 dataset of constant rows stepping through the values .1 to .9 and 0
            data = np.vstack([np.ones((10, 3), dtype='float32') * v
                              for v in (.1, .2, .3, .4, .5, .6, .7, .8, .9, 0)])
            dataset = NumpyDataset(data)
            sgd = SGD(dataset=dataset, model=ae, loss=BinaryCrossentropy(inputs=ae.get_outputs(), targets=x), epochs=5)
            sgd.train()

            aerun2 = ae.run(np.array([[.1,.5,.9]], dtype='float32'))
            ae2run2 = ae2.run(np.array([[.1,.5,.9]], dtype='float32'))

            self.assertFalse(np.array_equal(aerun2, aerun1))
            self.assertFalse(np.array_equal(ae2run2, ae2run1))
            self.assertTrue(np.array_equal(aerun2, ae2run2))

            sgd2 = SGD(dataset=dataset, model=ae2, loss=BinaryCrossentropy(inputs=ae2.get_outputs(), targets=x2), epochs=5)
            sgd2.train()

            aerun3 = ae.run(np.array([[.1,.5,.9]], dtype='float32'))
            ae2run3 = ae2.run(np.array([[.1,.5,.9]], dtype='float32'))

            self.assertFalse(np.array_equal(aerun3, aerun2))
            self.assertFalse(np.array_equal(ae2run3, ae2run2))
            self.assertTrue(np.array_equal(aerun3, ae2run3))


        finally:
            del x, e, d, ae, x2, e2, d2, ae2
Example #5
def run_mlp():
    # test the new way to automatically fill in inputs for models
    mlp = Prototype()
    x = ((None, 784), matrix("x"))
    mlp.add(Dense(inputs=x, outputs=1000, activation='rectifier'))
    mlp.add(Dense, outputs=1500, activation='tanh')
    mlp.add(Softmax, outputs=10, out_as_probs=False)

    # define our loss to optimize for the model (and the target variable)
    # targets from MNIST are int64 numbers 0-9
    y = lvector('y')
    loss = Neg_LL(inputs=mlp.models[-1].p_y_given_x, targets=y, one_hot=False)

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, loss=loss, dataset=mnist, epochs=10)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs, mnist.test_targets
    test_data = test_data[:25]
    test_labels = test_labels[:25]
    # use the run function!
    yhat = mlp.run(test_data)
    print('-------')
    print('Prediction: %s' % str(yhat))
    print('Actual:     %s' % str(test_labels.astype('int32')))
Example #6
def sequential_add_layers():
    # This method is to demonstrate adding layers one-by-one to a Prototype container.
    # As you can see, the inputs_hook for each layer is created automatically by Prototype, so we don't need to specify it!
    mlp = Prototype()
    mlp.add(Dense(input_size=28*28, output_size=1000, activation='rectifier', noise='dropout', noise_level=0.5))
    mlp.add(Dense(output_size=512, activation='rectifier', noise='dropout', noise_level=0.5))
    mlp.add(SoftmaxLayer(output_size=10))

    return mlp
def create_mlp():
    # define the model layers
    relu_layer1 = Dense(input_size=784, output_size=1000, activation='rectifier')
    relu_layer2 = Dense(inputs_hook=(1000, relu_layer1.get_outputs()), output_size=1000, activation='rectifier')
    class_layer3 = SoftmaxLayer(inputs_hook=(1000, relu_layer2.get_outputs()), output_size=10, out_as_probs=False)
    # add the layers as a Prototype
    mlp = Prototype(layers=[relu_layer1, relu_layer2, class_layer3])

    mnist = MNIST()

    optimizer = AdaDelta(model=mlp, dataset=mnist, epochs=20)
    optimizer.train()

    test_data, test_labels = mnist.test_inputs[:25], mnist.test_targets[:25]

    # use the run function!
    preds = mlp.run(test_data)
    log.info('-------')
    log.info("predicted: %s",str(preds))
    log.info("actual:    %s",str(test_labels.astype('int32')))
def main():
    # First, let's create a simple feedforward MLP with one hidden layer as a Prototype.
    mlp = Prototype()
    mlp.add(
        Dense(input_size=28 * 28,
              output_size=1000,
              activation='rectifier',
              noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    # Now, we get to choose what values we want to monitor, and what datasets we would like to monitor on!
    # Each Model (in our case, the Prototype) has a get_monitors method that will return a useful
    # dictionary of {string_name: monitor_theano_expression} for various computations of the model we might
    # care about. By default, this method returns an empty dictionary - it is the model creator's job to
    # include potential monitor values.
    mlp_monitors = mlp.get_monitors()
    mlp_channel = MonitorsChannel(name="error")
    for name, expression in mlp_monitors.items():
        mlp_channel.add(
            Monitor(name=name,
                    expression=expression,
                    train=True,
                    valid=True,
                    test=True))

    # create some monitors for statistics about the hidden and output weights!
    # let's look at the mean, variance, and standard deviation of the weights matrices.
    weights_channel = MonitorsChannel(name="weights")
    hiddens_1 = mlp[0].get_params()[0]
    hiddens1_mean = T.mean(hiddens_1)
    weights_channel.add(
        Monitor(name="hiddens_mean", expression=hiddens1_mean, train=True))

    hiddens_2 = mlp[1].get_params()[0]
    hiddens2_mean = T.mean(hiddens_2)
    weights_channel.add(
        Monitor(name="out_mean", expression=hiddens2_mean, train=True))

    # create our plot object to do live plotting!
    plot = Plot(bokeh_doc_name="Monitor Tutorial",
                monitor_channels=[mlp_channel, weights_channel],
                open_browser=True)

    # use SGD optimizer
    optimizer = SGD(model=mlp,
                    dataset=MNIST(concat_train_valid=False),
                    epochs=500,
                    save_freq=100,
                    batch_size=600,
                    learning_rate=.01,
                    lr_decay=False,
                    momentum=.9,
                    nesterov_momentum=True)

    # train, with the plot!
    optimizer.train(plot=plot)
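The comment above mentions the mean, variance, and standard deviation of the weight matrices, but only the means are actually monitored. A sketch of how the remaining statistics could be added to the same weights_channel (T.var and T.std are standard Theano reductions; the Monitor API is used exactly as above):

    # hypothetical extra statistics for the first hidden layer's weights
    weights_channel.add(Monitor(name="hiddens_var", expression=T.var(hiddens_1), train=True))
    weights_channel.add(Monitor(name="hiddens_std", expression=T.std(hiddens_1), train=True))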
Example #9
def add_list_layers():
    # You can also add lists of layers at a time (or as initialization) to a Prototype! This lets you specify
    # more complex interactions between layers!
    hidden1 = Dense(input_size=28*28,
                         output_size=512,
                         activation='rectifier',
                         noise='dropout')

    hidden2 = Dense(inputs_hook=(512, hidden1.get_outputs()),
                         output_size=512,
                         activation='rectifier',
                         noise='dropout')

    class_layer = SoftmaxLayer(inputs_hook=(512, hidden2.get_outputs()),
                               output_size=10)

    mlp = Prototype([hidden1, hidden2, class_layer])
    return mlp
def main():
    # First, let's create a simple feedforward MLP with one hidden layer as a Prototype.
    mlp = Prototype()
    mlp.add(Dense(input_size=28*28, output_size=1000, activation='rectifier', noise='dropout'))
    mlp.add(SoftmaxLayer(output_size=10))

    # Now, we get to choose what values we want to monitor, and what datasets we would like to monitor on!
    # Each Model (in our case, the Prototype) has a get_monitors method that will return a useful
    # dictionary of {string_name: monitor_theano_expression} for various computations of the model we might
    # care about. By default, this method returns an empty dictionary - it is the model creator's job to
    # include potential monitor values.
    mlp_monitors = mlp.get_monitors()
    mlp_channel = MonitorsChannel(name="error")
    for name, expression in mlp_monitors.items():
        mlp_channel.add(Monitor(name=name, expression=expression, train=True, valid=True, test=True))

    # create some monitors for statistics about the hidden and output weights!
    # let's look at the mean, variance, and standard deviation of the weights matrices.
    weights_channel = MonitorsChannel(name="weights")
    hiddens_1 = mlp[0].get_params()[0]
    hiddens1_mean = T.mean(hiddens_1)
    weights_channel.add(Monitor(name="hiddens_mean", expression=hiddens1_mean, train=True))

    hiddens_2 = mlp[1].get_params()[0]
    hiddens2_mean = T.mean(hiddens_2)
    weights_channel.add(Monitor(name="out_mean", expression=hiddens2_mean, train=True))

    # create our plot object to do live plotting!
    plot = Plot(bokeh_doc_name="Monitor Tutorial", monitor_channels=[mlp_channel, weights_channel], open_browser=True)

    # use SGD optimizer
    optimizer = SGD(model=mlp,
                    dataset=MNIST(concat_train_valid=False),
                    epochs=500,
                    save_freq=100,
                    batch_size=600,
                    learning_rate=.01,
                    lr_decay=False,
                    momentum=.9,
                    nesterov_momentum=True)

    # train, with the plot!
    optimizer.train(plot=plot)
Example #11
import theano.tensor as T
from opendeep import config_root_logger
from opendeep.models import Prototype, Conv2D
from opendeep.models.utils import Noise, Pool2D
from opendeep.monitor import Monitor
from opendeep.optimization.loss import Neg_LL
from opendeep.data import MNIST
from opendeep.data.stream import ModifyStream
from opendeep.optimization import AdaDelta

if __name__ == '__main__':
    # some debugging output to see what is going on under the hood
    config_root_logger()

    #########
    # Model #
    #########
    # build a Prototype container to easily add layers and make a cohesive model!
    lenet = Prototype()
    # need to define a variable for the inputs to this model, as well as the shape
    # (batch, channel, row, col)
    images = T.tensor4('xs')
    images_shape = (None, 1, 28, 28)
    # conv/pool/dropout layer 1
    lenet.add(
        Conv2D(inputs=(images_shape, images),
               n_filters=20,
               filter_size=(5, 5),
               border_mode='full',
               activation='relu'))
    lenet.add(Pool2D, size=(2, 2))
    lenet.add(Noise, noise='dropout', noise_level=0.5)
    # conv/pool/dropout layer 2
    lenet.add(Conv2D,
              n_filters=50,
              filter_size=(5, 5),
              border_mode='full',
              activation='relu')
Example #12
import numpy as np
import scipy.ndimage
import theano.tensor as T
from opendeep import config_root_logger
from opendeep.models import Prototype, Conv2D, Dense, Softmax
from opendeep.models.utils import Noise, Pool2D
from opendeep.optimization.loss import Neg_LL
from opendeep.optimization import AdaDelta
from opendeep.monitor import Monitor
from opendeep.data.stream import ModifyStream
from opendeep.data import ImageDataset
import filter_test

#config_root_logger()

lenet = Prototype()

images = T.tensor4('xs')
images_shape = (None, 1, 480, 640)

lenet.add(Conv2D(inputs=(images_shape, images), n_filters=6, filter_size=(5, 5), border_mode="full", activation="relu"))
lenet.add(Pool2D, size=(2, 2))
lenet.add(Noise, noise="dropout", noise_level=0.5)

lenet.add(Conv2D, n_filters=20, filter_size=(5, 5), border_mode="full", activation="relu")
lenet.add(Pool2D, size=(2, 2))
lenet.add(Noise, noise="dropout", noise_level=0.5)

dense_input = lenet.models[-1].get_outputs().flatten(2)
dense_in_shape = lenet.models[-1].output_size[:1] + (np.prod(lenet.models[-1].output_size[1:]), )
Example #13
    def testAutoEncoder(self):
        try:
            s = (None, 3)
            x = matrix('xs')
            e = Dense(inputs=(s, x),
                      outputs=int(s[1] * 2),
                      activation='sigmoid')
            W = e.get_param("W")
            d = Dense(inputs=e,
                      outputs=s[1],
                      params={'W': W.T},
                      activation='sigmoid')
            ae = Prototype([e, d])

            x2 = matrix('xs1')
            W2 = d.get_param("W")
            e2 = Dense(inputs=(s, x2),
                       outputs=int(s[1] * 2),
                       params={
                           "W": W2.T,
                           "b": e.get_param('b')
                       },
                       activation='sigmoid')
            W3 = e2.get_param("W")
            d2 = Dense(inputs=e2,
                       outputs=s[1],
                       params={
                           "W": W3.T,
                           'b': d.get_param('b')
                       },
                       activation='sigmoid')
            ae2 = Prototype([e2, d2])

            aerun1 = ae.run(np.array([[.1, .5, .9]], dtype='float32'))
            ae2run1 = ae2.run(np.array([[.1, .5, .9]], dtype='float32'))
            self.assertTrue(np.array_equal(aerun1, ae2run1))

            # build a 100x3 dataset of constant rows stepping through the values .1 to .9 and 0
            data = np.vstack([np.ones((10, 3), dtype='float32') * v
                              for v in (.1, .2, .3, .4, .5, .6, .7, .8, .9, 0)])
            dataset = NumpyDataset(data)
            sgd = SGD(dataset=dataset,
                      model=ae,
                      loss=BinaryCrossentropy(inputs=ae.get_outputs(),
                                              targets=x),
                      epochs=5)
            sgd.train()

            aerun2 = ae.run(np.array([[.1, .5, .9]], dtype='float32'))
            ae2run2 = ae2.run(np.array([[.1, .5, .9]], dtype='float32'))

            self.assertFalse(np.array_equal(aerun2, aerun1))
            self.assertFalse(np.array_equal(ae2run2, ae2run1))
            self.assertTrue(np.array_equal(aerun2, ae2run2))

            sgd2 = SGD(dataset=dataset,
                       model=ae2,
                       loss=BinaryCrossentropy(inputs=ae2.get_outputs(),
                                               targets=x2),
                       epochs=5)
            sgd2.train()

            aerun3 = ae.run(np.array([[.1, .5, .9]], dtype='float32'))
            ae2run3 = ae2.run(np.array([[.1, .5, .9]], dtype='float32'))

            self.assertFalse(np.array_equal(aerun3, aerun2))
            self.assertFalse(np.array_equal(ae2run3, ae2run2))
            self.assertTrue(np.array_equal(aerun3, ae2run3))

        finally:
            del x, e, d, ae, x2, e2, d2, ae2
Example #14
import theano.tensor as T
from opendeep import config_root_logger
from opendeep.models import Prototype, Conv2D
from opendeep.models.utils import Noise, Pool2D
from opendeep.monitor import Monitor
from opendeep.optimization.loss import Neg_LL
from opendeep.data import MNIST
from opendeep.data.stream import ModifyStream
from opendeep.optimization import AdaDelta

if __name__ == '__main__':
    # some debugging output to see what is going on under the hood
    config_root_logger()

    #########
    # Model #
    #########
    # build a Prototype container to easily add layers and make a cohesive model!
    lenet = Prototype()
    # need to define a variable for the inputs to this model, as well as the shape
    # (batch, channel, row, col)
    images = T.tensor4('xs')
    images_shape = (None, 1, 28, 28)
    # conv/pool/dropout layer 1
    lenet.add(
        Conv2D(inputs=(images_shape, images),
               n_filters=20, filter_size=(5, 5), border_mode='full', activation='relu')
    )
    lenet.add(Pool2D, size=(2, 2))
    lenet.add(Noise, noise='dropout', noise_level=0.5)
    # conv/pool/dropout layer 2
    lenet.add(Conv2D, n_filters=50, filter_size=(5, 5), border_mode='full', activation='relu')
    lenet.add(Pool2D, size=(2, 2))
    lenet.add(Noise, noise='dropout', noise_level=0.5)
Example #15
    def testLeNet(self):
        try:
            # quick and dirty way to create a model from arbitrary layers
            lenet = Prototype(outdir=None)
            # our input is going to be 4D tensor of images with shape (batch_size, 1, 28, 28)
            x = ((None, 1, 28, 28), ftensor4('x'))
            # our first convolutional layer
            lenet.add(
                Conv2D(inputs=x, n_filters=20, filter_size=(5, 5),
                       outdir=None))
            # our first pooling layer, automatically hooking inputs to the previous convolutional outputs
            lenet.add(Pool2D, size=(2, 2))
            # our second convolutional layer
            lenet.add(Conv2D, n_filters=50, filter_size=(5, 5), outdir=None)
            # our second pooling layer
            lenet.add(Pool2D, size=(2, 2))
            # now we need to flatten the 4D convolution outputs into a 2D matrix (just flatten the trailing dimensions)
            lenet.add(Flatten, ndim=2)
            # one dense hidden layer
            lenet.add(Dense, outputs=500, activation='tanh', outdir=None)
            # hook a softmax classification layer, outputting the probabilities.
            lenet.add(Softmax, outputs=10, out_as_probs=True, outdir=None)

            # Grab the MNIST dataset
            data = MNIST(path="../../../datasets/{!s}".format(mnist_name),
                         concat_train_valid=False,
                         flatten=False)
            # define our loss to optimize for the model (and the target variable)
            # targets from MNIST are int64 numbers 0-9
            y = lvector('y')
            loss = Neg_LL(inputs=lenet.get_outputs(), targets=y, one_hot=False)
            # monitor
            error_monitor = Monitor(
                name='error',
                expression=mean(neq(lenet.models[-1].y_pred, y)),
                valid=True,
                test=True,
                out_service=FileService('outputs/lenet_error.txt'))
            # optimize our model to minimize loss given the dataset using SGD
            optimizer = SGD(model=lenet,
                            dataset=data,
                            loss=loss,
                            epochs=10,
                            batch_size=128,
                            learning_rate=.1,
                            momentum=False)
            print("Training LeNet...")
            optimizer.train(monitor_channels=error_monitor)

            def test_subset(filename, expected, conf=0.001):
                with open(filename, 'r') as f:
                    errs = [float(err) for err in f]
                for i, (err, exp) in enumerate(zip(errs, expected)):
                    if i == 0:
                        c = conf * 10
                    else:
                        c = conf
                    self.assertTrue(
                        exp - c < round(err, 4) < exp + c,
                        "Errors: {!s} and Expected: {!s} -- Error at {!s} and {!s}"
                        .format(errs, expected, err, exp))

            test_subset('outputs/lenet_error_train.txt', [
                .0753, .0239, .0159, .0113, .0088, .0064, .0050, .0037, .0026,
                .0019
            ])
            test_subset('outputs/lenet_error_valid.txt', [
                .0283, .0209, .0170, .0151, .0139, .0129, .0121, .0118, .0112,
                .0113
            ])
            test_subset('outputs/lenet_error_test.txt', [
                .0319, .0213, .0167, .0134, .0122, .0119, .0116, .0107, .0104,
                .0105
            ])
            shutil.rmtree('outputs/')

        finally:
            if 'lenet' in locals():
                del lenet
            if 'data' in locals():
                del data
            if 'y' in locals():
                del y
            if 'x' in locals():
                del x
            if 'loss' in locals():
                del loss
            if 'optimizer' in locals():
                del optimizer
Example #16
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from theano.tensor import matrix, lvector
from opendeep.data import Dataset
from opendeep.models import Prototype, Dense, Softmax
from opendeep.models.utils import Noise
from opendeep.optimization.loss import Neg_LL
from opendeep.optimization import AdaDelta

print("Getting data...")
# data = MNIST()
df = pd.read_csv("train.csv")
features = np.array(df.iloc[:, 1:])
targets = np.array(df["label"])
features_train, features_test, targets_train, targets_test = train_test_split(features, targets, test_size=0.4, random_state=4)

data = Dataset(features_train, train_targets=targets_train,
               test_inputs=features_test, test_targets=targets_test)

print("Creating model...")
in_shape = (None, 28*28)
in_var = matrix('xs')
mlp = Prototype()
mlp.add(Dense(inputs=(in_shape, in_var), outputs=512, activation='relu'))
mlp.add(Noise, noise='dropout', noise_level=0.5)
mlp.add(Dense, outputs=512, activation='relu')
mlp.add(Noise, noise='dropout', noise_level=0.5)
mlp.add(Softmax, outputs=10, out_as_probs=False)

print("Training...")
target_var = lvector('ys')
loss = Neg_LL(inputs=mlp.models[-1].p_y_given_x, targets=target_var, one_hot=False)

optimizer = AdaDelta(model=mlp, loss=loss, dataset=data, epochs=10)
optimizer.train()

print("Predicting...")
predictions = mlp.run(data.test_inputs)
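Since the Softmax layer was added with out_as_probs=False, mlp.run returns predicted class indices, so a quick held-out accuracy check could follow; this is a sketch that is not part of the original snippet:

accuracy = np.mean(predictions == targets_test)
print("Test accuracy: %.4f" % accuracy)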
Example #17
import theano.tensor as T
from opendeep import config_root_logger
from opendeep.models import Prototype, Dense, Softmax
from opendeep.models.utils import Noise
from opendeep.monitor import Monitor
from opendeep.optimization.loss import Neg_LL
from opendeep.data import MNIST
from opendeep.optimization import AdaDelta

if __name__ == '__main__':
    # some debugging output to see what is going on under the hood
    config_root_logger()

    #########
    # Model #
    #########
    # build a Prototype container to easily add layers and make a cohesive model!
    mlp = Prototype()
    # need to define a variable for the inputs to this model, as well as the shape
    # we are doing minibatch training (where we don't know the minibatch size), and the image is a (784,) array.
    x = T.matrix('xs')
    x_shape = (None, 28*28)
    # add our first dense (fully-connected) layer!
    mlp.add(Dense(inputs=(x_shape, x), outputs=500, activation='tanh'))
    # noise is used to regularize the layer from overfitting to data (helps generalization)
    # when adding subsequent layers, we can simply provide the class type and any other kwargs
    # (omitting the `inputs` kwarg) and it will route the previous layer's outputs as the current
    # layer's inputs.
    mlp.add(Noise, noise='dropout', noise_level=0.5)
    # add our classification layer
    mlp.add(Softmax, outputs=10, out_as_probs=False)

    ################
Example #18
def build_lenet():
    # quick and dirty way to create a model from arbitrary layers
    lenet = Prototype()

    # our input is going to be 4D tensor of images with shape (batch_size, 1, 28, 28)
    x = ((None, 1, 28, 28), tensor4('x'))

    # our first convolutional layer
    lenet.add(
        Conv2D(inputs=x, n_filters=20, filter_size=(5, 5))
    )
    # our first pooling layer, automatically hooking inputs to the previous convolutional outputs
    lenet.add(
        Pool2D, size=(2, 2)
    )
    # our second convolutional layer
    lenet.add(
        Conv2D, n_filters=50, filter_size=(5, 5)
    )
    # our second pooling layer
    lenet.add(
        Pool2D, size=(2, 2)
    )
    # now we need to flatten the 4D convolution outputs into a 2D matrix (just flatten the trailing dimensions)
    lenet.add(
        Flatten, ndim=2
    )
    # one dense hidden layer
    lenet.add(
        Dense, outputs=500, activation='tanh'
    )
    # hook a softmax classification layer, outputting the probabilities.
    lenet.add(
        Softmax, outputs=10, out_as_probs=True
    )

    return lenet
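build_lenet only assembles the container; a minimal training sketch is shown below, assuming the MNIST dataset, Neg_LL loss, and AdaDelta optimizer used elsewhere in these examples (the softmax layer outputs probabilities, so the loss hooks onto get_outputs() just as in the LeNet test above):

def train_lenet():
    # sketch only: wire the LeNet prototype to data, a loss, and an optimizer
    lenet = build_lenet()
    # keep the images 4D for the conv layers (assumed MNIST constructor args, as in testLeNet)
    data = MNIST(concat_train_valid=False, flatten=False)
    y = lvector('y')  # int64 class targets 0-9
    loss = Neg_LL(inputs=lenet.get_outputs(), targets=y, one_hot=False)
    optimizer = AdaDelta(model=lenet, loss=loss, dataset=data, epochs=10)
    optimizer.train()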
Example #19
import theano.tensor as T
from opendeep import config_root_logger
from opendeep.models import Prototype, Dense, Softmax
from opendeep.models.utils import Noise
from opendeep.monitor import Monitor
from opendeep.optimization.loss import Neg_LL
from opendeep.data import MNIST
from opendeep.optimization import AdaDelta

if __name__ == '__main__':
    # some debugging output to see what is going on under the hood
    config_root_logger()

    #########
    # Model #
    #########
    # build a Prototype container to easily add layers and make a cohesive model!
    mlp = Prototype()
    # need to define a variable for the inputs to this model, as well as the shape
    # we are doing minibatch training (where we don't know the minibatch size), and the image is a (784,) array.
    x = T.matrix('xs')
    x_shape = (None, 28 * 28)
    # add our first dense (fully-connected) layer!
    mlp.add(Dense(inputs=(x_shape, x), outputs=500, activation='tanh'))
    # noise is used to regularize the layer from overfitting to data (helps generalization)
    # when adding subsequent layers, we can simply provide the class type and any other kwargs
    # (omitting the `inputs` kwarg) and it will route the previous layer's outputs as the current
    # layer's inputs.
    mlp.add(Noise, noise='dropout', noise_level=0.5)
    # add our classification layer
    mlp.add(Softmax, outputs=10, out_as_probs=False)

    ################
Example #20
    def testLeNet(self):
        try:
            # quick and dirty way to create a model from arbitrary layers
            lenet = Prototype(outdir=None)
            # our input is going to be 4D tensor of images with shape (batch_size, 1, 28, 28)
            x = ((None, 1, 28, 28), ftensor4('x'))
            # our first convolutional layer
            lenet.add(
                Conv2D(inputs=x, n_filters=20, filter_size=(5, 5), outdir=None)
            )
            # our first pooling layer, automatically hooking inputs to the previous convolutional outputs
            lenet.add(
                Pool2D, size=(2, 2)
            )
            # our second convolutional layer
            lenet.add(
                Conv2D, n_filters=50, filter_size=(5, 5), outdir=None
            )
            # our second pooling layer
            lenet.add(
                Pool2D, size=(2, 2)
            )
            # now we need to flatten the 4D convolution outputs into a 2D matrix (just flatten the trailing dimensions)
            lenet.add(
                Flatten, ndim=2
            )
            # one dense hidden layer
            lenet.add(
                Dense, outputs=500, activation='tanh', outdir=None
            )
            # hook a softmax classification layer, outputting the probabilities.
            lenet.add(
                Softmax, outputs=10, out_as_probs=True, outdir=None
            )

            # Grab the MNIST dataset
            data = MNIST(path="../../../datasets/{!s}".format(mnist_name), concat_train_valid=False, flatten=False)
            # define our loss to optimize for the model (and the target variable)
            # targets from MNIST are int64 numbers 0-9
            y = lvector('y')
            loss = Neg_LL(inputs=lenet.get_outputs(), targets=y, one_hot=False)
            # monitor
            error_monitor = Monitor(name='error', expression=mean(neq(lenet.models[-1].y_pred, y)), valid=True,
                                    test=True, out_service=FileService('outputs/lenet_error.txt'))
            # optimize our model to minimize loss given the dataset using SGD
            optimizer = SGD(model=lenet,
                            dataset=data,
                            loss=loss,
                            epochs=10,
                            batch_size=128,
                            learning_rate=.1,
                            momentum=False)
            print("Training LeNet...")
            optimizer.train(monitor_channels=error_monitor)

            def test_subset(filename, expected, conf=0.001):
                with open(filename, 'r') as f:
                    errs = [float(err) for err in f]
                for i, (err, exp) in enumerate(zip(errs, expected)):
                    if i == 0:
                        c = conf*10
                    else:
                        c = conf
                    self.assertTrue(exp-c < round(err, 4) < exp+c,
                                    "Errors: {!s} and Expected: {!s} -- Error at {!s} and {!s}".format(
                                        errs, expected, err, exp)
                                    )

            test_subset('outputs/lenet_error_train.txt',
                        [.0753,
                         .0239,
                         .0159,
                         .0113,
                         .0088,
                         .0064,
                         .0050,
                         .0037,
                         .0026,
                         .0019]
                        )
            test_subset('outputs/lenet_error_valid.txt',
                        [.0283,
                         .0209,
                         .0170,
                         .0151,
                         .0139,
                         .0129,
                         .0121,
                         .0118,
                         .0112,
                         .0113]
                        )
            test_subset('outputs/lenet_error_test.txt',
                        [.0319,
                         .0213,
                         .0167,
                         .0134,
                         .0122,
                         .0119,
                         .0116,
                         .0107,
                         .0104,
                         .0105]
                        )
            shutil.rmtree('outputs/')

        finally:
            if 'lenet' in locals():
                del lenet
            if 'data' in locals():
                del data
            if 'y' in locals():
                del y
            if 'x' in locals():
                del x
            if 'loss' in locals():
                del loss
            if 'optimizer' in locals():
                del optimizer
Example #21
def build_lenet():
    # quick and dirty way to create a model from arbitrary layers
    lenet = Prototype()

    # our input is going to be 4D tensor of images with shape (batch_size, 1, 28, 28)
    x = ((None, 1, 28, 28), tensor4('x'))

    # our first convolutional layer
    lenet.add(Conv2D(inputs=x, n_filters=20, filter_size=(5, 5)))
    # our first pooling layer, automatically hooking inputs to the previous convolutional outputs
    lenet.add(Pool2D, size=(2, 2))
    # our second convolutional layer
    lenet.add(Conv2D, n_filters=50, filter_size=(5, 5))
    # our second pooling layer
    lenet.add(Pool2D, size=(2, 2))
    # now we need to flatten the 4D convolution outputs into a 2D matrix (just flatten the trailing dimensions)
    lenet.add(Flatten, ndim=2)
    # one dense hidden layer
    lenet.add(Dense, outputs=500, activation='tanh')
    # hook a softmax classification layer, outputting the probabilities.
    lenet.add(Softmax, outputs=10, out_as_probs=True)

    return lenet
Example #22
def build_lenet():
    # quick and dirty way to create a model from arbitrary layers
    lenet = Prototype()

    # our input is going to be 4D tensor of images with shape (batch_size, 1, 28, 28)
    x = ((None, 1, 28, 28), tensor4('x'))

    # our first convolutional layer
    lenet.add(
        Conv2D(inputs=x, n_filters=20, filter_size=(5, 5))
    )
    # our first pooling layer, automatically hooking inputs to the previous convolutional outputs
    lenet.add(
        Pool2D, size=(2, 2)
    )
    # our second convolutional layer
    lenet.add(
        Conv2D, n_filters=50, filter_size=(5, 5)
    )
    # our second pooling layer
    lenet.add(
        Pool2D, size=(2, 2)
    )

    # now we need to flatten the 4D convolution outputs into a 2D matrix (just flatten the trailing dimensions)
    dense_input = lenet.models[-1].get_outputs().flatten(2)
    # redefine the size appropriately for flattening (since we are doing a Theano modification)
    dense_input_shape = (None, np.prod(lenet.models[-1].output_size[1:]))
    # pass this flattened matrix as the input to a Dense layer!
    lenet.add(
        Dense(
            inputs=[(dense_input_shape, dense_input)],
            outputs=500,
            activation='tanh'
        )
    )
    # automatically hook a softmax classification layer, outputting the probabilities.
    lenet.add(
        Softmax, outputs=10, out_as_probs=True
    )

    return lenet