Beispiel #1
0
 def test_computational_graph3(self):
     """Check that ComputationGraph collects the updates registered by BatchNorm.

     The net contains two BatchNorm layers; in training mode the graph must
     expose exactly 4 updates (presumably 2 running statistics per BatchNorm
     — TODO confirm against BatchNorm's implementation), while the scoring
     graph must expose none.
     """
     # validate the number of updates found by ComputationGraph
     X = K.placeholder(shape=(None, 28, 28, 3))
     f = N.Sequence([
         N.Conv(32, 3, pad='same', activation=K.linear),
         N.BatchNorm(activation=K.relu),
         N.Flatten(outdim=2),
         N.Dense(16),
         N.BatchNorm(),
         N.Dense(10)
     ])
     # Build the same net twice: once with the training flag on (updates are
     # registered) and once with it off (no updates).
     K.set_training(True)
     y_train = f(X)
     K.set_training(False)
     y_score = f(X)
     # Both graphs must infer the same symbolic output shape.
     self.assertTrue(
         K.get_shape(y_train) == K.get_shape(y_score)
         and K.get_shape(y_score) == (None, 10))
     cc_train = K.ComputationGraph(y_train)
     cc_score = K.ComputationGraph(y_score)
     self.assertTrue(len(cc_score.updates) == 0)
     self.assertTrue(len(cc_train.updates) == 4)
     # create real function
     fn_train = K.function(X, y_train)
     fn_score = K.function(X, y_score)
     # A concrete batch of 12 images must map to 12 class-score rows.
     shape1 = fn_train(np.random.rand(12, 28, 28, 3)).shape
     shape2 = fn_score(np.random.rand(12, 28, 28, 3)).shape
     self.assertTrue(shape1 == shape2 and shape1 == (12, 10))
Beispiel #2
0
    def test_batch_norm(self):
        """BatchNorm must preserve the input shape in training and inference modes.

        Runs the same (None, 8, 12) placeholder through a BatchNorm op with the
        training flag on and off, and checks the concrete output shape both times.
        """
        # ====== Training mode ====== #
        K.set_training(True)
        x = K.placeholder((None, 8, 12))
        y = N.BatchNorm()(x)
        f = K.function(x, y)
        z = f(np.random.rand(25, 8, 12))
        # assertEquals is a deprecated alias of assertEqual (removed in
        # Python 3.12) — use the canonical name.
        self.assertEqual(z.shape, (25, 8, 12))

        # ====== Not training ====== #
        K.set_training(False)
        x = K.placeholder((None, 8, 12))
        y = N.BatchNorm()(x)
        f = K.function(x, y)
        z = f(np.random.rand(25, 8, 12))
        self.assertEqual(z.shape, (25, 8, 12))
Beispiel #3
0
    def test_dropout(self):
        """Dropout with a broadcast noise dimension must mask uniformly along it.

        When the dropout mask is broadcast over an axis, every slice along that
        axis receives the identical mask, so all slices of an all-ones input
        must come out equal.
        """
        K.set_training(True)
        x = K.placeholder((4, 6))

        # Mask broadcast over dim 0: every row gets the same mask.
        dropped = N.Dropout(level=0.5, noise_dims=0, rescale=True)(x)
        fn = K.function(x, dropped)
        rows = fn(np.ones((4, 6))).tolist()
        self.assertTrue(all(row == rows[0] for row in rows))

        # Mask broadcast over dim 1: every column gets the same mask.
        dropped = N.Dropout(level=0.5, noise_dims=1, rescale=True)(x)
        fn = K.function(x, dropped)
        cols = fn(np.ones((4, 6))).T.tolist()
        self.assertTrue(all(col == cols[0] for col in cols))
Beispiel #4
0
    def test_noise(self):
        """Noise with a broadcast noise dimension must perturb uniformly along it.

        With the noise sample broadcast over an axis, every slice along that
        axis is perturbed identically, so all slices of an all-ones input must
        come out equal.
        """
        K.set_training(True)
        x = K.placeholder((2, 3))

        # Noise shared across dim 0: all rows receive the same perturbation.
        noisy = N.Noise(level=0.5, noise_dims=0, noise_type='gaussian')(x)
        fn = K.function(x, noisy)
        rows = fn(np.ones((2, 3))).tolist()
        self.assertTrue(all(row == rows[0] for row in rows))

        # Noise shared across dim 1: all columns receive the same perturbation.
        noisy = N.Noise(level=0.5, noise_dims=1, noise_type='gaussian')(x)
        fn = K.function(x, noisy)
        cols = fn(np.ones((2, 3))).T.tolist()
        self.assertTrue(all(col == cols[0] for col in cols))
Beispiel #5
0
    def test_load_save2(self):
        """A pickled Dense op must compute the same forward and transpose outputs."""
        K.set_training(True)
        X = K.placeholder((None, 1, 28, 28))

        # Functions built from the original op.
        dense = N.Dense(128, activation=K.relu)
        y = dense(X)
        yT = dense.T(y)
        f1 = K.function(X, y)
        f2 = K.function(X, yT)

        # Round-trip the op through pickle and rebuild the same functions.
        dense = cPickle.loads(cPickle.dumps(dense))
        y = dense(X)
        yT = dense.T(y)
        f3 = K.function(X, y)
        f4 = K.function(X, yT)

        x = np.random.rand(12, 1, 28, 28)

        # Summed outputs must match exactly before and after pickling.
        self.assertEqual(f1(x).sum(), f3(x).sum())
        self.assertEqual(f2(x).sum(), f4(x).sum())
Beispiel #6
0
    def test_load_save2(self):
        """A pickled Dense op must compute the same forward and transpose outputs."""
        K.set_training(True)
        X = K.placeholder((None, 1, 28, 28))

        # Functions built from the original op (forward and transpose).
        f = N.Dense(128, activation=K.relu)
        y = f(X)
        yT = f.T(y)
        f1 = K.function(X, y)
        f2 = K.function(X, yT)

        # Round-trip the op through pickle and rebuild the same functions.
        f = cPickle.loads(cPickle.dumps(f))
        y = f(X)
        yT = f.T(y)
        f3 = K.function(X, y)
        f4 = K.function(X, yT)

        x = np.random.rand(12, 1, 28, 28)

        # Summed outputs must match exactly before and after pickling.
        self.assertEqual(f1(x).sum(), f3(x).sum())
        self.assertEqual(f2(x).sum(), f4(x).sum())
Beispiel #7
0
    def test_load_save1(self):
        """Pickling a Dense op must preserve its weights and hyper-parameters."""
        K.set_training(True)
        X = K.placeholder((None, 1, 28, 28))
        f = N.Dense(128, activation=K.relu)
        y = f(X)
        # Scalar summaries of the parameters before the pickle round-trip.
        W, b = [K.get_value(p).sum() for p in K.ComputationGraph(y).parameters]
        num_units = f.num_units
        W_init = f.W_init
        b_init = f.b_init
        activation = f.activation

        # Round-trip through pickle and read the same attributes back.
        f = cPickle.loads(cPickle.dumps(f))
        W1, b1 = [K.get_value(p).sum() for p in f.parameters]
        num_units1 = f.num_units
        W_init1 = f.W_init
        b_init1 = f.b_init
        activation1 = f.activation

        self.assertEqual(W1, W)
        self.assertEqual(b1, b)
        self.assertEqual(num_units1, num_units)
        self.assertEqual(W_init1.__name__, W_init.__name__)
        # Fixed inconsistent argument order: restored value first, matching
        # every sibling assertion in this test.
        self.assertEqual(b_init1.__name__, b_init.__name__)
        self.assertEqual(activation1, activation)
Beispiel #8
0
    def test_load_save1(self):
        """Pickling a Dense op must preserve its weights and hyper-parameters."""
        K.set_training(True)
        X = K.placeholder((None, 1, 28, 28))
        f = N.Dense(128, activation=K.relu)
        y = f(X)
        # Scalar summaries of the parameters before the pickle round-trip.
        W, b = [K.get_value(p).sum() for p in K.ComputationGraph(y).parameters]
        num_units = f.num_units
        W_init = f.W_init
        b_init = f.b_init
        activation = f.activation

        # Round-trip through pickle and read the same attributes back.
        f = cPickle.loads(cPickle.dumps(f))
        W1, b1 = [K.get_value(p).sum() for p in f.parameters]
        num_units1 = f.num_units
        W_init1 = f.W_init
        b_init1 = f.b_init
        activation1 = f.activation

        self.assertEqual(W1, W)
        self.assertEqual(b1, b)
        self.assertEqual(num_units1, num_units)
        self.assertEqual(W_init1.__name__, W_init.__name__)
        # Fixed inconsistent argument order: restored value first, matching
        # every sibling assertion in this test.
        self.assertEqual(b_init1.__name__, b_init.__name__)
        self.assertEqual(activation1, activation)
Beispiel #9
0
    def test_seq(self):
        """A pickled Sequence must reproduce the original's shapes and outputs.

        Builds training/scoring graphs for the net and its transpose, round-trips
        the net through cPickle, rebuilds the graphs, and checks that shapes and
        (rounded) output sums agree. Rounding is needed because the net contains
        stochastic Noise/Dropout layers.
        """
        X = K.placeholder((None, 28, 28, 1))
        f = N.Sequence([
            N.Conv(8, (3, 3), strides=1, pad='same'),
            N.Dimshuffle(pattern=(0, 3, 1, 2)),
            N.FlattenLeft(outdim=2),
            N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
            N.Dense(128, activation=K.relu),
            N.Dropout(level=0.3, noise_dims=None),
            N.Dense(10, activation=K.softmax)
        ])
        K.set_training(True)
        y = f(X)
        K.set_training(False)
        yT = f.T(y)
        f1 = K.function(X, y)
        f2 = K.function(X, yT)

        # Round-trip through pickle and rebuild the same graphs.
        f = cPickle.loads(cPickle.dumps(f))
        K.set_training(True)
        y = f(X)
        K.set_training(False)
        yT = f.T(y)
        f3 = K.function(X, y)
        f4 = K.function(X, yT)

        x = np.random.rand(12, 28, 28, 1)

        # NOTE: replaced the deprecated assertEquals alias (removed in
        # Python 3.12) with assertEqual throughout.
        self.assertEqual(f1(x).shape, (2688, 10))
        self.assertEqual(f3(x).shape, (2688, 10))
        self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
        self.assertEqual(K.get_shape(y), (None, 10))

        # The transpose must map back to the input shape.
        self.assertEqual(f2(x).shape, (12, 28, 28, 1))
        self.assertEqual(f4(x).shape, (12, 28, 28, 1))
        self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
        self.assertEqual(K.get_shape(yT), (None, 28, 28, 1))
Beispiel #10
0
    N.Dimshuffle(pattern=(0, 2, 3, 1)),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),
    N.Flatten(outdim=2),
    N.Dense(512, activation=K.relu),
    N.Dropout(level=0.5),
    N.Dense(10, activation=K.softmax)
],
               debug=True)
# Build the graph twice: once with the training flag on (stochastic layers
# such as Dropout active) and once with it off for prediction/evaluation.
K.set_training(True)
y_train = f(X)
K.set_training(False)
y_pred = f(X)

# Training objective, and accuracy / cross-entropy on the deterministic graph.
cost_train = K.mean(K.categorical_crossentropy(y_train, y_true))
cost_pred = K.mean(K.categorical_accuracy(y_pred, y_true))
cost_eval = K.mean(K.categorical_crossentropy(y_pred, y_true))
parameters = f.parameters
print('Parameters:', [p.name for p in parameters])

# RMSProp updates for all trainable parameters of the net.
optz = K.optimizers.RMSProp()
updates = optz.get_updates(cost_train, parameters)

print("Build training function ...")
# Training function: applies the optimizer updates on every call.
f_train = K.function([X, y_true], cost_train, updates=updates)
Beispiel #11
0
from __future__ import print_function, division, absolute_import

import os
# Configure the odin backend before importing it: float32 precision on GPU
# with the tensorflow backend.
os.environ['ODIN'] = 'float32,gpu,tensorflow'

import numpy as np

from odin import nnet as N, backend as K, fuel as F


# ===========================================================================
# Data and const
# ===========================================================================
ds = F.load_mnist()
print(ds)

# ===========================================================================
# Model
# ===========================================================================
# Input descriptors for the model: MNIST images and their scalar labels.
input_desc = [
    N.VariableDesc(shape=(None, 28, 28), dtype='float32', name='X'),
    N.VariableDesc(shape=(None,), dtype='float32', name='y')
]
model = N.get_model_descriptor('ladder1')
# Build the training graph (returns prediction and cost), then the scoring
# graph with the training flag off.
K.set_training(True); y_train, cost = model(input_desc)
K.set_training(False); y_score = model()
Beispiel #12
0
from __future__ import print_function, division, absolute_import

import os
os.environ['ODIN'] = 'theano,float32,cpu,seed=12082518'
from six.moves import cPickle

import numpy as np

from odin import backend as K
from odin import nnet as N
from odin import utils as U

# Fix the numpy seed so the random test input below is reproducible.
np.random.seed(1208)
X = K.placeholder((None, 28, 28, 1))
x = np.random.rand(12, 28, 28, 1)
# Enable training mode for all graphs built after this point.
K.set_training(True)


def create():
    f = N.Sequence([
        N.Conv(8, (3, 3), strides=1, pad='same'),
        N.Dimshuffle(pattern=(0, 3, 1, 2)),
        N.FlattenLeft(outdim=2),
        N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
        N.Dense(128, activation=K.relu),
        N.Dropout(level=0.3, noise_dims=None),
        N.Dense(10, activation=K.softmax)
    ],
                   debug=True)
    y = f(X)
    yT = f.T(y)