Example #1
def gender(X, f, **kwargs):
    nb_gender = kwargs.get('nb_gender', 4)
    if f is None:
        f = N.Sequence([
            N.Dimshuffle(pattern=(0, 1, 2, 'x')),
            N.Conv(num_filters=32,
                   filter_size=3,
                   strides=1,
                   b_init=None,
                   pad='valid'),
            N.BatchNorm(activation=K.relu),
            N.Pool(pool_size=2, mode='avg'),
            N.Conv(num_filters=64,
                   filter_size=3,
                   strides=1,
                   b_init=None,
                   pad='valid'),
            N.BatchNorm(activation=K.relu),
            N.Pool(pool_size=2, mode='avg'),
            N.Flatten(outdim=3),
            N.Dense(num_units=512, b_init=None),
            N.BatchNorm(axes=(0, 1)),
            N.AutoRNN(num_units=128,
                      rnn_mode='gru',
                      num_layers=2,
                      input_mode='linear',
                      direction_mode='unidirectional'),
            N.Flatten(outdim=2),
            N.Dense(num_units=nb_gender, activation=K.softmax)
        ],
                       debug=True)
    return f(X), f
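
The function above follows the (output, network) convention shared by these examples: the first call builds the N.Sequence, and returning f lets later calls reuse the same weights. A minimal usage sketch, assuming the odin-style imports used throughout these snippets and an illustrative (batch, time, feature) placeholder:

from odin import nnet as N, backend as K

# hypothetical 3-D acoustic input: (batch, time, feature)
X = K.placeholder(shape=(None, 100, 40), name='X')
y_prob, f_gender = gender(X, f=None, nb_gender=4)  # first call builds the network
y_prob2, _ = gender(X, f=f_gender)                 # later calls reuse it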
Example #2
def cnn(X, y):
  nb_classes = y.shape.as_list()[-1]
  with N.args_scope(['Conv', dict(b_init=None, activation=K.linear)],
                    ['BatchNorm', dict(activation=K.relu)]):
    f = N.Sequence([
        N.Dimshuffle(pattern=(0, 2, 3, 1)),
        N.Conv(32, (3, 3), pad='same', stride=(1, 1)),
        N.BatchNorm(),
        N.Conv(32, (3, 3), pad='same', stride=(1, 1),
               b_init=0, activation=K.relu),
        N.Pool(pool_size=(2, 2), strides=None, mode='max'),
        N.Dropout(level=0.25),
        #
        N.Conv(64, (3, 3), pad='same', stride=(1, 1)),
        N.BatchNorm(),
        N.Conv(64, (3, 3), pad='same', stride=(1, 1),
               b_init=0., activation=K.relu),
        N.Pool(pool_size=(2, 2), strides=None, mode='max'),
        N.Dropout(level=0.25),
        #
        N.Flatten(outdim=2),
        N.Dense(512, activation=K.relu),
        N.Dropout(level=0.5),
        N.Dense(nb_classes, activation=K.linear)
    ], debug=1)
  logit = f(X)
  prob = tf.nn.softmax(logit)
  return {'logit': logit, 'prob': prob}
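
Because the final Dense layer here is linear, the returned 'logit' can be fed directly to a softmax cross-entropy, which is numerically safer than taking the log of 'prob'. A hedged training-loss sketch (the placeholders X and y are assumed to be defined as in the other examples):

import tensorflow as tf

outputs = cnn(X, y)  # X: images, y: one-hot labels
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=outputs['logit']))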
Example #3
    def test_seq(self):
        X = K.placeholder((None, 28, 28, 1))
        f = N.Sequence([
            N.Conv(8, (3, 3), strides=1, pad='same'),
            N.Dimshuffle(pattern=(0, 3, 1, 2)),
            N.Flatten(outdim=2),
            N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
            N.Dense(128, activation=tf.nn.relu),
            N.Dropout(level=0.3, noise_dims=None),
            N.Dense(10, activation=tf.nn.softmax)
        ])
        y = f(X)
        yT = f.T(y)
        f1 = K.function(X, y, defaults={K.is_training(): True})
        f2 = K.function(X, yT, defaults={K.is_training(): False})

        f = cPickle.loads(cPickle.dumps(f))
        y = f(X)
        yT = f.T(y)
        f3 = K.function(X, y, defaults={K.is_training(): True})
        f4 = K.function(X, yT, defaults={K.is_training(): False})

        x = np.random.rand(12, 28, 28, 1)

        self.assertEqual(f1(x).shape, (2688, 10))
        self.assertEqual(f3(x).shape, (2688, 10))
        self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
        # shape.as_list() returns a list, so compare against a list literal
        self.assertEqual(y.shape.as_list(), [None, 10])

        self.assertEqual(f2(x).shape, (12, 28, 28, 1))
        self.assertEqual(f4(x).shape, (12, 28, 28, 1))
        self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
        self.assertEqual(yT.shape.as_list(), [None, 28, 28, 1])
Example #4
    def test_computational_graph3(self):
        # validate the number of updates found by ComputationGraph
        X = K.placeholder(shape=(None, 28, 28, 3))
        f = N.Sequence([
            N.Conv(32, 3, pad='same', activation=K.linear),
            N.BatchNorm(activation=K.relu),
            N.Flatten(outdim=2),
            N.Dense(16),
            N.BatchNorm(),
            N.Dense(10)
        ])
        K.set_training(True)
        y_train = f(X)
        K.set_training(False)
        y_score = f(X)
        self.assertTrue(
            K.get_shape(y_train) == K.get_shape(y_score)
            and K.get_shape(y_score) == (None, 10))
        cc_train = K.ComputationGraph(y_train)
        cc_score = K.ComputationGraph(y_score)
        self.assertTrue(len(cc_score.updates) == 0)
        # two BatchNorm layers, each updating a moving mean and a moving
        # variance, yield 4 update ops in training mode
        self.assertTrue(len(cc_train.updates) == 4)
        # create real function
        fn_train = K.function(X, y_train)
        fn_score = K.function(X, y_score)
        shape1 = fn_train(np.random.rand(12, 28, 28, 3)).shape
        shape2 = fn_score(np.random.rand(12, 28, 28, 3)).shape
        self.assertTrue(shape1 == shape2 and shape1 == (12, 10))
Example #5
    def odin_net2():
        "CNN"
        f = N.Sequence([
            N.Dimshuffle((0, 1, 2, 'x')),
            N.Conv(12, (3, 3), strides=(1, 1), pad='same',
                   untie_biases=False,
                   W_init=random(3, 3, 1, 12),
                   activation=K.relu),
            N.Pool(pool_size=(2, 2), strides=None, mode='max',
                   ignore_border=True),
            N.Conv(16, (3, 3), strides=(1, 1), pad='same',
                   untie_biases=False,
                   W_init=random(3, 3, 12, 16),
                   activation=K.sigmoid),
            N.Dimshuffle((0, 3, 1, 2))
        ])
        return X1, f(X1)
Example #6
    def test_dilatedConv(self):
        x = K.placeholder((None, 28, 28, 3))
        f1 = N.Conv(16, (3, 3), dilation=(2, 2))
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))

        self.assertEqual(z.shape, (12, 24, 24, 16))
        self.assertEqual(y.shape.as_list(), [None, 24, 24, 16])
Example #7
    def test_conv3D(self):
        x = K.placeholder((None, 28, 28, 28, 3))
        f1 = N.Conv(16, (3, 3, 3), strides=1, pad='valid')
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 28, 3))

        self.assertEqual(z.shape, (12, 26, 26, 26, 16))
        self.assertEqual(y.shape.as_list(), [None, 26, 26, 26, 16])
Example #8
    def test_conv_deconv_transpose(self):
        def feval(X, y):
            f = K.function(X, y)
            shape = (np.random.randint(8, 18), ) + tuple(X.shape.as_list()[1:])
            x = np.random.rand(*shape)
            return f(x)

        prog = Progbar(target=2 * 3 * 3 * 2 * 2, print_report=True)
        for X in (K.placeholder(shape=(None, 13, 12, 25)),
                  K.placeholder(shape=(None, 13, 12, 8, 25))):
            for strides in (1, 2, 3):
                for filter_size in (3, 4, 5):
                    for num_filters in (8, 25):
                        for pad in ("same", "valid"):
                            for dilation in (1, ):
                                # ====== progress ====== #
                                prog['test'] = "#Dim:%d;Stride:%d;Filter:%d;Channel:%d;Pad:%s" % \
                                    (X.shape.ndims, strides, filter_size, num_filters, pad)
                                prog.add(1)
                                # ====== test Conv ====== #
                                f = N.Conv(num_filters=num_filters,
                                           filter_size=filter_size,
                                           pad=pad,
                                           strides=strides,
                                           activation=tf.nn.relu,
                                           dilation=dilation)
                                fT = f.T
                                y = f(X)
                                self.assertEqual(
                                    feval(X, y).shape[1:],
                                    tuple(y.shape.as_list()[1:]))
                                yT = fT(y)
                                self.assertEqual(
                                    feval(X, yT).shape[1:],
                                    tuple(yT.shape.as_list()[1:]))
                                self.assertEqual(X.shape.as_list(),
                                                 yT.shape.as_list())
                                # ====== test Transpose ====== #
                                f = N.TransposeConv(num_filters=num_filters,
                                                    filter_size=filter_size,
                                                    pad=pad,
                                                    strides=strides,
                                                    activation=K.relu,
                                                    dilation=dilation)
                                fT = f.T
                                y = f(X)
                                self.assertEqual(
                                    feval(X, y).shape[1:],
                                    tuple(y.shape.as_list()[1:]))
                                yT = fT(y)
                                self.assertEqual(
                                    feval(X, yT).shape[1:],
                                    tuple(yT.shape.as_list()[1:]))
                                self.assertEqual(X.shape.as_list(),
                                                 yT.shape.as_list())
Example #9
    def test_slice_ops(self):
        X = K.placeholder(shape=(None, 28, 28, 28, 3))
        f = N.Sequence([
            N.Conv(32, 3, pad='same', activation=K.linear),
            N.BatchNorm(activation=tf.nn.relu),
            N.Flatten(outdim=4)[:, 8:12, 18:25, 13:],
        ])
        y = f(X)
        fn = K.function(X, y)
        self.assertTrue(
            fn(np.random.rand(12, 28, 28, 28, 3)).shape[1:] == tuple(
                y.shape.as_list()[1:]))
        self.assertEqual(y.shape.as_list()[1:], [4, 7, 883])
Example #10
    def test_conv2D(self):
        x = K.placeholder((None, 28, 28, 3))
        f1 = N.Conv(16, (3, 3), strides=(2, 2), pad='same')
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))

        self.assertEqual(z.shape, (12, 14, 14, 16))
        self.assertEqual(y.shape.as_list(), [None, 14, 14, 16])

        # ====== transpose convolution ====== #
        y = f1.T(y)
        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))
        self.assertEqual(z.shape, (12, 28, 28, 3))
        self.assertEqual(y.shape.as_list(), [None, 28, 28, 3])
Example #11
    def test_load_save3(self):
        X = K.placeholder(shape=(None, 28, 28))
        ops = N.Sequence([
            N.Dimshuffle(pattern=(0, 1, 2, 'x')),
            N.Conv(8, (3, 3), strides=(1, 1), pad='same', activation=K.relu),
            K.pool2d,
            N.Flatten(outdim=2),
            N.Dense(64, activation=K.relu),
            N.Dense(10, activation=K.softmax)
        ])
        y = ops(X)
        f1 = K.function(X, y)

        ops_ = cPickle.loads(
            cPickle.dumps(ops, protocol=cPickle.HIGHEST_PROTOCOL))
        y_ = ops_(X)
        f2 = K.function(X, y_)

        x = np.random.rand(32, 28, 28)
        self.assertEqual(np.sum(f1(x) - f2(x)), 0.)
Example #12
def create():
    f = N.Sequence([
        N.Conv(8, (3, 3), strides=1, pad='same'),
        N.Dimshuffle(pattern=(0, 3, 1, 2)),
        N.FlattenLeft(outdim=2),
        N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
        N.Dense(128, activation=K.relu),
        N.Dropout(level=0.3, noise_dims=None),
        N.Dense(10, activation=K.softmax)
    ],
                   debug=True)
    y = f(X)
    yT = f.T(y)
    f1 = K.function(X, y)
    f2 = K.function(X, yT)
    # pickled data must be written in binary mode
    cPickle.dump(f, open(U.get_modelpath('dummy.ai', override=True), 'wb'))

    _ = f1(x)
    print(_.shape, _.sum())
    _ = f2(x)
    print(_.shape, _.sum())
Example #13
E = tk.embed(embedding)
# these numbers must be the same for all time
print('Tokenizer:', np.sum(E), np.sum(X_train), np.sum(y_train),
      np.sum(X_valid), np.sum(y_valid))
# ===========================================================================
# Building model
# ===========================================================================
X = K.placeholder(shape=(None, MAX_SEQ_LEN), dtype='int32', name='X')
y = K.placeholder(shape=(None, nb_labels), dtype='float32', name='y')

f = N.Sequence([
    N.Embedding(tk.nb_words, embedding_dims, W_init=E),
    N.Dimshuffle(pattern=(0, 1, 'x', 2)),
    N.Conv(num_filters=128,
           filter_size=(5, 1),
           strides=1,
           pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),
    N.Conv(num_filters=128,
           filter_size=(5, 1),
           strides=1,
           pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),
    N.Conv(num_filters=128,
           filter_size=(5, 1),
           strides=1,
           pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(35, 1), pad='valid', mode='max'),
Example #14
def convolutional_vae(X, saved_states, **kwargs):
    """ convolutional_vae

    Return
    ------
    [y_encoder, y_decoder]

    States
    ------
    [f_inference (encoder), f_generative (decoder)]

    """
    n = kwargs.get('n', 10)
    batch_size = K.get_shape(X)[0]
    if batch_size is None:
        raise ValueError(
            "You must specify batch_size dimension for the input placeholder.")
    # ====== init ====== #
    if saved_states is None:
        # Encoder
        f_inference = N.Sequence([
            N.Reshape(shape=(-1, 28, 28, 1)),
            N.Conv(num_filters=32,
                   filter_size=3,
                   strides=1,
                   pad='valid',
                   b_init=init_ops.constant_initializer(0.),
                   activation=K.elu),
            N.Conv(num_filters=64,
                   filter_size=5,
                   strides=2,
                   pad='same',
                   b_init=init_ops.constant_initializer(0.),
                   activation=K.elu),
            N.Dropout(level=0.1),
            N.Flatten(outdim=2),
            N.Dense(num_units=n * 2, b_init=None),
            N.BatchNorm(axes=0)
        ],
                                 debug=True,
                                 name='Encoder')
        # Decoder
        f_generative = N.Sequence([
            N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
            N.TransposeConv(num_filters=64,
                            filter_size=3,
                            strides=1,
                            pad='valid',
                            b_init=init_ops.constant_initializer(0.),
                            activation=K.elu),
            N.TransposeConv(num_filters=32,
                            filter_size=5,
                            strides=2,
                            pad='same',
                            b_init=init_ops.constant_initializer(0.),
                            activation=K.elu),
            N.TransposeConv(num_filters=1,
                            filter_size=13,
                            strides=3,
                            pad='valid',
                            b_init=None),
            N.BatchNorm(activation=K.linear),
            N.Flatten(outdim=3)
        ],
                                  debug=True,
                                  name="Decoder")
    else:
        f_inference, f_generative = saved_states
    # ====== Perform ====== #
    # Encoder
    y_encoder = f_inference(K.cast(X, 'float32'))
    mu = y_encoder[:, :n]
    sigma = K.softplus(y_encoder[:, n:])
    qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
    # Decoder
    z = Normal(mu=K.zeros(shape=(batch_size, n)),
               sigma=K.ones(shape=(batch_size, n)),
               name="Normal_pz")
    logits = f_generative(z)
    X_reconstruct = Bernoulli(logits=logits)
    # inference
    params = f_inference.parameters + f_generative.parameters
    inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
    # ====== get cost for training ====== #
    # Bind p(x, z) and q(z | x) to the same placeholder for x.
    if K.is_training():
        import tensorflow as tf
        inference.initialize()
        if True:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            updates = optimizer.apply_gradients(
                optimizer.compute_gradients(inference.loss, var_list=params))
            init = tf.global_variables_initializer()
            init.run()
            f_train = K.function(X, inference.loss, updates)
        else:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            inference.initialize(optimizer=optimizer, var_list=params)
            init = tf.global_variables_initializer()
            init.run()
            f_train = lambda x: inference.update(feed_dict={X: x})['loss']
    samples = K.sigmoid(logits)
    return (samples, z, qz), (f_inference, f_generative)
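
A sketch of the stateful call convention documented in the docstring above: the first call (saved_states=None) builds the encoder and decoder, and passing the returned states back in reuses them; note the batch dimension must be fixed, per the check at the top of the function. Shapes here are illustrative:

X = K.placeholder(shape=(64, 28, 28), name='X')  # fixed batch_size of 64
(samples, z, qz), states = convolutional_vae(X, saved_states=None, n=10)
# a later call reuses the same encoder/decoder instead of rebuilding them
(samples2, z2, qz2), states = convolutional_vae(X, saved_states=states, n=10)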
Example #15
X_train = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:],
                        name='X_train')
X_score = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:],
                        name='X_score')
y = K.placeholder(shape=(None, ), name='y', dtype='int32')

# ===========================================================================
# Build model
# ===========================================================================
f = N.Sequence(
    [
        N.Embedding(max_features, embedding_size),
        N.Dropout(0.25),
        N.Dimshuffle(pattern=(0, 1, 'x', 2)),  # convolution on time dimension
        N.Conv(nb_filter,
               filter_size=(filter_length, 1),
               pad='valid',
               stride=(1, 1),
               activation=K.relu),
        N.Pool(pool_size=(pool_length, 1), mode='max'),
        N.Flatten(outdim=3),
        N.Merge(
            [
                N.Dense(lstm_output_size, activation=K.linear,
                        name='ingate'),  # input-gate
                N.Dense(lstm_output_size,
                        activation=K.linear,
                        name='forgetgate'),  # forget-gate
                N.Dense(lstm_output_size,
                        activation=K.linear,
                        name='cellupdate'),  # cell-update
                N.Dense(lstm_output_size, activation=K.linear,
Example #16
# ===========================================================================
# Create model
# ===========================================================================
inputs = [K.placeholder(shape=(None,) + shape[1:], dtype='float32', name='input%d' % i)
          for i, shape in enumerate(as_tuple_of_shape(train.shape))]
X = inputs[0]
y = inputs[1]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== create the networks ====== #
with N.args_scope(
    [('Conv', 'Dense'), dict(b_init=None, activation=K.linear, pad='same')],
    ['BatchNorm', dict(activation=K.relu)]):
  f = N.Sequence([
      N.Dimshuffle(pattern=(0, 1, 2, 'x')),

      N.Conv(num_filters=32, filter_size=(9, 7)), N.BatchNorm(),
      N.Pool(pool_size=(3, 2), strides=2),
      N.Conv(num_filters=64, filter_size=(5, 3)), N.BatchNorm(),
      N.Pool(pool_size=(3, 1), strides=(2, 1), name='PoolOutput1'),
      N.Conv(num_filters=64, filter_size=(5, 3)), N.BatchNorm(),
      N.Pool(pool_size=(3, 2), strides=(2, 2), name='PoolOutput2'),

      N.Flatten(outdim=2),

      N.Dense(512, name="LatentDense"), N.BatchNorm(),
      N.Dense(512), N.BatchNorm(),

      N.Dense(n_classes)
  ], debug=1)
# ====== create outputs ====== #
y_logit = f(X)
Example #17
train.set_recipes(recipes)
valid.set_recipes(recipes)
test.set_recipes(recipes)
# ===========================================================================
# Create model
# ===========================================================================
inputs = [
    K.placeholder(shape=(None, ) + shape[1:],
                  dtype='float32',
                  name='input%d' % i) for i, shape in enumerate(train.shape)
]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== create the network ====== #
f_encoder = N.Sequence([
    N.Dimshuffle(pattern=(0, 1, 2, 'x')),
    N.Conv(
        num_filters=32, filter_size=(7, 7), b_init=None, activation=K.linear),
    N.BatchNorm(),
    N.Pool(pool_size=(3, 2), strides=2),
],
                       debug=True,
                       name='Encoder')
f_latent = N.Sequence([
    N.Flatten(outdim=3),
    N.CudnnRNN(
        num_units=128, num_layers=1, is_bidirectional=False, rnn_mode='lstm'),
],
                      debug=True,
                      name='Latent')
f_decoder = N.Sequence([
    N.Flatten(outdim=2),
    N.Dense(num_units=1024, b_init=None, activation=K.linear),
Example #18
valid_feeder.set_recipes(recipes)
print('Feature shape:', train_feeder.shape)
feat_shape = (None, ) + train_feeder.shape[1:]

X = K.placeholder(shape=feat_shape, name='X')
y = K.placeholder(shape=(None, ), dtype='int32', name='y')

# ===========================================================================
# Create network
# ===========================================================================
f = N.Sequence(
    [
        N.Dimshuffle(pattern=(0, 1, 2, 'x')),
        N.Conv(num_filters=32,
               filter_size=3,
               pad='same',
               strides=1,
               activation=K.linear),
        N.BatchNorm(activation=K.relu),
        N.Conv(num_filters=64,
               filter_size=3,
               pad='same',
               strides=1,
               activation=K.linear),
        N.BatchNorm(activation=K.relu),
        N.Pool(pool_size=2, strides=None, pad='valid', mode='max'),
        N.Flatten(outdim=3),

        # ====== RNN ====== #
        N.AutoRNN(128,
                  rnn_mode='lstm',
Example #19
    ds = F.CIFAR10.get_dataset()
    USE_MNIST_DATA = False
print(ds)

X = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:], name='X')
y = K.placeholder(shape=(None, ), name='y', dtype='int32')
y_onehot = tf.one_hot(y, depth=10)
# ===========================================================================
# Build network
# ===========================================================================
if not arg.rnn:
    ops = N.Sequence([
        N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA else N.Dimshuffle(
            (0, 2, 3, 1)),
        N.BatchNorm(axes='auto'),
        N.Conv(32, (3, 3), strides=(1, 1), pad='same', activation=tf.nn.relu),
        N.Pool(pool_size=(2, 2), strides=None),
        N.Conv(64, (3, 3), strides=(1, 1), pad='same', activation=tf.nn.relu),
        N.Pool(pool_size=(2, 2), strides=None),
        N.Dropout(level=0.5),
        N.Flatten(outdim=2),
        N.Dense(256, activation=tf.nn.relu),
        N.Dense(10, activation=K.linear)
    ],
                     debug=True)
else:
    ops = N.Sequence([
        N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA else N.Dimshuffle(
            (0, 2, 3, 1)),
        N.Conv(32, filter_size=3, strides=1, pad='same', activation=K.linear),
        N.BatchNorm(axes='auto', activation=K.relu),
Example #20
inputs = [
    K.placeholder(shape=(None, ) + shape[1:],
                  dtype='float32',
                  name='input%d' % i)
    for i, shape in enumerate(as_tuple_of_shape(train.shape))
]
X = inputs[0]
y = inputs[1]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== create the networks ====== #
with N.args_scope([('Conv', 'Dense'),
                   dict(b_init=None, activation=K.linear, pad='same')],
                  ['BatchNorm', dict(activation=K.relu)]):
    f = N.Sequence([
        N.Dimshuffle(pattern=(0, 1, 2, 'x')),
        N.Conv(num_filters=32, filter_size=(9, 7)),
        N.BatchNorm(),
        N.Pool(pool_size=(3, 2), strides=2),
        N.Conv(num_filters=64, filter_size=(5, 3)),
        N.BatchNorm(),
        N.Pool(pool_size=(3, 1), strides=(2, 1), name='PoolOutput1'),
        N.Conv(num_filters=64, filter_size=(5, 3)),
        N.BatchNorm(),
        N.Pool(pool_size=(3, 2), strides=(2, 2), name='PoolOutput2'),
        N.Flatten(outdim=2),
        N.Dense(512, name="LatentDense"),
        N.BatchNorm(),
        N.Dense(512),
        N.BatchNorm(),
        N.Dense(n_classes)
    ],
Example #21
ds = F.load_cifar10()
print(ds)
X_learn = ds['X_train'][:].astype('float32') / 255.
y_learn = ds['y_train']
X_test = ds['X_test'][:].astype('float32') / 255.
y_test = ds['y_test']

# ===========================================================================
# Create network
# ===========================================================================
X = K.placeholder(shape=(None, ) + X_learn.shape[1:], name='X')
y_true = K.placeholder(shape=(None, ), name='y_true', dtype='int32')

f = N.Sequence([
    N.Dimshuffle(pattern=(0, 2, 3, 1)),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),
    N.Flatten(outdim=2),
    N.Dense(512, activation=K.relu),
    N.Dropout(level=0.5),
    N.Dense(10, activation=K.softmax)
],
               debug=True)
K.set_training(True)
y_train = f(X)
Example #22
if USE_MNIST_DATA:
    ds = fuel.load_mnist()
else:
    ds = fuel.load_cifar10()
print(ds)

X = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:], name='X')
y = K.placeholder(shape=(None, ), name='y', dtype='int32')

# ===========================================================================
# Build network
# ===========================================================================
ops = N.Sequence([
    N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA else N.Dimshuffle(
        (0, 2, 3, 1)),
    N.Conv(32, filter_size=3, strides=1, pad='same', activation=K.linear),
    N.BatchNorm(axes='auto', activation=K.relu),
    N.Pool(pool_size=2, strides=None),
    N.Dimshuffle(pattern=(0, 3, 1, 2)),
    N.Flatten(outdim=3),
    N.CudnnRNN(18,
               initial_states=None,
               rnn_mode='lstm',
               num_layers=2,
               input_mode='linear',
               direction_mode='unidirectional',
               params_split=False),
    N.Flatten(outdim=2),
    N.Dense(128, activation=K.relu),
    N.Dense(10, activation=K.softmax)
],
Example #23
# ====== create basic tensor ====== #
X = K.placeholder(shape=(None,) + input_shape[1:], name='X_input')
y = K.placeholder(shape=(None,), name='y_input')
# ===========================================================================
# Create the network
# ===========================================================================
LATENT_DROPOUT = 0.3
if args.cnn:
  with N.args_scope(([N.Conv, N.Dense], dict(b_init=None, activation=K.linear)),
                    (N.BatchNorm, dict(activation=tf.nn.elu)),
                    (N.Pool, dict(mode='max', pool_size=2))):
    f_encoder = N.Sequence([
        N.Dropout(level=0.5),
        N.Dimshuffle((0, 2, 3, 1)) if is_cifar10 else N.Dimshuffle((0, 1, 2, 'x')),

        N.Conv(num_filters=32, filter_size=3, pad='valid'),
        N.Pool(),
        N.BatchNorm(),

        N.Conv(num_filters=64, filter_size=3, pad='same'),
        N.BatchNorm(),

        N.Conv(num_filters=64, filter_size=3, pad='valid'),
        N.BatchNorm(activation=tf.nn.elu),
        N.Pool(),

        N.Flatten(outdim=2),
        N.Dense(num_units=args.dim)
    ], debug=True, name='EncoderNetwork')

    f_decoder = N.Sequence([
Example #24
# ===========================================================================
# ODIN
# ===========================================================================
X = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:],
                  name='X',
                  dtype='int32')
y = K.placeholder(shape=(None, ), name='y', dtype='int32')

net_odin = N.Sequence(
    [
        N.Embedding(input_size=max_features, output_size=embedding_size),
        N.Dropout(level=0.25),
        N.Dimshuffle(pattern=(0, 1, 'x', 2)),
        N.Conv(nb_filter, (filter_length, 1),
               strides=1,
               pad='valid',
               activation=K.relu),
        N.Pool(pool_size=(pool_length, 1), pad='valid', mode='max'),
        N.Flatten(outdim=3),
        # ====== LSTM ====== #
        N.Merge(
            [
                N.Dense(lstm_output_size, activation=K.linear,
                        name='ingate'),  # input-gate
                N.Dense(lstm_output_size,
                        activation=K.linear,
                        name='forgetgate'),  # forget-gate
                N.Dense(lstm_output_size,
                        activation=K.linear,
                        name='cellupdate'),  # cell-update
                N.Dense(lstm_output_size, activation=K.linear,