def gender(X, f, **kwargs):
  nb_gender = kwargs.get('nb_gender', 4)
  if f is None:
    f = N.Sequence([
        N.Dimshuffle(pattern=(0, 1, 2, 'x')),
        N.Conv(num_filters=32, filter_size=3, strides=1, b_init=None,
               pad='valid'),
        N.BatchNorm(activation=K.relu),
        N.Pool(pool_size=2, mode='avg'),
        N.Conv(num_filters=64, filter_size=3, strides=1, b_init=None,
               pad='valid'),
        N.BatchNorm(activation=K.relu),
        N.Pool(pool_size=2, mode='avg'),
        N.Flatten(outdim=3),
        N.Dense(num_units=512, b_init=None),
        N.BatchNorm(axes=(0, 1)),
        N.AutoRNN(num_units=128, rnn_mode='gru', num_layers=2,
                  input_mode='linear', direction_mode='unidirectional'),
        N.Flatten(outdim=2),
        N.Dense(num_units=nb_gender, activation=K.softmax)
    ], debug=True)
  return f(X), f
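# A hedged usage sketch for gender() above, assuming the same odin-style
# imports (N, K) used throughout these snippets and a 3-D
# (batch, time, frequency) input so the leading Dimshuffle can append a
# channel axis. The shape, name, and nb_gender value are illustrative only.
X = K.placeholder(shape=(None, 200, 40), name='X_input')
y_prob, f_gender = gender(X, f=None, nb_gender=4)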
def test_seq(self):
  X = K.placeholder((None, 28, 28, 1))
  f = N.Sequence([
      N.Conv(8, (3, 3), strides=1, pad='same'),
      N.Dimshuffle(pattern=(0, 3, 1, 2)),
      N.Flatten(outdim=2),
      N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
      N.Dense(128, activation=tf.nn.relu),
      N.Dropout(level=0.3, noise_dims=None),
      N.Dense(10, activation=tf.nn.softmax)
  ])
  y = f(X)
  yT = f.T(y)
  f1 = K.function(X, y, defaults={K.is_training(): True})
  f2 = K.function(X, yT, defaults={K.is_training(): False})

  f = cPickle.loads(cPickle.dumps(f))
  y = f(X)
  yT = f.T(y)
  f3 = K.function(X, y, defaults={K.is_training(): True})
  f4 = K.function(X, yT, defaults={K.is_training(): False})

  x = np.random.rand(12, 28, 28, 1)
  self.assertEqual(f1(x).shape, (2688, 10))
  self.assertEqual(f3(x).shape, (2688, 10))
  self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
  # shape.as_list() returns a list, so compare against a list, not a tuple
  self.assertEqual(y.shape.as_list(), [None, 10])
  self.assertEqual(f2(x).shape, (12, 28, 28, 1))
  self.assertEqual(f4(x).shape, (12, 28, 28, 1))
  self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
  self.assertEqual(yT.shape.as_list(), [None, 28, 28, 1])
def cnn(X, y):
  nb_classes = y.shape.as_list()[-1]
  with N.args_scope(['Conv', dict(b_init=None, activation=K.linear)],
                    ['BatchNorm', dict(activation=K.relu)]):
    f = N.Sequence([
        N.Dimshuffle(pattern=(0, 2, 3, 1)),
        N.Conv(32, (3, 3), pad='same', stride=(1, 1)),
        N.BatchNorm(),
        N.Conv(32, (3, 3), pad='same', stride=(1, 1),
               b_init=0, activation=K.relu),
        N.Pool(pool_size=(2, 2), strides=None, mode='max'),
        N.Dropout(level=0.25),
        #
        N.Conv(64, (3, 3), pad='same', stride=(1, 1)),
        N.BatchNorm(),
        N.Conv(64, (3, 3), pad='same', stride=(1, 1),
               b_init=0., activation=K.relu),
        N.Pool(pool_size=(2, 2), strides=None, mode='max'),
        N.Dropout(level=0.25),
        #
        N.Flatten(outdim=2),
        N.Dense(512, activation=K.relu),
        N.Dropout(level=0.5),
        N.Dense(nb_classes, activation=K.linear)
    ], debug=1)
  logit = f(X)
  prob = tf.nn.softmax(logit)
  return {'logit': logit, 'prob': prob}
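# A hedged usage sketch for cnn() above, assuming channels-first,
# CIFAR-10-style input (the leading Dimshuffle converts it to channels-last)
# and one-hot labels; placeholder shapes and names are illustrative only.
X = K.placeholder(shape=(None, 3, 32, 32), name='X')
y = K.placeholder(shape=(None, 10), name='y')
outputs = cnn(X, y)
f_prob = K.function(X, outputs['prob'])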
def odin_net2():
  "CNN"
  f = N.Sequence([
      N.Dimshuffle((0, 1, 2, 'x')),
      N.Conv(12, (3, 3), strides=(1, 1), pad='same', untie_biases=False,
             W_init=random(3, 3, 1, 12), activation=K.relu),
      N.Pool(pool_size=(2, 2), strides=None, mode='max',
             ignore_border=True),
      N.Conv(16, (3, 3), strides=(1, 1), pad='same', untie_biases=False,
             W_init=random(3, 3, 12, 16), activation=K.sigmoid),
      N.Dimshuffle((0, 3, 1, 2))
  ])
  return X1, f(X1)
def test_shape(self):
  x = K.variable(np.ones((25, 8, 12)))

  def test_func(func):
    y = func(x)
    yT = func.T(func(x))
    self.assertEqual(K.eval(y).shape, tuple(y.shape.as_list()))
    self.assertEqual(K.eval(yT).shape, (25, 8, 12))
    self.assertEqual(K.eval(yT).shape, tuple(yT.shape.as_list()))

  test_func(N.Flatten(outdim=2))
  test_func(N.Flatten(outdim=1))
  test_func(N.Reshape((25, 4, 2, 6, 2)))
  test_func(N.Dimshuffle((2, 0, 1)))
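# A minimal, self-contained NumPy sketch of the Theano-style dimshuffle
# semantics that N.Dimshuffle appears to follow in the tests above:
# integers permute existing axes and 'x' inserts a new broadcastable axis.
# `np_dimshuffle` is a hypothetical helper for illustration, not library API.
import numpy as np

def np_dimshuffle(x, pattern):
  # Permute the axes named by integers, in the order they appear.
  x = np.transpose(x, [p for p in pattern if p != 'x'])
  # Insert a singleton axis at every position marked 'x'.
  for axis, p in enumerate(pattern):
    if p == 'x':
      x = np.expand_dims(x, axis)
  return x

x = np.ones((25, 8, 12))
assert np_dimshuffle(x, (2, 0, 1)).shape == (12, 25, 8)
assert np_dimshuffle(x, (0, 1, 2, 'x')).shape == (25, 8, 12, 1)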
def test_load_save3(self):
  X = K.placeholder(shape=(None, 28, 28))
  ops = N.Sequence([
      N.Dimshuffle(pattern=(0, 1, 2, 'x')),
      N.Conv(8, (3, 3), strides=(1, 1), pad='same', activation=K.relu),
      K.pool2d,
      N.Flatten(outdim=2),
      N.Dense(64, activation=K.relu),
      N.Dense(10, activation=K.softmax)
  ])
  y = ops(X)
  f1 = K.function(X, y)

  ops_ = cPickle.loads(
      cPickle.dumps(ops, protocol=cPickle.HIGHEST_PROTOCOL))
  y_ = ops_(X)
  f2 = K.function(X, y_)

  x = np.random.rand(32, 28, 28)
  self.assertEqual(np.sum(f1(x) - f2(x)), 0.)
def create():
  f = N.Sequence([
      N.Conv(8, (3, 3), strides=1, pad='same'),
      N.Dimshuffle(pattern=(0, 3, 1, 2)),
      N.FlattenLeft(outdim=2),
      N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
      N.Dense(128, activation=K.relu),
      N.Dropout(level=0.3, noise_dims=None),
      N.Dense(10, activation=K.softmax)
  ], debug=True)
  y = f(X)
  yT = f.T(y)
  f1 = K.function(X, y)
  f2 = K.function(X, yT)
  # pickled models must be written in binary mode
  cPickle.dump(f, open(U.get_modelpath('dummy.ai', override=True), 'wb'))

  _ = f1(x)
  print(_.shape, _.sum())
  _ = f2(x)
  print(_.shape, _.sum())
print("Valid shape:", ctext(X_valid.shape, 'cyan')) print("Test shape:", ctext(X_test.shape, 'cyan')) # ====== create basic tensor ====== # X = K.placeholder(shape=(None,) + input_shape[1:], name='X_input') y = K.placeholder(shape=(None,), name='y_input') # =========================================================================== # Create the network # =========================================================================== LATENT_DROPOUT = 0.3 if args.cnn: with N.args_scope(([N.Conv, N.Dense], dict(b_init=None, activation=K.linear)), (N.BatchNorm, dict(activation=tf.nn.elu)), (N.Pool, dict(mode='max', pool_size=2))): f_encoder = N.Sequence([ N.Dropout(level=0.5), N.Dimshuffle((0, 2, 3, 1)) if is_cifar10 else N.Dimshuffle((0, 1, 2, 'x')), N.Conv(num_filters=32, filter_size=3, pad='valid'), N.Pool(), N.BatchNorm(), N.Conv(num_filters=64, filter_size=3, pad='same'), N.BatchNorm(), N.Conv(num_filters=64, filter_size=3, pad='valid'), N.BatchNorm(activation=tf.nn.elu), N.Pool(), N.Flatten(outdim=2), N.Dense(num_units=args.dim) ], debug=True, name='EncoderNetwork')
# ===========================================================================
inputs = [K.placeholder(shape=(None,) + shape[1:],
                        dtype='float32',
                        name='input%d' % i)
          for i, shape in enumerate(as_tuple_of_shape(train.shape))]
X = inputs[0]
y = inputs[1]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== create the networks ====== #
with N.args_scope(
    [('Conv', 'Dense'), dict(b_init=None, activation=K.linear, pad='same')],
    ['BatchNorm', dict(activation=K.relu)]):
  f = N.Sequence([
      N.Dimshuffle(pattern=(0, 1, 2, 'x')),
      N.Conv(num_filters=32, filter_size=(9, 7)),
      N.BatchNorm(),
      N.Pool(pool_size=(3, 2), strides=2),
      N.Conv(num_filters=64, filter_size=(5, 3)),
      N.BatchNorm(),
      N.Pool(pool_size=(3, 1), strides=(2, 1), name='PoolOutput1'),
      N.Conv(num_filters=64, filter_size=(5, 3)),
      N.BatchNorm(),
      N.Pool(pool_size=(3, 2), strides=(2, 2), name='PoolOutput2'),
      N.Flatten(outdim=2),
      N.Dense(512, name="LatentDense"),
      N.BatchNorm(),
      N.Dense(512),
      N.BatchNorm(),
      N.Dense(n_classes)
# ===========================================================================
ds = F.load_cifar10()
print(ds)
X_learn = ds['X_train'][:].astype('float32') / 255.
y_learn = ds['y_train']
X_test = ds['X_test'][:].astype('float32') / 255.
y_test = ds['y_test']
# ===========================================================================
# Create network
# ===========================================================================
X = K.placeholder(shape=(None,) + X_learn.shape[1:], name='X')
y_true = K.placeholder(shape=(None,), name='y_true', dtype='int32')
f = N.Sequence([
    N.Dimshuffle(pattern=(0, 2, 3, 1)),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),
    N.Flatten(outdim=2),
    N.Dense(512, activation=K.relu),
    N.Dropout(level=0.5),
    N.Dense(10, activation=K.softmax)
], debug=True)
K.set_training(True)
print('X_train:', X_train.shape, 'y_train:', y_train.shape)
print('X_valid:', X_valid.shape, 'y_valid:', y_valid.shape)
E = tk.embed(embedding)
# these numbers must be the same for all time
print('Tokenizer:', np.sum(E), np.sum(X_train), np.sum(y_train),
      np.sum(X_valid), np.sum(y_valid))
# ===========================================================================
# Building model
# ===========================================================================
X = K.placeholder(shape=(None, MAX_SEQ_LEN), dtype='int32', name='X')
y = K.placeholder(shape=(None, nb_labels), dtype='float32', name='y')
f = N.Sequence([
    N.Embedding(tk.nb_words, embedding_dims, W_init=E),
    N.Dimshuffle(pattern=(0, 1, 'x', 2)),
    N.Conv(num_filters=128, filter_size=(5, 1), strides=1, pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),
    N.Conv(num_filters=128, filter_size=(5, 1), strides=1, pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),
    N.Conv(num_filters=128, filter_size=(5, 1), strides=1,
# ===========================================================================
USE_MNIST_DATA = True if 'mnist' in arg['ds'].lower() else False
if USE_MNIST_DATA:
  ds = fuel.load_mnist()
else:
  ds = fuel.load_cifar10()

X = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
# ===========================================================================
# Build network
# ===========================================================================
ops = N.Sequence([
    N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA else None,
    N.BatchNorm(axes='auto'),
    N.Conv(32, (3, 3), strides=(1, 1), pad='same', activation=K.relu),
    N.Pool(pool_size=(2, 2), strides=None),
    N.Conv(64, (3, 3), strides=(1, 1), pad='same', activation=K.relu),
    N.Pool(pool_size=(2, 2), strides=None),
    N.Flatten(outdim=2),
    N.Dense(256, activation=K.relu),
    N.Dense(10, activation=K.softmax)
], debug=True)
ops = cPickle.loads(cPickle.dumps(ops))  # test if the ops is pickle-able

K.set_training(True)
y_pred_train = ops(X)
K.set_training(False)
ds = F.load_imdb(nb_words=max_features, maxlen=maxlen)
X_train = K.placeholder(shape=(None,) + ds['X_train'].shape[1:],
                        name='X_train')
X_score = K.placeholder(shape=(None,) + ds['X_train'].shape[1:],
                        name='X_score')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
# ===========================================================================
# Build model
# ===========================================================================
f = N.Sequence([
    N.Embedding(max_features, embedding_size),
    N.Dropout(0.25),
    N.Dimshuffle(pattern=(0, 1, 'x', 2)),
    # convolution on time dimension
    N.Conv(nb_filter, filter_size=(filter_length, 1), pad='valid',
           stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(pool_length, 1), mode='max'),
    N.Flatten(outdim=3),
    N.Merge([
        N.Dense(lstm_output_size, activation=K.linear,
                name='ingate'),  # input-gate
        N.Dense(lstm_output_size, activation=K.linear,
                name='forgetgate'),  # forget-gate
        N.Dense(lstm_output_size,
USE_MNIST_DATA = True if 'mnist' in arg['ds'].lower() else False
if USE_MNIST_DATA:
  ds = fuel.load_mnist()
else:
  ds = fuel.load_cifar10()
print(ds)

X = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
# ===========================================================================
# Build network
# ===========================================================================
ops = N.Sequence([
    N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA
    else N.Dimshuffle((0, 2, 3, 1)),
    N.Conv(32, filter_size=3, strides=1, pad='same', activation=K.linear),
    N.BatchNorm(axes='auto', activation=K.relu),
    N.Pool(pool_size=2, strides=None),
    N.Dimshuffle(pattern=(0, 3, 1, 2)),
    N.Flatten(outdim=3),
    N.CudnnRNN(18, initial_states=None, rnn_mode='lstm', num_layers=2,
               input_mode='linear', direction_mode='unidirectional',
               params_split=False),
    N.Flatten(outdim=2),
    N.Dense(128, activation=K.relu),
def convolutional_vae(X, saved_states, **kwargs):
  """convolutional_vae

  Return
  ------
  [y_encoder, y_decoder]

  States
  ------
  [f_inference (encoder), f_generative (decoder)]
  """
  n = kwargs.get('n', 10)
  batch_size = K.get_shape(X)[0]
  if batch_size is None:
    raise ValueError(
        "You must specify batch_size dimension for the input placeholder.")
  # ====== init ====== #
  if saved_states is None:
    # Encoder
    f_inference = N.Sequence([
        N.Reshape(shape=(-1, 28, 28, 1)),
        N.Conv(num_filters=32, filter_size=3, strides=1, pad='valid',
               b_init=init_ops.constant_initializer(0.), activation=K.elu),
        N.Conv(num_filters=64, filter_size=5, strides=2, pad='same',
               b_init=init_ops.constant_initializer(0.), activation=K.elu),
        N.Dropout(level=0.1),
        N.Flatten(outdim=2),
        N.Dense(num_units=n * 2, b_init=None),
        N.BatchNorm(axes=0)
    ], debug=True, name='Encoder')
    # Decoder
    f_generative = N.Sequence([
        N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
        N.TransposeConv(num_filters=64, filter_size=3, strides=1,
                        pad='valid',
                        b_init=init_ops.constant_initializer(0.),
                        activation=K.elu),
        N.TransposeConv(num_filters=32, filter_size=5, strides=2,
                        pad='same',
                        b_init=init_ops.constant_initializer(0.),
                        activation=K.elu),
        N.TransposeConv(num_filters=1, filter_size=13, strides=3,
                        pad='valid', b_init=None),
        N.BatchNorm(activation=K.linear),
        N.Flatten(outdim=3)
    ], debug=True, name="Decoder")
  else:
    f_inference, f_generative = saved_states
  # ====== Perform ====== #
  # Encoder
  y_encoder = f_inference(K.cast(X, 'float32'))
  mu = y_encoder[:, :n]
  sigma = K.softplus(y_encoder[:, n:])
  qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
  # Decoder
  z = Normal(mu=K.zeros(shape=(batch_size, n)),
             sigma=K.ones(shape=(batch_size, n)),
             name="Normal_pz")
  logits = f_generative(z)
  X_reconstruct = Bernoulli(logits=logits)
  # inference
  params = f_inference.parameters + f_generative.parameters
  inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
  # ====== get cost for training ====== #
  # Bind p(x, z) and q(z | x) to the same placeholder for x.
  if K.is_training():
    import tensorflow as tf
    if True:
      inference.initialize()
      optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
      updates = optimizer.apply_gradients(
          optimizer.compute_gradients(inference.loss, var_list=params))
      init = tf.global_variables_initializer()
      init.run()
      f_train = K.function(X, inference.loss, updates)
    else:
      optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
      inference.initialize(optimizer=optimizer, var_list=params)
      init = tf.global_variables_initializer()
      init.run()
      f_train = lambda x: inference.update(feed_dict={X: x})['loss']
  samples = K.sigmoid(logits)
  return (samples, z, qz), (f_inference, f_generative)
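# A hedged usage sketch for convolutional_vae() above: the batch dimension
# must be fixed (the function raises a ValueError otherwise), and
# saved_states=None builds fresh encoder/decoder networks. The shape and
# names below are illustrative only.
X = K.placeholder(shape=(128, 28, 28), name='X_input')
(samples, z, qz), states = convolutional_vae(X, saved_states=None, n=10)
f_inference, f_generative = states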