def gender(X, f, **kwargs):
    nb_gender = kwargs.get('nb_gender', 4)
    if f is None:
        f = N.Sequence([
            N.Dimshuffle(pattern=(0, 1, 2, 'x')),
            N.Conv(num_filters=32, filter_size=3, strides=1, b_init=None,
                   pad='valid'),
            N.BatchNorm(activation=K.relu),
            N.Pool(pool_size=2, mode='avg'),
            N.Conv(num_filters=64, filter_size=3, strides=1, b_init=None,
                   pad='valid'),
            N.BatchNorm(activation=K.relu),
            N.Pool(pool_size=2, mode='avg'),
            N.Flatten(outdim=3),
            N.Dense(num_units=512, b_init=None),
            N.BatchNorm(axes=(0, 1)),
            N.AutoRNN(num_units=128, rnn_mode='gru', num_layers=2,
                      input_mode='linear', direction_mode='unidirectional'),
            N.Flatten(outdim=2),
            N.Dense(num_units=nb_gender, activation=K.softmax)
        ], debug=True)
    return f(X), f
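# A minimal usage sketch for the factory above. The import line and the
# (batch, time, feature) placeholder shape are assumptions for illustration,
# not part of the original snippet: the first call (f=None) builds the
# network; later calls pass the returned network back in to reuse its weights.
# from odin import nnet as N, backend as K
X = K.placeholder(shape=(None, 100, 40), name='X')
y_proba, f = gender(X, f=None)   # builds the N.Sequence
y_proba, f = gender(X, f=f)      # reuses the same parameters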
def test_shape(self):
    x = K.variable(np.ones((25, 8, 12)))

    def test_func(func):
        y = func(x)
        yT = func.T(func(x))
        self.assertEqual(K.eval(y).shape, tuple(y.shape.as_list()))
        self.assertEqual(K.eval(yT).shape, (25, 8, 12))
        self.assertEqual(K.eval(yT).shape, tuple(yT.shape.as_list()))

    test_func(N.Flatten(outdim=2))
    test_func(N.Flatten(outdim=1))
    test_func(N.Reshape((25, 4, 2, 6, 2)))
    test_func(N.Dimshuffle((2, 0, 1)))
def test_computational_graph3(self):
    # validate the number of updates found by ComputationGraph
    X = K.placeholder(shape=(None, 28, 28, 3))
    f = N.Sequence([
        N.Conv(32, 3, pad='same', activation=K.linear),
        N.BatchNorm(activation=K.relu),
        N.Flatten(outdim=2),
        N.Dense(16),
        N.BatchNorm(),
        N.Dense(10)
    ])
    K.set_training(True)
    y_train = f(X)
    K.set_training(False)
    y_score = f(X)
    self.assertTrue(K.get_shape(y_train) == K.get_shape(y_score) and
                    K.get_shape(y_score) == (None, 10))
    cc_train = K.ComputationGraph(y_train)
    cc_score = K.ComputationGraph(y_score)
    # only the training graph carries the moving-statistics updates of the
    # two BatchNorm layers (mean + variance each, hence 4 in total)
    self.assertTrue(len(cc_score.updates) == 0)
    self.assertTrue(len(cc_train.updates) == 4)
    # create real functions
    fn_train = K.function(X, y_train)
    fn_score = K.function(X, y_score)
    shape1 = fn_train(np.random.rand(12, 28, 28, 3)).shape
    shape2 = fn_score(np.random.rand(12, 28, 28, 3)).shape
    self.assertTrue(shape1 == shape2 and shape1 == (12, 10))
def test_seq(self):
    X = K.placeholder((None, 28, 28, 1))
    f = N.Sequence([
        N.Conv(8, (3, 3), strides=1, pad='same'),
        N.Dimshuffle(pattern=(0, 3, 1, 2)),
        N.Flatten(outdim=2),
        N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
        N.Dense(128, activation=tf.nn.relu),
        N.Dropout(level=0.3, noise_dims=None),
        N.Dense(10, activation=tf.nn.softmax)
    ])
    y = f(X)
    yT = f.T(y)
    f1 = K.function(X, y, defaults={K.is_training(): True})
    f2 = K.function(X, yT, defaults={K.is_training(): False})
    # the Sequence must survive a pickle round-trip unchanged
    f = cPickle.loads(cPickle.dumps(f))
    y = f(X)
    yT = f.T(y)
    f3 = K.function(X, y, defaults={K.is_training(): True})
    f4 = K.function(X, yT, defaults={K.is_training(): False})

    x = np.random.rand(12, 28, 28, 1)
    self.assertEqual(f1(x).shape, (2688, 10))
    self.assertEqual(f3(x).shape, (2688, 10))
    self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
    self.assertEqual(y.shape.as_list(), [None, 10])

    self.assertEqual(f2(x).shape, (12, 28, 28, 1))
    self.assertEqual(f4(x).shape, (12, 28, 28, 1))
    self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
    self.assertEqual(yT.shape.as_list(), [None, 28, 28, 1])
def cnn(X, y):
    nb_classes = y.shape.as_list()[-1]
    # default arguments shared by every Conv and BatchNorm created in scope
    with N.args_scope(['Conv', dict(b_init=None, activation=K.linear)],
                      ['BatchNorm', dict(activation=K.relu)]):
        f = N.Sequence([
            N.Dimshuffle(pattern=(0, 2, 3, 1)),
            N.Conv(32, (3, 3), pad='same', stride=(1, 1)),
            N.BatchNorm(),
            N.Conv(32, (3, 3), pad='same', stride=(1, 1),
                   b_init=0, activation=K.relu),
            N.Pool(pool_size=(2, 2), strides=None, mode='max'),
            N.Dropout(level=0.25),
            #
            N.Conv(64, (3, 3), pad='same', stride=(1, 1)),
            N.BatchNorm(),
            N.Conv(64, (3, 3), pad='same', stride=(1, 1),
                   b_init=0., activation=K.relu),
            N.Pool(pool_size=(2, 2), strides=None, mode='max'),
            N.Dropout(level=0.25),
            #
            N.Flatten(outdim=2),
            N.Dense(512, activation=K.relu),
            N.Dropout(level=0.5),
            N.Dense(nb_classes, activation=K.linear)
        ], debug=1)
    logit = f(X)
    prob = tf.nn.softmax(logit)
    return {'logit': logit, 'prob': prob}
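# Hypothetical driver for the factory above (placeholder shapes and names are
# assumptions for illustration). Note that args_scope only supplies defaults:
# the second Conv of each block overrides them locally with b_init=0 and
# activation=K.relu.
X = K.placeholder(shape=(None, 3, 32, 32), name='X')   # NCHW, Dimshuffled inside
y = K.placeholder(shape=(None, 10), name='y')
outputs = cnn(X, y)                      # {'logit': ..., 'prob': ...}
f_pred = K.function(X, outputs['prob'])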
def test(X, y):
    nb_classes = y.shape.as_list()[-1]
    f = N.Sequence([
        N.Flatten(outdim=2),
        N.Dense(512, activation=K.relu),
        N.Dropout(level=0.5),
        N.Dense(nb_classes, activation=K.linear)
    ], debug=2)
    logit = f(X)
    prob = tf.nn.softmax(logit)
    return {'logit': logit, 'prob': prob}
def test_slice_ops(self):
    X = K.placeholder(shape=(None, 28, 28, 28, 3))
    f = N.Sequence([
        N.Conv(32, 3, pad='same', activation=K.linear),
        N.BatchNorm(activation=tf.nn.relu),
        # Flatten merges the trailing axes (28 * 32 = 896), then the slice
        # keeps 4, 7 and 896 - 13 = 883 elements of the remaining axes
        N.Flatten(outdim=4)[:, 8:12, 18:25, 13:],
    ])
    y = f(X)
    fn = K.function(X, y)
    self.assertTrue(
        fn(np.random.rand(12, 28, 28, 28, 3)).shape[1:] ==
        tuple(y.shape.as_list()[1:]))
    self.assertEqual(y.shape.as_list()[1:], [4, 7, 883])
def ladder1(X, y, states, **kwargs):
    noise = kwargs.get('noise', 0.3)
    # hyperparameters that denote the importance of each layer
    denoising_cost = [1000.0, 10.0, 0.10, 0.10, 0.10]
    if states is None:
        #
        f_encoder = N.Sequence([
            N.Flatten(outdim=2),
            N.Dense(num_units=1024, b_init=None),
            N.BatchNorm(axes=0, noise_level=noise, noise_dims=None,
                        activation=K.relu),
            N.Dense(num_units=512, b_init=None),
            N.BatchNorm(axes=0, noise_level=noise, noise_dims=None,
                        activation=K.relu),
            N.Dense(num_units=256, b_init=None),
            N.BatchNorm(axes=0, noise_level=noise, noise_dims=None,
                        activation=K.relu),
            N.Dense(num_units=128, b_init=None),
            N.BatchNorm(axes=0, noise_level=noise, noise_dims=None,
                        activation=K.relu),
            N.Dense(num_units=10, activation=K.softmax),
        ], all_layers=True, debug=True, name='Encoder')
        #
        f_decoder = N.Sequence([
            N.Dense(num_units=128, b_init=None),
            N.BatchNorm(axes=0, activation=K.relu),
            N.Dense(num_units=256, b_init=None),
            N.BatchNorm(axes=0, activation=K.relu),
            N.Dense(num_units=512, b_init=None),
            N.BatchNorm(axes=0, activation=K.relu),
            N.Dense(num_units=1024, b_init=None),
            N.BatchNorm(axes=0, activation=K.relu),
            N.Reshape(shape=(-1, 28, 28)),
        ], all_layers=True, debug=True, name='Decoder')
    else:
        f_encoder, f_decoder = states
    # clean and corrupted encoder passes (every second layer output)
    y_encoder_clean = f_encoder(X, noise=-1)[2::2]
    y_encoder_corrp = f_encoder(X, noise=1)[2::2]
    # debugging left in the original: inspect the collected outputs, then stop
    print(len(y_encoder_clean), len(y_encoder_corrp))
    exit()
    return (None, None), [f_encoder, f_decoder]
def test_load_save3(self):
    X = K.placeholder(shape=(None, 28, 28))
    ops = N.Sequence([
        N.Dimshuffle(pattern=(0, 1, 2, 'x')),
        N.Conv(8, (3, 3), strides=(1, 1), pad='same', activation=K.relu),
        K.pool2d,
        N.Flatten(outdim=2),
        N.Dense(64, activation=K.relu),
        N.Dense(10, activation=K.softmax)
    ])
    y = ops(X)
    f1 = K.function(X, y)

    ops_ = cPickle.loads(
        cPickle.dumps(ops, protocol=cPickle.HIGHEST_PROTOCOL))
    y_ = ops_(X)
    f2 = K.function(X, y_)

    x = np.random.rand(32, 28, 28)
    self.assertEqual(np.sum(f1(x) - f2(x)), 0.)
f_encoder = N.Sequence([
    N.Dropout(level=0.5),
    N.Dimshuffle((0, 2, 3, 1)) if is_cifar10 else N.Dimshuffle((0, 1, 2, 'x')),
    N.Conv(num_filters=32, filter_size=3, pad='valid'),
    N.Pool(),
    N.BatchNorm(),
    N.Conv(num_filters=64, filter_size=3, pad='same'),
    N.BatchNorm(),
    N.Conv(num_filters=64, filter_size=3, pad='valid'),
    N.BatchNorm(activation=tf.nn.elu),
    N.Pool(),
    N.Flatten(outdim=2),
    N.Dense(num_units=args.dim)
], debug=True, name='EncoderNetwork')

f_decoder = N.Sequence([
    N.Dropout(level=LATENT_DROPOUT, noise_type='uniform'),
    N.Noise(level=1.0, noise_type='gaussian'),
    N.Dimshuffle((0, 'x', 'x', 1)),
    N.TransposeConv(num_filters=64, filter_size=3, pad='valid'),
    N.Upsample(size=2, axes=(1, 2)),
    N.BatchNorm(),
    N.TransposeConv(num_filters=64, filter_size=3, pad='same'),
    N.BatchNorm(),
with N.args_scope(
        ['TimeDelayedConv', dict(time_pool='none', activation=K.relu)],
        ['Dense', dict(activation=K.linear, b_init=None)]):
    f = N.Sequence([
        N.Dropout(level=0.3),
        N.TimeDelayedConv(n_new_features=512, n_time_context=5),
        N.TimeDelayedConv(n_new_features=512, n_time_context=5),
        N.TimeDelayedConv(n_new_features=512, n_time_context=7,
                          name="LatentTDNN"),
        N.Dense(512),
        N.BatchNorm(activation=K.relu),
        N.Dense(1500),
        N.BatchNorm(activation=K.relu),
        N.StatsPool(axes=1, output_mode='concat'),
        N.Flatten(outdim=2, name="StatsPooling"),
        N.Dense(512, name="LatentDense"),
        N.BatchNorm(activation=K.relu),
        N.Dense(512),
        N.BatchNorm(activation=K.relu),
        N.Dense(num_units=n_classes, activation=K.linear,
                b_init=init_ops.constant_initializer(0))
    ], debug=1)
# ====== create outputs ====== #
y_logit = f(X)
y_proba = tf.nn.softmax(y_logit)
z1 = K.ComputationGraph(y_proba).get(roles=N.Dense, scope='LatentDense',
                                     beginning_scope=False)[0]
# ===========================================================================
# Build model
# ===========================================================================
f = N.Sequence([
    N.Embedding(max_features, embedding_size),
    N.Dropout(0.25),
    N.Dimshuffle(pattern=(0, 1, 'x', 2)),  # convolution on time dimension
    N.Conv(nb_filter, filter_size=(filter_length, 1), pad='valid',
           stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(pool_length, 1), mode='max'),
    N.Flatten(outdim=3),
    # one Dense projection per LSTM gate, merged by concatenation
    N.Merge([
        N.Dense(lstm_output_size, activation=K.linear, name='ingate'),      # input-gate
        N.Dense(lstm_output_size, activation=K.linear, name='forgetgate'),  # forget-gate
        N.Dense(lstm_output_size, activation=K.linear, name='cellupdate'),  # cell-update
        N.Dense(lstm_output_size, activation=K.linear, name='outgate')      # output-gate
    ], merge_function=K.concatenate),
    N.LSTM(num_units=lstm_output_size, input_mode='skip')[:, -1],
def test_mnist(self):
    ds = fuel.load_mnist()
    m = model.SequentialClassifier(
        N.Flatten(outdim=2),
        N.Dense(64, activation=K.relu),
        N.Dense(10, activation=K.softmax))
    m.set_inputs(
        K.placeholder(shape=(None, 28, 28), name='X', dtype='float32')
    ).set_outputs(
        K.placeholder(shape=(None,), name='y', dtype='int32'))
    # ====== query info ====== #
    m.path
    self.assertEqual(m.is_initialized, True)
    self.assertEqual(m.input_shape, (None, 28, 28))
    self.assertEqual(m.output_shape, (None, 10))
    # ====== training test ====== #
    m.set_training_info(learning_rate=0.001, n_epoch=3)
    m.fit(X=(ds['X_train'], ds['y_train']),
          X_valid=(ds['X_valid'], ds['y_valid']))
    score = m.score(ds['X_test'][:], ds['y_test'][:])
    self.assertEqual(score > 0.8, True,
                     msg='Test if the model gets reasonable results: %f accuracy' % score)
    # ====== make prediction and transform test ====== #
    np.random.seed(12)
    _ = np.random.rand(8, 28, 28)
    self.assertEqual(m.transform(_).shape, (8, 10))
    self.assertEqual(np.isclose(m.predict_proba(_).sum(-1), 1.).sum() == 8, True)
    self.assertEqual(len(m.predict(_)), 8)
    # ====== pickling test ====== #
    str_old = str(m)
    p_old = m.get_params(True)
    m = cPickle.loads(cPickle.dumps(m, protocol=cPickle.HIGHEST_PROTOCOL))
    str_new = str(m)
    p_new = m.get_params(True)
    # ====== test same configurations ====== #
    self.assertEqual(str_new, str_old)
    # ====== test same params ====== #
    for i, j in p_new.items():
        k = p_old[i]
        for a, b in zip(j, k):
            self.assertEqual(np.array_equal(a, b), True)
    # ====== test set params ====== #
    params = m.get_params(deep=True)
    params_new = {}
    for n, p in params.items():
        params_new[n] = [np.random.rand(*i.shape).astype('float32')
                         for i in p]
    m.set_params(**params_new)
    # test if equal to the new ones
    for i, j in m.get_params(deep=True).items():
        k = params_new[i]
        for a, b in zip(j, k):
            self.assertEqual(np.array_equal(a, b), True)
    # ====== training test ====== #
    print('Re-train the model a second time:')
    m.fit(X=(ds['X_train'], ds['y_train']),
          X_valid=(ds['X_valid'], ds['y_valid']))
    score = m.score(ds['X_test'][:], ds['y_test'][:])
    self.assertEqual(score > 0.8, True,
                     msg='Test if the model gets reasonable results: %f accuracy' % score)
CNN = [
    N.Dimshuffle(pattern=(0, 1, 2, 'x')),
    N.Conv(num_filters=32, filter_size=3, pad='same', strides=1,
           activation=K.linear),
    N.BatchNorm(activation=K.relu),
    N.Conv(num_filters=64, filter_size=3, pad='same', strides=1,
           activation=K.linear),
    N.BatchNorm(activation=K.relu),
    N.Pool(pool_size=2, strides=None, pad='valid', mode='max'),
    N.Flatten(outdim=3)
] if args['cnn'] else []

f = N.Sequence(CNN + [
    # ====== RNN ====== #
    N.AutoRNN(128, rnn_mode='lstm', num_layers=3,
              direction_mode='bidirectional', prefer_cudnn=True),
    # ====== Dense ====== #
    N.Flatten(outdim=2),
    # N.Dropout(level=0.2),  # adding dropout does not help
    N.Dense(num_units=1024, activation=K.relu),
def convolutional_vae(X, saved_states, **kwargs):
    """ convolutional_vae

    Returns
    -------
    [y_encoder, y_decoder]

    States
    ------
    [f_inference (encoder), f_generative (decoder)]
    """
    n = kwargs.get('n', 10)
    batch_size = K.get_shape(X)[0]
    if batch_size is None:
        raise ValueError(
            "You must specify batch_size dimension for the input placeholder.")
    # ====== init ====== #
    if saved_states is None:
        # Encoder
        f_inference = N.Sequence([
            N.Reshape(shape=(-1, 28, 28, 1)),
            N.Conv(num_filters=32, filter_size=3, strides=1, pad='valid',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.Conv(num_filters=64, filter_size=5, strides=2, pad='same',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.Dropout(level=0.1),
            N.Flatten(outdim=2),
            N.Dense(num_units=n * 2, b_init=None),
            N.BatchNorm(axes=0)
        ], debug=True, name='Encoder')
        # Decoder
        f_generative = N.Sequence([
            N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
            N.TransposeConv(num_filters=64, filter_size=3, strides=1,
                            pad='valid',
                            b_init=init_ops.constant_initializer(0.),
                            activation=K.elu),
            N.TransposeConv(num_filters=32, filter_size=5, strides=2,
                            pad='same',
                            b_init=init_ops.constant_initializer(0.),
                            activation=K.elu),
            N.TransposeConv(num_filters=1, filter_size=13, strides=3,
                            pad='valid', b_init=None),
            N.BatchNorm(activation=K.linear),
            N.Flatten(outdim=3)
        ], debug=True, name="Decoder")
    else:
        f_inference, f_generative = saved_states
    # ====== Perform ====== #
    # Encoder: q(z | x) parameterized by mu and sigma
    y_encoder = f_inference(K.cast(X, 'float32'))
    mu = y_encoder[:, :n]
    sigma = K.softplus(y_encoder[:, n:])
    qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
    # Decoder: p(x | z) with a standard normal prior p(z)
    z = Normal(mu=K.zeros(shape=(batch_size, n)),
               sigma=K.ones(shape=(batch_size, n)), name="Normal_pz")
    logits = f_generative(z)
    X_reconstruct = Bernoulli(logits=logits)
    # inference
    params = f_inference.parameters + f_generative.parameters
    inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
    # ====== get cost for training ====== #
    # Bind p(x, z) and q(z | x) to the same placeholder for x.
    if K.is_training():
        import tensorflow as tf
        inference.initialize()
        if True:  # manually apply the gradients of the KLqp loss
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            updates = optimizer.apply_gradients(
                optimizer.compute_gradients(inference.loss, var_list=params))
            init = tf.global_variables_initializer()
            init.run()
            f_train = K.function(X, inference.loss, updates)
        else:  # alternative: let Edward drive the update step
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            inference.initialize(optimizer=optimizer, var_list=params)
            init = tf.global_variables_initializer()
            init.run()
            f_train = lambda x: inference.update(feed_dict={X: x})['loss']
    samples = K.sigmoid(logits)
    return (samples, z, qz), (f_inference, f_generative)
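# Hypothetical driver showing the (outputs, states) contract above: the first
# call (saved_states=None) builds encoder and decoder, later calls pass the
# states back in so both reuse the same parameters. The fixed batch size is
# an assumption required by the placeholder check at the top of the function.
X = K.placeholder(shape=(64, 28, 28), name='X')
(samples, z, qz), states = convolutional_vae(X, saved_states=None)
(samples, z, qz), states = convolutional_vae(X, saved_states=states)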