def cnn(X, y):
  nb_classes = y.shape.as_list()[-1]
  with N.args_scope(['Conv', dict(b_init=None, activation=K.linear)],
                    ['BatchNorm', dict(activation=K.relu)]):
    f = N.Sequence([
        N.Dimshuffle(pattern=(0, 2, 3, 1)),

        N.Conv(32, (3, 3), pad='same', stride=(1, 1)),
        N.BatchNorm(),
        N.Conv(32, (3, 3), pad='same', stride=(1, 1),
               b_init=0, activation=K.relu),
        N.Pool(pool_size=(2, 2), strides=None, mode='max'),
        N.Dropout(level=0.25),
        #
        N.Conv(64, (3, 3), pad='same', stride=(1, 1)),
        N.BatchNorm(),
        N.Conv(64, (3, 3), pad='same', stride=(1, 1),
               b_init=0., activation=K.relu),
        N.Pool(pool_size=(2, 2), strides=None, mode='max'),
        N.Dropout(level=0.25),
        #
        N.Flatten(outdim=2),
        N.Dense(512, activation=K.relu),
        N.Dropout(level=0.5),
        N.Dense(nb_classes, activation=K.linear)
    ], debug=1)
  logit = f(X)
  prob = tf.nn.softmax(logit)
  return {'logit': logit, 'prob': prob}
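# Returning the logits alongside the probabilities lets the loss be built on
# the logits for numerical stability. A hedged usage sketch (the placeholders
# X and one-hot y are assumptions, following the conventions of these snippets):
outputs = cnn(X, y)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=y, logits=outputs['logit']))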
def test_dropout(self):
  x = K.placeholder((4, 6))
  # noise_dims=0: one mask is broadcast along axis 0, so every row is identical
  f1 = N.Dropout(level=0.5, noise_dims=0, rescale=True)
  y = f1(x)
  f = K.function(x, y, defaults={K.is_training(): True})
  z = f(np.ones((4, 6)))
  z = z.tolist()
  self.assertTrue(all(i == z[0] for i in z))
  # noise_dims=1: the mask is broadcast along axis 1, so every column is
  # identical (hence the transpose before comparing)
  f1 = N.Dropout(level=0.5, noise_dims=1, rescale=True)
  y = f1(x)
  f = K.function(x, y, defaults={K.is_training(): True})
  z = f(np.ones((4, 6)))
  z = z.T.tolist()
  self.assertTrue(all(i == z[0] for i in z))
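# A minimal NumPy sketch of the semantics exercised by test_dropout (an
# illustration, not odin's actual implementation): the Bernoulli mask has size
# 1 along `noise_dims` and is broadcast over that axis; rescale=True divides
# by the keep rate so the expected activation is unchanged.
import numpy as np

def dropout_sketch(x, level=0.5, noise_dims=0, rescale=True, rng=np.random):
  keep = 1. - level
  mask_shape = list(x.shape)
  mask_shape[noise_dims] = 1  # tie the mask along this axis
  mask = rng.binomial(1, keep, size=mask_shape).astype(x.dtype)
  y = x * mask
  if rescale:
    y /= keep
  return y

print(dropout_sketch(np.ones((4, 6))))  # all four rows share one mask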
def test_seq(self):
  X = K.placeholder((None, 28, 28, 1))
  f = N.Sequence([
      N.Conv(8, (3, 3), strides=1, pad='same'),
      N.Dimshuffle(pattern=(0, 3, 1, 2)),
      N.Flatten(outdim=2),
      N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
      N.Dense(128, activation=tf.nn.relu),
      N.Dropout(level=0.3, noise_dims=None),
      N.Dense(10, activation=tf.nn.softmax)
  ])
  y = f(X)
  yT = f.T(y)
  f1 = K.function(X, y, defaults={K.is_training(): True})
  f2 = K.function(X, yT, defaults={K.is_training(): False})
  # the compiled functions must survive a pickle round-trip unchanged
  f = cPickle.loads(cPickle.dumps(f))
  y = f(X)
  yT = f.T(y)
  f3 = K.function(X, y, defaults={K.is_training(): True})
  f4 = K.function(X, yT, defaults={K.is_training(): False})

  x = np.random.rand(12, 28, 28, 1)

  self.assertEqual(f1(x).shape, (2688, 10))
  self.assertEqual(f3(x).shape, (2688, 10))
  self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
  # TensorShape.as_list() returns a list, so compare against a list
  self.assertEqual(y.shape.as_list(), [None, 10])

  self.assertEqual(f2(x).shape, (12, 28, 28, 1))
  self.assertEqual(f4(x).shape, (12, 28, 28, 1))
  self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
  self.assertEqual(yT.shape.as_list(), [None, 28, 28, 1])
def feedforward_vae(X, X1, f):
  if f is None:
    f = N.Sequence([
        N.Dense(num_units=10, activation=K.softmax),
        N.Dropout(level=0.5)
    ])
  return f(X), f
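# The `(output, f)` return convention lets the first call build the network
# and later calls reuse its parameters, e.g. (X_train and X_valid are
# hypothetical placeholders):
y_train, f = feedforward_vae(X_train, None, f=None)  # builds the Sequence
y_valid, _ = feedforward_vae(X_valid, None, f=f)     # shares the same weights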
def test(X, y):
  nb_classes = y.shape.as_list()[-1]
  f = N.Sequence([
      N.Flatten(outdim=2),
      N.Dense(512, activation=K.relu),
      N.Dropout(level=0.5),
      N.Dense(nb_classes, activation=K.linear)
  ], debug=2)
  logit = f(X)
  prob = tf.nn.softmax(logit)
  return {'logit': logit, 'prob': prob}
def create():
  f = N.Sequence([
      N.Conv(8, (3, 3), strides=1, pad='same'),
      N.Dimshuffle(pattern=(0, 3, 1, 2)),
      N.FlattenLeft(outdim=2),
      N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
      N.Dense(128, activation=K.relu),
      N.Dropout(level=0.3, noise_dims=None),
      N.Dense(10, activation=K.softmax)
  ], debug=True)
  y = f(X)
  yT = f.T(y)
  f1 = K.function(X, y)
  f2 = K.function(X, yT)
  # pickle data must be written in binary mode
  cPickle.dump(f, open(U.get_modelpath('dummy.ai', override=True), 'wb'))
  _ = f1(x)
  print(_.shape, _.sum())
  _ = f2(x)
  print(_.shape, _.sum())
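# `X` and `x` come from the enclosing module; a hypothetical setup consistent
# with the 28x28x1 shapes used above would be:
X = K.placeholder(shape=(None, 28, 28, 1), name='X')
x = np.random.rand(12, 28, 28, 1)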
def test_transform_then_prediction(self):
  with TemporaryDirectory() as temp:
    from sklearn.pipeline import Pipeline
    path = os.path.join(temp, 'audio.sph')
    urlretrieve(filename=path,
                url='https://s3.amazonaws.com/ai-datasets/sw02001.sph')
    f = Pipeline([
        ('mspec', model.SpeechTransform('mspec', fs=8000, vad=False)),
        ('slice', model.Transform(lambda x: x[:, :40])),
        ('pred', model.SequentialModel(N.Dropout(0.3),
                                       N.Dense(20, activation=K.relu),
                                       N.Dense(10, activation=K.softmax)))
    ])
    x1 = f.predict(path)
    x2 = f.predict_proba(path)
    # predictions must be identical after a pickle round-trip
    f = cPickle.loads(cPickle.dumps(f))
    y1 = f.predict(path)
    y2 = f.predict_proba(path)
    self.assertTrue(np.array_equal(x1, y1))
    self.assertTrue(np.array_equal(x2, y2))
print("Train shape:", ctext(X_train.shape, 'cyan')) print("Valid shape:", ctext(X_valid.shape, 'cyan')) print("Test shape:", ctext(X_test.shape, 'cyan')) # ====== create basic tensor ====== # X = K.placeholder(shape=(None,) + input_shape[1:], name='X_input') y = K.placeholder(shape=(None,), name='y_input') # =========================================================================== # Create the network # =========================================================================== LATENT_DROPOUT = 0.3 if args.cnn: with N.args_scope(([N.Conv, N.Dense], dict(b_init=None, activation=K.linear)), (N.BatchNorm, dict(activation=tf.nn.elu)), (N.Pool, dict(mode='max', pool_size=2))): f_encoder = N.Sequence([ N.Dropout(level=0.5), N.Dimshuffle((0, 2, 3, 1)) if is_cifar10 else N.Dimshuffle((0, 1, 2, 'x')), N.Conv(num_filters=32, filter_size=3, pad='valid'), N.Pool(), N.BatchNorm(), N.Conv(num_filters=64, filter_size=3, pad='same'), N.BatchNorm(), N.Conv(num_filters=64, filter_size=3, pad='valid'), N.BatchNorm(activation=tf.nn.elu), N.Pool(), N.Flatten(outdim=2), N.Dense(num_units=args.dim)
y_learn = ds['y_train']
X_test = ds['X_test'][:].astype('float32') / 255.
y_test = ds['y_test']
# ===========================================================================
# Create network
# ===========================================================================
X = K.placeholder(shape=(None,) + X_learn.shape[1:], name='X')
y_true = K.placeholder(shape=(None,), name='y_true', dtype='int32')
f = N.Sequence([
    N.Dimshuffle(pattern=(0, 2, 3, 1)),

    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(32, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),

    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Conv(64, (3, 3), pad='same', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(2, 2), ignore_border=True, strides=None, mode='max'),
    N.Dropout(level=0.25),

    N.Flatten(outdim=2),
    N.Dense(512, activation=K.relu),
    N.Dropout(level=0.5),
    N.Dense(10, activation=K.softmax)
], debug=True)
K.set_training(True)
y_train = f(X)
K.set_training(False)
y_pred = f(X)
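# A sketch of how the two graphs above are typically compiled (hedged: the
# loss wiring below is an assumption, not part of this snippet; `y_train` is
# a softmax output, so the cross-entropy is taken on probabilities):
import tensorflow as tf
cost = -tf.reduce_mean(tf.reduce_sum(
    tf.one_hot(y_true, depth=10) * tf.log(y_train + 1e-8), axis=-1))
f_train = K.function([X, y_true], cost)
f_pred = K.function(X, y_pred)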
]
X = inputs[0]
y = inputs[1]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== the network ====== #
if os.path.exists(MODEL_PATH):
  x_vec = N.deserialize(path=MODEL_PATH, force_restore_vars=True)
else:
  TRAIN_MODEL = True
  with N.args_scope(
      ['TimeDelayedConv', dict(time_pool='none', activation=K.relu)],
      ['Dense', dict(activation=K.linear, b_init=None)],
      ['BatchNorm', dict(activation=K.relu)]):
    x_vec = N.Sequence([
        N.Dropout(level=0.3),

        N.TimeDelayedConv(n_new_features=512, n_time_context=5),
        N.TimeDelayedConv(n_new_features=512, n_time_context=5),
        N.TimeDelayedConv(n_new_features=512, n_time_context=7),

        N.Dense(512),
        N.BatchNorm(),
        N.Dense(1500),
        N.BatchNorm(),

        N.StatsPool(axes=1, output_mode='concat'),
        N.Flatten(outdim=2),

        N.Dense(512, name="LatentOutput"),
        N.BatchNorm(),
        N.Dense(512),
        N.BatchNorm(),

        N.Dense(n_speakers, activation=K.linear,
def dense_creator():
  # `index` is a one-element counter from the enclosing scope, used only to
  # give every block a unique name
  net = N.Sequence([
      N.Dense(int(args.hdim),
              b_init=0 if args.no_batchnorm else None,
              activation=K.relu if args.no_batchnorm else K.linear),
      None if args.no_batchnorm else N.BatchNorm(activation=K.relu)
  ], debug=True, name="DenseBatchNorm%d" % index[0])
  index[0] += 1
  return net

f_encoder = N.Sequence([
    N.Flatten(outdim=2),
    N.Dropout(level=args.xdrop) if args.xdrop > 0 else None,
    dense_creator(),
    dense_creator(),
    N.Dropout(level=args.edrop) if args.edrop > 0 else None,
], debug=True, name='Encoder')

f_decoder = N.Sequence([
    N.Dropout(level=args.zdrop) if args.zdrop > 0 else None,
    dense_creator(),
    dense_creator(),
    N.Dropout(level=args.ddrop) if args.ddrop > 0 else None,
], debug=True, name='Decoder')
# ===========================================================================
ds = F.load_imdb(nb_words=max_features, maxlen=maxlen)
X_train = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X_train')
X_score = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X_score')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
# ===========================================================================
# Build model
# ===========================================================================
f = N.Sequence([
    N.Embedding(max_features, embedding_size),
    N.Dropout(0.25),

    N.Dimshuffle(pattern=(0, 1, 'x', 2)),
    # convolution on time dimension
    N.Conv(nb_filter, filter_size=(filter_length, 1),
           pad='valid', stride=(1, 1), activation=K.relu),
    N.Pool(pool_size=(pool_length, 1), mode='max'),
    N.Flatten(outdim=3),

    N.Merge([
        N.Dense(lstm_output_size, activation=K.linear, name='ingate'),  # input-gate
        N.Dense(lstm_output_size, activation=K.linear, name='forgetgate'),  # forget-gate
def convolutional_vae(X, saved_states, **kwargs):
  """ convolutional_vae

  Return
  ------
  [y_encoder, y_decoder]

  States
  ------
  [f_inference (encoder), f_generative (decoder)]
  """
  n = kwargs.get('n', 10)
  batch_size = K.get_shape(X)[0]
  if batch_size is None:
    raise ValueError(
        "You must specify batch_size dimension for the input placeholder.")
  # ====== init ====== #
  if saved_states is None:
    # Encoder
    f_inference = N.Sequence([
        N.Reshape(shape=(-1, 28, 28, 1)),
        N.Conv(num_filters=32, filter_size=3, strides=1, pad='valid',
               b_init=init_ops.constant_initializer(0.), activation=K.elu),
        N.Conv(num_filters=64, filter_size=5, strides=2, pad='same',
               b_init=init_ops.constant_initializer(0.), activation=K.elu),
        N.Dropout(level=0.1),
        N.Flatten(outdim=2),
        N.Dense(num_units=n * 2, b_init=None),
        N.BatchNorm(axes=0)
    ], debug=True, name='Encoder')
    # Decoder
    f_generative = N.Sequence([
        N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
        N.TransposeConv(num_filters=64, filter_size=3, strides=1, pad='valid',
                        b_init=init_ops.constant_initializer(0.),
                        activation=K.elu),
        N.TransposeConv(num_filters=32, filter_size=5, strides=2, pad='same',
                        b_init=init_ops.constant_initializer(0.),
                        activation=K.elu),
        N.TransposeConv(num_filters=1, filter_size=13, strides=3, pad='valid',
                        b_init=None),
        N.BatchNorm(activation=K.linear),
        N.Flatten(outdim=3)
    ], debug=True, name="Decoder")
  else:
    f_inference, f_generative = saved_states
  # ====== Perform ====== #
  # Encoder
  y_encoder = f_inference(K.cast(X, 'float32'))
  mu = y_encoder[:, :n]
  sigma = K.softplus(y_encoder[:, n:])
  qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
  # Decoder
  z = Normal(mu=K.zeros(shape=(batch_size, n)),
             sigma=K.ones(shape=(batch_size, n)), name="Normal_pz")
  logits = f_generative(z)
  X_reconstruct = Bernoulli(logits=logits)
  # inference
  params = f_inference.parameters + f_generative.parameters
  inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
  # ====== get cost for training ====== #
  # Bind p(x, z) and q(z | x) to the same placeholder for x.
  if K.is_training():
    import tensorflow as tf
    inference.initialize()
    if True:
      optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
      updates = optimizer.apply_gradients(
          optimizer.compute_gradients(inference.loss, var_list=params))
      init = tf.global_variables_initializer()
      init.run()
      f_train = K.function(X, inference.loss, updates)
    else:
      optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
      inference.initialize(optimizer=optimizer, var_list=params)
      init = tf.global_variables_initializer()
      init.run()
      f_train = lambda x: inference.update(feed_dict={X: x})['loss']
  samples = K.sigmoid(logits)
  return (samples, z, qz), (f_inference, f_generative)
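# Usage sketch for convolutional_vae; a fixed batch size is required by the
# check at the top of the function (the shape and names here are assumptions):
X_in = K.placeholder(shape=(128, 28, 28), name='X_vae')
(samples, z, qz), states = convolutional_vae(X_in, saved_states=None, n=10)
# a second call with saved_states reuses the same encoder/decoder parameters:
(samples, z, qz), _ = convolutional_vae(X_in, saved_states=states, n=10)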
X = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
y_onehot = tf.one_hot(y, depth=10)
# ===========================================================================
# Build network
# ===========================================================================
if not arg.rnn:
  ops = N.Sequence([
      N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA
      else N.Dimshuffle((0, 2, 3, 1)),

      N.BatchNorm(axes='auto'),
      N.Conv(32, (3, 3), strides=(1, 1), pad='same', activation=tf.nn.relu),
      N.Pool(pool_size=(2, 2), strides=None),
      N.Conv(64, (3, 3), strides=(1, 1), pad='same', activation=tf.nn.relu),
      N.Pool(pool_size=(2, 2), strides=None),
      N.Dropout(level=0.5),

      N.Flatten(outdim=2),
      N.Dense(256, activation=tf.nn.relu),
      N.Dense(10, activation=K.linear)
  ], debug=True)
else:
  ops = N.Sequence([
      N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA
      else N.Dimshuffle((0, 2, 3, 1)),

      N.Conv(32, filter_size=3, strides=1, pad='same', activation=K.linear),
      N.BatchNorm(axes='auto', activation=K.relu),
      N.Pool(pool_size=2, strides=None),
      N.Dimshuffle(pattern=(0, 3, 1, 2)),
      N.Flatten(outdim=3),
      N.CudnnRNN(18,