def test_noise(self):
    x = K.placeholder((2, 3))
    # noise broadcast over axis 0: every row should receive identical values
    f1 = N.Noise(level=0.5, noise_dims=0, noise_type='gaussian')
    y = f1(x)
    f = K.function(x, y, defaults={K.is_training(): True})
    z = f(np.ones((2, 3)))
    z = z.tolist()
    self.assertTrue(all(i == z[0] for i in z))

    # noise broadcast over axis 1: every column should receive identical values
    f1 = N.Noise(level=0.5, noise_dims=1, noise_type='gaussian')
    y = f1(x)
    f = K.function(x, y, defaults={K.is_training(): True})
    z = f(np.ones((2, 3)))
    z = z.T.tolist()
    self.assertTrue(all(i == z[0] for i in z))
def test_seq(self):
    X = K.placeholder((None, 28, 28, 1))
    f = N.Sequence([
        N.Conv(8, (3, 3), strides=1, pad='same'),
        N.Dimshuffle(pattern=(0, 3, 1, 2)),
        N.Flatten(outdim=2),
        N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
        N.Dense(128, activation=tf.nn.relu),
        N.Dropout(level=0.3, noise_dims=None),
        N.Dense(10, activation=tf.nn.softmax)
    ])
    y = f(X)
    yT = f.T(y)
    f1 = K.function(X, y, defaults={K.is_training(): True})
    f2 = K.function(X, yT, defaults={K.is_training(): False})

    # serialize and deserialize the whole Sequence, then rebuild the functions
    f = cPickle.loads(cPickle.dumps(f))
    y = f(X)
    yT = f.T(y)
    f3 = K.function(X, y, defaults={K.is_training(): True})
    f4 = K.function(X, yT, defaults={K.is_training(): False})

    x = np.random.rand(12, 28, 28, 1)
    self.assertEqual(f1(x).shape, (2688, 10))
    self.assertEqual(f3(x).shape, (2688, 10))
    self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
    self.assertEqual(y.shape.as_list(), [None, 10])

    self.assertEqual(f2(x).shape, (12, 28, 28, 1))
    self.assertEqual(f4(x).shape, (12, 28, 28, 1))
    self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
    self.assertEqual(yT.shape.as_list(), [None, 28, 28, 1])
def create():
    f = N.Sequence([
        N.Conv(8, (3, 3), strides=1, pad='same'),
        N.Dimshuffle(pattern=(0, 3, 1, 2)),
        N.FlattenLeft(outdim=2),
        N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
        N.Dense(128, activation=K.relu),
        N.Dropout(level=0.3, noise_dims=None),
        N.Dense(10, activation=K.softmax)
    ], debug=True)
    y = f(X)
    yT = f.T(y)
    f1 = K.function(X, y)
    f2 = K.function(X, yT)
    # pickle the whole Sequence to the model path (binary mode for pickle)
    cPickle.dump(f, open(U.get_modelpath('dummy.ai', override=True), 'wb'))

    _ = f1(x)
    print(_.shape, _.sum())
    _ = f2(x)
    print(_.shape, _.sum())
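# A minimal sketch (not part of the original file) of reloading the Sequence
# that create() pickled above and re-running it. It assumes the same `X`, `x`,
# and `U.get_modelpath` objects used in create() are available in scope, and
# that `U.get_modelpath('dummy.ai')` resolves to the path the model was dumped to.
def load_and_run():
    # binary mode mirrors the 'wb' used when the model was dumped
    with open(U.get_modelpath('dummy.ai'), 'rb') as fin:
        f = cPickle.load(fin)
    y = f(X)
    f1 = K.function(X, y)
    _ = f1(x)
    print(_.shape, _.sum())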
    N.BatchNorm(),
    N.Conv(num_filters=64, filter_size=3, pad='same'),
    N.BatchNorm(),
    N.Conv(num_filters=64, filter_size=3, pad='valid'),
    N.BatchNorm(activation=tf.nn.elu),
    N.Pool(),
    N.Flatten(outdim=2),
    N.Dense(num_units=args.dim)
], debug=True, name='EncoderNetwork')

f_decoder = N.Sequence([
    N.Dropout(level=LATENT_DROPOUT, noise_type='uniform'),
    N.Noise(level=1.0, noise_type='gaussian'),
    N.Dimshuffle((0, 'x', 'x', 1)),
    N.TransposeConv(num_filters=64, filter_size=3, pad='valid'),
    N.Upsample(size=2, axes=(1, 2)),
    N.BatchNorm(),
    N.TransposeConv(num_filters=64, filter_size=3, pad='same'),
    N.BatchNorm(),
    N.TransposeConv(num_filters=32, filter_size=3, pad='valid'),
    N.Upsample(size=2, axes=(1, 2),
               desire_shape=None if is_cifar10 else (None, 14, 14, None)),
    N.BatchNorm(),
    N.TransposeConv(num_filters=3 if is_cifar10 else 1,