def vanilla(mark, bn=False):
  z_dim = 100
  model = VAE(z_dim=z_dim, mark=mark, classes=0, sample_shape=[784],
              output_shape=[28, 28, 1])

  # Define encoder
  model.Q.add(Linear(output_dim=128))
  model.Q.add(Activation.ReLU())
  fork = Fork(name='mu_sigma')
  fork.add('mu', Linear(output_dim=z_dim))
  fork.add('sigma', Linear(output_dim=z_dim))
  model.Q.add(fork)

  # Define decoder
  model.P.add(Linear(output_dim=128))
  model.P.add(Activation.ReLU())
  model.P.add(Linear(output_dim=784))
  model.P.add(Activation('sigmoid'))

  # Build model
  model.build()

  return model
def dcgan(mark):
  # Initiate model
  model = GAN(z_dim=100, sample_shape=[28, 28, 1], mark=mark, classes=10)

  # Define generator
  model.G.add(Linear(output_dim=7 * 7 * 128))
  model.G.add(Reshape(shape=[7, 7, 128]))
  model.G.add(BatchNorm())
  model.G.add(Activation.ReLU())

  model.G.add(Deconv2D(filters=128, kernel_size=5, strides=2, padding='same'))
  model.G.add(BatchNorm())
  model.G.add(Activation.ReLU())

  model.G.add(Deconv2D(filters=1, kernel_size=5, strides=2, padding='same'))
  model.G.add(Activation('sigmoid'))
  # model.G.add(Activation('tanh'))
  # model.G.add(Rescale(from_scale=[-1., 1.], to_scale=[0., 1.]))

  # Define discriminator
  # model.D.add(Rescale(from_scale=[0., 1.], to_scale=[-1., 1.]))
  model.D.add(Conv2D(filters=128, kernel_size=5, strides=2, padding='same'))
  model.D.add(Activation.LeakyReLU())

  model.D.add(Conv2D(filters=128, kernel_size=5, strides=2, padding='same'))
  model.D.add(BatchNorm())
  model.D.add(Activation.LeakyReLU())

  model.D.add(Reshape(shape=[7 * 7 * 128]))
  model.D.add(Linear(output_dim=1))
  model.D.add(Activation('sigmoid'))

  # Build model
  optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
  model.build(loss=pedia.cross_entropy, G_optimizer=optimizer,
              D_optimizer=optimizer)

  return model
def mlp00(mark):
  # Define model
  model = TDPlayer(mark=mark)

  model.add(Input(sample_shape=[15, 15]))
  model.add(Flatten())
  model.add(Linear(225))
  model.add(Activation.ReLU())
  model.add(Linear(225))
  model.add(Activation.ReLU())
  model.add(Linear(1))
  model.add(Activation('sigmoid'))

  # Build model
  model.build()

  return model
def ka_convnet(mark):
  model = Classifier(mark=mark)
  model.add(Input(sample_shape=config.sample_shape))

  strength = 1e-5

  def ConvLayer(filters, bn=False):
    model.add(Conv2D(filters=filters, kernel_size=5, padding='same',
                     kernel_regularizer=regularizers.L2(strength=strength)))
    if bn: model.add(BatchNorm())
    model.add(Activation.ReLU())

  # Define structure
  ConvLayer(32)
  model.add(Dropout(0.5))
  ConvLayer(32, False)
  model.add(Dropout(0.5))
  model.add(MaxPool2D(2, 2, 'same'))

  ConvLayer(64, True)
  model.add(Dropout(0.5))
  model.add(MaxPool2D(2, 2, 'same'))

  model.add(Flatten())
  model.add(Linear(128))
  model.add(Activation.ReLU())
  # model.add(Dropout(0.5))
  model.add(Linear(10))

  # Build model
  model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))

  return model
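

# A minimal usage sketch (an assumption, not part of the original file): each
# builder above returns a framework model identified by its `mark` string.
# The run tags 'vae_00', 'dcgan_00', 'mlp_00' and 'ka_cnn_00' below are
# hypothetical names chosen for illustration; training, evaluation and
# checkpointing calls are omitted because they depend on framework APIs
# not shown in this section.
if __name__ == '__main__':
  vae_model = vanilla(mark='vae_00')        # VAE on flattened 28x28 images
  gan_model = dcgan(mark='dcgan_00')        # DCGAN generator/discriminator pair
  td_player = mlp00(mark='mlp_00')          # MLP value network for 15x15 boards
  cnn_model = ka_convnet(mark='ka_cnn_00')  # requires config.sample_shape to be set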