def generator(self, z, y=None, share_params=False, reuse=False, training=True, name=""):
    # concat the label only when one is given (the original condition was inverted)
    if y is not None:
        x = tf.concat([z, y], axis=1)
    else:
        x = z

    x = tf.layers.flatten(x)

    x = tf.layers.dense(x, self.fc_unit, reuse=share_params, name='gen-dense-0')
    x = t.prelu(x, reuse=share_params, name='gen-prelu-0')

    x = tf.layers.dense(x, self.gf_dim * 8 * 7 * 7, reuse=share_params, name='gen-dense-1')
    x = t.batch_norm(x, reuse=share_params, is_train=training, name='gen-bn-0')
    x = t.prelu(x, reuse=share_params, name='gen-prelu-1')

    x = tf.reshape(x, (self.batch_size, 7, 7, self.gf_dim * 8))

    # x = deconv2d(x, f=self.gf_dim * 16, k=4, s=1, reuse=share_params, name='gen-deconv2d-0')
    # x = batch_norm(x, reuse=share_params, training=training, name="gen-bn-0")
    # x = prelu(x, reuse=share_params, name='gen-prelu-1')

    x = t.deconv2d(x, f=self.gf_dim * 4, k=3, s=2, reuse=share_params, name='gen-deconv2d-1')
    x = t.batch_norm(x, reuse=share_params, is_train=training, name="gen-bn-1")
    x = t.prelu(x, reuse=share_params, name='gen-prelu-2')

    x = t.deconv2d(x, f=self.gf_dim * 2, k=3, s=2, reuse=share_params, name='gen-deconv2d-2')
    x = t.batch_norm(x, reuse=share_params, is_train=training, name="gen-bn-2")
    x = t.prelu(x, reuse=share_params, name='gen-prelu-3')

    # only the last layer is domain-specific; everything above is shared
    with tf.variable_scope("generator-%s" % name, reuse=reuse):
        x = t.deconv2d(x, f=self.channel, k=6, s=1, reuse=False, name='gen-' + name + '-deconv2d-3')
        x = tf.nn.sigmoid(x, name='gen-' + name + '-sigmoid-0')

    return x
def discriminator(self, x, y=None, share_params=False, reuse=False, name=""):
    # the per-domain layers live in the named scope; the shared tail sits outside,
    # mirroring the generator above (the original condition was inverted)
    with tf.variable_scope("discriminator-%s" % name, reuse=reuse):
        if y is not None:
            x = tf.layers.flatten(x)

            x = tf.concat([x, y], axis=1)
            x = t.dense(x, self.height * self.width * self.channel, name='disc-' + name + '-dense-0-y')
            x = tf.reshape(x, self.image_shape)

        # Using strided conv2d instead of max_pool2d for speed.
        # In the CoGAN paper, max_pool2d is used.
        x = t.conv2d(x, f=self.df_dim, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-0')
        x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-0')
        # x = tf.nn.max_pool(x, ksize=2, strides=2, padding='SAME', name='disc-' + name + '-max_pool2d-0')

        x = t.conv2d(x, f=self.df_dim * 2, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-1')
        x = t.batch_norm(x, name='disc-bn-0')
        x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-1')
        # x = tf.nn.max_pool(x, ksize=2, strides=2, padding='SAME', name='disc-' + name + '-max_pool2d-1')

        x = tf.layers.flatten(x)

    x = t.dense(x, self.fc_unit, reuse=share_params, name='disc-dense-0')
    x = t.batch_norm(x, reuse=share_params, name='disc-bn-1')
    x = t.prelu(x, reuse=share_params, name='disc-prelu-2')

    x = t.dense(x, 1, reuse=share_params, name='disc-dense-1')

    return x
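# A minimal wiring sketch for the coupled pair above (an assumption for
# illustration, not code from the source): in CoGAN the two generators share
# their early layers, so the second call passes share_params=True and only the
# last, domain-specific layer differs; the discriminators mirror this. The
# method name and the tensors z, x_a, x_b are hypothetical.
def build_cogan_graph(self, z, x_a, x_b):
    g_a = self.generator(z, share_params=False, reuse=False, name="a")
    g_b = self.generator(z, share_params=True, reuse=False, name="b")

    d_real_a = self.discriminator(x_a, share_params=False, reuse=False, name="a")
    d_real_b = self.discriminator(x_b, share_params=True, reuse=False, name="b")
    d_fake_a = self.discriminator(g_a, share_params=True, reuse=True, name="a")
    d_fake_b = self.discriminator(g_b, share_params=True, reuse=True, name="b")

    return g_a, g_b, d_real_a, d_real_b, d_fake_a, d_fake_b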
def generator(self, z, reuse=None, is_train=True):
    """
    :param z: embeddings
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        x = tf.reshape(z, (-1, 1, 1, self.z_dim))

        x = t.deconv2d(x, self.df_dim * 8, 4, 1, pad='VALID', name='gen-deconv2d-1')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
        x = tf.nn.relu(x)

        for i in range(1, 4):
            x = t.deconv2d(x, self.df_dim * 8 // (2 ** i), 4, 2, name='gen-deconv2d-%d' % (i + 1))
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-%d' % (i + 1))
            x = tf.nn.relu(x)

        x = t.deconv2d(x, self.channel, 4, 2, name='gen-deconv2d-5')
        x = tf.nn.tanh(x)

        return x
def discriminator(self, x, reuse=None):
    """
    :param x: images
    :param reuse: re-usable
    :return: logits
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        x = t.conv2d(x, self.df_dim * 1, 4, 2, name='disc-conv2d-1')
        x = tf.nn.leaky_relu(x, alpha=0.1)

        x = t.conv2d(x, self.df_dim * 2, 4, 2, name='disc-conv2d-2')
        x = t.batch_norm(x, name='disc-bn-1')
        x = tf.nn.leaky_relu(x, alpha=0.1)

        x = t.conv2d(x, self.df_dim * 4, 4, 2, name='disc-conv2d-3')
        x = t.batch_norm(x, name='disc-bn-2')
        x = tf.nn.leaky_relu(x, alpha=0.1)

        x = t.conv2d(x, self.df_dim * 8, 4, 2, name='disc-conv2d-4')
        x = t.batch_norm(x, name='disc-bn-3')
        x = tf.nn.leaky_relu(x, alpha=0.1)

        x = tf.layers.flatten(x)

        x = t.dense(x, self.fc_unit, name='disc-fc-1')
        x = t.batch_norm(x, name='disc-bn-4')
        x = tf.nn.leaky_relu(x, alpha=0.1)

        x = t.dense(x, 1 + self.n_cont + self.n_cat, name='disc-fc-2')

        prob, cont, cat = x[:, 0], x[:, 1:1 + self.n_cont], x[:, 1 + self.n_cont:]  # logits

        prob = tf.nn.sigmoid(prob)  # probability
        cat = tf.nn.softmax(cat)    # categories

        return prob, cont, cat
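# A hedged sketch of how the three heads above are typically consumed in
# InfoGAN-style training (assumed here, not taken from the source): the
# adversarial loss uses `prob`, while the mutual-information term penalizes
# reconstruction of the sampled latent codes. `c_cat` (one-hot categorical)
# and `c_cont` (continuous) are illustrative placeholder tensors.
import tensorflow as tf

def info_loss(c_cat, c_cont, cat_hat, cont_hat, eps=1e-8):
    # categorical code: cross-entropy against the softmax head
    cat_loss = -tf.reduce_mean(tf.reduce_sum(c_cat * tf.log(cat_hat + eps), axis=1))
    # continuous code: squared error against the (linear) continuous head
    cont_loss = tf.reduce_mean(tf.reduce_sum(tf.square(c_cont - cont_hat), axis=1))
    return cat_loss + cont_loss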
def generator(self, z, reuse=None, is_train=True):
    """
    # referred architecture in the paper: (1024)4c - (512)4c2s - (256)4c2s - (128)4c2s - (3)4c2s
    :param z: embeddings
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        x = t.dense(z, self.gf_dim * 8 * 4 * 4, name='gen-fc-1')

        x = tf.reshape(x, (-1, 4, 4, self.gf_dim * 8))
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 4, 4, 2, name='gen-deconv2d-1')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 2, 4, 2, name='gen-deconv2d-2')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-3')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 1, 4, 2, name='gen-deconv2d-3')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-4')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.channel, 4, 2, name='gen-deconv2d-4')
        x = tf.nn.tanh(x)

        return x
def generator(self, z, c, reuse=None, is_train=True):
    """
    :param z: 139 z-noise
    :param c: 10 categories * 10 dimensions
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        x = tf.concat([z, c], axis=1)  # (-1, 128 + 1 + 10)

        x = t.dense(x, 2 * 2 * 512, name='gen-fc-1')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
        x = tf.nn.relu(x)

        x = tf.reshape(x, (-1, 2, 2, 512))

        x = t.deconv2d(x, self.gf_dim * 8, 4, 2, name='gen-deconv2d-1')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 4, 4, 2, name='gen-deconv2d-2')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 2, 4, 2, name='gen-deconv2d-3')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 1, 4, 2, name='gen-deconv2d-4')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, 3, 4, 2, name='gen-deconv2d-5')
        x = tf.nn.tanh(x)

        return x
def residual_block(x, f, name="", _is_train=True): with tf.variable_scope(name): shortcut = tf.identity(x, name='n64s1-shortcut') x = t.conv2d(x, f, 3, 1, name="n64s1-1") x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-1") x = t.prelu(x, reuse=reuse, name='n64s1-prelu-1') x = t.conv2d(x, f, 3, 1, name="n64s1-2") x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-2") x = tf.add(x, shortcut) return x
def generator(self, z, reuse=None, is_train=True):
    with tf.variable_scope('generator', reuse=reuse):
        x = t.dense(z, self.gfc_unit, name='gen-fc-1')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
        x = tf.nn.relu(x)

        x = t.dense(x, self.gfc_unit, name='gen-fc-2')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
        x = tf.nn.relu(x)

        x = t.dense(x, self.n_input, name='gen-fc-3')
        x = tf.nn.sigmoid(x)

        return x
def generator(self, y, z, reuse=None, is_train=True):
    """
    # Following the G network (CIFAR-like) referred to in the paper
    :param y: image label
    :param z: image noise
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        x = tf.concat([z, y], axis=1)

        x = tf.layers.dense(x, self.gf_dim * 2, name='g-fc-0')
        x = tf.nn.relu(x)

        x = tf.layers.dense(x, self.gf_dim * 7 * 7, name='g-fc-1')
        x = tf.nn.relu(x)

        x = tf.reshape(x, [-1, 7, 7, self.gf_dim])

        x = t.deconv2d(x, f=self.gf_dim // 2, k=5, s=2, name='g-deconv-1')
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.relu(x)

        x = t.deconv2d(x, f=1, k=5, s=2, name='g-deconv-2')  # channel
        x = tf.nn.sigmoid(x)
        # x = tf.nn.tanh(x)

        return x
def discriminator(self, x, reuse=None):
    """
    # Following the D network (CIFAR-like) referred to in the paper
    :param x: image, shape=(-1, 28, 28, 1)
    :param reuse: re-usable
    :return: logits, networks
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        x = t.conv2d(x, self.df_dim, k=3, s=2, name='d-conv-0')
        x = tf.nn.leaky_relu(x)
        x = tf.layers.dropout(x, 0.5, name='d-dropout-0')

        for i in range(1, 2 * 2 + 1):
            f = self.df_dim * (i + 1)

            x = t.conv2d(x, f=f, k=3, s=(i % 2 + 1), name='d-conv-%d' % i)
            x = t.batch_norm(x)
            x = tf.nn.leaky_relu(x)
            x = tf.layers.dropout(x, 0.5, name='d-dropout-%d' % i)

        x = tf.layers.flatten(x)

        x = t.dense(x, self.fc_unit * 2, name='d-fc-1')
        net = tf.nn.leaky_relu(x)

        x = tf.layers.dense(net, 1, name='d-fc-2')  # logits

        return x, net
def generator(self, z, reuse=None):
    with tf.variable_scope('generator', reuse=reuse):
        x = t.dense(z, self.gf_dim * 4 * 4 * 4, name='gen-fc-1')
        x = t.batch_norm(x, reuse=reuse, name='gen-bn-1')
        x = tf.nn.relu(x)

        x = tf.reshape(x, (-1, 4, 4, self.gf_dim * 4))

        for i in range(1, 4):
            x = t.deconv2d(x, self.gf_dim * 4 // (2 ** (i - 1)), 5, 2, name='gen-deconv2d-%d' % i)
            x = t.batch_norm(x, reuse=reuse, name='gen-bn-%d' % (i + 1))
            x = tf.nn.relu(x)

        x = t.deconv2d(x, self.channel, 5, 1, name='gen-deconv2d-5')
        x = tf.nn.tanh(x)

        return x
def generator(self, z, reuse=None, is_train=True):
    with tf.variable_scope('generator', reuse=reuse):
        x = t.dense(z, self.gf_dim * 8 * 4 * 4, name='gen-fc-1')

        x = tf.reshape(x, [-1, 4, 4, self.gf_dim * 8])
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
        x = tf.nn.relu(x)

        for i in range(1, 4):
            x = t.deconv2d(x, self.gf_dim * 4, 3, 2, name='gen-deconv2d-%d' % i)
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-%d' % (i + 1))
            x = tf.nn.relu(x)

        x = t.conv2d(x, self.channel, 3, name='gen-conv2d-1')
        x = tf.nn.sigmoid(x)

        return x
def discriminator(self, x, reuse=None):
    """
    # Following the D network (CIFAR-like) referred to in the paper
    :param x: images
    :param reuse: re-usable
    :return: classification, probability (fake or real), network
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        x = t.conv2d(x, self.df_dim, 3, 2, name='disc-conv2d-1')
        x = tf.nn.leaky_relu(x, alpha=0.2)
        x = tf.layers.dropout(x, 0.5, name='disc-dropout2d-1')

        for i in range(5):
            x = t.conv2d(x, self.df_dim * (2 ** (i + 1)), k=3, s=(i % 2 + 1), name='disc-conv2d-%d' % (i + 2))
            x = t.batch_norm(x, reuse=reuse, name="disc-bn-%d" % (i + 1))
            x = tf.nn.leaky_relu(x, alpha=0.2)
            # offset by 2 so the dropout names do not collide with 'disc-dropout2d-1' above
            x = tf.layers.dropout(x, 0.5, name='disc-dropout2d-%d' % (i + 2))

        net = tf.layers.flatten(x)

        cat = t.dense(net, self.n_classes, name='disc-fc-cat')
        disc = t.dense(net, 1, name='disc-fc-disc')

        return cat, disc, net
def discriminator(self, x, reuse=None):
    """
    # Following the network architecture referred to in the paper
    :param x: input images, (-1, 384, 384, 3)
    :param reuse: re-usability
    :return: logits for HR (High Resolution) vs. SR (Super Resolution) images
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        x = t.conv2d(x, self.df_dim, 3, 1, name='n64s1-1')
        x = tf.nn.leaky_relu(x)

        strides = [2, 1]
        filters = [1, 2, 2, 4, 4, 8, 8]

        # filters are multiples of df_dim (n64 ... n512 in the paper's naming);
        # the original passed the bare multiplier as the filter count
        for i, f in enumerate(filters):
            x = t.conv2d(x, f=f * self.df_dim, k=3, s=strides[i % 2],
                         name='n%ds%d-%d' % (f * self.df_dim, strides[i % 2], i + 1))
            x = t.batch_norm(x, name='n%d-bn-%d' % (f * self.df_dim, i + 1))
            x = tf.nn.leaky_relu(x)

        x = tf.layers.flatten(x)  # (-1, 24 * 24 * 512)

        x = t.dense(x, 1024, name='disc-fc-1')
        x = tf.nn.leaky_relu(x)

        x = t.dense(x, 1, name='disc-fc-2')
        # x = tf.nn.sigmoid(x)

        return x
def discriminator(self, x, y=None, reuse=None, is_train=True):
    """
    :param x: images
    :param y: labels
    :param reuse: re-usable
    :param is_train: en/disable batch_norm, default True
    :return: feature map (for feature matching), logits
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        if y is not None:
            raise NotImplementedError("[-] Not Implemented Yet...")

        x = t.conv2d(x, f=self.gf_dim * 1, name="disc-conv2d-0")
        x = tf.nn.leaky_relu(x)

        for i in range(1, 4):
            x = t.conv2d(x, f=self.gf_dim * (2 ** i), name="disc-conv2d-%d" % i)
            x = t.batch_norm(x, is_train=is_train)
            x = tf.nn.leaky_relu(x)

        feature_match = x  # (-1, 8, 8, 512)

        x = tf.layers.flatten(x)

        x = t.dense(x, 1, name='disc-fc-0')

        return feature_match, x
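# `feature_match` being returned alongside the logits suggests a
# feature-matching loss (Salimans et al., 2016). A minimal sketch under that
# assumption (the function name and tensors f_real/f_fake are illustrative):
# match the batch-averaged intermediate features of real and generated data.
import tensorflow as tf

def feature_matching_loss(f_real, f_fake):
    # compare mean activations of the chosen discriminator layer
    return tf.reduce_mean(tf.square(tf.reduce_mean(f_real, axis=0) -
                                    tf.reduce_mean(f_fake, axis=0)))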
def generator(self, z, y, reuse=None, is_train=True):
    """
    # Following the G network (CIFAR-like) referred to in the paper
    :param z: noise
    :param y: image label
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        x = tf.concat([z, y], axis=1)  # (-1, 110)

        x = t.dense(x, self.gf_dim, name='gen-fc-1')
        x = tf.nn.relu(x)

        x = tf.reshape(x, (-1, 4, 4, 24))

        for i in range(1, 3):
            x = t.deconv2d(x, self.gf_dim // (2 ** i), 5, 2, name='gen-deconv2d-%d' % (i + 1))
            x = t.batch_norm(x, is_train=is_train, reuse=reuse, name="gen-bn-%d" % i)
            x = tf.nn.relu(x)

        x = t.deconv2d(x, self.channel, 5, 2, name='gen-deconv2d-4')
        x = tf.nn.tanh(x)  # scaling to [-1, 1]

        return x
def generator(self, z, y=None, reuse=None, is_train=True):
    """
    :param z: embeddings
    :param y: labels
    :param reuse: re-usable
    :param is_train: en/disable batch_norm, default True
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        if y is not None:
            raise NotImplementedError("[-] Not Implemented Yet...")

        x = t.dense(z, f=self.fc_unit, name='gen-fc-0')
        x = tf.nn.leaky_relu(x)

        x = tf.reshape(x, [-1, 8, 8, self.fc_unit // (8 * 8)])

        for i in range(1, 4):
            x = t.deconv2d(x, f=self.gf_dim * (2 ** i), name="gen-conv2d-%d" % i)
            x = t.batch_norm(x, is_train=is_train)
            x = tf.nn.leaky_relu(x)

        x = t.deconv2d(x, f=3, s=1, name="gen-conv2d-4")  # (-1, 64, 64, 3)
        x = tf.sigmoid(x)  # [0, 1]

        return x
def generator(self, z, y=None, share_params=False, reuse=False, name=""): x = t.dense(z, self.fc_g_unit, reuse=share_params, name='gen-fc-1') x = t.batch_norm(x, reuse=share_params, name='gen-bn-1') x = t.prelu(x, reuse=share_params, name='gen-prelu-1') x = t.dense(x, self.gf_dim * 8 * 7 * 7, reuse=share_params, name='gen-fc-2') x = t.batch_norm(x, reuse=share_params, name='gen-bn-2') x = t.prelu(x, reuse=share_params, name='gen-prelu-2') x = tf.reshape(x, (-1, 7, 7, self.gf_dim * 8)) for i in range(1, 3): x = t.deconv2d(x, f=self.gf_dim * 4 // i, k=3, s=2, reuse=share_params, name='gen-deconv2d-%d' % i) x = t.batch_norm(x, reuse=share_params, name="gen-bn-%d" % (i + 2)) x = t.prelu(x, reuse=share_params, name='gen-prelu-%d' % (i + 2)) """ x = z # tf.concat([z, y], axis=1) loop = 5 for i in range(1, loop): x = t.dense(x, self.fc_g_unit, reuse=share_params, name='gen-fc-%d' % i) x = t.batch_norm(x, reuse=share_params, name='gen-bn-%d' % i) x = t.prelu(x, reuse=share_params, name='gen-prelu-%d' % i) """ with tf.variable_scope("generator-%s" % name, reuse=reuse): x = t.deconv2d(x, f=self.channel, k=6, s=1, reuse=False, name='gen-' + name + '-deconv2d-3') x = tf.nn.sigmoid(x, name='gen' + name + '-sigmoid-1') """ x = t.dense(x, self.n_input, reuse=False, name='gen-' + name + '-fc-%d' % loop) x = tf.nn.sigmoid(x) """ return x
def generator(self, z, reuse=None, is_train=True):
    """
    :param z: noise
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        f = self.gf_dim * 8

        x = t.dense_alt(z, 4 * 4 * f, sn=True, name='gen-fc-1')

        x = tf.reshape(x, (-1, 4, 4, f))

        for i in range(self.n_layer // 2):
            if self.up_sampling:
                x = t.up_sampling(x, interp=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                x = t.conv2d_alt(x, f // 2, 5, 1, pad=2, sn=True, use_bias=False, name='gen-conv2d-%d' % (i + 1))
            else:
                x = t.deconv2d_alt(x, f // 2, 4, 2, sn=True, use_bias=False, name='gen-deconv2d-%d' % (i + 1))

            x = t.batch_norm(x, is_train=is_train, name='gen-bn-%d' % i)
            x = tf.nn.relu(x)

            f //= 2

        # Self-Attention Layer
        x = self.attention(x, f, reuse=reuse)

        for i in range(self.n_layer // 2, self.n_layer):
            if self.up_sampling:
                x = t.up_sampling(x, interp=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                x = t.conv2d_alt(x, f // 2, 5, 1, pad=2, sn=True, use_bias=False, name='gen-conv2d-%d' % (i + 1))
            else:
                x = t.deconv2d_alt(x, f // 2, 4, 2, sn=True, use_bias=False, name='gen-deconv2d-%d' % (i + 1))

            x = t.batch_norm(x, is_train=is_train, name='gen-bn-%d' % i)
            x = tf.nn.relu(x)

            f //= 2

        x = t.conv2d_alt(x, self.channel, 5, 1, pad=2, sn=True, name='gen-conv2d-%d' % (self.n_layer + 1))
        x = tf.nn.tanh(x)

        return x
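# `self.attention` is not shown in this section; a hedged sketch of the
# SAGAN-style self-attention block it presumably implements. The layer sizes
# and the `t.conv2d_alt` signature are assumed from their usage above; this is
# an illustration, not the repo's actual method.
import tensorflow as tf

def attention(self, x, f_, reuse=None):
    with tf.variable_scope("attention", reuse=reuse):
        f = t.conv2d_alt(x, f_ // 8, 1, 1, sn=True, name='attn-f')  # query
        g = t.conv2d_alt(x, f_ // 8, 1, 1, sn=True, name='attn-g')  # key
        h = t.conv2d_alt(x, f_, 1, 1, sn=True, name='attn-h')       # value

        shape = tf.shape(x)
        f_flat = tf.reshape(f, (shape[0], -1, f_ // 8))  # (N, HW, C/8)
        g_flat = tf.reshape(g, (shape[0], -1, f_ // 8))
        h_flat = tf.reshape(h, (shape[0], -1, f_))

        # attention map over all spatial positions
        beta = tf.nn.softmax(tf.matmul(f_flat, g_flat, transpose_b=True))  # (N, HW, HW)
        o = tf.reshape(tf.matmul(beta, h_flat), shape)

        # learnable residual blend; gamma starts at 0 (identity mapping)
        gamma = tf.get_variable("gamma", shape=(), initializer=tf.zeros_initializer())
        return gamma * o + x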
def encoder(self, x, reuse=None):
    """
    (64)4c2s - (128)4c2s - (256)4c2s
    :param x: images
    :param reuse: re-usable
    :return: logits
    """
    with tf.variable_scope('encoder', reuse=reuse):
        x = t.conv2d(x, self.df_dim * 1, 4, 2, name='enc-conv2d-1')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 2, 4, 2, name='enc-conv2d-2')
        x = t.batch_norm(x, name='enc-bn-1')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 4, 4, 2, name='enc-conv2d-3')
        x = t.batch_norm(x, name='enc-bn-2')
        x = tf.nn.leaky_relu(x)

        return x
def decoder(self, x, reuse=None):
    """
    (128)4c2s - (64)4c2s - (3)4c2s
    :param x: embeddings
    :param reuse: re-usable
    :return: prob
    """
    with tf.variable_scope('decoder', reuse=reuse):
        x = t.deconv2d(x, self.df_dim * 2, 4, 2, name='dec-deconv2d-1')
        x = t.batch_norm(x, name='dec-bn-1')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.df_dim * 1, 4, 2, name='dec-deconv2d-2')
        x = t.batch_norm(x, name='dec-bn-2')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.channel, 4, 2, name='dec-deconv2d-3')
        x = tf.nn.tanh(x)

        return x
def generator(self, x, reuse=None, is_train=True):
    """
    :param x: LR (Low Resolution) images, (-1, 96, 96, 3)
    :param reuse: scope re-usability
    :param is_train: is trainable, default True
    :return: SR (Super Resolution) images, (-1, 384, 384, 3)
    """
    with tf.variable_scope("generator", reuse=reuse):
        def residual_block(x, f, name="", _is_train=True):
            with tf.variable_scope(name):
                shortcut = tf.identity(x, name='n64s1-shortcut')

                x = t.conv2d(x, f, 3, 1, name="n64s1-1")
                x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-1")
                x = t.prelu(x, reuse=reuse, name='n64s1-prelu-1')

                x = t.conv2d(x, f, 3, 1, name="n64s1-2")
                x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-2")

                x = tf.add(x, shortcut)

                return x

        x = t.conv2d(x, self.gf_dim, 9, 1, name='n64s1-1')
        x = t.prelu(x, name='n64s1-prelu-1')

        skip_conn = tf.identity(x, name='skip_connection')

        # B residual blocks (B = 16 in the paper)
        for i in range(1, 17):
            x = residual_block(x, self.gf_dim, name='b-residual_block_%d' % i, _is_train=is_train)

        x = t.conv2d(x, self.gf_dim, 3, 1, name='n64s1-3')
        x = t.batch_norm(x, is_train=is_train, name='n64s1-bn-3')

        x = tf.add(x, skip_conn)

        # sub-pixel conv2d blocks, each up-scaling by 2x
        for i in range(1, 3):
            x = t.conv2d(x, self.gf_dim * 4, 3, 1, name='n256s1-%d' % (i + 2))
            x = t.sub_pixel_conv2d(x, f=None, s=2)
            x = t.prelu(x, name='n256s1-prelu-%d' % i)

        x = t.conv2d(x, self.channel, 9, 1, name='n3s1')  # (-1, 384, 384, 3)
        x = tf.nn.tanh(x)

        return x
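# `t.sub_pixel_conv2d` above is SRGAN's pixel-shuffle upscale. A minimal TF 1.x
# sketch of what it plausibly does, matching the f=None, s=2 call above (an
# assumption, not the repo's implementation): rearrange (H, W, C * s^2)
# feature maps into (H * s, W * s, C).
import tensorflow as tf

def sub_pixel_conv2d(x, f=None, s=2):
    return tf.depth_to_space(x, s)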
def generator(self, z, reuse=None, is_train=True):
    with tf.variable_scope('generator', reuse=reuse):
        x = t.dense(z, self.gf_dim * 8 * 4 * 4)

        x = tf.reshape(x, [-1, 4, 4, self.gf_dim * 8])
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.leaky_relu(x)

        x = t.deconv2d(x, self.gf_dim * 4, name='g-deconv-1')
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.leaky_relu(x)

        x = t.deconv2d(x, self.gf_dim * 2, name='g-deconv-2')
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.leaky_relu(x)

        logits = t.deconv2d(x, self.channel, name='g-deconv-3')
        prob = tf.nn.tanh(logits)

        return prob
def generator(self, x, reuse=None, is_train=True):
    with tf.variable_scope("generator", reuse=reuse):
        for i in range(2):
            x = t.dense(x, self.fc_unit, name='g-fc-%d' % i)
            x = t.batch_norm(x, is_train=is_train)
            x = tf.nn.leaky_relu(x)

        logits = t.dense(x, self.n_input, name='g-fc-2')
        prob = tf.nn.sigmoid(logits)

        return prob
def discriminator(self, x, reuse=None):
    """
    Same as DCGAN Disc Net
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        x = t.conv2d(x, self.df_dim * 1, 5, 2, name='disc-conv2d-1')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 2, 5, 2, name='disc-conv2d-2')
        x = t.batch_norm(x, name='disc-bn-1')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 4, 5, 2, name='disc-conv2d-3')
        x = t.batch_norm(x, name='disc-bn-2')
        x = tf.nn.leaky_relu(x)

        x = tf.layers.flatten(x)

        logits = t.dense(x, 1, name='disc-fc-1')
        prob = tf.nn.sigmoid(logits)

        return prob, logits
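# The (prob, logits) pair returned above fits the usual DCGAN sigmoid
# cross-entropy objective; a standard sketch of those losses (assumed training
# setup, not copied from the source; d_real_logits/d_fake_logits are the
# discriminator outputs on real and generated batches):
import tensorflow as tf

def gan_losses(d_real_logits, d_fake_logits):
    bce = tf.nn.sigmoid_cross_entropy_with_logits
    # discriminator: real -> 1, fake -> 0
    d_loss = tf.reduce_mean(bce(logits=d_real_logits, labels=tf.ones_like(d_real_logits))) + \
             tf.reduce_mean(bce(logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
    # generator: fool the discriminator (fake -> 1)
    g_loss = tf.reduce_mean(bce(logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))
    return d_loss, g_loss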
def discriminator(self, x, reuse=None):
    with tf.variable_scope('discriminator', reuse=reuse):
        for i in range(1, 4):
            x = t.conv2d(x, self.gf_dim * (2 ** (i - 1)), 3, 2, name='disc-conv2d-%d' % i)
            x = t.batch_norm(x, name='disc-bn-%d' % i)
            x = tf.nn.leaky_relu(x, alpha=0.3)

        x = tf.layers.flatten(x)

        x = t.dense(x, 1, name='disc-fc-1')

        return x
def generator(self, z, reuse=None, is_train=True):
    with tf.variable_scope('generator', reuse=reuse):
        x = t.dense(z, self.fc_unit, name='gen-fc-0')
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.leaky_relu(x)

        x = t.dense(x, self.gf_dim * 4 * 7 * 7, name='gen-fc-1')
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.leaky_relu(x)

        x = tf.reshape(x, [-1, 7, 7, self.gf_dim * 4])

        x = t.deconv2d(x, self.gf_dim * 2, name='gen-deconv2d-0')
        x = t.batch_norm(x, is_train=is_train)
        x = tf.nn.leaky_relu(x)

        logits = t.deconv2d(x, self.channel, name='gen-deconv2d-1')
        prob = tf.nn.sigmoid(logits)

        return prob
def res_block(x, f, scale_type, use_bn=True, name=""): with tf.variable_scope("res_block-%s" % name): assert scale_type in ["up", "down"] scale_up = False if scale_type == "down" else True ssc = x x = t.batch_norm(x, name="bn-1") if use_bn else x x = tf.nn.relu(x) x = t.conv2d_alt(x, f, sn=True, name="conv2d-1") x = t.batch_norm(x, name="bn-2") if use_bn else x x = tf.nn.relu(x) if not scale_up: x = t.conv2d_alt(x, f, sn=True, name="conv2d-2") x = tf.layers.average_pooling2d(x, pool_size=(2, 2)) else: x = t.deconv2d_alt(x, f, sn=True, name="up-sampling") return x + ssc
def encoder(self, x, reuse=None):
    with tf.variable_scope('encoder', reuse=reuse):
        x = t.conv2d(x, self.df_dim * 1, 5, 2, name='enc-conv2d-1')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 2, 5, 2, name='enc-conv2d-2')
        x = t.batch_norm(x, name='enc-bn-1')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 4, 5, 2, name='enc-conv2d-3')
        x = t.batch_norm(x, name='enc-bn-2')
        x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, self.df_dim * 8, 5, 2, name='enc-conv2d-4')
        x = t.batch_norm(x, name='enc-bn-3')
        x = tf.nn.leaky_relu(x)

        x = tf.layers.flatten(x)

        x = t.dense(x, self.z_dim, name='enc-fc-1')

        return x
def generator(self, z, reuse=None, is_train=True):
    """
    Same as DCGAN Gen Net
    """
    with tf.variable_scope('generator', reuse=reuse):
        x = t.dense(z, self.gf_dim * 4 * 4 * 4, name='gen-fc-1')

        x = tf.reshape(x, [-1, 4, 4, self.gf_dim * 4])
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 2, 5, 2, name='gen-deconv2d-1')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.gf_dim * 1, 5, 2, name='gen-deconv2d-2')
        x = t.batch_norm(x, is_train=is_train, name='gen-bn-3')
        x = tf.nn.relu(x)

        x = t.deconv2d(x, self.channel, 5, 2, name='gen-deconv2d-3')
        x = tf.nn.tanh(x)

        return x