def generator(self, z, y=None, share_params=False, reuse=False, name=""):
    """CoGAN-style generator: shared front-end trunk, per-domain output head.

    :param z: noise vector
    :param y: labels (unused here)
    :param share_params: re-use the shared-trunk variables across domains
    :param reuse: re-use the per-domain head variables
    :param name: domain tag used in the per-domain scope/layer names
    :return: generated images, sigmoid-activated into [0, 1]
    """
    # Shared trunk — built OUTSIDE the per-domain scope on purpose, so both
    # domain generators reuse these weights via `share_params`.
    x = t.dense(z, self.fc_g_unit, reuse=share_params, name='gen-fc-1')
    x = t.batch_norm(x, reuse=share_params, name='gen-bn-1')
    x = t.prelu(x, reuse=share_params, name='gen-prelu-1')

    x = t.dense(x, self.gf_dim * 8 * 7 * 7, reuse=share_params, name='gen-fc-2')
    x = t.batch_norm(x, reuse=share_params, name='gen-bn-2')
    x = t.prelu(x, reuse=share_params, name='gen-prelu-2')

    x = tf.reshape(x, (-1, 7, 7, self.gf_dim * 8))

    # two shared up-sampling stages (filter count: 4x, then 2x gf_dim)
    for i in range(1, 3):
        x = t.deconv2d(x, f=self.gf_dim * 4 // i, k=3, s=2, reuse=share_params, name='gen-deconv2d-%d' % i)
        x = t.batch_norm(x, reuse=share_params, name="gen-bn-%d" % (i + 2))
        x = t.prelu(x, reuse=share_params, name='gen-prelu-%d' % (i + 2))

    # Per-domain output head.
    with tf.variable_scope("generator-%s" % name, reuse=reuse):
        x = t.deconv2d(x, f=self.channel, k=6, s=1, reuse=False, name='gen-' + name + '-deconv2d-3')
        # fix: op name was 'gen' + name (missing '-' separator), inconsistent
        # with every other per-domain name in this block
        x = tf.nn.sigmoid(x, name='gen-' + name + '-sigmoid-1')

    return x
def discriminator(self, x, reuse=None):
    """SRGAN discriminator.

    # Following a network architecture referred in the paper
    :param x: Input images (-1, 384, 384, 3)
    :param reuse: re-usability
    :return: real/fake logit distinguishing HR from SR images
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        x = t.conv2d(x, self.df_dim, 3, 1, name='n64s1-1')
        x = tf.nn.leaky_relu(x)

        strides = [2, 1]
        filters = [1, 2, 2, 4, 4, 8, 8]

        for i, f in enumerate(filters):
            # fix: the loop passed the bare multiplier (1, 2, 4, 8) as the
            # conv filter count; the paper's n64/n128/n256/n512 blocks need
            # it scaled by df_dim. The layer names now reflect the real width
            # (n64s2-1, n128s1-2, ...) to match the first 'n64s1-1' layer.
            n_filters = self.df_dim * f
            s = strides[i % 2]
            x = t.conv2d(x, f=n_filters, k=3, s=s, name='n%ds%d-%d' % (n_filters, s, i + 1))
            x = t.batch_norm(x, name='n%d-bn-%d' % (n_filters, i + 1))
            x = tf.nn.leaky_relu(x)

        x = tf.layers.flatten(x)  # (-1, 96 * 96 * 64)

        x = t.dense(x, 1024, name='disc-fc-1')
        x = tf.nn.leaky_relu(x)

        # raw logit; sigmoid is left to the loss (e.g. sigmoid_cross_entropy)
        x = t.dense(x, 1, name='disc-fc-2')

        return x
def discriminator(self, x, reuse=None):
    """CIFAR-style D network from the paper.

    :param x: input images
    :param reuse: variable re-use flag
    :return: (class logits, real/fake logit, flattened feature net)
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        h = t.conv2d(x, self.df_dim, 3, 2, name='disc-conv2d-1')
        h = tf.nn.leaky_relu(h, alpha=0.2)
        h = tf.layers.dropout(h, 0.5, name='disc-dropout2d-1')

        # five conv stages: stride alternates 1, 2 while the width doubles
        for idx in range(5):
            h = t.conv2d(h, self.df_dim * (2 ** (idx + 1)), k=3, s=(idx % 2 + 1),
                         name='disc-conv2d-%d' % (idx + 2))
            h = t.batch_norm(h, reuse=reuse, name="disc-bn-%d" % (idx + 1))
            h = tf.nn.leaky_relu(h, alpha=0.2)
            h = tf.layers.dropout(h, 0.5, name='disc-dropout2d-%d' % (idx + 1))

        net = tf.layers.flatten(h)

        # two heads over the shared features: classifier and real/fake critic
        cat = t.dense(net, self.n_classes, name='disc-fc-cat')
        disc = t.dense(net, 1, name='disc-fc-disc')

        return cat, disc, net
def discriminator(self, x, y, do_rate=0.5, reuse=None):
    """Conditional maxout discriminator.

    Each dense layer is reshaped into linear "pieces" and reduced with a max
    (maxout activation) before ReLU + dropout.

    :param x: flattened input images
    :param y: labels, concatenated onto x
    :param do_rate: dropout rate
    :param reuse: variable re-use flag
    :return: probability that the input is real
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        h = tf.concat([x, y], axis=1)

        # maxout block 1: 5 linear pieces per unit
        h = t.dense(h, self.fc_unit * 5, name='disc-fc-1')
        h = tf.reshape(h, (-1, self.fc_unit, 5))
        h = tf.reduce_max(h, axis=-1, keepdims=False, name='disc-maxout-1')
        h = tf.nn.relu(h)
        h = tf.layers.dropout(h, do_rate, name='disc-do-1')

        # maxout block 2: 4 linear pieces per unit
        h = t.dense(h, self.fc_unit * 4, name='disc-fc-2')
        h = tf.reshape(h, (-1, self.fc_unit, 4))
        h = tf.reduce_max(h, axis=-1, keepdims=False, name='disc-maxout-2')
        h = tf.nn.relu(h)
        h = tf.layers.dropout(h, do_rate, name='disc-do-2')

        h = t.dense(h, 1, name='disc-fc-3')
        return tf.sigmoid(h)
def discriminator(self, x, reuse=None):
    """Minimal one-hidden-layer MLP discriminator.

    :param x: flattened input
    :param reuse: variable re-use flag
    :return: single raw real/fake logit
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        x = t.dense(x, self.fc_unit, name='disc-fc-1')
        x = tf.nn.leaky_relu(x)
        # fix: layer name had a typo ('discd-fc-2'), breaking the 'disc-'
        # prefix convention used by every other layer in this file
        x = t.dense(x, 1, name='disc-fc-2')
        return x
def generator(self, z, reuse=None):
    """Two-layer MLP generator mapping noise to flattened images in [0, 1]."""
    with tf.variable_scope("generator", reuse=reuse):
        h = t.dense(z, self.fc_unit, name='gen-fc-1')
        h = tf.nn.leaky_relu(h)
        h = t.dense(h, self.n_input, name='gen-fc-2')
        return tf.nn.sigmoid(h)
def discriminator(self, x, reuse=None):
    """Two-hidden-layer MLP discriminator.

    :return: (sigmoid probability, raw logit)
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        h = x
        for layer in (1, 2):
            h = t.dense(h, self.fc_unit, name='disc-fc-%d' % layer)
            h = tf.nn.leaky_relu(h)
        logits = t.dense(h, 1, name='disc-fc-3')
        return tf.nn.sigmoid(logits), logits
def generator(self, z, reuse=None):
    """Four ReLU hidden layers, sigmoid output over n_input pixels."""
    with tf.variable_scope('generator', reuse=reuse):
        h = z
        for layer in (1, 2, 3, 4):
            h = t.dense(h, self.fc_unit, name='gen-fc-%d' % layer)
            h = tf.nn.relu(h)
        h = t.dense(h, self.n_input, name='gen-fc-5')
        return tf.nn.sigmoid(h)
def discriminator(self, x, reuse=None):
    """Flatten, four leaky-ReLU hidden layers, single-logit head."""
    with tf.variable_scope('discriminator', reuse=reuse):
        h = tf.layers.flatten(x)
        for layer in (1, 2, 3, 4):
            h = t.dense(h, self.fc_unit, name='disc-fc-%d' % layer)
            h = tf.nn.leaky_relu(h)
        return t.dense(h, 1, name='disc-fc-5')
def discriminator(self, x, reuse=None):
    """Two ELU dense layers, then a single-logit head."""
    with tf.variable_scope('discriminator', reuse=reuse):
        h = t.dense(x, self.dfc_unit, name='disc-fc-1')
        h = tf.nn.elu(h)
        h = t.dense(h, self.dfc_unit, name='disc-fc-2')
        h = tf.nn.elu(h)
        # flatten kept for parity with the original graph (no-op on 2-D input)
        h = tf.layers.flatten(h)
        return t.dense(h, 1, name='disc-fc-3')
def generator(self, z, reuse=None, is_train=True):
    """Two batch-normed ReLU layers, sigmoid output.

    :param z: noise
    :param reuse: variable re-use flag
    :param is_train: batch-norm train/inference switch
    :return: generated flattened images in [0, 1]
    """
    with tf.variable_scope('generator', reuse=reuse):
        h = z
        for layer in (1, 2):
            h = t.dense(h, self.gfc_unit, name='gen-fc-%d' % layer)
            h = t.batch_norm(h, is_train=is_train, name='gen-bn-%d' % layer)
            h = tf.nn.relu(h)
        h = t.dense(h, self.n_input, name='gen-fc-3')
        return tf.nn.sigmoid(h)
def generator(self, z, reuse=None, is_train=True):
    """Two batch-normed leaky-ReLU layers, sigmoid output over n_input."""
    with tf.variable_scope("generator", reuse=reuse):
        h = z
        for layer in (1, 2):
            h = t.dense(h, self.fc_unit, name='gen-fc-%d' % layer)
            h = t.batch_norm(h, is_train=is_train, name='gen-bn-%d' % layer)
            h = tf.nn.leaky_relu(h)
        logits = t.dense(h, self.n_input, name='gen-fc-3')
        return tf.nn.sigmoid(logits)
def generator(self, z, y, do_rate=0.5, reuse=None):
    """Conditional MLP generator: concat(z, y) -> two ReLU+dropout layers -> sigmoid.

    :param z: noise
    :param y: labels, concatenated onto z
    :param do_rate: dropout rate
    :param reuse: variable re-use flag
    :return: generated flattened images in [0, 1]
    """
    with tf.variable_scope("generator", reuse=reuse):
        h = tf.concat([z, y], axis=1)

        h = t.dense(h, self.fc_unit * 1, name='gen-fc-1')
        h = tf.nn.relu(h)
        h = tf.layers.dropout(h, do_rate, name='gen-do-1')

        h = t.dense(h, self.fc_unit * 4, name='gen-fc-2')
        h = tf.nn.relu(h)
        h = tf.layers.dropout(h, do_rate, name='gen-do-2')

        h = t.dense(h, self.n_input, name='gen-fc-3')
        return tf.sigmoid(h)
def generator(self, z, c, reuse=None, is_train=True):
    """Conditional deconv generator.

    :param z: 139 z-noise
    :param c: 10 categories * 10 dimensions
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        h = tf.concat([z, c], axis=1)  # (-1, 128 + 1 + 10)

        # project and reshape to a 2x2x512 feature map
        h = t.dense(h, 2 * 2 * 512, name='gen-fc-1')
        h = t.batch_norm(h, is_train=is_train, name='gen-bn-1')
        h = tf.nn.relu(h)
        h = tf.reshape(h, (-1, 2, 2, 512))

        h = t.deconv2d(h, self.gf_dim * 8, 4, 2, name='gen-deconv2d-1')
        h = t.batch_norm(h, is_train=is_train, name='gen-bn-2')
        h = tf.nn.relu(h)

        # remaining up-sampling stages carry no batch-norm, as in the original
        for idx, mult in enumerate((4, 2, 1), start=2):
            h = t.deconv2d(h, self.gf_dim * mult, 4, 2, name='gen-deconv2d-%d' % idx)
            h = tf.nn.relu(h)

        h = t.deconv2d(h, 3, 4, 2, name='gen-deconv2d-5')
        return tf.nn.tanh(h)
def encoder(self, x, reuse=None):
    """Convolutional encoder producing a z_dim embedding.

    :param x: Input images (32x32x3 or 64x64x3)
    :param reuse: re-usable
    :return: embeddings
    """
    with tf.variable_scope('encoder', reuse=reuse):
        # number of repeated blocks scales with image size
        repeat = int(np.log2(self.height)) - 2

        h = t.conv2d(x, f=self.df_dim, name="enc-conv2d-1")
        h = tf.nn.elu(h)

        for i in range(1, repeat + 1):
            f = self.df_dim * i
            h = t.conv2d(h, f, 3, 1, name="enc-conv2d-%d" % (i * 2))
            h = tf.nn.elu(h)
            h = t.conv2d(h, f, 3, 1, name="enc-conv2d-%d" % (i * 2 + 1))
            h = tf.nn.elu(h)
            if i < repeat:
                # down-sample with a strided conv ("conv-pool"); chosen over
                # max-pool / avg-pool for speed
                h = t.conv2d(h, f, 3, 2, name='enc-conv2d-pool-%d' % i)
                h = tf.nn.elu(h)

        h = t.flatten(h)
        return t.dense(h, self.z_dim, name='enc-fc-1')  # normally, (-1, 128)
def generator(self, z, y=None, reuse=None, is_train=True):
    """Deconv generator (labels not supported yet).

    :param z: embeddings
    :param y: labels
    :param reuse: re-usable
    :param is_train: en/disable batch_norm, default True
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        # fix: `if y:` on a tf.Tensor raises TF's "tensor as Python bool"
        # TypeError instead of the intended NotImplementedError;
        # compare against None explicitly.
        if y is not None:
            raise NotImplementedError("[-] Not Implemented Yet...")

        x = t.dense(z, f=self.fc_unit, name='gen-fc-1')
        x = tf.nn.leaky_relu(x)
        x = tf.reshape(x, [-1, 8, 8, self.fc_unit // (8 * 8)])

        for i in range(1, 4):
            x = t.deconv2d(x, f=self.gf_dim * (2**i), name="gen-conv2d-%d" % i)
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-%d' % i)
            x = tf.nn.leaky_relu(x)

        x = t.deconv2d(x, f=3, s=1, name="gen-conv2d-4")  # (-1, 64, 64, 3)
        x = tf.sigmoid(x)  # [0, 1]

        return x
def generator(self, z, reuse=None, is_train=True):
    """DCGAN-style generator.

    # referred architecture in the paper :
    (1024)4c - (512)4c2s - (256)4c2s - (128)4c2s - (3)4c2s
    :param z: embeddings
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        # project and reshape to a 4x4 map with 8x gf_dim channels
        h = t.dense(z, self.gf_dim * 8 * 4 * 4, name='gen-fc-1')
        h = tf.reshape(h, (-1, 4, 4, self.gf_dim * 8))
        h = t.batch_norm(h, is_train=is_train, name='gen-bn-1')
        h = tf.nn.relu(h)

        # three up-sampling stages; channels halve each time (4x, 2x, 1x)
        for idx, mult in enumerate((4, 2, 1), start=1):
            h = t.deconv2d(h, self.gf_dim * mult, 4, 2, name='gen-deconv2d-%d' % idx)
            h = t.batch_norm(h, is_train=is_train, name='gen-bn-%d' % (idx + 1))
            h = tf.nn.relu(h)

        h = t.deconv2d(h, self.channel, 4, 2, name='gen-deconv2d-4')
        return tf.nn.tanh(h)
def discriminator(self, x, y=None, reuse=None, is_train=True):
    """Conv discriminator returning features for feature matching plus logit.

    :param x: images
    :param y: labels
    :param reuse: re-usable
    :param is_train: en/disable batch_norm, default True
    :return: fm, logits
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        # fix: `if y:` on a tf.Tensor raises TF's "tensor as Python bool"
        # TypeError instead of the intended NotImplementedError;
        # compare against None explicitly.
        if y is not None:
            raise NotImplementedError("[-] Not Implemented Yet...")

        x = t.conv2d(x, f=self.gf_dim * 1, name="disc-conv2d-1")
        x = tf.nn.leaky_relu(x)

        for i in range(1, 4):
            x = t.conv2d(x, f=self.gf_dim * (2**i), name="disc-conv2d-%d" % (i + 1))
            x = t.batch_norm(x, is_train=is_train, name='disc-bn-%d' % (i + 1))
            x = tf.nn.leaky_relu(x)

        feature_match = x  # (-1, 8, 8, 512)

        x = t.flatten(x)
        x = t.dense(x, 1, name='disc-fc-1')

        return feature_match, x
def generator(self, z, y, reuse=None, is_train=True):
    """CIFAR-style conditional G network from the paper.

    :param z: noise
    :param y: image label
    :param reuse: re-usable
    :param is_train: trainable
    :return: prob
    """
    with tf.variable_scope("generator", reuse=reuse):
        h = tf.concat([z, y], axis=1)  # (-1, 110)

        h = t.dense(h, self.gf_dim, name='gen-fc-1')
        h = tf.nn.relu(h)
        h = tf.reshape(h, (-1, 4, 4, 24))

        # two up-sampling stages; channels halve each time
        for stage in (1, 2):
            h = t.deconv2d(h, self.gf_dim // (2 ** stage), 5, 2, name='gen-deconv2d-%d' % (stage + 1))
            h = t.batch_norm(h, is_train=is_train, reuse=reuse, name="gen-bn-%d" % stage)
            h = tf.nn.relu(h)

        h = t.deconv2d(h, self.channel, 5, 2, name='gen-deconv2d-4')
        return tf.nn.tanh(h)  # scaling to [-1, 1]
def generator(self, x, y, z, scale=32, reuse=None, do_rate=0.5):
    """Stage generator of a Laplacian-pyramid GAN.

    :param x: images to fake
    :param y: classes
    :param z: noise
    :param scale: image size
    :param reuse: variable re-use
    :param do_rate: dropout rate
    :return: logits
    """
    assert scale % 8 == 0  # 32, 16, 8

    with tf.variable_scope('generator_{0}'.format(scale), reuse=reuse):
        if scale == 8:
            # base level: fully-connected path from (z, y) to an 8x8 image
            h = tf.concat([z, y], axis=1)

            h = t.dense(h, self.g_fc_unit, name='gen-fc-1')
            h = tf.nn.relu(h)
            h = tf.layers.dropout(h, do_rate, name='gen-dropout-1')

            h = t.dense(h, self.g_fc_unit, name='gen-fc-2')
            h = tf.nn.relu(h)
            h = tf.layers.dropout(h, do_rate, name='gen-dropout-2')

            h = t.dense(h, self.channel * 8 * 8, name='gen-fc-3')
            h = tf.reshape(h, [-1, 8, 8, self.channel])
        else:
            # refinement level: broadcast y and z into image planes, stack with x
            y = t.dense(y, scale * scale, name='gen-fc-y')
            y = tf.reshape(y, [-1, scale, scale, 1])
            z = tf.reshape(z, [-1, scale, scale, 1])

            h = tf.concat([z, y, x], axis=3)  # concat along channels

            h = t.conv2d(h, self.gf_dim * 1, 5, 1, name='gen-deconv2d-1')
            h = tf.nn.relu(h)
            h = t.conv2d(h, self.gf_dim * 1, 5, 1, name='gen-deconv2d-2')
            h = tf.nn.relu(h)
            h = t.conv2d(h, self.channel, 5, 1, name='gen-conv2d-3')

        return tf.nn.tanh(h)
def discriminator(self, x, scope_name, reuse=None):
    """Strided-conv discriminator built under a caller-chosen scope.

    :param x: input images, e.g. (-1, 64, 64, 3)
    :param scope_name: variable scope to build under
    :param reuse: variable re-use flag
    :return: sigmoid probability that the input is real
    """
    with tf.variable_scope("%s" % scope_name, reuse=reuse):
        x = t.conv2d(x, f=self.df_dim, k=4, s=1)  # 64 x 64 x 3
        x = tf.nn.leaky_relu(x)

        # fix: np.log2 returns a float and range() requires an int, so the
        # original `range(np.log2(...) - 2)` raised TypeError at graph build.
        n_blocks = int(np.log2(int(x.get_shape()[1]))) - 2
        for i in range(n_blocks):  # 0 ~ 3
            x = t.conv2d(x, self.df_dim * (2**(i + 1)), k=4, s=2)
            x = t.batch_norm(x)
            x = tf.nn.leaky_relu(x)

        # (-1, 4, 4, 512)
        x = tf.layers.flatten(x)

        x = t.dense(x, 512)
        x = tf.nn.leaky_relu(x)

        x = t.dense(x, 1)
        return tf.sigmoid(x)
def discriminator(self, x, reuse=None):
    """InfoGAN-style discriminator: conv trunk + joint head split into
    (real/fake probability, continuous codes, categorical codes).

    :param x: images
    :param reuse: re-usable
    :return: (prob, cont logits, cat softmax)
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        h = t.conv2d(x, self.df_dim * 1, 4, 2, name='disc-conv2d-1')
        h = tf.nn.leaky_relu(h, alpha=0.1)

        # three batch-normed conv stages; channels double each time
        for idx, mult in enumerate((2, 4, 8), start=2):
            h = t.conv2d(h, self.df_dim * mult, 4, 2, name='disc-conv2d-%d' % idx)
            h = t.batch_norm(h, name='disc-bn-%d' % (idx - 1))
            h = tf.nn.leaky_relu(h, alpha=0.1)

        h = tf.layers.flatten(h)

        h = t.dense(h, self.fc_unit, name='disc-fc-1')
        h = t.batch_norm(h, name='disc-bn-4')
        h = tf.nn.leaky_relu(h, alpha=0.1)

        # one joint head; slice into the three outputs
        h = t.dense(h, 1 + self.n_cont + self.n_cat, name='disc-fc-2')

        prob = tf.nn.sigmoid(h[:, 0])                # probability
        cont = h[:, 1:1 + self.n_cont]               # continuous-code logits
        cat = tf.nn.softmax(h[:, 1 + self.n_cont:])  # categories

        return prob, cont, cat
def discriminator(self, x, y=None, share_params=False, reuse=False, name=""):
    """CoGAN-style discriminator: per-domain conv front-end, shared dense tail.

    :param x: flattened input images
    :param y: labels (unused here)
    :param share_params: share the dense-tail variables across domains
    :param reuse: re-use the per-domain variables
    :param name: domain tag used in the per-domain scope/layer names
    :return: real/fake logit
    """
    with tf.variable_scope("discriminator-%s" % name, reuse=reuse):
        h = tf.reshape(x, (-1, self.height, self.width, self.channel))

        # domain-specific convolutional feature extractor
        h = t.conv2d(h, f=self.df_dim * 1, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-1')
        h = t.prelu(h, reuse=False, name='disc-' + name + '-prelu-1')

        h = t.conv2d(h, f=self.df_dim * 2, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-2')
        h = t.batch_norm(h, reuse=False, name='disc-' + name + '-bn-1')
        h = t.prelu(h, reuse=False, name='disc-' + name + '-prelu-2')

        h = tf.layers.flatten(h)

        # dense tail, optionally shared between domains via share_params
        h = t.dense(h, self.fc_d_unit, reuse=share_params, name='disc-fc-1')
        h = t.batch_norm(h, reuse=share_params, name='disc-bn-2')
        h = t.prelu(h, reuse=share_params, name='disc-prelu-3')

        return t.dense(h, 1, reuse=share_params, name='disc-fc-2')
def discriminator(self, x, reuse=None):
    """Three strided conv stages, then a single-logit dense head."""
    with tf.variable_scope('discriminator', reuse=reuse):
        h = x
        for stage in (1, 2, 3):
            h = t.conv2d(h, self.gf_dim * (2**(stage - 1)), 3, 2, name='disc-conv2d-%d' % stage)
            h = t.batch_norm(h, name='disc-bn-%d' % stage)
            h = tf.nn.leaky_relu(h, alpha=0.3)

        h = tf.layers.flatten(h)
        return t.dense(h, 1, name='disc-fc-1')
def generator(self, z, scope_name, reuse=None, is_train=True):
    """Deconv generator built under a caller-chosen scope.

    :param z: noise
    :param scope_name: variable scope to build under
    :param reuse: variable re-use flag
    :param is_train: batch-norm train/inference switch
    :return: generated image tensor (no output activation applied)
    """
    with tf.variable_scope("%s" % scope_name, reuse=reuse):
        x = t.dense(z, 4 * 4 * 8 * self.gf_dim)
        x = tf.nn.leaky_relu(x)

        x = tf.layers.flatten(x)
        x = tf.reshape(x, (-1, 4, 4, 8))

        # fix: np.log2 returns a float and range() requires an int, so the
        # original `range(np.log2(self.height) - 2)` raised TypeError.
        n_blocks = int(np.log2(self.height)) - 2
        for i in range(n_blocks):  # 0 ~ 3
            x = t.deconv2d(x, self.gf_dim * (2**(i + 1)), k=4, s=2)
            x = t.batch_norm(x, is_train=is_train)
            x = tf.nn.leaky_relu(x)

        x = t.conv2d(x, 3)
        return x
def discriminator(self, x, reuse=None):
    """Two batch-normed conv stages, then a sigmoid real/fake head.

    :return: (sigmoid probability, raw logit)
    """
    with tf.variable_scope("discriminator", reuse=reuse):
        h = x
        for stage in (1, 2):
            h = t.conv2d(h, self.df_dim * stage, 5, 2, name='disc-conv2d-%d' % stage)
            h = t.batch_norm(h, name='disc-bn-%d' % stage)
            h = tf.nn.leaky_relu(h, alpha=0.3)

        h = t.flatten(h)

        logits = t.dense(h, 1, name='disc-fc-1')
        return tf.nn.sigmoid(logits), logits
def discriminator(self, x, reuse=None):
    """Three-stage conv discriminator over flattened image input."""
    with tf.variable_scope('discriminator', reuse=reuse):
        h = tf.reshape(x, (-1, self.height, self.width, self.channel))

        h = t.conv2d(h, self.df_dim, 5, 2, name='disc-conv2d-1')
        h = tf.nn.leaky_relu(h)

        # two more strided conv stages with batch-norm
        for stage in (1, 2):
            h = t.conv2d(h, self.df_dim, 5, 2, name='disc-conv2d-%d' % (stage + 1))
            h = t.batch_norm(h, reuse=reuse, name='disc-bn-%d' % stage)
            h = tf.nn.leaky_relu(h)

        h = t.flatten(h)
        return t.dense(h, 1, name='disc-fc-1')
def generator(self, z, reuse=None):
    """Project-and-reshape, three up-sampling deconvs, tanh output."""
    with tf.variable_scope('generator', reuse=reuse):
        h = t.dense(z, self.gf_dim * 4 * 4 * 4, name='gen-fc-1')
        h = t.batch_norm(h, reuse=reuse, name='gen-bn-1')
        h = tf.nn.relu(h)
        h = tf.reshape(h, (-1, 4, 4, self.gf_dim * 4))

        # channels halve each stage: 4x, 2x, 1x gf_dim
        for stage in (1, 2, 3):
            h = t.deconv2d(h, self.gf_dim * 4 // (2 ** (stage - 1)), 5, 2, name='gen-deconv2d-%d' % stage)
            h = t.batch_norm(h, reuse=reuse, name='gen-bn-%d' % (stage + 1))
            h = tf.nn.relu(h)

        h = t.deconv2d(h, self.channel, 5, 1, name='gen-deconv2d-5')
        return tf.nn.tanh(h)
def discriminator(self, x, reuse=None):
    """Same as DCGAN Disc Net.

    :return: (sigmoid probability, raw logit)
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        h = t.conv2d(x, self.df_dim * 1, 5, 2, name='disc-conv2d-1')
        h = tf.nn.leaky_relu(h)

        # two batch-normed conv stages; channels double each time
        for idx, mult in enumerate((2, 4), start=2):
            h = t.conv2d(h, self.df_dim * mult, 5, 2, name='disc-conv2d-%d' % idx)
            h = t.batch_norm(h, name='disc-bn-%d' % (idx - 1))
            h = tf.nn.leaky_relu(h)

        h = tf.layers.flatten(h)

        logits = t.dense(h, 1, name='disc-fc-1')
        return tf.nn.sigmoid(logits), logits
def generator(self, z, reuse=None, is_train=True):
    """Same as DCGAN Gen Net."""
    with tf.variable_scope('generator', reuse=reuse):
        # project and reshape to a 4x4 map with 4x gf_dim channels
        h = t.dense(z, self.gf_dim * 4 * 4 * 4, name='gen-fc-1')
        h = tf.reshape(h, [-1, 4, 4, self.gf_dim * 4])
        h = t.batch_norm(h, is_train=is_train, name='gen-bn-1')
        h = tf.nn.relu(h)

        # two up-sampling stages; channels halve each time
        for idx, mult in enumerate((2, 1), start=1):
            h = t.deconv2d(h, self.gf_dim * mult, 5, 2, name='gen-deconv2d-%d' % idx)
            h = t.batch_norm(h, is_train=is_train, name='gen-bn-%d' % (idx + 1))
            h = tf.nn.relu(h)

        h = t.deconv2d(h, self.channel, 5, 2, name='gen-deconv2d-3')
        return tf.nn.tanh(h)