def __call__(self, input, z):
    """Build a U-Net style generator conditioned on a latent code.

    The latent vector `z` is broadcast over the spatial grid and fused with
    `input` channel-wise, then passed through a strided-conv encoder and a
    mirrored deconv decoder with skip connections.

    Args:
        input: image batch tensor; assumes shape (batch, H, W, C) with
            H == W == self._image_size — TODO confirm against callers.
        z: latent code tensor whose last dimension is the latent size.

    Returns:
        Generated image tensor with 3 channels and tanh-bounded values.
    """
    with tf.variable_scope(self.name, reuse=self._reuse):
        batch_size = int(input.get_shape()[0])
        latent_dim = int(z.get_shape()[-1])

        # Encoder channel plan; one extra stage for 256x256 inputs.
        channel_plan = [64, 128, 256, 512, 512, 512, 512]
        if self._image_size == 256:
            channel_plan.append(512)

        # Tile the latent code across every spatial position and concat it
        # onto the input channels.
        z_map = tf.reshape(z, [batch_size, 1, 1, latent_dim])
        z_map = tf.tile(z_map, [1, self._image_size, self._image_size, 1])
        net = tf.concat([input, z_map], axis=3)

        # Encoder: stride-2 conv stages; the first stage skips normalization.
        encoder_outputs = []
        for idx, channels in enumerate(channel_plan):
            net = ops.conv_block(net, channels, 'C{}_{}'.format(channels, idx),
                                 4, 2, self._is_train, self._reuse,
                                 norm=self._norm if idx > 0 else None,
                                 activation='leaky')
            encoder_outputs.append(net)

        # Decoder mirrors the encoder minus the bottleneck stage, fusing the
        # matching encoder output (skip connection) after each upsample.
        skips = encoder_outputs[:-1]
        for idx, channels in enumerate(reversed(channel_plan[:-1])):
            net = ops.deconv_block(net, channels, 'CD{}_{}'.format(channels, idx),
                                   4, 2, self._is_train, self._reuse,
                                   norm=self._norm, activation='relu')
            net = tf.concat([net, skips.pop()], axis=3)

        # Final upsample to a 3-channel image in [-1, 1] (tanh).
        net = ops.deconv_block(net, 3, 'last_layer', 4, 2, self._is_train,
                               self._reuse, norm=None, activation='tanh')

        # First build creates the variables; later builds reuse them.
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          self.name)
        return net
def image_decoder(input, is_training, reuse=True):
    """Decode a flat feature vector into a tanh-bounded RGB image.

    Projects `input` to 1024 features, reshapes to a 4x4x64 spatial seed, and
    upsamples through four stride-2 deconv stages (4 -> 8 -> 16 -> 32 -> 64).

    Args:
        input: feature tensor; assumes shape (batch, features) so the dense
            projection is well-defined — TODO confirm against callers.
        is_training: bool tensor/flag forwarded to the deconv blocks
            (batch-norm mode).
        reuse: variable-scope reuse flag for the deconv blocks. Defaults to
            True to preserve the original behavior, but note that TF1 raises
            when reusing a scope whose variables were never created — pass
            False on the first call that builds the graph.

    Returns:
        Generated 3-channel image tensor with values in [-1, 1].
    """
    batch_size = int(input.get_shape()[0])
    # Project to 4*4*64 = 1024 features and reshape into a spatial seed map.
    z = tf.layers.dense(input, 1024, activation=tf.nn.relu)
    G = tf.reshape(z, [batch_size, 4, 4, 64])
    # Three stride-2 upsampling stages with shrinking channel counts.
    for i, n in enumerate([32, 16, 8]):
        G = ops.deconv_block(G, n, 'CD{}_{}'.format(n, i), 4, 2, is_training,
                             reuse=reuse, norm='batch', activation='relu')
    # Final upsample to 3 channels, tanh output, no normalization.
    G = ops.deconv_block(G, 3, 'last_layer', 4, 2, is_training, reuse=reuse,
                         norm=None, activation='tanh')
    return G
def __call__(self, input_op):
    """Build a ResNet-style generator graph for `input_op`.

    Encoder (7x7 REFLECT-padded stem + two stride-2 convs) -> `block_size`
    residual blocks at 128 channels -> decoder (two stride-2 deconvs + 7x7
    conv to 3 channels with tanh).

    Args:
        input_op: input image batch tensor.

    Returns:
        The generated image tensor (also stored on `self.gen`).
    """
    # NOTE: reverted to the magic-method (__call__) implementation — more concise.
    with tf.variable_scope(self.name):
        conv1 = ops.conv_block(input_op, 32, 'conv1', 7, 1, self.is_train, self.reuse, self.norm, self.activation, pad='REFLECT')
        conv2 = ops.conv_block(conv1, 64, 'conv2', 3, 2, self.is_train, self.reuse, self.norm, self.activation)
        res = ops.conv_block(conv2, 128, 'conv3', 3, 2, self.is_train, self.reuse, self.norm, self.activation)
        for i in range(self.block_size):
            res = ops.residual_block(res, 128, 'res' + str(i), self.is_train, self.reuse, self.norm)
        deconv1 = ops.deconv_block(res, 64, 'deconv1', 3, 2, self.is_train, self.reuse, self.norm, self.activation)
        deconv2 = ops.deconv_block(deconv1, 32, 'deconv2', 3, 2, self.is_train, self.reuse, self.norm, self.activation)
        self.gen = ops.conv_block(deconv2, 3, 'conv_end', 7, 1, self.is_train, self.reuse, norm=None, activation=tf.nn.tanh, pad='REFLECT')
        # First build creates the variables; later builds reuse them.
        self.reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        # Fix: return the output like the sibling generator __call__ methods;
        # previously callers could only read self.gen, and the call expression
        # evaluated to None.
        return self.gen
def __call__(self, input):
    """Build the c7s1-style generator graph (CycleGAN naming convention).

    Pipeline: 7x7 REFLECT-padded stem -> two stride-2 downsampling convs ->
    `self._num_res_block` residual blocks -> two stride-2 deconvs -> 7x7
    conv to 3 channels with tanh output.

    Args:
        input: input image batch tensor.

    Returns:
        Generated 3-channel image tensor with values in [-1, 1].
    """
    with tf.variable_scope(self.name, reuse=self._reuse):
        # Encoder: wide stem then two downsampling stages.
        net = ops.conv_block(input, 32, 'c7s1-32', 7, 1, self._is_train,
                             self._reuse, self._norm, self._activation,
                             pad='REFLECT')
        net = ops.conv_block(net, 64, 'd64', 3, 2, self._is_train,
                             self._reuse, self._norm, self._activation)
        net = ops.conv_block(net, 128, 'd128', 3, 2, self._is_train,
                             self._reuse, self._norm, self._activation)

        # Transformation: residual stack at 128 channels.
        for block_idx in range(self._num_res_block):
            net = ops.residual(net, 128, 'R128_{}'.format(block_idx),
                               self._is_train, self._reuse, self._norm)

        # Decoder: two upsampling stages, then project to RGB with tanh.
        net = ops.deconv_block(net, 64, 'u64', 3, 2, self._is_train,
                               self._reuse, self._norm, self._activation)
        net = ops.deconv_block(net, 32, 'u32', 3, 2, self._is_train,
                               self._reuse, self._norm, self._activation)
        net = ops.conv_block(net, 3, 'c7s1-3', 7, 1, self._is_train,
                             self._reuse, norm=None, activation='tanh',
                             pad='REFLECT')

        # First build creates the variables; later builds reuse them.
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          self.name)
        return net