def __init__(self, config, weight_init):
    """Build the discriminator's layer stack.

    Args:
        config: experiment configuration; this constructor reads
            `device`, `gen_disc_ch`, `conditional`, and `num_classes`.
        weight_init: kernel initializer shared by every weighted layer.
    """
    super().__init__()
    # Pin layer creation to the configured device and group the
    # variables under a single name scope.
    with tf.device('{}:*'.format(config.device)), tf.name_scope('discriminator_f'):
        self.weight_init = weight_init
        # Base channel width; deeper blocks widen by powers of two.
        self.ch = config.gen_disc_ch

        # Class-conditional projection embedding (only built when the
        # model is conditional).
        if config.conditional:
            self.emb = tf.keras.layers.Embedding(config.num_classes, self.ch * 4)

        # Downsampling residual stack: ch -> 2*ch -> 4*ch, with
        # self-attention applied after the first downsample.
        self.res_down0 = ops.resblock_down(
            channels=self.ch,
            config=config,
            weight_init=self.weight_init,
            use_bias=False)
        self.bn0 = BatchNormalization()
        self.att0 = ops.Attention(ch=self.ch, config=config)
        self.res_down1 = ops.resblock_down(
            channels=self.ch * 2,
            config=config,
            weight_init=self.weight_init,
            use_bias=False)
        self.res_down2 = ops.resblock_down(
            channels=self.ch * 4,
            config=config,
            weight_init=self.weight_init,
            use_bias=False)

        # Resolution-preserving residual block at the widest width.
        self.res0 = ops.resblock(
            channels=self.ch * 4,
            config=config,
            weight_init=self.weight_init,
            use_bias=False)

        # Final scalar logit head.
        self.dense0 = Dense(
            units=1,
            use_bias=True,
            kernel_initializer=self.weight_init)
def encoder(inputs, scope="encoder", is_training=True, reuse=False,
            shared_scope="shared_encoder", shared_reuse=False):
    """Define the encoder network.

    Args:
        inputs: input images.
        scope: name of the encoder variable scope.
        is_training: whether this is the training phase.
        reuse: reuse variables of `scope`.
        shared_scope: name of the shared-encoder variable scope.
        shared_reuse: reuse variables of `shared_scope`.

    Returns:
        Encoded feature map with Gaussian noise applied.
    """
    with tf.variable_scope(scope, reuse=reuse):
        channel = params.encoder.channel
        net = ops.conv(
            inputs, scope="conv1", dim=channel, kernel_size=[7, 7], stride=1,
            activation_fn=ops.leaky_relu, is_training=is_training,
            weights_initializer=params.encoder.weights_initializer)
        # Strided convolutions: double the channels at each downsample.
        # NOTE: `n_enconder` is the (misspelled) attribute name on the
        # shared params object; it cannot be renamed here.
        for i in range(1, params.encoder.n_enconder):
            channel *= 2
            net = ops.conv(
                net, scope="conv_{}".format(i + 1), dim=channel,
                kernel_size=[3, 3], stride=2, activation_fn=ops.leaky_relu,
                is_training=is_training,
                weights_initializer=params.encoder.weights_initializer)
        # All residual blocks except the last one live in the
        # domain-specific scope.
        for i in range(params.encoder.n_resblock - 1):
            net = ops.resblock(
                net, scope="resblock_{}".format(i + 1), dim=channel,
                kernel_size=[3, 3], stride=1,
                norm_fn=params.encoder.norm_fn, is_training=is_training,
                weights_initializer=params.encoder.weights_initializer,
                dropout_ratio=params.encoder.dropout_ratio)
    with tf.variable_scope(shared_scope, reuse=shared_reuse):
        # BUG FIX: was `chanel = params.decoder.channel` — the typo made
        # the assignment dead, so the shared block silently reused the
        # encoder's last channel width. The sibling `attention` network
        # shows the intended `channel = params.decoder.channel` pattern.
        channel = params.decoder.channel
        net = ops.resblock(
            net, scope="resblock_{}".format(params.encoder.n_resblock),
            dim=channel, kernel_size=[3, 3], stride=1,
            norm_fn=params.encoder.norm_fn, is_training=is_training,
            weights_initializer=params.encoder.weights_initializer,
            dropout_ratio=params.encoder.dropout_ratio)
    with tf.variable_scope(scope, reuse=reuse):
        # Inject Gaussian noise on the latent features (VAE-style
        # stochasticity used by UNIT-like models).
        net = ops.gaussian_noise_layer(net)
    return net
def attention(inputs, scope="attention", is_training=True, reuse=False,
              shared_scope="shared_attention", shared_reuse=False):
    """Define the attention network.

    Encoder-decoder shaped: downsampling convs + residual blocks, then
    mirrored residual blocks + transposed convs, ending in a 1-channel
    sigmoid attention map.

    Args:
        inputs: input images.
        scope: name of the attention variable scope.
        is_training: whether this is the training phase.
        reuse: reuse variables of `scope`.
        shared_scope: name of the shared-attention variable scope.
        shared_reuse: reuse variables of `shared_scope`.

    Returns:
        A sigmoid-activated single-channel attention map.
    """
    with tf.variable_scope(scope, reuse=reuse):
        channel = params.encoder.channel
        net = ops.conv(
            inputs, scope="conv1", dim=channel, kernel_size=[7, 7], stride=1,
            activation_fn=ops.leaky_relu, is_training=is_training,
            weights_initializer=params.encoder.weights_initializer)
        # Encoder half: strided convs double the channel width.
        for i in range(1, params.encoder.n_enconder):
            channel *= 2
            net = ops.conv(
                net, scope="conv_{}".format(i + 1), dim=channel,
                kernel_size=[3, 3], stride=2, activation_fn=ops.leaky_relu,
                is_training=is_training,
                weights_initializer=params.encoder.weights_initializer)
        for i in range(params.encoder.n_resblock):
            net = ops.resblock(
                net, scope="resblock_{}".format(i + 1), dim=channel,
                kernel_size=[3, 3], stride=1,
                norm_fn=params.encoder.norm_fn, is_training=is_training,
                weights_initializer=params.encoder.weights_initializer,
                dropout_ratio=params.encoder.dropout_ratio)
        # Decoder half: residual blocks at the decoder width, scopes
        # numbered in descending order to mirror the encoder.
        channel = params.decoder.channel
        for i in range(params.decoder.n_resblock):
            net = ops.resblock(
                net,
                scope="deresblock_{}".format(params.decoder.n_resblock - i),
                dim=channel, kernel_size=[3, 3], stride=1,
                norm_fn=params.encoder.norm_fn, is_training=is_training,
                weights_initializer=params.encoder.weights_initializer,
                dropout_ratio=params.encoder.dropout_ratio)
        for i in range(1, params.decoder.n_decoder):
            # BUG FIX: was `channel = channel / 2`, which is true division
            # in Python 3 and passed a float as the layer's channel count.
            # Floor division keeps `dim` an int, matching the integer
            # `channel *= 2` arithmetic on the encoder side.
            channel //= 2
            net = ops.deconv(
                net,
                scope="deconv_{}".format(params.decoder.n_decoder - i + 1),
                dim=channel, kernel_size=[3, 3], stride=2,
                activation_fn=ops.leaky_relu, is_training=is_training,
                weights_initializer=params.decoder.weights_initializer)
        # Final 1x1 projection to a single sigmoid attention channel.
        net = ops.deconv(
            net, scope="deconv_1", dim=1, kernel_size=[1, 1], stride=1,
            activation_fn=ops.sigmoid, is_training=is_training,
            weights_initializer=params.decoder.weights_initializer)
    return net