Example #1: DiscriminatorBlock.__init__
    def __init__(self, n_f0, n_f1, reduce_height, in_h_res, in_w_res,
                 **kwargs):
        super(DiscriminatorBlock, self).__init__(**kwargs)
        self.gain = 1.0
        self.lrmul = 1.0
        self.n_f0 = n_f0
        self.n_f1 = n_f1
        self.reduce_height = reduce_height
        self.in_h_res = in_h_res
        self.in_w_res = in_w_res

        # scale applied when summing the residual and main branches (variance-preserving)
        self.resnet_scale = 1.0 / tf.sqrt(2.0)

        # conv_0
        self.conv_0 = Conv2D(
            in_fmaps=self.n_f0,
            out_fmaps=self.n_f0,
            kernel=3,
            down=False,
            resample_kernel=None,
            gain=self.gain,
            lrmul=self.lrmul,
            name="conv_0",
        )
        self.apply_bias_act_0 = BiasAct(lrmul=self.lrmul,
                                        act="lrelu",
                                        name="bias_0")

        # conv_1 down
        self.conv_1 = Conv2D(
            in_fmaps=self.n_f0,
            out_fmaps=self.n_f1,
            kernel=3,
            down=True,
            resample_kernel=[1, 3, 3, 1],
            gain=self.gain,
            lrmul=self.lrmul,
            name="conv_1",
            reduce_height=self.reduce_height,
            in_h_res=self.in_h_res,
            in_w_res=self.in_w_res,
        )
        self.apply_bias_act_1 = BiasAct(lrmul=self.lrmul,
                                        act="lrelu",
                                        name="bias_1")

        # resnet skip
        self.conv_skip = Conv2D(
            in_fmaps=self.n_f0,
            out_fmaps=self.n_f1,
            kernel=1,
            down=True,
            resample_kernel=[1, 3, 3, 1],
            gain=self.gain,
            lrmul=self.lrmul,
            name="skip",
            reduce_height=self.reduce_height,
            in_h_res=self.in_h_res,
            in_w_res=self.in_w_res,
        )
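
The block's forward pass is not part of this excerpt; below is a minimal sketch of how these layers are typically wired in a StyleGAN2-style residual discriminator block (the call signature and tensor names are assumptions, not the source's code):

    def call(self, x, training=None):
        # residual branch: 1x1 conv with downsampling, no bias or activation
        residual = self.conv_skip(x)

        # main branch: 3x3 conv, then 3x3 conv with downsampling
        x = self.conv_0(x)
        x = self.apply_bias_act_0(x)
        x = self.conv_1(x)
        x = self.apply_bias_act_1(x)

        # sum the branches and rescale by 1/sqrt(2) to keep variance roughly constant
        return (x + residual) * self.resnet_scale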
Example #2: DiscriminatorLastBlock.__init__
    def __init__(self, n_f0, n_f1, **kwargs):
        super(DiscriminatorLastBlock, self).__init__(**kwargs)
        self.gain = 1.0
        self.lrmul = 1.0
        self.n_f0 = n_f0
        self.n_f1 = n_f1

        self.minibatch_std = MinibatchStd(group_size=4,
                                          num_new_features=1,
                                          name="minibatchstd")

        # conv_0
        self.conv_0 = Conv2D(
            in_fmaps=self.n_f0 + 1,  # +1 channel added by minibatch stddev
            out_fmaps=self.n_f0,
            kernel=3,
            down=False,
            resample_kernel=None,
            gain=self.gain,
            lrmul=self.lrmul,
            name="conv_0",
        )
        self.apply_bias_act_0 = BiasAct(lrmul=self.lrmul,
                                        act="lrelu",
                                        name="bias_0")

        # dense_1
        self.dense_1 = Dense(self.n_f1,
                             gain=self.gain,
                             lrmul=self.lrmul,
                             name="dense_1")
        self.apply_bias_act_1 = BiasAct(lrmul=self.lrmul,
                                        act="lrelu",
                                        name="bias_1")
Example #3: SynthesisBlock.__init__
    def __init__(self, in_ch, out_fmaps, out_h_res, out_w_res, kernel_shape,
                 **kwargs):
        super(SynthesisBlock, self).__init__(**kwargs)
        self.in_ch = in_ch
        self.fmaps = out_fmaps
        self.gain = 1.0
        self.lrmul = 1.0

        self.out_h_res = out_h_res
        self.out_w_res = out_w_res
        self.kernel_shape = kernel_shape

        # conv0 up
        self.conv_0 = ModulatedConv2D(
            in_fmaps=self.in_ch,
            out_fmaps=self.fmaps,
            kernel_shape=self.kernel_shape,
            up=True,
            demodulate=True,
            resample_kernel=[1, 3, 3, 1],
            gain=self.gain,
            lrmul=self.lrmul,
            fused_modconv=True,
            # conv_0 upsamples, so its input is at half the output resolution
            in_h_res=self.out_h_res // 2,
            in_w_res=self.out_w_res // 2,
            name="conv_0",
        )
        self.apply_noise_0 = Noise(name="noise_0")
        self.apply_bias_act_0 = BiasAct(lrmul=self.lrmul,
                                        act="lrelu",
                                        name="bias_0")

        # conv block
        self.conv_1 = ModulatedConv2D(
            in_w_res=self.out_w_res,
            in_h_res=self.out_h_res,
            in_fmaps=self.fmaps,
            out_fmaps=self.fmaps,
            kernel_shape=self.kernel_shape,
            up=False,
            demodulate=True,
            resample_kernel=[1, 3, 3, 1],
            gain=self.gain,
            lrmul=self.lrmul,
            fused_modconv=True,
            name="conv_1",
        )
        self.apply_noise_1 = Noise(name="noise_1")
        self.apply_bias_act_1 = BiasAct(lrmul=self.lrmul,
                                        act="lrelu",
                                        name="bias_1")
Example #4: FromRGB.__init__
    def __init__(self, fmaps, h_res, w_res, **kwargs):
        super(FromRGB, self).__init__(**kwargs)
        self.fmaps = fmaps
        self.h_res = h_res
        self.w_res = w_res

        self.conv = Conv2D(
            in_fmaps=3,
            out_fmaps=self.fmaps,
            kernel=1,
            down=False,
            resample_kernel=None,
            gain=1.0,
            lrmul=1.0,
            name="conv",
        )
        self.apply_bias_act = BiasAct(lrmul=1.0, act="lrelu", name="bias")
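
The forward pass here is a single 1x1 convolution from the 3 RGB channels to fmaps feature maps; a likely call(), shown only as a sketch:

    def call(self, image, training=None):
        x = self.conv(image)
        return self.apply_bias_act(x)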
Example #5: Discriminator.__init__
    def __init__(self, **kwargs):
        super(Discriminator, self).__init__(**kwargs)
        # the discriminator's resolutions and feature maps are in reverse order relative to the generator's
        self.resolutions = cfg.discrim_resolutions
        self.feat_maps = cfg.discrim_feat_maps

        # stack discriminator blocks
        res0, n_f0 = self.resolutions[0], self.feat_maps[0]
        self.initial_fromrgb = FromRGB(
            fmaps=n_f0,
            h_res=res0[0],
            w_res=res0[1],
            name="{:d}x{:d}/FromRGB".format(res0[0], res0[1]),
        )

        self.blocks = list()
        for res, next_step_res, f_m0, f_m1 in zip(
                self.resolutions[:-1],
                self.resolutions[1:],
                self.feat_maps[:-1],
                self.feat_maps[1:],
        ):
            self.blocks.append(
                DiscriminatorBlock(
                    n_f0=f_m0,
                    n_f1=f_m1,
                    reduce_height=res[0] != next_step_res[0],  # halve height only when the next block's height differs
                    in_h_res=res[0],
                    in_w_res=res[1],
                    name="{:d}x{:d}".format(res[0], res0[1]),
                ), )

        # set last discriminator block
        res_final = self.resolutions[-1]
        n_f0, n_f1 = self.feat_maps[-2], self.feat_maps[-1]
        self.last_block = DiscriminatorLastBlock(n_f0,
                                                 n_f1,
                                                 name="{:d}x{:d}".format(
                                                     res_final[0],
                                                     res_final[1]))

        # set last dense layer
        self.last_dense = Dense(1, gain=1.0, lrmul=1.0, name="last_dense")
        self.last_bias = BiasAct(lrmul=1.0, act="linear", name="last_bias")
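
A sketch of how these pieces are usually chained in the discriminator's forward pass (the signature and variable names are assumptions):

    def call(self, image, training=None):
        # project the RGB input to feature maps at the first (highest) resolution
        x = self.initial_fromrgb(image)

        # stack of downsampling residual blocks
        for block in self.blocks:
            x = block(x)

        # final block (minibatch std + conv + dense), then the scalar score head
        x = self.last_block(x)
        score = self.last_dense(x)
        score = self.last_bias(score)
        return score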
Example #6: ToRGB.__init__
    def __init__(self, in_ch, h_res, w_res, **kwargs):
        super(ToRGB, self).__init__(**kwargs)
        self.in_ch = in_ch
        self.h_res = h_res
        self.w_res = w_res
        self.conv = ModulatedConv2D(
            in_fmaps=self.in_ch,
            out_fmaps=3,
            kernel_shape=[1, 1],
            up=False,
            demodulate=False,
            resample_kernel=None,
            gain=1.0,
            lrmul=1.0,
            fused_modconv=True,
            name="conv",
        )

        self.apply_bias = BiasAct(lrmul=1.0, act="linear", name="bias")
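
A minimal sketch of the likely forward pass, assuming the modulated conv takes a (feature map, style vector) pair:

    def call(self, inputs, training=None):
        x, w = inputs
        # 1x1 modulated conv to 3 RGB channels (no demodulation), then linear bias
        x = self.conv([x, w])
        return self.apply_bias(x)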
Example #7: ModulatedConv2D.__init__
    def __init__(self,
                 in_fmaps,
                 out_fmaps,
                 kernel_shape,
                 up,
                 demodulate,
                 resample_kernel,
                 gain,
                 lrmul,
                 fused_modconv,
                 in_h_res=None,
                 in_w_res=None,
                 **kwargs):
        super(ModulatedConv2D, self).__init__(**kwargs)

        self.in_fmaps = in_fmaps
        self.out_fmaps = out_fmaps
        self.kernel_shape = kernel_shape
        self.demodulate = demodulate
        self.up = up
        self.fused_modconv = fused_modconv
        self.in_h_res = in_h_res
        self.in_w_res = in_w_res
        self.gain = gain
        self.lrmul = lrmul

        self.k, self.pad0, self.pad1 = compute_paddings(resample_kernel,
                                                        up,
                                                        False,
                                                        is_conv=True)

        # self.factor = 2
        self.mod_dense = Dense(self.in_fmaps,
                               gain=1.0,
                               lrmul=1.0,
                               name="mod_dense")
        self.mod_bias = BiasAct(lrmul=1.0, act="linear", name="mod_bias")
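
The modulation itself happens in the layer's forward pass, which is not part of this excerpt. As a self-contained illustration of the StyleGAN2 modulate/demodulate math that mod_dense and mod_bias feed into (toy shapes, not the source's exact code):

import tensorflow as tf

# toy shapes: kernel [kh, kw, in_fmaps, out_fmaps], per-sample styles [batch, in_fmaps]
kernel = tf.random.normal([3, 3, 8, 16])
styles = tf.random.normal([4, 8]) + 1.0  # e.g. mod_bias(mod_dense(w)) + 1

# modulate: scale each input channel of the kernel per sample
w = kernel[tf.newaxis] * styles[:, tf.newaxis, tf.newaxis, :, tf.newaxis]

# demodulate: rescale so each output channel has unit L2 norm
d = tf.math.rsqrt(tf.reduce_sum(tf.square(w), axis=[1, 2, 3]) + 1e-8)
w = w * d[:, tf.newaxis, tf.newaxis, tf.newaxis, :]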
Example #8: Mapping.__init__
    def __init__(self, style_dim, n_mapping, name, **kwargs):
        super(Mapping, self).__init__(name=name, **kwargs)
        self.style_dim = style_dim
        self.n_mapping = n_mapping
        self.gain = 1.0
        self.lrmul = 0.01

        # pixel-wise normalization of the input latent vector
        self.normalize = tf.keras.layers.Lambda(lambda x: x * tf.math.rsqrt(
            tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + 1e-8))

        self.dense_layers = list()
        self.bias_act_layers = list()
        for ii in range(self.n_mapping):
            self.dense_layers.append(
                Dense(
                    self.style_dim,
                    gain=self.gain,
                    lrmul=self.lrmul,
                    name="dense_{:d}".format(ii),
                ))
            self.bias_act_layers.append(
                BiasAct(lrmul=self.lrmul,
                        act="lrelu",
                        name="bias_{:d}".format(ii)))