Code example #1
    def __init__(
            self,
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=False,
            blur_kernel=[1, 3, 3, 1],
            demodulate=True,
            fused_bias_linear=None,  # fused_bias(embed) -> out_channel
            conditional_bias=False,  # conditional bias in modulation
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
            conditional_bias=conditional_bias,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        self.conditional_fused = fused_bias_linear is not None
        if self.conditional_fused:
            self.bias = fused_bias_linear(out_dim=out_channel)
            self.activate = FusedLeakyReLU(out_channel, bias=False)
        else:
            self.activate = FusedLeakyReLU(out_channel)
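
Only the constructor is shown; a hypothetical forward sketch (not from the source, argument names are illustrative) showing how the conditional bias path would compose:

    def forward(self, input, style, embed=None, noise=None):
        out = self.conv(input, style)
        out = self.noise(out, noise=noise)
        if self.conditional_fused:
            # FusedLeakyReLU was built with bias=False above, so the
            # embedding-conditioned bias is added here instead
            out = out + self.bias(embed).view(out.shape[0], -1, 1, 1)
        return self.activate(out)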
Code example #2
    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        with_bn=True,
    ):
        super(conv2DBatchNormRelu, self).__init__()

        conv_mod = th.nn.Conv2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            padding=padding,
            stride=stride,
            bias=bias,
            dilation=dilation,
        )

        if with_bn:
            self.cbr_unit = th.nn.Sequential(conv_mod,
                                             th.nn.BatchNorm2d(int(n_filters)),
                                             FusedLeakyReLU(int(n_filters)))
        else:
            self.cbr_unit = th.nn.Sequential(conv_mod,
                                             FusedLeakyReLU(int(n_filters)))
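
A hypothetical instantiation (values are illustrative, not from the source):

block = conv2DBatchNormRelu(in_channels=3, n_filters=64, k_size=3,
                            stride=1, padding=1)
y = block.cbr_unit(x)  # x: (N, 3, H, W) -> y: (N, 64, H, W)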
Code example #3
File: model.py Project: Yan98/S2FGAN
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        """
        Return, None
        Parameters
        ----------
        in_channels, int, the channels of input
        out_channels, int, the channles expanded by the convolution
        kernel_size, int, the size of kernel needed.
        style_dim, int, dimensionality of attribute latent space.
        upsample, bool, decide if upsample the input
        blur_kernel, [int], the kernel used to blur input.
        demoulated, bool, decide applying demodulation
        Returns
        -------
        None
        """
        
        super().__init__()

        self.conv1 = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=True,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.activate1 = FusedLeakyReLU(out_channel)
        
        self.conv2 = ModulatedConv2d(
            out_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=False,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.activate2 = FusedLeakyReLU(out_channel)
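
The constructor above only defines the layers; a hypothetical forward pass (not in the source) would chain them as an upsampling step followed by a refining step:

    def forward(self, input, style):
        out = self.activate1(self.conv1(input, style))  # 2x upsample, then activate
        out = self.activate2(self.conv2(out, style))    # same-resolution refinement
        return out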
Code example #4
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
        layerID=-1,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        self.activate = FusedLeakyReLU(out_channel)
        self.manipulation = ManipulationLayer(layerID)
Code example #5
    def __init__(self, latent_dim, channel, size=4):
        super().__init__()
        self.channel = channel
        self.size = size
        self.linear = EqualLinear(latent_dim, channel * size * size, activation="fused_lrelu")
        self.activate = FusedLeakyReLU(channel * size * size)
        self.input = nn.Parameter(th.randn(1))
Code example #6
File: blocks.py Project: liuguoyou/CIPS
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
        activation=None,
        downsample=False,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
            downsample=downsample,
        )

        self.activation = activation
        self.noise = NoiseInjection()
        if activation == 'sinrelu':
            self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
            self.activate = ScaledLeakyReLUSin()
        elif activation == 'sin':
            self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
            self.activate = SinActivation()
        else:
            self.activate = FusedLeakyReLU(out_channel)
Code example #7
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        self.activate = FusedLeakyReLU(out_channel)
Code example #8
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
        is_spade=False,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.is_spade = is_spade
        if is_spade:
            self.spade = SPADE(norm_nc=out_channel, label_nc=1)
        self.noise = NoiseInjection()
        self.activate = FusedLeakyReLU(out_channel)
Code example #9
    def _create_downsampling_module(self, input_channels, pooling_kernel):
        return th.nn.Sequential(
            th.nn.AvgPool2d(pooling_kernel),
            th.nn.Conv2d(input_channels, input_channels * 2, kernel_size=1),
            th.nn.BatchNorm2d(input_channels * 2),
            FusedLeakyReLU(input_channels * 2),
        )
Code example #10
    def _create_upsampling_module(self, input_channels, pooling_kernel):
        return th.nn.Sequential(
            th.nn.ConvTranspose2d(input_channels,
                                  input_channels // 2,
                                  kernel_size=pooling_kernel,
                                  stride=pooling_kernel),
            th.nn.BatchNorm2d(input_channels // 2),
            FusedLeakyReLU(input_channels // 2),
        )
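
Because kernel_size equals stride here, the transposed convolution upsamples by exactly pooling_kernel; a quick shape check with hypothetical values (assuming torch is imported as th, as in the snippet):

m = th.nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
m(th.zeros(1, 128, 16, 16)).shape  # torch.Size([1, 64, 32, 32])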
Code example #11
def create_decoder_single_conv(in_chs, out_chs, kernel):
    assert kernel % 2 == 1
    return th.nn.Sequential(
        th.nn.ConvTranspose2d(in_chs,
                              out_chs,
                              kernel_size=kernel,
                              padding=(kernel - 1) // 2),
        th.nn.BatchNorm2d(out_chs),
        FusedLeakyReLU(out_chs),
    )
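
The assert restricts kernel to odd sizes so that padding=(kernel - 1) // 2 preserves spatial size at stride 1; a hypothetical call:

dec = create_decoder_single_conv(in_chs=64, out_chs=32, kernel=3)
# (N, 64, H, W) -> (N, 32, H, W)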
Code example #12
File: model.py Project: Yan98/S2FGAN
    def __init__(
        self,
        c_dim,
        style_dim=512,
        n_mlp=8,
        channel_multiplier=1,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()

        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        self.input = ConstantInput(self.channels[4])
        self.conv1 = ModulatedConv2d(
                        512,
                        512,
                        3,
                        style_dim,
                        upsample=False,
                        blur_kernel=blur_kernel,
                        demodulate=True,
        )
        self.activate1 = FusedLeakyReLU(512)
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)

        self.convs = nn.ModuleList([
            StyledConv(512, 512, 3, style_dim, blur_kernel),                                            # 4   ->  8
            StyledConv(512, 512, 3, style_dim, blur_kernel),                                            # 8   ->  16
            StyledConv(512, 512, 3, style_dim, blur_kernel),                                            # 16  ->  32
            StyledConv(512, 256 * channel_multiplier, 3, style_dim, blur_kernel),                       # 32  ->  64
            StyledConv(256 * channel_multiplier, 128 * channel_multiplier, 3, style_dim, blur_kernel),  # 64  ->  128
            StyledConv(128 * channel_multiplier, 64 * channel_multiplier, 3, style_dim, blur_kernel),   # 128 ->  256
            ])

        self.to_rgbs = nn.ModuleList([
            ToRGB(512, style_dim),                       #8
            ToRGB(512, style_dim),                       #16
            ToRGB(512, style_dim),                       #32
            ToRGB(256 * channel_multiplier, style_dim),  #64
            ToRGB(128 * channel_multiplier, style_dim),  #128
            ToRGB(64 * channel_multiplier, style_dim),   #256
            ])
Code example #13
File: model.py Project: nazarblch/stylegan2-pytorch
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )

        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))

            else:
                layers.append(ScaledLeakyReLU(0.2))

        super().__init__(*layers)
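
For example, with kernel_size=3 and the default blur kernel of length 4, p = (4 - 2) + (3 - 1) = 4, so pad0 = pad1 = 2 and the blur followed by the stride-2 EqualConv2d halves the spatial resolution exactly.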
Code example #14
File: layers.py Project: zymale/StyleRenderer
    def __init__(self, in_channel, out_channel, kernel_size,
                 downsample=False, blur_kernel=[1, 3, 3, 1],
                 bias=True, activate='lrelu'):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
            stride = 2
            self.padding = 0
        else:
            stride = 1
            self.padding = kernel_size // 2

        if 'sp' in activate.lower():
            layers.append(SpectralNorm(EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias,
            )))
        else:
            layers.append(EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias,
            ))
            if activate == 'lrelu':
                if bias:
                    layers.append(FusedLeakyReLU(out_channel))
                else:
                    layers.append(ScaledLeakyReLU(0.2))

        super(ConvLayer, self).__init__(*layers)
Code example #15
File: model_seg_input.py Project: apchenstu/sofgan
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
        with_condition_img=False,
        style_merge=False,
        with_noise=True,
        classwiseStyle=False,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            downsample=downsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
            classwiseStyle=classwiseStyle,
        )
        self.style_merge = style_merge
        self.with_condition_img = with_condition_img
        if with_condition_img:
            self.segEncoder = SegEncoder(out_channel)
        self.with_noise = with_noise
        if with_noise:
            self.noise = NoiseInjection()
        self.activate = FusedLeakyReLU(out_channel)
Code example #16
    def __init__(self,
                 in_channels,
                 latent_dim,
                 hidden_dims=None,
                 alpha=10.0,
                 beta=1.0,
                 kld_weight=1):
        super(LogCoshVAE, self).__init__()

        my_hidden_dims = copy(hidden_dims)  # shallow copy; assumes "from copy import copy" at module level

        self.latent_dim = latent_dim
        self.alpha = alpha
        self.beta = beta
        self.kld_weight = kld_weight

        modules = []
        if my_hidden_dims is None:
            my_hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder
        for h_dim in my_hidden_dims:
            modules.append(
                th.nn.Sequential(
                    th.nn.Conv2d(in_channels,
                                 out_channels=h_dim,
                                 kernel_size=3,
                                 stride=2,
                                 padding=1),
                    th.nn.BatchNorm2d(h_dim),
                    FusedLeakyReLU(h_dim),
                ))
            in_channels = h_dim

        self.encoder = th.nn.Sequential(*modules)
        self.fc_mu = th.nn.Linear(my_hidden_dims[-1] * 4, latent_dim)
        self.fc_var = th.nn.Linear(my_hidden_dims[-1] * 4, latent_dim)

        # Build Decoder
        modules = []

        self.decoder_input = th.nn.Linear(latent_dim, my_hidden_dims[-1] * 4)

        my_hidden_dims.reverse()

        for i in range(len(my_hidden_dims) - 1):
            modules.append(
                th.nn.Sequential(
                    th.nn.Upsample(scale_factor=2,
                                   mode="bilinear",
                                   align_corners=False),
                    th.nn.Conv2d(my_hidden_dims[i],
                                 my_hidden_dims[i + 1],
                                 kernel_size=3,
                                 padding=1),
                    th.nn.BatchNorm2d(my_hidden_dims[i + 1]),
                    FusedLeakyReLU(my_hidden_dims[i + 1]),
                ))

        self.decoder = th.nn.Sequential(*modules)

        self.final_layer = th.nn.Sequential(
            th.nn.Upsample(scale_factor=2,
                           mode="bilinear",
                           align_corners=False),
            th.nn.Conv2d(my_hidden_dims[-1],
                         my_hidden_dims[-1],
                         kernel_size=3,
                         padding=1),
            th.nn.BatchNorm2d(my_hidden_dims[-1]),
            FusedLeakyReLU(my_hidden_dims[-1]),
            th.nn.Conv2d(my_hidden_dims[-1],
                         out_channels=3,
                         kernel_size=3,
                         padding=1),
            th.nn.Tanh(),
        )
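
Note that the fc_mu / fc_var input size of my_hidden_dims[-1] * 4 implicitly assumes a 64x64 input, since the five stride-2 encoder convolutions then reduce it to 2x2:

# Hypothetical shape trace for a 64x64 RGB input:
# (N, 3, 64, 64) -> five stride-2 convs -> (N, 512, 2, 2) -> flatten -> (N, 512 * 4)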
Code example #17
File: blocks.py Project: liuguoyou/CIPS
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
        upsample=False,
        padding="zero",
    ):
        layers = []

        self.padding = 0
        stride = 1

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2

        if upsample:
            layers.append(
                EqualConvTranspose2d(
                    in_channel,
                    out_channel,
                    kernel_size,
                    padding=0,
                    stride=2,
                    bias=bias and not activate,
                )
            )

            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

        else:
            if not downsample:
                if padding == "zero":
                    self.padding = (kernel_size - 1) // 2

                elif padding == "reflect":
                    padding = (kernel_size - 1) // 2

                    if padding > 0:
                        layers.append(nn.ReflectionPad2d(padding))

                    self.padding = 0

                elif padding != "valid":
                    raise ValueError('Padding should be "zero", "reflect", or "valid"')

            layers.append(
                EqualConv2d(
                    in_channel,
                    out_channel,
                    kernel_size,
                    padding=self.padding,
                    stride=stride,
                    bias=bias and not activate,
                )
            )

        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))

            else:
                layers.append(ScaledLeakyReLU(0.2))

        super().__init__(*layers)
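
All of the snippets above use FusedLeakyReLU as a drop-in activation: a fused per-channel bias add plus LeakyReLU with slope 0.2, rescaled by sqrt(2), as implemented in stylegan2-pytorch. A minimal standalone sketch, assuming that implementation's op module is importable (the exact import path varies across the projects above):

import torch
from op import FusedLeakyReLU

act = FusedLeakyReLU(64)          # learnable per-channel bias for 64 channels
x = torch.randn(8, 64, 32, 32)    # NCHW feature map
y = act(x)                        # bias add + leaky_relu(0.2), scaled by sqrt(2); shape unchanged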