Example #1
0
 def forward(self,
             input: Tensor,
             output_size: Optional[List[int]] = None,
             eps: float = 1e-4) -> Tensor:
     """Transposed convolution using the standardized weight.

     Args:
         input: Input feature map, shape (N, C_in, H, W).
         output_size: Accepted for interface compatibility; currently
             unused — no output-size-dependent ``output_padding`` is
             computed here (TODO: wire it up if callers rely on it).
         eps: Numerical-stability constant forwarded to
             ``standardize_weight``.

     Returns:
         The transposed-convolution output tensor.
     """
     weight = self.standardize_weight(eps)
     # Fix: pass the standardized `weight`, not the raw `self.weight` —
     # previously the result of standardize_weight() was silently discarded.
     return F.conv_transpose2d(input, weight, self.bias, self.stride,
                               self.padding, self.output_padding,
                               self.groups, self.dilation)
Example #2
0
    def forward(self, input, style):
        """Apply a style-modulated convolution to *input*.

        The latent ``style`` is projected to one scale per input channel;
        these scales modulate the convolution weights (optionally
        demodulated to unit filter norm). The convolution itself is run
        as a single grouped conv with one group per batch element, so
        each sample uses its own modulated weights.
        """
        batch, in_channel, height, width = input.shape

        # Project the style vector to per-input-channel scales.
        style_scales = self.modulation(style).view(batch, 1, in_channel, 1, 1)

        # Per-sample modulated weights: (batch, out, in, k, k).
        mod_weight = self.scale * self.weight * style_scales

        if self.demodulate:
            # Rescale each output filter to (approximately) unit norm;
            # 1e-8 guards against division by zero. For a good explanation
            # of modulation/demodulation see
            # https://youtu.be/MYCTn80qSk0?t=142
            inv_norm = torch.rsqrt(mod_weight.pow(2).sum([2, 3, 4]) + 1e-8)
            mod_weight = mod_weight * inv_norm.view(
                batch, self.out_channel, 1, 1, 1)

        # Fold the batch dimension into the output-channel dimension so a
        # grouped convolution applies per-sample weights in one call.
        mod_weight = mod_weight.view(batch * self.out_channel, in_channel,
                                     self.kernel_size, self.kernel_size)

        if self.up_sample:
            input = input.view(1, batch * in_channel, height, width)

            # conv_transpose2d expects (in, out, k, k) weights: swap the
            # channel axes before re-flattening the batch dimension.
            mod_weight = mod_weight.view(batch, self.out_channel, in_channel,
                                         self.kernel_size, self.kernel_size)
            mod_weight = mod_weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel,
                self.kernel_size, self.kernel_size)

            # Stride-2 transposed convolution performs the upsampling.
            out = F.conv_transpose2d(input, mod_weight, padding=0, stride=2,
                                     groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

            # Blur to smooth upsampling artifacts.
            out = self.blur(out)
        elif self.down_sample:
            # Blur before the strided convolution (anti-aliasing).
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)

            # Stride-2 convolution performs the downsampling.
            out = F.conv2d(input, mod_weight, padding=0, stride=2,
                           groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            # Same-resolution convolution.
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, mod_weight, padding=self.padding,
                           groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        return out