Example #1
    def __init__(self,
                 depth=7,
                 latent_size=512,
                 dilation=1,
                 use_spectral_norm=True):
        """
        constructor for the Generator class
        :param depth: required depth of the Network
        :param latent_size: size of the latent manifold
        :param dilation: amount of dilation to be used by the 3x3 convs
                         in the Generator module.
        :param use_spectral_norm: whether to use spectral normalization
        """
        import numpy as np  # needed for the np.power calls below
        from torch.nn import ModuleList, Conv2d
        from MSG_GAN.CustomLayers import GenGeneralConvBlock, GenInitialBlock

        super().__init__()

        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert latent_size >= np.power(
                2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.depth = depth
        self.latent_size = latent_size
        self.spectral_norm_mode = None
        self.dilation = dilation

        # register the modules required for the GAN below ...
        # create the ToRGB layers for various outputs:
        def to_rgb(in_channels):
            return Conv2d(in_channels, 3, (1, 1), bias=True)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([GenInitialBlock(self.latent_size)])
        self.rgb_converters = ModuleList([to_rgb(self.latent_size)])

        # create the remaining layers
        for i in range(self.depth - 1):
            if i <= 2:
                layer = GenGeneralConvBlock(self.latent_size,
                                            self.latent_size,
                                            dilation=dilation)
                rgb = to_rgb(self.latent_size)
            else:
                layer = GenGeneralConvBlock(
                    int(self.latent_size // np.power(2, i - 3)),
                    int(self.latent_size // np.power(2, i - 2)),
                    dilation=dilation)
                rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))
            self.layers.append(layer)
            self.rgb_converters.append(rgb)

        # if spectral normalization is on:
        if use_spectral_norm:
            self.turn_on_spectral_norm()
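The loop above holds the channel count at latent_size for the first four blocks and then halves it at each deeper stage. A small standalone sketch of that schedule (the function name and the print are illustrative, not part of the original class):

import numpy as np

def channel_schedule(depth=7, latent_size=512):
    # reproduces the in/out channel progression of the generator loop
    channels = [latent_size]              # initial block
    for i in range(depth - 1):
        if i <= 2:
            channels.append(latent_size)  # first three extra blocks: constant
        else:
            channels.append(int(latent_size // np.power(2, i - 2)))
    return channels

print(channel_schedule())  # [512, 512, 512, 512, 256, 128, 64]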
Example #2
    def __init__(self, depth=7, latent_size=512, use_eql=True):
        """
        constructor for the Generator class
        :param depth: required depth of the Network
        :param latent_size: size of the latent manifold
        :param use_eql: whether to use equalized learning rate
        """
        import numpy as np  # needed for the np.power calls below
        from torch.nn import ModuleList, Conv2d
        from MSG_GAN.CustomLayers import GenGeneralConvBlock, \
            GenInitialBlock, _equalized_conv2d

        super().__init__()

        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert latent_size >= np.power(2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.use_eql = use_eql
        self.depth = depth
        self.latent_size = latent_size

        # register the modules required for the Generator below ...
        # create the ToRGB layers for various outputs:
        if self.use_eql:
            def to_rgb(in_channels):
                # TODO: make this respect 1 or 3 output channels
                out_channels = 1  # hard-coded to single-channel output
                # original: return _equalized_conv2d(in_channels, 3, (1, 1), bias=True)
                return _equalized_conv2d(in_channels, out_channels, (1, 1), bias=True)
        else:
            def to_rgb(in_channels):
                # TODO: make this respect 1 or 3 output channels
                out_channels = 1  # hard-coded to single-channel output
                # original: return Conv2d(in_channels, 3, (1, 1), bias=True)
                return Conv2d(in_channels, out_channels, (1, 1), bias=True)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([GenInitialBlock(self.latent_size, use_eql=self.use_eql)])
        self.rgb_converters = ModuleList([to_rgb(self.latent_size)])

        # create the remaining layers
        for i in range(self.depth - 1):
            if i <= 2:
                layer = GenGeneralConvBlock(self.latent_size, self.latent_size,
                                            use_eql=self.use_eql)
                rgb = to_rgb(self.latent_size)
            else:
                layer = GenGeneralConvBlock(
                    int(self.latent_size // np.power(2, i - 3)),
                    int(self.latent_size // np.power(2, i - 2)),
                    use_eql=self.use_eql
                )
                rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))
            self.layers.append(layer)
            self.rgb_converters.append(rgb)
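The TODO above asks for a to_rgb that respects 1 or 3 channels. One possible factory, sketched under the assumption that _equalized_conv2d and Conv2d keep the signatures used in the example (make_to_rgb and its out_channels parameter are hypothetical names, not part of the original API):

def make_to_rgb(use_eql, out_channels=3):
    # returns a to_rgb builder with a configurable output channel count
    if use_eql:
        def to_rgb(in_channels):
            return _equalized_conv2d(in_channels, out_channels, (1, 1), bias=True)
    else:
        def to_rgb(in_channels):
            return Conv2d(in_channels, out_channels, (1, 1), bias=True)
    return to_rgb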
Example #3
    def __init__(self,
                 depth=7,
                 latent_size=512,
                 use_eql=True,
                 n_mlp=8,
                 base_channel=512):
        """
        constructor for the Generator class
        :param depth: required depth of the Network
        :param latent_size: size of the latent manifold
        :param use_eql: whether to use equalized learning rate
        """
        import numpy as np  # needed for the np.power calls below
        import torch as th  # th.nn layers are used in the style MLP below
        from torch.nn import ModuleList, Conv2d
        from MSG_GAN.CustomLayers import GenGeneralConvBlock, PixelNorm, EqualConv2d, EqualLinear

        super().__init__()

        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert latent_size >= np.power(
                2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.use_eql = use_eql
        self.depth = depth
        self.latent_size = latent_size
        self.base_channel = base_channel

        # style mapping network: PixelNorm followed by n_mlp (Equal)Linear + LeakyReLU layers
        layers = [PixelNorm()]
        for i in range(n_mlp):
            if self.use_eql:
                layers.append(EqualLinear(latent_size, latent_size))
            else:
                layers.append(th.nn.Linear(latent_size, latent_size))
            layers.append(th.nn.LeakyReLU(0.2))
        self.style = th.nn.Sequential(*layers)

        # register the modules required for the Generator below ...
        # create the ToRGB layers for various outputs:
        if self.use_eql:

            def to_rgb(in_channels):
                return EqualConv2d(in_channels, 3, (1, 1), bias=True)
        else:

            def to_rgb(in_channels):
                return Conv2d(in_channels, 3, (1, 1), bias=True)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([
            GenGeneralConvBlock(in_channels=base_channel,
                                out_channels=base_channel,
                                style_dim=latent_size,
                                fused=False,
                                initial=True)
        ])
        self.rgb_converters = ModuleList([to_rgb(base_channel)])

        # create the remaining layers
        for i in range(self.depth - 1):
            if i <= 2:
                layer = GenGeneralConvBlock(in_channels=base_channel,
                                            out_channels=base_channel,
                                            style_dim=latent_size,
                                            fused=False,
                                            initial=False)
                rgb = to_rgb(base_channel)
            else:
                layer = GenGeneralConvBlock(
                    int(base_channel // np.power(2, i - 3)),
                    int(base_channel // np.power(2, i - 2)),
                    style_dim=latent_size,
                    fused=False,
                    initial=False)
                rgb = to_rgb(int(base_channel // np.power(2, i - 2)))
            self.layers.append(layer)
            self.rgb_converters.append(rgb)
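Example #3 differs from the other variants by prepending a StyleGAN-style mapping network (self.style) that transforms the latent vector before it reaches the convolution blocks. A standalone sketch of the same structure in plain torch, leaving out the custom PixelNorm and EqualLinear layers (a simplification, not the example's exact network):

import torch as th

def make_mapping(latent_size=512, n_mlp=8):
    # mirrors the n_mlp loop above, minus PixelNorm and equalized LR
    layers = []
    for _ in range(n_mlp):
        layers.append(th.nn.Linear(latent_size, latent_size))
        layers.append(th.nn.LeakyReLU(0.2))
    return th.nn.Sequential(*layers)

styles = make_mapping()(th.randn(4, 512))  # (batch, latent_size) style codes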
Example #4
    def __init__(self, depth=7, latent_size=512, use_eql=True):
        """
        constructor for the Generator class
        :param depth: required depth of the Network
        :param latent_size: size of the latent manifold
        :param use_eql: whether to use equalized learning rate
        """
        import numpy as np  # needed for the np.power calls below
        from torch.nn import ModuleList, Conv2d
        from MSG_GAN.CustomLayers import GenGeneralConvBlock, \
            GenInitialBlock, _equalized_conv2d

        super().__init__()

        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        #   Here & is bitwise AND: an integer power of 2 has the binary form 10000...,
        #   and subtracting 1 yields exactly 01111..., so the AND of the two is zero

        if depth >= 4:
            assert latent_size >= np.power(
                2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.use_eql = use_eql
        self.depth = depth
        self.latent_size = latent_size

        # register the modules required for the Generator below ...
        # create the ToRGB layers for various outputs:
        if self.use_eql:

            def to_rgb(in_channels):
                return _equalized_conv2d(in_channels, 3, (1, 1), bias=True)
        else:

            def to_rgb(in_channels):
                return Conv2d(in_channels, 3, (1, 1), bias=True)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList(
            [GenInitialBlock(self.latent_size, use_eql=self.use_eql)])
        self.rgb_converters = ModuleList([to_rgb(self.latent_size)])

        #   Note: layers holds the convolution blocks, while rgb_converters turns the
        #   intermediate representations into RGB images

        # create the remaining layers
        for i in range(self.depth - 1):
            #   In this implementation, for i = 0, 1, 2 the channel count stays at latent_size;
            #   at the deeper levels it is halved at each step
            if i <= 2:
                layer = GenGeneralConvBlock(self.latent_size,
                                            self.latent_size,
                                            use_eql=self.use_eql)
                rgb = to_rgb(self.latent_size)
            else:
                layer = GenGeneralConvBlock(
                    int(self.latent_size // np.power(2, i - 3)),
                    int(self.latent_size // np.power(2, i - 2)),
                    use_eql=self.use_eql)
                rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))
            self.layers.append(layer)
            self.rgb_converters.append(rgb)
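The bit trick described in the translated comment above can be verified in isolation; a minimal standalone check (is_power_of_two is an illustrative helper, not part of the original code):

def is_power_of_two(x: int) -> bool:
    # a power of two is 1000...0 in binary; x - 1 flips it to 0111...1,
    # so the bitwise AND of the two is zero exactly for powers of two
    return x != 0 and (x & (x - 1)) == 0

assert is_power_of_two(512) and is_power_of_two(1)
assert not is_power_of_two(0) and not is_power_of_two(384)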