Code example #1
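The four snippets below are `__init__` bodies of PyTorch `nn.Module` subclasses. They assume `import torch.nn as nn` and `import torch.nn.functional as F`, plus project-specific building blocks (`FirstBlockDown2d`, `InvertedRes2d`, `BlockUpsample2d`, `SelfAttention2d`, `ImageFeature2Image`) and helpers (`NORMS`, `perform_sn`) that are not shown here.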
    def __init__(self,
                 self_attention=True,
                 sn=True,
                 norm=None,
                 use_class=False):
        super().__init__()
        self.use_class = use_class

        # Use a conv bias only when no normalization layer follows.
        bias = norm is None

        # DOWN:
        self.dn_block1 = FirstBlockDown2d(
            in_channels=3,
            out_channels=8,
            activation="leaky_relu",
            normalization=norm,
            downscale=False,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block2 = InvertedRes2d(
            in_channels=8,
            planes=16,  # 64
            out_channels=16,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block3 = InvertedRes2d(
            in_channels=16,
            planes=32,  # 128
            out_channels=32,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        # Optional self-attention at the 32-channel stage.
        self.sa_layer = None
        if self_attention:
            self.sa_layer = SelfAttention2d(in_channels=32, sn=sn)

        self.dn_block4 = InvertedRes2d(
            in_channels=32,
            planes=64,  # 256
            out_channels=64,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block5 = InvertedRes2d(
            in_channels=64,
            planes=128,  # 256
            out_channels=128,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block6 = InvertedRes2d(
            in_channels=128,
            planes=256,  # 512
            out_channels=256,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block7 = InvertedRes2d(
            in_channels=256,
            planes=512,  # 512
            out_channels=512,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.global_avg_pool = nn.AdaptiveAvgPool2d([1, 1])

        self.last_norm = None
        if norm is not None:
            self.last_norm = NORMS[norm](num_channels=512)

        self.last_act = F.leaky_relu

        # Output head: a 1x1 conv producing 1001-channel class logits when
        # use_class is set, otherwise a single-channel output.
        if use_class:
            self.output = perform_sn(
                nn.Conv2d(
                    in_channels=512,
                    out_channels=1001,
                    kernel_size=1,
                    bias=True,
                    padding=0,
                    stride=1,
                ),
                sn=sn,
            )
        else:
            self.output = perform_sn(
                nn.Conv2d(
                    in_channels=512,
                    out_channels=1,
                    kernel_size=1,
                    bias=True,
                    padding=0,
                    stride=1,
                ),
                sn=sn,
            )
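`perform_sn` and `NORMS` are not defined in these snippets. Below is a minimal sketch of plausible definitions, assuming `perform_sn` wraps a module in PyTorch's `nn.utils.spectral_norm` when `sn=True` and `NORMS` maps a name to a factory taking `num_channels`. Both are guesses from the call sites, not the author's code, and the group count for "GN" is a placeholder.

    import torch.nn as nn

    def perform_sn(module, sn=False):
        # Apply spectral normalization only when requested (assumed behavior).
        return nn.utils.spectral_norm(module) if sn else module

    # Factories keyed by normalization name; the num_channels keyword matches
    # the NORMS[norm](num_channels=...) calls in the snippets.
    NORMS = {
        "BN": lambda num_channels: nn.BatchNorm2d(num_features=num_channels),
        "GN": lambda num_channels: nn.GroupNorm(num_groups=8,
                                                num_channels=num_channels),
    }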
Code example #2
    def __init__(self, self_attention=True, sn=False, norm="BN", dropout=0):
        super().__init__()
        self.imgfeat2img = ImageFeature2Image(self_attention=self_attention,
                                              sn=sn,
                                              norm=norm,
                                              dropout=dropout)

        # Use a conv bias only when no normalization layer follows.
        bias = norm is None

        # DOWN:
        self.dn_block1 = FirstBlockDown2d(
            in_channels=2,
            out_channels=16,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block2 = InvertedRes2d(
            in_channels=16,
            planes=32,  # 64
            out_channels=32,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block3 = InvertedRes2d(
            in_channels=32,
            planes=64,  # 128
            out_channels=64,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block4 = InvertedRes2d(
            in_channels=64,
            planes=128,  # 256
            out_channels=128,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        # Optional self-attention at the 128-channel stage.
        self.sa_layer = None
        if self_attention:
            self.sa_layer = SelfAttention2d(in_channels=128, sn=sn)

        self.dn_block5 = InvertedRes2d(
            in_channels=128,
            planes=256,  # 512
            out_channels=256,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=True,
            sn=sn,
            bias=bias,
        )

        self.dn_block6 = InvertedRes2d(
            in_channels=256,
            planes=512,
            out_channels=512,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.global_avg_pool = nn.AdaptiveAvgPool2d([2, 2])

        self.last_norm = None
        if norm is not None:
            self.last_norm = NORMS[norm](num_channels=512)

        self.last_act = F.relu

        self.last_conv = perform_sn(
            nn.Conv2d(
                in_channels=512,
                out_channels=4096,
                kernel_size=2,
                bias=True,
                padding=0,
                stride=1,
            ),
            sn=sn,
        )
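The `forward` method is not included in the snippet. Assuming the blocks are applied in declaration order, the encoder path would look roughly like the sketch below; the role of `self.imgfeat2img` (presumably the paired decoder) is not shown. With a 2x2 pooled map and `kernel_size=2`, `last_conv` yields a 4096-channel output of spatial size 1x1.

    def forward(self, x):
        # Sketch only: assumes blocks run in declaration order.
        h = self.dn_block1(x)        # 2 -> 16 channels
        h = self.dn_block2(h)        # 16 -> 32
        h = self.dn_block3(h)        # 32 -> 64
        h = self.dn_block4(h)        # 64 -> 128
        if self.sa_layer is not None:
            h = self.sa_layer(h)     # self-attention at 128 channels
        h = self.dn_block5(h)        # 128 -> 256, with SE block
        h = self.dn_block6(h)        # 256 -> 512
        h = self.global_avg_pool(h)  # (N, 512, 2, 2)
        if self.last_norm is not None:
            h = self.last_norm(h)
        h = self.last_act(h)
        return self.last_conv(h)     # (N, 4096, 1, 1)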
Code example #3
    def __init__(self,
                 self_attention=True,
                 sn=False,
                 norm="BN",
                 dropout=0,
                 use_class=False):
        super().__init__()
        self.use_class = use_class

        # Use a conv bias only when no normalization layer follows.
        bias = norm is None

        # DOWN:
        self.dn_block1 = FirstBlockDown2d(
            in_channels=2,
            out_channels=16,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block2 = InvertedRes2d(
            in_channels=16,
            planes=32,  # 64
            out_channels=32,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block3 = InvertedRes2d(
            in_channels=32,
            planes=64,  # 128
            out_channels=64,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        # Optional self-attention on the encoder side (64 channels).
        self.sa_layer1 = None
        if self_attention:
            self.sa_layer1 = SelfAttention2d(in_channels=64, sn=sn)

        self.dn_block4 = InvertedRes2d(
            in_channels=64,
            planes=128,  # 256
            out_channels=128,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block5 = InvertedRes2d(
            in_channels=128,
            planes=256,  # 512
            out_channels=256,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.dn_block6 = InvertedRes2d(
            in_channels=256,
            planes=512,
            out_channels=512,
            dropout=0,
            activation="leaky_relu",
            normalization=norm,
            downscale=True,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.global_avg_pool = nn.AdaptiveAvgPool2d([2, 2])

        # SKIP: grouped convolutions with an [8, 1] kernel and matching stride,
        # collapsing a height-8 feature map to height 1 for the skip paths.
        self.skip1 = nn.Conv2d(
            in_channels=512,
            out_channels=512,
            kernel_size=[8, 1],
            stride=[8, 1],
            padding=[0, 0],
            groups=512,
        )
        self.skip2 = nn.Conv2d(
            in_channels=256,
            out_channels=256,
            kernel_size=[8, 1],
            stride=[8, 1],
            padding=[0, 0],
            groups=256,
        )
        self.skip3 = nn.Conv2d(
            in_channels=128,
            out_channels=256,
            kernel_size=[8, 1],
            stride=[8, 1],
            padding=[0, 0],
            groups=128,
        )
        self.skip4 = nn.Conv2d(
            in_channels=64,
            out_channels=128,
            kernel_size=[8, 1],
            stride=[8, 1],
            padding=[0, 0],
            groups=64,
        )

        # UP:
        self.up_block1 = BlockUpsample2d(
            in_channels=512,
            out_channels=256,
            dropout=dropout,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block2 = BlockUpsample2d(
            in_channels=256,
            out_channels=256,
            dropout=dropout,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block3 = BlockUpsample2d(
            in_channels=256,
            out_channels=128,
            dropout=dropout,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block4 = BlockUpsample2d(
            in_channels=128,
            out_channels=128,
            dropout=dropout,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block5 = BlockUpsample2d(
            in_channels=128,
            out_channels=64,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        # Optional self-attention on the decoder side (64 channels).
        self.sa_layer2 = None
        if self_attention:
            self.sa_layer2 = SelfAttention2d(in_channels=64, sn=sn)

        self.up_block6 = BlockUpsample2d(
            in_channels=64,
            out_channels=32,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block7 = BlockUpsample2d(
            in_channels=32,
            out_channels=16,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.last_norm = None
        if norm is not None:
            self.last_norm = NORMS[norm](num_channels=16)

        self.last_act = F.relu

        self.last_conv = perform_sn(
            nn.Conv2d(
                in_channels=16,
                out_channels=3,
                kernel_size=1,
                bias=True,
                padding=0,
                stride=1,
            ),
            sn=sn,
        )

        self.last_tanh = nn.Tanh()

        ################
        # class output #
        ################

        self.class_norm = None
        self.class_act = None
        self.class_conv = None
        if use_class:

            if norm is not None:
                self.class_norm = NORMS[norm](num_channels=512)

            self.class_act = F.leaky_relu

            self.class_conv = perform_sn(
                nn.Conv2d(
                    in_channels=512,
                    out_channels=1000,
                    kernel_size=2,
                    bias=True,
                    padding=0,
                    stride=1,
                ),
                sn=sn,
            )
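The [8, 1] kernel with an [8, 1] stride collapses height 8 to height 1 while preserving the width axis. A small shape check (the input shape is hypothetical, chosen to match the kernel):

    import torch
    import torch.nn as nn

    skip1 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=[8, 1],
                      stride=[8, 1], padding=[0, 0], groups=512)
    x = torch.randn(1, 512, 8, 4)  # hypothetical (N, C, H, W) encoder output
    print(skip1(x).shape)          # torch.Size([1, 512, 1, 4])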
Code example #4
    def __init__(self, self_attention=True, sn=False, norm="BN", dropout=0):
        super().__init__()

        # Use a conv bias only when no normalization layer follows.
        bias = norm is None

        # UP:
        self.up_block1 = BlockUpsample2d(
            in_channels=4096,
            out_channels=512,
            dropout=dropout,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block2 = BlockUpsample2d(
            in_channels=512,
            out_channels=256,
            dropout=dropout,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block3 = BlockUpsample2d(
            in_channels=256,
            out_channels=128,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block4 = BlockUpsample2d(
            in_channels=128,
            out_channels=64,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block5 = BlockUpsample2d(
            in_channels=64,
            out_channels=64,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=True,
            sn=sn,
            bias=bias,
        )

        # Optional self-attention at the 64-channel stage.
        self.sa_layer = None
        if self_attention:
            self.sa_layer = SelfAttention2d(in_channels=64, sn=sn)

        self.up_block6 = BlockUpsample2d(
            in_channels=64,
            out_channels=32,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block7 = BlockUpsample2d(
            in_channels=32,
            out_channels=32,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.up_block8 = BlockUpsample2d(
            in_channels=32,
            out_channels=16,
            dropout=0,
            activation="relu",
            normalization=norm,
            seblock=False,
            sn=sn,
            bias=bias,
        )

        self.last_norm = None
        if norm is not None:
            self.last_norm = NORMS[norm](num_channels=16)

        self.last_act = F.relu

        self.last_conv = perform_sn(
            nn.Conv2d(
                in_channels=16,
                out_channels=3,
                kernel_size=1,
                bias=True,
                padding=0,
                stride=1,
            ),
            sn=sn,
        )

        self.last_tanh = nn.Tanh()
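As with the other snippets, the `forward` method is omitted. Below is a sketch assuming declaration order and that each `BlockUpsample2d` doubles the spatial resolution, so a (N, 4096, 1, 1) latent would grow to a (N, 3, 256, 256) image:

    def forward(self, z):
        # Sketch only: assumes blocks run in declaration order and that each
        # BlockUpsample2d doubles the spatial size.
        h = self.up_block1(z)        # 4096 -> 512 channels
        h = self.up_block2(h)        # 512 -> 256
        h = self.up_block3(h)        # 256 -> 128
        h = self.up_block4(h)        # 128 -> 64
        h = self.up_block5(h)        # 64 -> 64, with SE block
        if self.sa_layer is not None:
            h = self.sa_layer(h)     # self-attention at 64 channels
        h = self.up_block6(h)        # 64 -> 32
        h = self.up_block7(h)        # 32 -> 32
        h = self.up_block8(h)        # 32 -> 16
        if self.last_norm is not None:
            h = self.last_norm(h)
        h = self.last_act(h)
        h = self.last_conv(h)        # 16 -> 3 channels
        return self.last_tanh(h)     # pixel values in [-1, 1]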