Example #1
    def __init__(self, channels, bn=False, sn=False):
        super().__init__()

        # `_norm` defaults to a no-op; `bn` swaps in the module-level `norm`
        # factory. NOTE: `sn` is accepted but not used inside this block.
        _norm = lambda *args, **kwargs: nn.Identity()
        if bn:
            _norm = norm

        # Two conv -> activation -> norm stages; the residual addition is
        # left to forward().
        self.blocks = nn.Sequential(
            conv(channels, channels, kernel_size=3, padding=1),
            activation(),
            _norm(channels),
            conv(channels, channels, kernel_size=3, padding=1),
            activation(),
            _norm(channels))
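
A minimal forward sketch (assumed, not part of the source) showing where the residual skip would close:

    def forward(self, x):
        # Identity path plus the refined features from the two conv stages.
        return x + self.blocks(x)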
Example #2
    def __init__(self, in_dim):
        super().__init__()

        self.channel_in = in_dim

        # 1x1 projections for query, key, and value.
        self.query_conv = conv(in_dim, in_dim // 8, kernel_size=1)
        self.key_conv = conv(in_dim, in_dim // 8, kernel_size=1)
        self.value_conv = conv(in_dim, in_dim, kernel_size=1)

        # Learnable gate on the attention output, initialised small so the
        # block starts close to an identity mapping.
        self.gamma = nn.Parameter(torch.tensor(0.05))

        self.softmax = nn.Softmax(dim=-1)
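
A forward pass is not shown in the source; the layer shapes match the standard SAGAN attention pattern, so a sketch under that assumption:

    def forward(self, x):
        B, C, H, W = x.size()
        q = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1)  # B x HW x C//8
        k = self.key_conv(x).view(B, -1, H * W)                     # B x C//8 x HW
        attn = self.softmax(torch.bmm(q, k))                        # B x HW x HW
        v = self.value_conv(x).view(B, -1, H * W)                   # B x C x HW
        out = torch.bmm(v, attn.permute(0, 2, 1)).view(B, C, H, W)
        # Gated residual: gamma starts small, so the block begins near identity.
        return self.gamma * out + x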
Example #3
    def __init__(self, base_channel, max_channel, depth, in_dim, attn_at=2):
        super().__init__()

        self.num_classes = in_dim

        self.encoder_blocks = nn.ModuleList()
        self.down_blocks = nn.ModuleList()

        self.up_blocks = nn.ModuleList()

        self.first = conv(in_dim, base_channel, kernel_size=1)
        self.final = conv(base_channel, 1, kernel_size=1)

        def ch_for_depth(d):
            return min(max_channel, base_channel * 2**d)

        if attn_at is not None:
            self.attn = SelfAttention(ch_for_depth(attn_at))
            self.attn_at = attn_at
        else:
            self.attn = None
            self.attn_at = None

        final_ch = ch_for_depth(depth - 1)

        # Encoder: a ResBlock at each depth, with stride-2 convs between
        # depths for downsampling.
        for i in range(depth):
            ch_in = ch_for_depth(i)
            ch_out = ch_for_depth(i + 1)

            self.encoder_blocks.append(nn.Sequential(ResBlock(ch_in)))
            if i + 1 != depth:
                self.down_blocks.append(
                    nn.Sequential(
                        conv(ch_in, ch_out, kernel_size=4, stride=2,
                             padding=1),
                        activation(),
                    ))

        # Decoder: walk back up from the bottleneck, fusing skips.
        for i in reversed(range(1, depth)):
            self.up_blocks.append(
                UpBlock(ch_for_depth(i), ch_for_depth(i - 1),
                        ch_for_depth(i - 1)))

        # Minibatch std-dev appends one statistics channel at the bottleneck;
        # the 1x1 conv folds it back down to final_ch.
        self.minibatch_std_dev = MinibatchStdDev()
        self.conv = conv(final_ch + 1, final_ch, kernel_size=1)

        self.activation = activation()
        self.linear = nn.Linear(final_ch, 1)
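
One plausible forward (assumed): reading the init as a U-Net-style critic that emits a global score from the bottleneck and a per-pixel map from the decoder; the mean-pooling before the linear head is a guess:

    def forward(self, x):
        x = self.first(x)

        # Encoder, keeping skips; optional self-attention at depth attn_at.
        skips = []
        for i, block in enumerate(self.encoder_blocks):
            x = block(x)
            if self.attn is not None and i == self.attn_at:
                x = self.attn(x)
            skips.append(x)
            if i < len(self.down_blocks):
                x = self.down_blocks[i](x)

        # Global head: std-dev channel -> 1x1 conv -> pool -> linear.
        y = self.minibatch_std_dev(x)
        y = self.activation(self.conv(y))
        score = self.linear(y.mean(dim=(2, 3)))

        # Decoder, fusing the stored skips from deepest to shallowest.
        for j, up in enumerate(self.up_blocks):
            x = up(x, skips[len(self.up_blocks) - 1 - j])

        return score, self.final(x)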
Example #4
    def __init__(self, num_classes, out_channels, use_embedding=False,
                 embedding_dropout=0.3):
        super().__init__()

        self.num_classes = num_classes
        self.out_channels = out_channels

        if use_embedding:
            # Class indices -> dense embedding -> 1x1 conv.
            self.embed = nn.Sequential(
                nn.Embedding(num_classes, out_channels, max_norm=1.0),
                nn.Dropout(embedding_dropout)
            )
            self.conv = conv(out_channels, out_channels, kernel_size=1)
        else:
            # One-hot (or soft) class maps go straight through a 1x1 conv.
            self.embed = None
            self.conv = conv(num_classes, out_channels, kernel_size=1)

        self.activation = activation()
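
An assumed forward for this inject layer (the input convention is a guess from the channel counts):

    def forward(self, y):
        if self.embed is not None:
            # `y` holds class indices; result is B x out_channels x 1 x 1,
            # which broadcasts against spatial feature maps.
            x = self.embed(y)[:, :, None, None]
        else:
            # `y` is a one-hot (or soft) class map: B x num_classes x H x W.
            x = y
        return self.activation(self.conv(x))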
Example #5
    def __init__(self, latent_size, block_channels, extract_dim, attn=False):
        super().__init__()

        channels = [latent_size] + block_channels

        self.blocks = nn.ModuleList()
        self.extract_layers = nn.ModuleList()
        # One generator block and one extraction head per scale.
        for i, (in_channels,
                out_channels) in enumerate(zip(channels[:-1], channels[1:])):
            self.blocks.append(
                GeneratorBlockResidual(in_channels, out_channels,
                                       is_initial=i == 0, attn=attn))
            self.extract_layers.append(
                conv(out_channels, extract_dim, kernel_size=1))
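
A sketch of the implied multi-scale forward (assumed, MSG-GAN style): every block's output is tapped through its extraction head:

    def forward(self, z):
        outputs = []
        x = z
        for block, extract in zip(self.blocks, self.extract_layers):
            x = block(x)
            outputs.append(extract(x))  # one extract_dim map per scale
        return outputs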
Example #6
    def __init__(self, num_input_channels, block_channels, extract_dim):
        super().__init__()

        channels = [num_input_channels] + block_channels

        self.blocks = nn.ModuleList()
        self.extract_layers = nn.ModuleList()

        for i, (in_channels,
                out_channels) in enumerate(zip(channels[:-1], channels[1:])):
            self.blocks.append(DiscriminatorBlock(in_channels, out_channels))
            self.extract_layers.append(
                conv(out_channels, extract_dim, kernel_size=1))
Example #7
    def __init__(self, x1_channels, x2_channels, out_channels):
        super().__init__()

        # 2x upsampling of the deeper feature map.
        self.up = nn.Sequential(
            conv_transpose(x1_channels, out_channels, kernel_size=4,
                           stride=2, padding=1),
            activation())
        # Fuse with the skip connection: channel concat -> 1x1 conv.
        self.merge = nn.Sequential(
            conv(out_channels + x2_channels, out_channels, kernel_size=1),
            activation())
        self.res_block = nn.Sequential(ResBlock(out_channels),
                                       ResBlock(out_channels))
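
The channel arithmetic pins down the forward wiring; a sketch (assumed):

    def forward(self, x1, x2):
        # x1: deeper, lower-resolution features; x2: the skip connection.
        x1 = self.up(x1)
        x = self.merge(torch.cat([x1, x2], dim=1))
        return self.res_block(x)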
Example #8
    def __init__(self, latent_size, block_channels, out_dim, attn_at=None):
        super().__init__()

        channels = [latent_size] + block_channels

        # SelfAttention is sized for channels[attn_at], i.e. the feature map
        # entering block attn_at.
        if attn_at is not None:
            self.attn = SelfAttention(channels[attn_at])
            self.attn_at = attn_at
        else:
            self.attn = None
            self.attn_at = None

        self.blocks = nn.ModuleList()
        for i, (in_channels,
                out_channels) in enumerate(zip(channels[:-1], channels[1:])):
            self.blocks.append(
                GeneratorBlock(in_channels, out_channels, is_initial=i == 0))

        self.final = conv(channels[-1], out_dim, kernel_size=1)
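
An assumed forward: attention is applied where the channel count matches channels[attn_at], i.e. just before block attn_at:

    def forward(self, z):
        x = z
        for i, block in enumerate(self.blocks):
            if self.attn is not None and i == self.attn_at:
                x = self.attn(x)
            x = block(x)
        return self.final(x)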
Example #9
    def __init__(self, block_channels, num_input_classes,
                 use_minibatch_std_dev=True, use_embeddings=False,
                 attn=False):
        super().__init__()

        # Repeat the last width so the final block keeps its channel count.
        channels = block_channels + [block_channels[-1]]

        num_blocks = len(block_channels)

        self.blocks = nn.ModuleList()
        self.inject_layers = nn.ModuleList()
        for i, (in_channels,
                out_channels) in enumerate(zip(channels[:-1], channels[1:])):
            # Class-conditional injection in front of every block.
            self.inject_layers.append(
                InjectLayer(num_input_classes, in_channels,
                            use_embedding=use_embeddings))

            if i + 1 == num_blocks:
                # Final block, optionally preceded by minibatch std-dev,
                # which appends one statistics channel.
                layers = []
                extra_ch = 0
                if use_minibatch_std_dev:
                    layers.append(MinibatchStdDev())
                    extra_ch = 1
                layers.append(
                    DiscriminatorBlockResidual(in_channels + extra_ch,
                                               out_channels, is_final=True,
                                               attn=attn))
                self.blocks.append(nn.Sequential(*layers))
            else:
                self.blocks.append(
                    DiscriminatorBlockResidual(in_channels, out_channels,
                                               is_final=False, attn=attn))

        self.final = conv(channels[-1], 1, kernel_size=1)
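
A plausible forward (assumed; how the InjectLayer output is fused is a guess, written as addition since the channel counts match and a 1x1-spatial embedding would broadcast):

    def forward(self, x, labels):
        for inject, block in zip(self.inject_layers, self.blocks):
            x = x + inject(labels)  # fuse class information into features
            x = block(x)
        return self.final(x)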