Example 1: Conv3x3Drop, two spectral-normalized 3x3 conv + ReLU stages
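All of the snippets in this listing are fragments (an __init__ body or a helper function) lifted from larger PyTorch model files, so their imports are never shown. As an assumption based on the calls they make, each one relies on roughly the following:

# Assumed imports, not present in the original fragments.
import functools

import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm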
    def __init__(self, in_feat, out_feat):
        super(Conv3x3Drop, self).__init__()

        self.conv1 = nn.Sequential(spectral_norm(nn.Conv2d(in_feat, out_feat,
                                             kernel_size=3,
                                             stride=1,
                                             padding=1)),
                                   nn.ReLU())

        self.conv2 = nn.Sequential(spectral_norm(nn.Conv2d(out_feat, out_feat,
                                             kernel_size=3,
                                             stride=1,
                                             padding=1)),
                                   nn.ReLU())
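A minimal smoke test for this block, assuming the imports above (forward() is not part of the fragment, so the two stages are applied directly):

block = Conv3x3Drop(in_feat=3, out_feat=64)
x = torch.randn(1, 3, 32, 32)
y = block.conv2(block.conv1(x))
print(y.shape)  # torch.Size([1, 64, 32, 32]); 3x3, stride 1, padding 1 preserves H and W
# spectral_norm reparametrizes the weight and registers power-iteration buffers:
print(hasattr(block.conv1[0], 'weight_orig'))  # True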
Example 2: NLayerDiscriminator, a PatchGAN discriminator with spectral-normalized convolutions
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        attention = False
        kw = 4
        padw = 1
        sequence = [
            spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)),
            nn.LeakyReLU(0.2, True)
        ]

        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias)),
                nn.LeakyReLU(0.2, True)
            ]
        if attention:
            sequence += [Self_Attn(ndf * nf_mult, 'relu')]  # must be a list; a bare nn.Module is not iterable

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw, bias=use_bias)),
            nn.LeakyReLU(0.2, True)
        ]

        if attention:
            sequence += [Self_Attn(ndf * nf_mult, 'relu')]  # must be a list, as above

        sequence += [spectral_norm(nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw))]

        if use_sigmoid:
            sequence += [nn.Sigmoid()]

        self.model = nn.Sequential(*sequence)
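A hedged shape check (self.model is applied directly, since forward() is not shown): with the default n_layers=3 this is the usual 70x70 PatchGAN, mapping a 256x256 input to a 30x30 grid of patch logits.

netD = NLayerDiscriminator(input_nc=3)
print(netD.model(torch.randn(1, 3, 256, 256)).shape)  # torch.Size([1, 1, 30, 30])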
Example 3: UpSample, a 2x upsampling block with a spectral-normalized transposed convolution
    def __init__(self, in_feat, out_feat):
        super(UpSample, self).__init__()

        self.up = nn.Upsample(scale_factor=2, mode='nearest')

        self.deconv = spectral_norm(nn.ConvTranspose2d(in_feat,
                                         out_feat,
                                         kernel_size=2,
                                         stride=2))
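A kernel-size-2, stride-2 transposed convolution doubles H and W exactly; a quick check under the same assumptions (the nearest-neighbour self.up is defined but only self.deconv is exercised here):

up = UpSample(in_feat=128, out_feat=64)
print(up.deconv(torch.randn(1, 128, 16, 16)).shape)  # torch.Size([1, 64, 32, 32])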
Example 4: KUnetGenerator, a U-Net-style generator built from Conv3x3 and UpConcat blocks
    def __init__(self, num_channels=1, num_classes=1):
        super(KUnetGenerator, self).__init__()
        num_feat = [64, 128, 256, 512, 1024]

        self.down1 = nn.Sequential(Conv3x3(num_channels, num_feat[0]))

        self.down2 = nn.Sequential(nn.MaxPool2d(kernel_size=2),
                                   Conv3x3(num_feat[0], num_feat[1]))

        self.down3 = nn.Sequential(nn.MaxPool2d(kernel_size=2),
                                   Conv3x3(num_feat[1], num_feat[2]))

        self.down4 = nn.Sequential(nn.MaxPool2d(kernel_size=2),
                                   Conv3x3(num_feat[2], num_feat[3]))

        # self.bottom = nn.Sequential(nn.MaxPool2d(kernel_size=2),
        #                             Conv3x3(num_feat[3], num_feat[4]))
        self.bottom = nn.Sequential(spectral_norm(nn.Conv2d(num_feat[3], num_feat[3],
                                             kernel_size=3,
                                             stride=1,
                                             padding=1)),
                                   nn.ReLU())

        self.up1 = UpConcat(num_feat[3], num_feat[3])
        # self.upconv1 = Conv3x3(num_feat[4], num_feat[3])
        self.upconv1 = nn.Sequential(spectral_norm(nn.Conv2d(num_feat[3], num_feat[3],
                                              kernel_size=3,
                                              stride=1,
                                              padding=1)),
                                    nn.ReLU())

        self.up2 = UpConcat(num_feat[3], num_feat[2])
        self.upconv2 = Conv3x3(num_feat[3], num_feat[2])

        self.up3 = UpConcat(num_feat[2], num_feat[1])
        self.upconv3 = Conv3x3(num_feat[2], num_feat[1])

        self.up4 = UpConcat(num_feat[1], num_feat[0])
        self.upconv4 = Conv3x3(num_feat[1], num_feat[0])

        self.final = nn.Sequential(spectral_norm(nn.Conv2d(num_feat[0],
                                             num_classes,
                                             kernel_size=1)),
                                   nn.Tanh())
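Conv3x3 and UpConcat are companion modules from the same file: UpConcat appears below as Example 8, and Conv3x3 presumably mirrors Example 1's Conv3x3Drop (two spectral-normalized 3x3 conv + ReLU stages). That is an assumption, since its definition is not included here.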
Example 5: PixelDiscriminator, a 1x1-convolution (per-pixel) discriminator
    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
        super(PixelDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        self.net = [
            spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0)),
            nn.LeakyReLU(0.2, True),
            spectral_norm(nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias)),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            spectral_norm(nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias))]

        if use_sigmoid:
            self.net.append(nn.Sigmoid())

        self.net = nn.Sequential(*self.net)
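Every convolution here is 1x1 with stride 1, so the output keeps the input's spatial size: one real/fake logit per pixel. A hedged check, applying self.net directly:

netD = PixelDiscriminator(input_nc=3)
print(netD.net(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 1, 64, 64])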
Example 6: build_conv_block, the convolutional path of a ResNet block, optionally spectral-normalized
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, spec_norm):
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        if spec_norm:
            conv_block += [spectral_norm(nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)),
                           norm_layer(dim),
                           nn.ReLU(True)]
        else:
            conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                           norm_layer(dim),
                           nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        if spec_norm:
            conv_block += [spectral_norm(nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)),
                           norm_layer(dim)]
        else:
            conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                           norm_layer(dim)]

        return nn.Sequential(*conv_block)
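The enclosing ResnetBlock's forward is not part of the fragment; in the pix2pix/CycleGAN code this resembles, it is conventionally the residual sum:

    def forward(self, x):
        # Assumed forward of the enclosing ResnetBlock: identity plus conv path.
        return x + self.conv_block(x)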
Example 7: BDCLSTM, a bidirectional convolutional LSTM head with a spectral-normalized output convolution
    def __init__(self, input_channels=64, hidden_channels=[32],
                 kernel_size=5, bias=True, num_classes=1):

        super(BDCLSTM, self).__init__()
        self.forward_net = CLSTM(
            input_channels, hidden_channels, kernel_size, bias)
        self.reverse_net = CLSTM(
            input_channels, hidden_channels, kernel_size, bias)
        self.pad = nn.ReflectionPad2d(3)
        self.conv = spectral_norm(nn.Conv2d(
            2 * hidden_channels[-1], num_classes, kernel_size=7))
        # self.soft = nn.Softmax2d()
        self.soft = nn.Tanh()
Example 8: UpConcat, an upsampling block used by the U-Net decoder
    def __init__(self, in_feat, out_feat):
        super(UpConcat, self).__init__()

        self.up = nn.UpsamplingBilinear2d(scale_factor=2)

        # self.deconv = nn.ConvTranspose2d(in_feat, out_feat,
        #                                  kernel_size=3,
        #                                  stride=1,
        #                                  dilation=1)

        self.deconv = spectral_norm(nn.ConvTranspose2d(in_feat,
                                         out_feat,
                                         kernel_size=2,
                                         stride=2))
Example 9: conv_trans_block, a strided transposed-convolution block that doubles spatial resolution
def conv_trans_block(in_dim,out_dim,act_fn,spec_norm=True):
    if spec_norm:
        model = nn.Sequential(
            spectral_norm(nn.ConvTranspose2d(in_dim,out_dim, kernel_size=3, stride=2, padding=1,output_padding=1)),
            # nn.BatchNorm2d(out_dim),
            act_fn,
        )
    else:
        model = nn.Sequential(
            nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.BatchNorm2d(out_dim),
            act_fn,
        )
    return model
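With kernel 3, stride 2, padding 1 and output_padding 1, the transposed convolution exactly doubles spatial size ((H-1)*2 - 2 + 3 + 1 = 2H); note that the spectral-norm branch drops BatchNorm, left commented out. A quick check:

up = conv_trans_block(64, 32, nn.ReLU())
print(up(torch.randn(1, 64, 16, 16)).shape)  # torch.Size([1, 32, 32, 32])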
Example 10: conv_block_3, three stacked convolution blocks
def conv_block_3(in_dim,out_dim,act_fn,spec_norm=True):
    if spec_norm:
        model = nn.Sequential(
            conv_block(in_dim,out_dim,act_fn,spec_norm),
            conv_block(out_dim,out_dim,act_fn,spec_norm),
            spectral_norm(nn.Conv2d(out_dim,out_dim, kernel_size=3, stride=1, padding=1)),
            # nn.BatchNorm2d(out_dim),
        )
    else:
        model = nn.Sequential(
            conv_block(in_dim, out_dim, act_fn, spec_norm=False),
            conv_block(out_dim, out_dim, act_fn, spec_norm=False),
            nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_dim),
        )
    return model
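conv_block itself is not part of the fragment; judging from the pattern in this file, it is presumably a single 3x3 same-padding convolution plus activation, with BatchNorm only in the non-spectral branch. A sketch under that assumption:

def conv_block(in_dim, out_dim, act_fn, spec_norm=True):
    # Hypothetical reconstruction of the helper used by conv_block_3 above.
    if spec_norm:
        return nn.Sequential(
            spectral_norm(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=1, padding=1)),
            act_fn,
        )
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_dim),
        act_fn,
    )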
Example 11: FusionGenerator, a FusionNet-style encoder-decoder generator
    def __init__(self, input_nc, output_nc, ngf=64, num_downs=4, spec_norm=True):
        super(FusionGenerator, self).__init__()
        self.in_dim = input_nc
        self.out_dim = ngf
        self.final_out_dim = output_nc
        act_fn = nn.LeakyReLU(0.2, inplace=True)
        act_fn_2 = nn.ReLU()

        print("\n------Initiating FusionNet------\n")
        print "%d fusion blocks, %d filters \n" %(num_downs, self.out_dim * (2 ** num_downs))

        # encoder

        self.down_1 = Conv_residual_conv(self.in_dim, self.out_dim, act_fn, spec_norm)
        self.pool_1 = maxpool()
        self.down_2 = Conv_residual_conv(self.out_dim, self.out_dim * 2, act_fn, spec_norm)
        self.pool_2 = maxpool()
        self.down_3 = Conv_residual_conv(self.out_dim * 2, self.out_dim * 4, act_fn, spec_norm)
        self.pool_3 = maxpool()
        self.down_4 = Conv_residual_conv(self.out_dim * 4, self.out_dim * 8, act_fn, spec_norm)
        self.pool_4 = maxpool()

        # bridge

        self.bridge = Conv_residual_conv(self.out_dim * 8, self.out_dim * 16, act_fn, spec_norm)

        # decoder

        self.deconv_1 = conv_trans_block(self.out_dim * 16, self.out_dim * 8, act_fn_2, spec_norm)
        self.up_1 = Conv_residual_conv(self.out_dim * 8, self.out_dim * 8, act_fn_2, spec_norm)
        self.deconv_2 = conv_trans_block(self.out_dim * 8, self.out_dim * 4, act_fn_2, spec_norm)
        self.up_2 = Conv_residual_conv(self.out_dim * 4, self.out_dim * 4, act_fn_2, spec_norm)
        self.deconv_3 = conv_trans_block(self.out_dim * 4, self.out_dim * 2, act_fn_2, spec_norm)
        self.up_3 = Conv_residual_conv(self.out_dim * 2, self.out_dim * 2, act_fn_2, spec_norm)
        self.deconv_4 = conv_trans_block(self.out_dim * 2, self.out_dim, act_fn_2, spec_norm)
        self.up_4 = Conv_residual_conv(self.out_dim, self.out_dim, act_fn_2, spec_norm)

        # output

        if spec_norm:
            self.out = spectral_norm(nn.Conv2d(self.out_dim, self.final_out_dim, kernel_size=3, stride=1, padding=1))
        else:
            self.out = nn.Conv2d(self.out_dim, self.final_out_dim, kernel_size=3, stride=1, padding=1)

        self.out_2 = nn.Tanh()
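maxpool() and Conv_residual_conv are helpers defined elsewhere in the same file; presumably maxpool() is just nn.MaxPool2d(kernel_size=2, stride=2, padding=0) and Conv_residual_conv is a residual unit built from the conv blocks of Examples 9 and 10. Both are assumptions, since their definitions are not included here.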
Example 12: CLSTMCell, a convolutional LSTM cell whose gate convolution is spectral-normalized
    def __init__(self, input_channels, hidden_channels,
                 kernel_size, bias=True):
        super(CLSTMCell, self).__init__()

        assert hidden_channels % 2 == 0

        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.bias = bias
        self.kernel_size = kernel_size
        self.num_features = 4  # one convolution produces all four LSTM gates (i, f, o, g)

        self.padding = (kernel_size - 1) // 2
        self.conv = nn.Sequential(spectral_norm(nn.Conv2d(self.input_channels + self.hidden_channels,
                              self.num_features * self.hidden_channels,
                              self.kernel_size,
                              1,
                              self.padding)),
                              nn.Dropout(0.1))
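The convolution emits num_features * hidden_channels = 4 * hidden_channels maps because a ConvLSTM computes all four gates in one pass. The cell's forward is not in the fragment; a conventional sketch of what it would do:

# Conventional ConvLSTM update (assumed, not from the fragment):
#     combined = torch.cat([x, h], dim=1)
#     i, f, o, g = torch.split(self.conv(combined), self.hidden_channels, dim=1)
#     c = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
#     h = torch.sigmoid(o) * torch.tanh(c)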
Example 13: UnetSkipConnectionBlock, a recursive U-Net block with optional spectral normalization
    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False, spec_norm=True, return_feature=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        self.return_feature = return_feature
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        # check if GAN or simple U-Net
        if not spec_norm:
            print('spectral norm disabled')
            downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                                 stride=2, padding=1, bias=use_bias)
            downrelu = nn.LeakyReLU(0.2, True)
            downnorm = norm_layer(inner_nc)
            uprelu = nn.ReLU(True)
            upnorm = norm_layer(outer_nc)

            if outermost:
                upconv = nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                            kernel_size=4, stride=2,
                                            padding=1)
                conv = nn.Conv2d(inner_nc, outer_nc, kernel_size=1)
                down = [downconv]
                up = [uprelu, upconv, conv, nn.Tanh()]
                model = down + [submodule] + up
            elif innermost:
                upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, bias=use_bias)
                down = [downrelu, downconv]
                up = [uprelu, upconv, upnorm]
                model = down + up
            else:
                upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, bias=use_bias)
                down = [downrelu, downconv, downnorm]
                up = [uprelu, upconv, upnorm]

                if use_dropout:
                    model = down + [submodule] + up + [nn.Dropout(0.5)]
                else:
                    model = down + [submodule] + up
        else:
            downconv = spectral_norm(nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                                               stride=2, padding=1, bias=use_bias))
            downrelu = nn.LeakyReLU(0.2, True)
            downnorm = norm_layer(inner_nc)
            uprelu = nn.ReLU(True)
            upnorm = norm_layer(outer_nc)

            if outermost:
                upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, inner_nc,
                                                          kernel_size=4, stride=2,
                                                          padding=1))
                down = [downconv]
                conv = spectral_norm(nn.Conv2d(inner_nc, outer_nc, kernel_size=1))
                up = [uprelu, upconv, conv, nn.Tanh()]
                model = down + [submodule] + up
            elif innermost:
                upconv = spectral_norm(nn.ConvTranspose2d(inner_nc, outer_nc,
                                                          kernel_size=4, stride=2,
                                                          padding=1, bias=use_bias))
                down = [downrelu, downconv]
                up = [uprelu, upconv, upnorm]
                model = down + up
            else:
                upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                                          kernel_size=4, stride=2,
                                                          padding=1, bias=use_bias))
                down = [downrelu, downconv, downnorm]
                up = [uprelu, upconv, upnorm]

                if use_dropout:
                    model = down + [submodule] + up + [nn.Dropout(0.5)]
                else:
                    model = down + [submodule] + up
        self.model = nn.Sequential(*model)
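A hedged usage sketch: pix2pix-style U-Nets assemble this block recursively, innermost level first, with each new level wrapping the previous one as its submodule:

ngf = 64
blk = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
blk = UnetSkipConnectionBlock(ngf * 4, ngf * 8, submodule=blk)
blk = UnetSkipConnectionBlock(ngf * 2, ngf * 4, submodule=blk)
blk = UnetSkipConnectionBlock(ngf, ngf * 2, submodule=blk)
unet = UnetSkipConnectionBlock(3, ngf, input_nc=3, submodule=blk, outermost=True)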
Example 14: ResnetGenerator, a ResNet-based generator with optional spectral normalization
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', spec_norm=True, return_feature=False):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.return_feature = return_feature
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        if spec_norm:
            model = [nn.ReflectionPad2d(3),
                     spectral_norm(nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
                               bias=use_bias)),
                     norm_layer(ngf),
                     nn.ReLU(True)]
        else:
            model = [nn.ReflectionPad2d(3),
                     nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
                                             bias=use_bias),
                     norm_layer(ngf),
                     nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            if spec_norm:
                model += [spectral_norm(nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1, bias=use_bias)),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
            else:
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]

        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, spec_norm=spec_norm)]

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            if spec_norm:
                model += [spectral_norm(nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=3, stride=2,
                                             padding=1, output_padding=1,
                                             bias=use_bias)),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
            else:
                model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=3, stride=2,
                                             padding=1, output_padding=1,
                                             bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]

        if self.return_feature:
            self.model = nn.Sequential(*model)
        else:
            model += [nn.ReflectionPad2d(3)]
            model += [spectral_norm(nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0))]
            model += [nn.Tanh()]
            self.model = nn.Sequential(*model)
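A hedged end-to-end shape check, assuming ResnetBlock (whose conv path is built in Example 6) is defined alongside and that forward() simply applies self.model: the generator is fully convolutional and preserves the input resolution.

netG = ResnetGenerator(input_nc=3, output_nc=3, n_blocks=6)
print(netG.model(torch.randn(1, 3, 256, 256)).shape)  # torch.Size([1, 3, 256, 256])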