Code example #1
    def __init__(self, nlatent, input_nc, nef, norm_layer):
        super(Latent_Encoder, self).__init__()
        
        use_bias = False
        kw = 3

        sequence = [
            nn.Conv2d(input_nc, nef, kernel_size=kw, stride=2, padding=1, bias=True),
            nn.ReLU(True),

            nn.Conv2d(nef, 2*nef, kernel_size=kw, stride=2, padding=1, bias=use_bias),
            norm_layer(2*nef),
            nn.ReLU(True),

            nn.Conv2d(2*nef, 4*nef, kernel_size=kw, stride=2, padding=1, bias=use_bias),
            norm_layer(4*nef),
            nn.ReLU(True),

            nn.Conv2d(4*nef, 8*nef, kernel_size=kw, stride=2, padding=1, bias=use_bias),
            norm_layer(8*nef),
            nn.ReLU(True),

            nn.Conv2d(8*nef, 8*nef, kernel_size=4, stride=1, padding=0, bias=use_bias),
            norm_layer(8*nef),
            nn.ReLU(True),

        ]

        self.conv_modules = nn.Sequential(*sequence)

        # make sure we return mu and logvar for latent code normal distribution
        self.enc_mu = nn.Conv2d(8*nef, nlatent, kernel_size=1, stride=1, padding=0, bias=True)
        self.enc_logvar = nn.Conv2d(8*nef, nlatent, kernel_size=1, stride=1, padding=0, bias=True)
        utils.init_weights(self)
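
The snippet shows only the constructor. A minimal forward pass consistent with it, using the standard VAE reparameterization step (an assumption; the original forward() is not shown), could look like:

    def forward(self, x):
        h = self.conv_modules(x)              # (N, 8*nef, 1, 1) for 64x64 inputs
        mu = self.enc_mu(h)                   # mean of the latent Gaussian
        logvar = self.enc_logvar(h)           # log-variance of the latent Gaussian
        std = torch.exp(0.5 * logvar)
        z = mu + std * torch.randn_like(std)  # reparameterization trick
        return z, mu, logvar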
Code example #2
    def __init__(self, input_nc, ndf):
        super(Discriminator, self).__init__()

        norm_layer = InstanceNorm

        # A bunch of convolutions one after another
        model = [
            nn.Conv2d(input_nc, ndf, 4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True)
        ]

        model += [
            nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True)
        ]

        model += [
            nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1),
            norm_layer(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True)
        ]

        # Return 1 channel prediction map
        model += [nn.Conv2d(ndf * 4, 1, 4, padding=1)]

        self.model = nn.Sequential(*model)
        utils.init_weights(self)
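
Each stride-2 convolution halves the spatial resolution, and the final 4x4 convolution (stride 1, padding 1) trims one pixel, so an HxW input yields a (H/8 - 1) x (W/8 - 1) patch map. A self-contained shape check, substituting nn.InstanceNorm2d for the project's InstanceNorm (an assumption):

    import torch
    import torch.nn as nn

    # same layer stack as above, with input_nc=3 and ndf=64
    d = nn.Sequential(
        nn.Conv2d(3, 64, 4, stride=2, padding=1), nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, stride=2, padding=1), nn.InstanceNorm2d(128), nn.LeakyReLU(0.2),
        nn.Conv2d(128, 256, 4, stride=2, padding=1), nn.InstanceNorm2d(256), nn.LeakyReLU(0.2),
        nn.Conv2d(256, 1, 4, padding=1))
    print(d(torch.randn(1, 3, 192, 192)).shape)  # torch.Size([1, 1, 23, 23])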
Code example #3
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf,
                 use_dropout=False,
                 n_res_blocks=6):
        super(Generator, self).__init__()
        # Initial convolution block
        model = [
            nn.ReflectionPad2d(3),  # Reference from CycleGAN
            nn.Conv2d(input_nc, ngf, 7),
            InstanceNorm(ngf),
            nn.ReLU(inplace=True)
        ]

        # Downsampling
        in_features = ngf
        out_features = in_features * 2
        for _ in range(2):
            model += [
                nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                InstanceNorm(out_features),
                nn.ReLU(inplace=True)
            ]
            in_features = out_features
            out_features = in_features * 2

        # Residual blocks
        for _ in range(n_res_blocks):
            model += [ResBlock(in_features, use_dropout)]

        # Upsampling
        out_features = in_features // 2
        for _ in range(2):
            model += [
                nn.ConvTranspose2d(in_features,
                                   out_features,
                                   3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1),
                InstanceNorm(out_features),
                nn.ReLU(inplace=True)
            ]
            in_features = out_features
            out_features = in_features // 2

        # Output layer
        model += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_features, output_nc, 7),  # in_features == ngf here; the original hard-coded 64 breaks for ngf != 64
            nn.Tanh()
        ]

        self.model = nn.Sequential(*model)
        utils.init_weights(self)
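
For reference, the shapes flow as follows (illustrative, assuming ngf=64 and a 256x256 RGB input):

    # input          (N, 3, 256, 256)
    # 7x7 stem       (N, 64, 256, 256)
    # downsampling   (N, 128, 128, 128) -> (N, 256, 64, 64)
    # res blocks     (N, 256, 64, 64), shape preserved
    # upsampling     (N, 128, 128, 128) -> (N, 64, 256, 256)
    # 7x7 output     (N, 3, 256, 256), Tanh maps values to [-1, 1]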
Code example #4
    def __init__(self, in_ch, out_ch, mid_ch, layers, kernel_size, bias):
        super(Sigma_mu_Net, self).__init__()
        #
        self.layers = layers
        self.relu = nn.ReLU(inplace=True)
        #
        self.lyr = []
        self.lyr.append(nn.Conv2d(in_ch, mid_ch, kernel_size=1, bias=bias))
        self.lyr.append(nn.ReLU(inplace=True))
        for l in range(layers - 2):
            self.lyr.append(nn.Conv2d(mid_ch, mid_ch, kernel_size=1, bias=bias))
            self.lyr.append(nn.ReLU(inplace=True))
        self.lyr.append(nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=bias))
        self.conv = nn.Sequential(*self.lyr)
        init_weights(self.conv)
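
Note that the kernel_size argument is accepted but never used: every convolution is hard-coded to kernel_size=1, so Sigma_mu_Net acts as a per-pixel MLP over channels and preserves the spatial dimensions of its input.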
Code example #5
File: backbone_net.py, Project: liuguoyou/DBSN
    def __init__(self, in_ch, out_ch, mid_ch, blindspot_conv_type,
                 blindspot_conv_bias, br1_blindspot_conv_ks, br1_block_num,
                 br2_blindspot_conv_ks, br2_block_num, activate_fun):
        super(DBSN_Model, self).__init__()
        #
        if activate_fun == 'Relu':
            # self.relu = nn.ReLU(inplace=True)
            self.relu = partial(nn.ReLU, inplace=True)
        elif activate_fun == 'LeakyRelu':
            # self.relu = nn.LeakyReLU(0.1)
            self.relu = partial(nn.LeakyReLU, negative_slope=0.1)
        else:
            raise ValueError('activate_fun [%s] is not found.' %
                             (activate_fun))
        # Head of DBSN
        lyr = []
        lyr.append(
            nn.Conv2d(in_ch, mid_ch, kernel_size=1, bias=blindspot_conv_bias))
        lyr.append(self.relu())
        self.dbsn_head = nn.Sequential(*lyr)
        init_weights(self.dbsn_head)

        self.br1 = DBSN_branch(mid_ch, blindspot_conv_type,
                               blindspot_conv_bias, br1_blindspot_conv_ks,
                               br1_block_num, activate_fun)
        self.br2 = DBSN_branch(mid_ch, blindspot_conv_type,
                               blindspot_conv_bias, br2_blindspot_conv_ks,
                               br2_block_num, activate_fun)

        # Concat two branches
        self.concat = nn.Conv2d(mid_ch * 2,
                                mid_ch,
                                kernel_size=1,
                                bias=blindspot_conv_bias)
        self.concat.apply(weights_init_kaiming)
        # 1x1 convs
        lyr = []
        lyr.append(
            nn.Conv2d(mid_ch, mid_ch, kernel_size=1, bias=blindspot_conv_bias))
        lyr.append(self.relu())
        lyr.append(
            nn.Conv2d(mid_ch, mid_ch, kernel_size=1, bias=blindspot_conv_bias))
        lyr.append(self.relu())
        lyr.append(
            nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=blindspot_conv_bias))
        self.dbsn_tail = nn.Sequential(*lyr)
        init_weights(self.dbsn_tail)
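
A forward pass consistent with this wiring (a sketch inferred from the constructor; the original forward() is not part of the snippet):

    def forward(self, x):
        h = self.dbsn_head(x)
        # run both blind-spot branches in parallel and fuse them
        h = self.concat(torch.cat([self.br1(h), self.br2(h)], dim=1))
        return self.dbsn_tail(h)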
Code example #6
File: backbone_net.py, Project: liuguoyou/DBSN
    def __init__(self, inplanes, bs_conv_type, bs_conv_bias, bs_conv_ks,
                 block_num, activate_fun):
        super(DBSN_branch, self).__init__()
        #
        if activate_fun == 'Relu':
            # self.relu = nn.ReLU(inplace=True)
            self.relu = partial(nn.ReLU, inplace=True)
        elif activate_fun == 'LeakyRelu':
            # self.relu = nn.LeakyReLU(0.1)
            self.relu = partial(nn.LeakyReLU, negative_slope=0.1)
        else:
            raise ValueError('activate_fun [%s] is not found.' %
                             (activate_fun))
        #
        dilation_base = (bs_conv_ks + 1) // 2
        #
        lyr = []
        lyr.append(
            BlindSpotConv(inplanes,
                          inplanes,
                          bs_conv_ks,
                          stride=1,
                          dilation=1,
                          bias=bs_conv_bias,
                          conv_type=bs_conv_type))
        lyr.append(self.relu())
        lyr.append(
            nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bs_conv_bias))
        lyr.append(self.relu())
        lyr.append(
            nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bs_conv_bias))
        lyr.append(self.relu())
        #
        for i in range(block_num):
            lyr.append(
                Inception_block(inplanes,
                                kernel_size=3,
                                dilation=dilation_base,
                                bias=bs_conv_bias,
                                activate_fun=activate_fun))
        #
        lyr.append(
            nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bs_conv_bias))
        self.branch = nn.Sequential(*lyr)
        init_weights(self.branch)
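
dilation_base evaluates to 2 for a 3x3 blind-spot convolution and 3 for a 5x5 one. Passing this dilation to the Inception blocks that follow keeps their receptive fields from reaching back into the masked center pixel, which is what preserves the blind-spot property through the branch.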
Code example #7
    def __init__(self, conf, device, is_train=True):
        super(InterpolationGAN, self).__init__()
        self.conf = conf
        self.device = device

        # Create and initialize the generators
        self.G_S = Base.Stoch_Generator(
            conf['nlatent'], conf['T_nc'], conf['S_nc'], conf['ngf'],
            conf['use_dropout'], conf['n_res_blocks'])  # Generator that translates T to S
        self.G_T = Base.Stoch_Generator(
            conf['nlatent'], conf['S_nc'], conf['T_nc'], conf['ngf'],
            conf['use_dropout'], conf['n_res_blocks'])  # Generator that translates S to T

        # Create and initialize the domainess-encoded latent vector generator
        self.G_D = nn.Linear(1, conf['nlatent'])
        utils.init_weights(self.G_D)

        if is_train:
            # Create and initialize the discriminators
            self.D_S = Base.Discriminator(
                conf['S_nc'], conf['ndf'])  # Discriminator for domain S
            self.D_T = Base.Discriminator(
                conf['T_nc'], conf['ndf'])  # Discriminator for domain T

            # Create the criteria and optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.G_D.parameters(), self.G_S.parameters(),
                self.G_T.parameters()),
                                                lr=conf['lr'],
                                                betas=(conf['beta'], 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.D_S.parameters(), self.D_T.parameters()),
                                                lr=conf['lr'],
                                                betas=(conf['beta'], 0.999))
            self.criterionGAN = nn.MSELoss(reduction='mean')
            self.criterion_cycle = nn.L1Loss(reduction='mean')

            # Answer for discriminator
            self.ans_real = Variable(torch.ones(
                [self.conf['batch_size'], 1, 23, 23]),
                                     requires_grad=False).to(device)
            self.ans_fake = Variable(torch.zeros(
                [self.conf['batch_size'], 1, 23, 23]),
                                     requires_grad=False).to(device)
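
The 1x23x23 target maps match the output of the Discriminator from code example #2 for 192x192 inputs (three stride-2 convolutions: 192/8 = 24; the final 4x4 convolution trims one pixel: 24 - 1 = 23), which suggests the model is trained on 192x192 crops.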
Code example #8
    def __init__(self, in_ch, out_ch, mid_ch, layers, kernel_size, bias):
        super(Sigma_n_Net, self).__init__()
        #
        self.layers = layers
        self.relu = nn.ReLU(inplace=True)
        #
        if layers == 1:
            self.conv_final = nn.Conv2d(in_ch,
                                        out_ch,
                                        kernel_size,
                                        padding=(kernel_size - 1) // 2,
                                        bias=bias)
            nn.init.zeros_(self.conv_final.weight)
            if bias:  # .bias is None when bias=False
                nn.init.zeros_(self.conv_final.bias)
        else:
            self.lyr = []
            self.lyr.append(
                nn.Conv2d(in_ch,
                          mid_ch,
                          kernel_size,
                          padding=(kernel_size - 1) // 2,
                          bias=bias))
            self.lyr.append(nn.ReLU(inplace=True))
            for l in range(layers - 2):
                self.lyr.append(
                    nn.Conv2d(mid_ch,
                              mid_ch,
                              kernel_size,
                              padding=(kernel_size - 1) // 2,
                              bias=bias))
                self.lyr.append(nn.ReLU(inplace=True))
            self.conv = nn.Sequential(*self.lyr)
            init_weights(self.conv)
            #
            self.conv_final = nn.Conv2d(mid_ch,
                                        out_ch,
                                        kernel_size,
                                        padding=(kernel_size - 1) // 2,
                                        bias=bias)
            nn.init.zeros_(self.conv_final.weight)
            if bias:  # .bias is None when bias=False
                nn.init.zeros_(self.conv_final.bias)
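
Zero-initializing the final convolution makes the network output exactly zero at the start of training, so the predicted noise parameters begin at a neutral value and are learned from there, while the earlier layers still receive the usual init_weights initialization.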
Code example #9
    def __init__(self, nlatent, ndf):
        super(Latent_Discriminator, self).__init__()

        self.nlatent = nlatent

        use_bias = True  # note: unused below; nn.Linear defaults to bias=True
        sequence = [
            nn.Linear(nlatent, ndf),
            nn.BatchNorm1d(ndf),
            nn.LeakyReLU(0.2, True),
            nn.Linear(ndf, ndf),
            nn.BatchNorm1d(ndf),
            nn.LeakyReLU(0.2, True),
            nn.Linear(ndf, ndf),
            nn.BatchNorm1d(ndf),
            nn.LeakyReLU(0.2, True),
            nn.Linear(ndf, 1)
        ]

        self.model = nn.Sequential(*sequence)
        utils.init_weights(self)
Code example #10
    def __init__(self, nlatent, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=9):
        super(Stoch_Generator, self).__init__()
        norm_layer = CondInstanceNorm

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, stride=1, bias=True),
            norm_layer(ngf, nlatent),
            nn.ReLU(True),

            nn.Conv2d(ngf, 2*ngf, kernel_size=3, padding=1, stride=1, bias=True),
            norm_layer(2*ngf, nlatent),
            nn.ReLU(True),

            nn.Conv2d(2*ngf, 4*ngf, kernel_size=3, padding=1, stride=2, bias=True),
            norm_layer(4*ngf, nlatent),
            nn.ReLU(True)
        ]
        
        for i in range(n_blocks):
            model += [CINResnetBlock(x_dim=4*ngf, z_dim=nlatent, norm_layer=norm_layer,
                                     use_dropout=use_dropout, use_bias=True)]

        model += [
            nn.ConvTranspose2d(4*ngf, 2*ngf, kernel_size=3, stride=2, padding=1,
                               output_padding=1, bias=True),
            norm_layer(2*ngf, nlatent),
            nn.ReLU(True),

            nn.Conv2d(2*ngf, ngf, kernel_size=3, padding=1, stride=1, bias=True),
            norm_layer(ngf, nlatent),
            nn.ReLU(True),

            nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3),
            nn.Tanh()
        ]

        self.model = TwoInputSequential(*model)
        utils.init_weights(self)
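
A hedged usage sketch (CondInstanceNorm, CINResnetBlock, and TwoInputSequential come from the surrounding project; the latent shape and two-argument forward are assumptions):

    G = Stoch_Generator(nlatent=16, input_nc=3, output_nc=3, ngf=64)
    x = torch.randn(1, 3, 128, 128)  # source-domain image
    z = torch.randn(1, 16, 1, 1)     # latent code, e.g. from Latent_Encoder or G_D
    y = G(x, z)                      # TwoInputSequential presumably threads z into each conditional norm layer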
Code example #11
    def __init__(self, params, is_train=True):
        super(CycleGAN, self).__init__()
        self.params = params
        # Device setup
        if params.cuda:  # extend to multi-GPU later if it becomes possible
            self.device = torch.device('cuda:{}'.format(params.gpu_id))
        else:
            self.device = torch.device('cpu')

        # Create and initialize the generators
        self.G_S = Base.Generator(params.T_nc, params.S_nc, params.use_dropout,
                                  params.n_res_blocks)  # Generator that translates T to S
        self.G_T = Base.Generator(params.S_nc, params.T_nc, params.use_dropout,
                                  params.n_res_blocks)  # Generator that translates S to T
        if params.cuda:
            self.G_S.cuda()
            self.G_T.cuda()
        utils.init_weights(self.G_S)
        utils.init_weights(self.G_T)

        # Create and initialize the discriminators
        if is_train:
            self.D_S = Base.Discriminator(
                params.S_nc)  # Discriminator for domain S
            self.D_T = Base.Discriminator(
                params.T_nc)  # Discriminator for domain T
            if params.cuda:
                self.D_S.cuda()
                self.D_T.cuda()
            utils.init_weights(self.D_S)
            utils.init_weights(self.D_T)
            Tensor = torch.cuda.FloatTensor if params.cuda else torch.Tensor
            self.real = Variable(Tensor(params.batch_size).fill_(1.0),
                                 requires_grad=False)
            self.fake = Variable(Tensor(params.batch_size).fill_(0.0),
                                 requires_grad=False)

        # Store the names of the model components
        if is_train:
            self.model_names = ['G_S', 'G_T', 'D_S', 'D_T']
            self.loss_names = [
                'G_S', 'D_S', 'Cycle_S', 'Ident_S', 'G_T', 'D_T', 'Cycle_T',
                'Ident_T'
            ]
        else:
            self.model_names = ['G_S', 'G_T']
        # Undecided: whether to include visual names.

        # Create the losses and optimizers
        if is_train:
            assert (params.S_nc == params.T_nc)  # required for the identity loss
            # Buffers to save previously generated images
            self.save_fake_S = ImageBuffer(params.buf_size)
            self.save_fake_T = ImageBuffer(params.buf_size)
            # Losses
            self.criterion_GAN = nn.MSELoss()  # adversarial loss, CycleGAN section 3.1
            self.criterion_cycle = nn.L1Loss()  # cycle-consistency loss, CycleGAN section 3.2
            self.criterion_identity = nn.L1Loss()  # identity loss, CycleGAN section 5.2
            # Optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.G_S.parameters(), self.G_T.parameters()),
                                                lr=params.lr,
                                                betas=(params.beta, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.D_S.parameters(), self.D_T.parameters()),
                                                lr=params.lr,
                                                betas=(params.beta, 0.999))
Code example #12
File: backbone_net.py, Project: liuguoyou/DBSN
    def __init__(self, inplanes, kernel_size, dilation, bias, activate_fun):
        super(Inception_block, self).__init__()
        #
        if activate_fun == 'Relu':
            # self.relu = nn.ReLU(inplace=True)
            self.relu = partial(nn.ReLU, inplace=True)
        elif activate_fun == 'LeakyRelu':
            # self.relu = nn.LeakyReLU(0.1)
            self.relu = partial(nn.LeakyReLU, negative_slope=0.1)
        else:
            raise ValueError('activate_fun [%s] is not found.' %
                             (activate_fun))
        #
        pad_size = (kernel_size + (kernel_size - 1) * (dilation - 1) - 1) // 2
        # inception_br1 ----------------------------------------------
        lyr_br1 = []
        # 1x1 conv
        lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias))
        lyr_br1.append(self.relu())
        # # case1: two 3x3 dilated-conv
        # lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size, padding=pad_size, dilation=dilation, bias=bias))
        # lyr_br1.append(self.relu())
        # lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size, padding=pad_size, dilation=dilation, bias=bias))
        # lyr_br1.append(self.relu())
        # case2: one 5x5 dilated-conv
        tmp_kernel_size = 5
        tmp_pad_size = (tmp_kernel_size + (tmp_kernel_size - 1) *
                        (dilation - 1) - 1) // 2
        lyr_br1.append(
            nn.Conv2d(inplanes,
                      inplanes,
                      kernel_size=tmp_kernel_size,
                      padding=tmp_pad_size,
                      dilation=dilation,
                      bias=bias))
        lyr_br1.append(self.relu())
        self.inception_br1 = nn.Sequential(*lyr_br1)
        init_weights(self.inception_br1)
        #
        # inception_br2 ----------------------------------------------
        lyr_br2 = []
        # 1x1 conv
        lyr_br2.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias))
        lyr_br2.append(self.relu())
        # 3x3 dilated-conv
        lyr_br2.append(
            nn.Conv2d(inplanes,
                      inplanes,
                      kernel_size,
                      padding=pad_size,
                      dilation=dilation,
                      bias=bias))
        lyr_br2.append(self.relu())
        self.inception_br2 = nn.Sequential(*lyr_br2)
        init_weights(self.inception_br2)
        #
        # inception_br3 ----------------------------------------------
        lyr_br3 = []
        # 1x1 conv
        lyr_br3.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias))
        lyr_br3.append(self.relu())
        self.inception_br3 = nn.Sequential(*lyr_br3)
        init_weights(self.inception_br3)
        # Concat three inception branches
        self.concat = nn.Conv2d(inplanes * 3,
                                inplanes,
                                kernel_size=1,
                                bias=bias)
        self.concat.apply(weights_init_kaiming)
        # 1x1 convs
        lyr = []
        lyr.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias))
        lyr.append(self.relu())
        lyr.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias))
        lyr.append(self.relu())
        self.middle_1x1_convs = nn.Sequential(*lyr)
        init_weights(self.middle_1x1_convs)
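
A forward pass consistent with the constructor (a sketch; the snippet omits the original forward()):

    def forward(self, x):
        # run the three inception branches in parallel on the same input
        out = torch.cat([self.inception_br1(x),
                         self.inception_br2(x),
                         self.inception_br3(x)], dim=1)
        out = self.concat(out)  # fuse 3*inplanes channels back down to inplanes
        return self.middle_1x1_convs(out)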