Code example #1
File: gan_model.py Project: KarelZhang/CSDNet-CSDGAN
    def get_current_visuals(self):
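        # convert the cached input/output/target tensors and the edge output to numpy images for display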
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        real_B = util.tensor2im(self.real_B.data)
        edge = util.atten2im(self.edge_out.data)

        # replicate the single-channel gray map across 3 channels so it can be rendered as an RGB image
        self.output_A_I_3 = torch.cat([self.gray, self.gray, self.gray], 1)
        out_A_I = util.tensor2im(self.output_A_I_3.data)

        if self.opt.skip > 0:
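            # skip > 0: the generator also exposes latent maps, so include them in the visuals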
            latent_real_A = util.tensor2im(self.latent_real_A.data)
            latent_show = util.latent2im(self.latent_real_A.data)

            if self.opt.patchD:
                fake_patch = util.tensor2im(self.fake_patch.data)
                real_patch = util.tensor2im(self.real_patch.data)
                if self.opt.patch_vgg:
                    input_patch = util.tensor2im(self.input_patch.data)
                    if not self.opt.self_attention:
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                            ('latent_show', latent_show), ('real_B', real_B),
                                            ('real_patch', real_patch),
                                            ('fake_patch', fake_patch), ('input_patch', input_patch)])
                    else:
                        self_attention = util.atten2im(self.real_A_gray_o.data)
                        return OrderedDict(
                            [('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B), ('real_patch', real_patch),
                             ('fake_patch', fake_patch), ('input_patch', input_patch), ('input_gray', self_attention),
                             ('latent', latent_show), ('out_A_I', out_A_I), ('edge', edge)])
                else:
                    if not self.opt.self_attention:
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                            ('latent_show', latent_show), ('real_B', real_B),
                                            ('real_patch', real_patch),
                                            ('fake_patch', fake_patch)])
                    else:
                        self_attention = util.atten2im(self.real_A_gray.data)
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                            ('latent_show', latent_show), ('real_B', real_B),
                                            ('real_patch', real_patch),
                                            ('fake_patch', fake_patch), ('self_attention', self_attention)])
            else:
                if not self.opt.self_attention:
                    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                        ('latent_show', latent_show), ('real_B', real_B)])
                else:
                    self_attention = util.atten2im(self.real_A_gray_o.data)
                    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                                        ('latent_real_A', latent_real_A), ('latent_show', latent_show),
                                        ('self_attention', self_attention)])
        else:
            if not self.opt.self_attention:
                return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)])
            else:
                self_attention = util.atten2im(self.real_A_gray.data)
                return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                                    ('self_attention', self_attention)])
Code example #2
File: single_model.py Project: sfwang20/EnlightenGAN
    def predict(self):
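        # inference pass: wrap the cached inputs as Variables (volatile=True disables autograd in pre-0.4 PyTorch)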
        self.real_A = Variable(self.input_A, volatile=True)
        self.real_A_gray = Variable(self.input_A_gray, volatile=True)
        if self.opt.noise > 0:
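            # optionally perturb the input with Gaussian noise of std opt.noise / 255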
            self.noise = Variable(
                torch.cuda.FloatTensor(self.real_A.size()).normal_(
                    mean=0, std=self.opt.noise / 255.))
            self.real_A = self.real_A + self.noise
        if self.opt.input_linear:
            self.real_A = (self.real_A - torch.min(self.real_A)) / (
                torch.max(self.real_A) - torch.min(self.real_A))
        # print(np.transpose(self.real_A.data[0].cpu().float().numpy(),(1,2,0))[:2][:2][:])
        if self.opt.skip == 1:
            self.fake_B, self.latent_real_A = self.netG_A.forward(
                self.real_A, self.real_A_gray)
        else:
            self.fake_B = self.netG_A.forward(self.real_A, self.real_A_gray)
        # self.rec_A = self.netG_B.forward(self.fake_B)

        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        A_gray = util.atten2im(self.real_A_gray.data)
        # rec_A = util.tensor2im(self.rec_A.data)
        # if self.opt.skip == 1:
        #     latent_real_A = util.tensor2im(self.latent_real_A.data)
        #     latent_show = util.latent2im(self.latent_real_A.data)
        #     max_image = util.max2im(self.fake_B.data, self.latent_real_A.data)
        #     return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
        #                     ('latent_show', latent_show), ('max_image', max_image), ('A_gray', A_gray)])
        # else:
        #     return OrderedDict([('real_A', real_A), ('fake_B', fake_B)])
        # return OrderedDict([('fake_B', fake_B)])
        return OrderedDict([('real_A', real_A), ('fake_B', fake_B)])
Code example #3
    def get_current_visuals(self):
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        real_B = util.tensor2im(self.real_B.data)
        if self.opt.skip > 0:
            latent_real_A = util.tensor2im(self.latent_real_A.data)
            latent_show = util.latent2im(self.latent_real_A.data)
            if self.opt.patchD:
                fake_patch = util.tensor2im(self.fake_patch.data)
                real_patch = util.tensor2im(self.real_patch.data)
                if self.opt.patch_vgg:
                    input_patch = util.tensor2im(self.input_patch.data)
                    if not self.opt.self_attention:
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                                ('fake_patch', fake_patch), ('input_patch', input_patch)])
                    else:
                        self_attention = util.atten2im(self.real_A_gray.data)
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                                ('fake_patch', fake_patch), ('input_patch', input_patch), ('self_attention', self_attention)])
                else:
                    if not self.opt.self_attention:
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                                ('fake_patch', fake_patch)])
                    else:
                        self_attention = util.atten2im(self.real_A_gray.data)
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                                ('fake_patch', fake_patch), ('self_attention', self_attention)])
            else:
                if not self.opt.self_attention:
                    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                            ('latent_show', latent_show), ('real_B', real_B)])
                else:
                    self_attention = util.atten2im(self.real_A_gray.data)
                    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                            ('latent_real_A', latent_real_A), ('latent_show', latent_show),
                            ('self_attention', self_attention)])
        else:
            if not self.opt.self_attention:
                return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)])
            else:
                self_attention = util.atten2im(self.real_A_gray.data)
                return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                        ('self_attention', self_attention)])
Code example #4
File: orcgan_model.py Project: Lucious915/PCECGAN
    def predict(self):
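        # inference pass: wrap the stored image, its gray/attention map and the U/V components, then run netG_A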

        self.real_A = Variable(self.input_A)
        self.real_A_gray = Variable(self.input_A_gray)
        self.real_A_U = Variable(self.input_A_U)
        self.real_A_V = Variable(self.input_A_V)

        self.Thr = random.uniform(0.01, 0.05)  # random threshold (kept within the 0~1 range)

        if self.opt.noise > 0:
            self.noise = Variable(
                torch.cuda.FloatTensor(self.real_A.size()).normal_(
                    mean=0, std=self.opt.noise / 255.))
            self.real_A = self.real_A + self.noise
        if self.opt.input_linear:
            self.real_A = (self.real_A - torch.min(self.real_A)) / (
                torch.max(self.real_A) - torch.min(self.real_A))
        # print(np.transpose(self.real_A.data[0].cpu().float().numpy(),(1,2,0))[:2][:2][:])
        if self.opt.skip == 1:
            self.fake_B, self.latent_real_A, gray_out = self.netG_A.forward(
                self.real_A, self.real_A_gray, self.real_A_U, self.real_A_V,
                self.Thr)
        else:
            self.fake_B = self.netG_A.forward(self.real_A, self.real_A_gray,
                                              self.real_A_constraint)
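        # note: gray_out and self.latent_real_A are only produced by the skip == 1 branch above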
        # self.rec_A = self.netG_B.forward(self.fake_B)

        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        A_gray = util.atten2im(gray_out.data)
        latent_V, latent_H, latent_S = util.latent_dff_2im(
            self.latent_real_A.data)
        # rec_A = util.tensor2im(self.rec_A.data)
        # if self.opt.skip == 1:
        #     latent_real_A = util.tensor2im(self.latent_real_A.data)
        #     latent_show = util.latent2im(self.latent_real_A.data)
        #     max_image = util.max2im(self.fake_B.data, self.latent_real_A.data)
        #     return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
        #                     ('latent_show', latent_show), ('max_image', max_image), ('A_gray', A_gray)])
        # else:
        #     return OrderedDict([('real_A', real_A), ('fake_B', fake_B)])
        # return OrderedDict([('fake_B', fake_B)])
        return OrderedDict([('real_A', real_A), ('fake_B', fake_B),
                            ('latent_V', latent_V), ('latent_H', latent_H),
                            ('latent_S', latent_S)])
Code example #5
    def predict(self):
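        # minimal inference path: run netG_A on the stored inputs and return only the input/output pair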
        self.real_A = Variable(self.input_A, volatile=True)
        self.real_A_gray = Variable(self.input_A_gray, volatile=True)
        if self.opt.noise > 0:
            self.noise = Variable(torch.cuda.FloatTensor(self.real_A.size()).normal_(mean=0, std=self.opt.noise/255.))
            self.real_A = self.real_A + self.noise
        if self.opt.input_linear:
            self.real_A = (self.real_A - torch.min(self.real_A))/(torch.max(self.real_A) - torch.min(self.real_A))
        if self.opt.skip == 1:
            self.fake_B, self.latent_real_A = self.netG_A.forward(self.real_A, self.real_A_gray)
        else:
            self.fake_B = self.netG_A.forward(self.real_A, self.real_A_gray)

        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        A_gray = util.atten2im(self.real_A_gray.data)

        return OrderedDict([('real_A', real_A), ('fake_B', fake_B)])