Example #1
    def get_current_visuals(self):
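        # Convert the cached input/output tensors into displayable numpy images.
        # Assumes the usual module-level imports for this file (not shown in the snippet):
        #   import torch
        #   from collections import OrderedDict
        #   import util  # provides tensor2im / latent2im / atten2im; exact import path depends on the repo layout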
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        real_B = util.tensor2im(self.real_B.data)
        edge = util.atten2im(self.edge_out.data)

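        # Repeat the single-channel gray map along the channel axis so tensor2im can render it as a 3-channel image.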
        self.output_A_I_3 = torch.cat([self.gray, self.gray, self.gray], 1)
        out_A_I = util.tensor2im(self.output_A_I_3.data)

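        # With the skip connection enabled, the latent output is also converted for display (as a plain image and via latent2im).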
        if self.opt.skip > 0:
            latent_real_A = util.tensor2im(self.latent_real_A.data)
            latent_show = util.latent2im(self.latent_real_A.data)

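            # With the patch discriminator enabled, also return the cropped fake/real (and, with patch_vgg, input) patches.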
            if self.opt.patchD:
                fake_patch = util.tensor2im(self.fake_patch.data)
                real_patch = util.tensor2im(self.real_patch.data)
                if self.opt.patch_vgg:
                    input_patch = util.tensor2im(self.input_patch.data)
                    if not self.opt.self_attention:
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                            ('latent_show', latent_show), ('real_B', real_B),
                                            ('real_patch', real_patch),
                                            ('fake_patch', fake_patch), ('input_patch', input_patch)])
                    else:
                        self_attention = util.atten2im(self.real_A_gray_o.data)
                        return OrderedDict(
                            [('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B), ('real_patch', real_patch),
                             ('fake_patch', fake_patch), ('input_patch', input_patch), ('input_gray', self_attention),
                             ('latent', latent_show), ('out_A_I', out_A_I), ('edge', edge)])
                else:
                    if not self.opt.self_attention:
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                            ('latent_show', latent_show), ('real_B', real_B),
                                            ('real_patch', real_patch),
                                            ('fake_patch', fake_patch)])
                    else:
                        self_attention = util.atten2im(self.real_A_gray.data)
                        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                            ('latent_show', latent_show), ('real_B', real_B),
                                            ('real_patch', real_patch),
                                            ('fake_patch', fake_patch), ('self_attention', self_attention)])
            else:
                if not self.opt.self_attention:
                    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                                        ('latent_show', latent_show), ('real_B', real_B)])
                else:
                    self_attention = util.atten2im(self.real_A_gray_o.data)
                    return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                                        ('latent_real_A', latent_real_A), ('latent_show', latent_show),
                                        ('self_attention', self_attention)])
        else:
            if not self.opt.self_attention:
                return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)])
            else:
                self_attention = util.atten2im(self.real_A_gray.data)
                return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                                    ('self_attention', self_attention)])
Example #2
 def get_current_visuals(self):
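     # Minimal variant: only the full-image visuals are returned.
     # Assumes module-level imports of collections.OrderedDict and the project's util helpers.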
     real_A = util.tensor2im(self.real_A.data)
     fake_B = util.tensor2im(self.fake_B.data)
     real_B = util.tensor2im(self.real_B.data)
     if self.opt.skip > 0:
         latent_real_A = util.tensor2im(self.latent_real_A.data)
         latent_show = util.latent2im(self.latent_real_A.data)
         return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                             ('latent_show', latent_show), ('real_B', real_B)])
     else:
         # Without the skip connection there is no latent image to show; return the basic triplet
         # (mirrors the skip == 0 branch in Examples #1 and #3) instead of falling through with None.
         return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)])
Example #3
 def get_current_visuals(self):
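     # Same branching as Example #1, but without the extra gray/edge visuals.
     # Assumes module-level imports of collections.OrderedDict and the project's util helpers.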
     real_A = util.tensor2im(self.real_A.data)
     fake_B = util.tensor2im(self.fake_B.data)
     real_B = util.tensor2im(self.real_B.data)
     if self.opt.skip > 0:
         latent_real_A = util.tensor2im(self.latent_real_A.data)
         latent_show = util.latent2im(self.latent_real_A.data)
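         # The returned dictionary grows with the enabled options (patchD, patch_vgg, self_attention).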
         if self.opt.patchD:
             fake_patch = util.tensor2im(self.fake_patch.data)
             real_patch = util.tensor2im(self.real_patch.data)
             if self.opt.patch_vgg:
                 input_patch = util.tensor2im(self.input_patch.data)
                 if not self.opt.self_attention:
                     return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                             ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                             ('fake_patch', fake_patch), ('input_patch', input_patch)])
                 else:
                     self_attention = util.atten2im(self.real_A_gray.data)
                     return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                             ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                             ('fake_patch', fake_patch), ('input_patch', input_patch), ('self_attention', self_attention)])
             else:
                 if not self.opt.self_attention:
                     return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                             ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                             ('fake_patch', fake_patch)])
                 else:
                     self_attention = util.atten2im(self.real_A_gray.data)
                     return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                             ('latent_show', latent_show), ('real_B', real_B), ('real_patch', real_patch),
                             ('fake_patch', fake_patch), ('self_attention', self_attention)])
         else:
             if not self.opt.self_attention:
                 return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('latent_real_A', latent_real_A),
                             ('latent_show', latent_show), ('real_B', real_B)])
             else:
                 self_attention = util.atten2im(self.real_A_gray.data)
                 return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                                 ('latent_real_A', latent_real_A), ('latent_show', latent_show),
                                 ('self_attention', self_attention)])
     else:
         if not self.opt.self_attention:
             return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)])
         else:
             self_attention = util.atten2im(self.real_A_gray.data)
             return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B),
                                 ('self_attention', self_attention)])