Example #1
0
 def compute_vgg_loss(self, vgg, img, target):
     """Instance-normalized MSE between the VGG features of two images.

     Arguments:
         vgg -- VGG feature extractor (callable)
         img {torch.Tensor} -- first image batch
         target {torch.Tensor} -- second image batch

     Returns:
         torch.Tensor -- scalar perceptual loss
     """
     # Normalize each feature map so the loss compares content, not statistics.
     normed_img = self.instancenorm(vgg(vgg_preprocess(img)))
     normed_target = self.instancenorm(vgg(vgg_preprocess(target)))
     diff = normed_img - normed_target
     return (diff ** 2).mean()
Example #2
0
 def compute_vgg_loss(self, vgg, img, target, hyperparameters):
     """Weighted multi-level L1 distance between VGG features of two images.

     Arguments:
         vgg -- feature extractor returning a sequence of feature maps
         img {torch.Tensor} -- first image batch
         target {torch.Tensor} -- second image batch
         hyperparameters {dict} -- must provide 'feature_weights', one weight
             per feature level returned by vgg (IndexError if too short, as
             in the original indexing scheme)

     Returns:
         torch.Tensor -- scalar weighted perceptual loss (int 0 if vgg
         returns no feature levels)
     """
     img_fea = vgg(vgg_preprocess(img))
     target_fea = vgg(vgg_preprocess(target))
     # Hoist the dict lookup out of the loop; pair levels with zip instead
     # of indexing via range(len(...)).
     weights = hyperparameters['feature_weights']
     total_loss = 0
     for i, (f_img, f_tgt) in enumerate(zip(img_fea, target_fea)):
         total_loss += weights[i] * torch.mean(torch.abs(f_img - f_tgt))
     return total_loss
Example #3
0
    def compute_vgg_loss(self, vgg, img, target, all=0):
        """Perceptual loss on the 'relu4_3' VGG features of two images.

        Arguments:
            vgg -- unused here; features come from self.VggExtract instead
            img {torch.Tensor} -- first image batch
            target {torch.Tensor} -- second image batch
            all {int} -- if truthy, plain MSE on relu4_3 features; otherwise
                instance-normalized MSE (domain-invariant variant).
                NOTE(review): the name shadows the builtin `all`; kept
                unchanged for caller compatibility.

        Returns:
            torch.Tensor -- scalar perceptual loss
        """
        img_vgg = vgg_preprocess(img)
        target_vgg = vgg_preprocess(target)
        img_fea_dict = self.VggExtract(img_vgg)
        target_fea_dict = self.VggExtract(target_vgg)
        if all:
            return torch.mean(
                (img_fea_dict['relu4_3'] - target_fea_dict['relu4_3']) ** 2)
        return torch.mean(
            (self.instancenorm(img_fea_dict['relu4_3'])
             - self.instancenorm(target_fea_dict['relu4_3'])) ** 2)
Example #4
0
 def compute_vgg_loss(self, vgg, img, target):
     """Compute the domain-invariant perceptual loss.

     Arguments:
         vgg {model} -- popular Convolutional Network for Classification
             and Detection
         img {torch.Tensor} -- image before translation
         target {torch.Tensor} -- image after translation

     Returns:
         torch.Float -- domain invariant perceptual loss
     """
     # Extract and instance-normalize the VGG features of each image.
     features = [
         self.instancenorm(vgg(vgg_preprocess(x))) for x in (img, target)
     ]
     squared_error = (features[0] - features[1]) ** 2
     return squared_error.mean()
Example #5
0
def get_image(img_rows, img_cols, image_file_path):
    """
    Load an image from disk and prepare it as a single-sample batch.

    Arguments:
        img_rows -- target height the image is resized to
        img_cols -- target width the image is resized to
        image_file_path -- path of the image file to load

    Returns:
        numpy array with a leading batch axis of size 1, after VGG
        preprocessing -- NOTE(review): exact channel layout depends on
        utils.vgg_preprocess; confirm before relying on it.
    """
    # DO NOT CHANGE THE ORDER OF THE NEXT 4 LINES - JUST. DO. NOT.
    img = image.load_img(image_file_path, target_size=(img_rows, img_cols))
    img = image.img_to_array(img)
    img = utils.vgg_preprocess(img)
    img = np.expand_dims(img, axis=0)

    return img
Example #6
0
    def compute_vgg_loss(self, img, target, mask):
        """
        Compute the masked domain-invariant perceptual loss.

        Arguments:
            img {torch.Tensor} -- image before translation
            target {torch.Tensor} -- image after translation
            mask {torch.Tensor} -- multiplied as (1 - mask), so regions where
                mask == 1 are zeroed out and excluded from the comparison
                -- NOTE(review): confirm this convention against the caller

        Returns:
            torch.Float -- domain invariant perceptual loss on the
            unmasked region
        """
        img_vgg = vgg_preprocess(img)
        target_vgg = vgg_preprocess(target)

        # Zero out masked pixels before feeding both images to the VGG
        # criterion, so they contribute no features to the loss:
        img_vgg = img_vgg * (1.0 - mask)
        target_vgg = target_vgg * (1.0 - mask)

        loss_G_VGG = self.criterionVGG(img_vgg, target_vgg)

        return loss_G_VGG
Example #7
0
 def compute_vgg_loss(self, vgg, img, target):
     """Mean absolute difference between the VGG features of two images.

     Arguments:
         vgg -- VGG feature extractor (callable)
         img {torch.Tensor} -- first image batch
         target {torch.Tensor} -- second image batch

     Returns:
         torch.Tensor -- scalar L1 perceptual loss
     """
     source_features = vgg(vgg_preprocess(img))
     reference_features = vgg(vgg_preprocess(target))
     return (source_features - reference_features).abs().mean()
Example #8
0
File: trainer.py  Project: phonx/MUNIT
 def compute_vgg_loss(self, vgg, img, target):
     """Instance-normalized MSE between VGG features (MUNIT perceptual loss).

     Arguments:
         vgg -- VGG feature extractor (callable)
         img {torch.Tensor} -- first image batch
         target {torch.Tensor} -- second image batch

     Returns:
         torch.Tensor -- scalar perceptual loss
     """
     normalized = [
         self.instancenorm(vgg(vgg_preprocess(x))) for x in (img, target)
     ]
     return ((normalized[0] - normalized[1]) ** 2).mean()
Example #9
0
 def compute_vgg_loss_new(self, vgg, img, target):
     """Distance between VGG features of two images under self.recon_criterion.

     Arguments:
         vgg -- VGG feature extractor (callable)
         img {torch.Tensor} -- first image batch
         target {torch.Tensor} -- second image batch

     Returns:
         whatever self.recon_criterion returns for the two feature tensors
     """
     source_features = vgg(vgg_preprocess(img))
     reference_features = vgg(vgg_preprocess(target))
     return self.recon_criterion(source_features, reference_features)
Example #10
0
 def compute_vgg_loss(self, vgg, img, target):
     """Contextual loss between the convolutional VGG features of two images.

     Arguments:
         vgg -- torchvision-style VGG model; only its .features stack is used
         img {torch.Tensor} -- first image batch
         target {torch.Tensor} -- second image batch

     Returns:
         whatever contextual_loss returns for the two feature tensors
     """
     extract = vgg.features
     return contextual_loss(
         extract(vgg_preprocess(img)),
         extract(vgg_preprocess(target)),
     )