Example #1
def calc_style_loss(self, input, target):
    # AdaIN-style loss: match the per-channel mean and std of the feature maps
    assert (input.size() == target.size())
    assert (target.requires_grad is False)
    input_mean, input_std = calc_mean_std(input)
    target_mean, target_std = calc_mean_std(target)
    return self.mse_loss(input_mean, target_mean) + \
           self.mse_loss(input_std, target_std)
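All of the snippets on this page rely on a calc_mean_std helper that is never shown here. A typical version, as found in common PyTorch AdaIN codebases (a sketch of the usual helper, not necessarily the exact one each repository uses), computes per-sample, per-channel statistics over the spatial dimensions:

import torch

def calc_mean_std(feat, eps=1e-5):
    # feat: (N, C, H, W); statistics are taken per sample and channel
    # over the spatial dimensions H*W
    N, C = feat.size()[:2]
    feat_var = feat.view(N, C, -1).var(dim=2) + eps  # eps guards against division by zero
    feat_std = feat_var.sqrt().view(N, C, 1, 1)
    feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
    return feat_mean, feat_std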
Example #2
import torch.nn as nn  # needed for nn.MSELoss

def calc_style_loss(input, target):
    assert (input.size() == target.size())
    assert (target.requires_grad is False)

    input_mean, input_std = calc_mean_std(input)
    target_mean, target_std = calc_mean_std(target)

    # match first- and second-order channel statistics
    mean_loss = nn.MSELoss()(input_mean, target_mean)
    std_loss = nn.MSELoss()(input_std, target_std)
    style_loss = mean_loss + std_loss

    return style_loss
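A minimal smoke test for the function above (a sketch: the shapes are hypothetical VGG-style feature maps, and the calc_mean_std sketch from Example #1 is assumed to be in scope):

import torch

input_feat = torch.randn(2, 512, 32, 32, requires_grad=True)  # generated features
target_feat = torch.randn(2, 512, 32, 32).detach()            # detached, so the assert passes

loss = calc_style_loss(input_feat, target_feat)
loss.backward()  # gradients flow back into input_feat only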
Example #3

import torch.nn.functional as F

def normalized_mse_loss(input, target):
    assert (input.size() == target.size())
    assert (target.requires_grad is False)
    size = target.size()
    eps = 1e-6
    cs_mean, cs_std = calc_mean_std(input)
    c_mean, c_std = calc_mean_std(target)
    # compare features after instance normalization, so only structure
    # (not channel statistics) contributes to the loss
    cs_normalized_feat = (input -
                          cs_mean.expand(size)) / (cs_std.expand(size) + eps)
    c_normalized_feat = (target - c_mean.expand(size)) / (c_std.expand(size) +
                                                          eps)
    # the original called an undefined mse_loss; F.mse_loss is one plausible reading
    return F.mse_loss(cs_normalized_feat, c_normalized_feat)
Example #4
def adain(self, content_feat, style_mean, style_std):
    # classic AdaIN: normalize the content features, then re-scale and
    # shift them with the style's channel statistics
    size = content_feat.size()
    content_mean, content_std = calc_mean_std(content_feat)
    normalized_feat = (content_feat -
                       content_mean.expand(size)) / content_std.expand(size)
    return normalized_feat * style_std.expand(size) + style_mean.expand(size)
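Combining Examples #1 and #4 gives the full AdaIN operation (a sketch; net is a hypothetical module exposing the adain method above):

import torch

content_feat = torch.randn(1, 512, 32, 32)
style_feat = torch.randn(1, 512, 32, 32)

style_mean, style_std = calc_mean_std(style_feat)  # helper sketched under Example #1
stylized = net.adain(content_feat, style_mean, style_std)
assert stylized.size() == content_feat.size()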
Example #5
def forward(self, input):
    # run the input through encoder stages enc_1..enc_4 and collect the
    # per-channel mean/std of each stage's output
    mres = []
    sres = []
    for i in range(4):
        input = getattr(self, 'enc_{:d}'.format(i + 1))(input)
        fm, fs = calc_mean_std(input)
        mres.append(fm.view(-1))
        sres.append(fs.view(-1))
    return mres, sres
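One plausible way these per-layer statistics get consumed downstream (a sketch; net, output_img, and style_img are hypothetical, with net being the module defining the forward above):

import torch.nn.functional as F

out_means, out_stds = net(output_img)
sty_means, sty_stds = net(style_img)

# multi-layer style loss over the collected mean/std vectors
style_loss = sum(F.mse_loss(om, sm) for om, sm in zip(out_means, sty_means)) + \
             sum(F.mse_loss(od, sd) for od, sd in zip(out_stds, sty_stds))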
Example #6
def thumb_adaptive_instance_normalization(self, content_thumb_feat,
                                          content_patch_feat,
                                          style_thumb_feat):
    size = content_thumb_feat.size()
    style_mean, style_std = calc_mean_std(style_thumb_feat)
    content_thumb_mean, content_thumb_std = calc_mean_std(content_thumb_feat)

    # normalize and re-stylize the thumbnail features as in plain AdaIN
    content_thumb_feat = (content_thumb_feat -
                          content_thumb_mean.expand(size)) / content_thumb_std.expand(size)
    content_thumb_feat = content_thumb_feat * style_std.expand(size) + \
        style_mean.expand(size)

    # the patch features reuse the *thumbnail's* statistics, so thumbnail
    # and patches are transformed consistently
    content_patch_feat = (content_patch_feat -
                          content_thumb_mean.expand(size)) / content_thumb_std.expand(size)
    content_patch_feat = content_patch_feat * style_std.expand(size) + \
        style_mean.expand(size)
    return content_thumb_feat, content_patch_feat
Example #7

def op(self, x, s, fusion_style, isTraining, normal, eps=1e-5):
    batch_size = x.size(0)

    # recalibrate features
    g_s = self.g(fusion_style).view(batch_size, self.inter_channels, -1)
    g_s = g_s.permute(0, 2, 1)
    if normal:
        # instance-normalize content (x) and style (s) features
        x_size = x.size()
        s_size = s.size()
        x_mean, x_std = calc_mean_std(x)
        s_mean, s_std = calc_mean_std(s)
        x_normalized_feat = (x -
                             x_mean.expand(x_size)) / x_std.expand(x_size)
        s_normalized_feat = (s -
                             s_mean.expand(s_size)) / s_std.expand(s_size)
    else:
        # fallback added here: the original left these names undefined
        # when normal was False, which raised a NameError below
        x_normalized_feat, s_normalized_feat = x, s
    theta_x = self.theta(x_normalized_feat)
    phi_s = self.phi(s_normalized_feat)

    theta_x = self.CA(
        self.extract_PatchesAndNorm(theta_x, p_size=self.p_size))
    phi_s = self.CA(self.extract_PatchesAndNorm(phi_s)).permute(0, 2, 1)

    # calculate attention map; f_div_C is the row-normalized attention
    f = torch.matmul(theta_x, phi_s) * self.bandwidth
    f_div_C = F.softmax(f, dim=-1)

    if not isTraining:
        # confidence map: attention-weighted logits, summed per query position
        conf_map = f_div_C * f
        conf_map = conf_map.sum(-1).view(batch_size, 1, *x.size()[2:])
    else:
        conf_map = 0

    # swap style features
    y = torch.matmul(f_div_C, g_s)
    y = y.permute(0, 2, 1).contiguous()
    y = y.view(batch_size, self.inter_channels, *x.size()[2:])
    W_y = self.W(y)

    return W_y, conf_map
Example #8
def calc_style_loss(self, input, target_mean, target_std):
    # variant that takes precomputed target statistics
    input_mean, input_std = calc_mean_std(input)
    # target_mean, target_std = calc_mean_std(target)
    return self.mse_loss(input_mean, target_mean) + \
           self.mse_loss(input_std, target_std)
Example #9

def calc_style_loss(self, img, style):
    # use instance-norm statistics (mean/std) to compute the style loss
    img_mean, img_std = calc_mean_std(img)
    style_mean, style_std = calc_mean_std(style)
    return self.mse_loss(img_mean, style_mean) + self.mse_loss(
        img_std, style_std)
Example #10

def adaptive_instance_normalization(content_feat):
    # returns the content statistics along with the normalized features,
    # so the caller can apply arbitrary target statistics afterwards
    size = content_feat.size()
    content_mean, content_std = calc_mean_std(content_feat)
    normalized_feat = (content_feat -
                       content_mean.expand(size)) / content_std.expand(size)
    return content_mean, content_std, normalized_feat
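Re-injecting statistics from another image into the returned normalized features reproduces AdaIN (a sketch; assumes the calc_mean_std helper sketched under Example #1):

import torch

content_feat = torch.randn(1, 512, 32, 32)
style_feat = torch.randn(1, 512, 32, 32)

c_mean, c_std, norm = adaptive_instance_normalization(content_feat)
s_mean, s_std = calc_mean_std(style_feat)
stylized = norm * s_std.expand(norm.size()) + s_mean.expand(norm.size())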