Code example #1
    def forward(self, content, style):
        # Extract multi-level encoder features for both images
        style_feats = self.encode_with_intermediate(style)
        content_feats = self.encode_with_intermediate(content)

        # Stylized image and its features
        Ics = self.decoder(self.sa_module(content_feats, style_feats))
        Ics_feats = self.encode_with_intermediate(Ics)

        # Content loss: compare the two deepest feature maps after
        # mean-variance normalization
        loss_c = (self.calc_content_loss(normal(Ics_feats[-1]),
                                         normal(content_feats[-1]))
                  + self.calc_content_loss(normal(Ics_feats[-2]),
                                           normal(content_feats[-2])))

        # Style loss: accumulated over all five feature levels
        loss_s = self.calc_style_loss(Ics_feats[0], style_feats[0])
        for i in range(1, 5):
            loss_s += self.calc_style_loss(Ics_feats[i], style_feats[i])

        # Identity loss 1: stylizing an image with itself
        # (content-content, style-style) should reproduce the input
        Icc = self.decoder(self.sa_module(content_feats, content_feats))
        Iss = self.decoder(self.sa_module(style_feats, style_feats))
        loss_lambda1 = (self.calc_content_loss(Icc, content)
                        + self.calc_content_loss(Iss, style))

        # Identity loss 2: the same self-reconstruction constraint,
        # enforced in feature space at every level
        Icc_feats = self.encode_with_intermediate(Icc)
        Iss_feats = self.encode_with_intermediate(Iss)
        loss_lambda2 = (self.calc_content_loss(Icc_feats[0], content_feats[0])
                        + self.calc_content_loss(Iss_feats[0], style_feats[0]))
        for i in range(1, 5):
            loss_lambda2 += (self.calc_content_loss(Icc_feats[i], content_feats[i])
                             + self.calc_content_loss(Iss_feats[i], style_feats[i]))

        return loss_c, loss_s, loss_lambda1, loss_lambda2
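The helpers `normal`, `calc_content_loss`, and `calc_style_loss` are referenced but not defined in this excerpt. Below is a minimal sketch of plausible definitions, assuming `normal` is mean-variance (instance) normalization and the style loss matches per-channel feature statistics, as is common in AdaIN-derived codebases; treat it as an assumption, not the source's actual code.

import torch
import torch.nn.functional as F

def calc_mean_std(feat, eps=1e-5):
    # Per-sample, per-channel mean and std over the spatial dimensions
    B, C = feat.size()[:2]
    var = feat.view(B, C, -1).var(dim=2) + eps
    std = var.sqrt().view(B, C, 1, 1)
    mean = feat.view(B, C, -1).mean(dim=2).view(B, C, 1, 1)
    return mean, std

def normal(feat, eps=1e-5):
    # Mean-variance normalization of a (B, C, H, W) feature map
    mean, std = calc_mean_std(feat, eps)
    return (feat - mean) / std

def calc_content_loss(input, target):
    return F.mse_loss(input, target)

def calc_style_loss(input, target):
    # Match first- and second-order feature statistics
    input_mean, input_std = calc_mean_std(input)
    target_mean, target_std = calc_mean_std(target)
    return (F.mse_loss(input_mean, target_mean)
            + F.mse_loss(input_std, target_std))

Under these assumptions, the content loss compares structure after discarding exactly the statistics that the style loss controls.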
Code example #2
    def forward(self, content_feat, style_feat):
        B, C, H, W = content_feat.size()

        # Project the normalized content features
        F_Fc_norm = self.f(normal(content_feat))

        # Project the normalized style features; view as (B*C, 1, H*W)
        # so each channel becomes its own batch entry
        B, C, H, W = style_feat.size()
        G_Fs_norm = self.g(normal(style_feat)).view(-1, 1, H * W)
        G_Fs_sum = G_Fs_norm.view(B, C, H * W).sum(-1)

        # Per-channel second-order statistic of the style features,
        # normalized by the per-channel sum
        FC_S = torch.bmm(G_Fs_norm,
                         G_Fs_norm.permute(0, 2, 1)).view(B, C) / G_Fs_sum
        FC_S = self.fc(FC_S).view(B, C, 1, 1)

        # Reweight the content projection channel-wise by the style statistic
        out = F_Fc_norm * FC_S
        B, C, H, W = content_feat.size()
        out = out.contiguous().view(B, -1, H, W)
        out = self.out_conv(out)

        # Residual connection back to the content features
        out = content_feat + out
        return out
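The projections `self.f`, `self.g`, `self.fc`, and `self.out_conv` are defined outside this excerpt. The following hypothetical `__init__` is consistent with the shapes used above, assuming 1x1 convolutions that preserve the channel count; the class name is a placeholder, not from the source.

import torch.nn as nn

class MCCModule(nn.Module):  # hypothetical name; the class is not shown
    def __init__(self, in_planes):
        super().__init__()
        # 1x1 convolutions keep (B, C, H, W) shapes intact, so the
        # view(-1, 1, H * W) and view(B, C) reshapes above line up
        self.f = nn.Conv2d(in_planes, in_planes, kernel_size=1)
        self.g = nn.Conv2d(in_planes, in_planes, kernel_size=1)
        self.fc = nn.Linear(in_planes, in_planes)
        self.out_conv = nn.Conv2d(in_planes, in_planes, kernel_size=1)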
Code example #3
    def forward(self, content, style):
        # Sample a noise level uniformly from [0.01, 0.02], then add
        # zero-mean Gaussian noise at that level to the content image.
        # Note: torch.nn.init.normal is deprecated; use the in-place
        # normal_ and pass the std as a Python float.
        s = torch.empty(1)
        t = torch.empty(content.size())
        std = torch.nn.init.uniform_(s, a=0.01, b=0.02)
        noise = torch.nn.init.normal_(t, mean=0.0, std=std.item()).cuda()
        content_noise = content + noise

        style_feats = self.encode_with_intermediate(style)
        content_feats = self.encode_with_intermediate(content)
        content_feats_N = self.encode_with_intermediate(content_noise)

        # Stylized image and its features
        Ics = self.decoder(self.mcc_module(content_feats, style_feats))
        Ics_feats = self.encode_with_intermediate(Ics)

        # Content loss on the two deepest normalized feature maps
        loss_c = (self.calc_content_loss(normal(Ics_feats[-1]),
                                         normal(content_feats[-1]))
                  + self.calc_content_loss(normal(Ics_feats[-2]),
                                           normal(content_feats[-2])))

        # Style loss accumulated over all five feature levels
        loss_s = self.calc_style_loss(Ics_feats[0], style_feats[0])
        for i in range(1, 5):
            loss_s += self.calc_style_loss(Ics_feats[i], style_feats[i])

        # Total variation loss: penalize differences between
        # horizontally and vertically adjacent pixels
        y = Ics
        tv_loss = (torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:]))
                   + torch.sum(torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :])))

        # Noise robustness: stylizing the noisy content should produce
        # the same output as stylizing the clean content
        Ics_N = self.decoder(self.mcc_module(content_feats_N, style_feats))
        loss_noise = self.calc_content_loss(Ics_N, Ics)

        # Identity loss 1: self-reconstruction in image space
        Icc = self.decoder(self.mcc_module(content_feats, content_feats))
        Iss = self.decoder(self.mcc_module(style_feats, style_feats))
        loss_lambda1 = (self.calc_content_loss(Icc, content)
                        + self.calc_content_loss(Iss, style))

        # Identity loss 2: self-reconstruction in feature space
        Icc_feats = self.encode_with_intermediate(Icc)
        Iss_feats = self.encode_with_intermediate(Iss)
        loss_lambda2 = (self.calc_content_loss(Icc_feats[0], content_feats[0])
                        + self.calc_content_loss(Iss_feats[0], style_feats[0]))
        for i in range(1, 5):
            loss_lambda2 += (self.calc_content_loss(Icc_feats[i], content_feats[i])
                             + self.calc_content_loss(Iss_feats[i], style_feats[i]))

        return loss_noise, loss_c, loss_s, loss_lambda1, loss_lambda2, tv_loss, Ics
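The noise injection at the top of this method routes through `torch.nn.init`, which fills pre-allocated tensors in place. A behaviorally equivalent sketch using `torch.randn_like`, which also keeps the noise on the same device as `content` without the explicit `.cuda()` call (the helper name is mine, not the source's):

import torch

def add_random_noise(content, low=0.01, high=0.02):
    # Draw one noise std uniformly from [low, high], then add
    # zero-mean Gaussian noise with that std to the whole batch
    std = torch.empty(1).uniform_(low, high).item()
    noise = torch.randn_like(content) * std
    return content + noise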
Code example #4
    def forward(self, content_feat, style_feat):
        B, C, H, W = content_feat.size()
        # Query: projected, normalized content features, (B, H*W, C')
        F_Fc_norm = self.f(normal(content_feat)).view(B, -1,
                                                      H * W).permute(0, 2, 1)

        # Key: projected, normalized style features, (B, C', H*W)
        B, C, H, W = style_feat.size()
        G_Fs_norm = self.g(normal(style_feat)).view(B, -1, H * W)

        # Attention between every content position and every style position
        energy = torch.bmm(F_Fc_norm, G_Fs_norm)
        attention = self.softmax(energy)

        # Value: unnormalized style features, aggregated by attention
        H_Fs = self.h(style_feat).view(B, -1, H * W)
        out = torch.bmm(H_Fs, attention.permute(0, 2, 1))

        B, C, H, W = content_feat.size()
        out = out.view(B, C, H, W)
        out = self.out_conv(out)

        # Residual connection back to the content features
        out += content_feat
        return out
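`self.softmax` and the `f`/`g`/`h` projections are not shown in this excerpt. A hypothetical `__init__` consistent with the bmm shapes above, assuming 1x1 query/key/value projections and a softmax over the style positions; the class name is a placeholder.

import torch.nn as nn

class StyleAttention(nn.Module):  # hypothetical name; the class is not shown
    def __init__(self, in_planes):
        super().__init__()
        # Query/key/value projections as 1x1 convolutions
        self.f = nn.Conv2d(in_planes, in_planes, kernel_size=1)
        self.g = nn.Conv2d(in_planes, in_planes, kernel_size=1)
        self.h = nn.Conv2d(in_planes, in_planes, kernel_size=1)
        self.out_conv = nn.Conv2d(in_planes, in_planes, kernel_size=1)
        # Normalize over the style positions (last dimension of `energy`),
        # so each content position gets a convex combination of style values
        self.softmax = nn.Softmax(dim=-1)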
Code example #5
def style_transfer(vgg, decoder, ma_module, content, style, alpha=1.0,
                   interpolation_weights=None):
    assert 0.0 <= alpha <= 1.0
    style_fs, content_f, style_f = feat_extractor(vgg, content, style)
    # Self-attended content features, used as the alpha = 0 endpoint
    Fccc = ma_module(content_f, content_f)
    if interpolation_weights:
        # Blend several style images: weighted sum of per-style features
        _, C, H, W = Fccc.size()
        feat = torch.zeros(1, C, H, W, device=device)
        base_feat = ma_module(content_f, style_f)
        for i, w in enumerate(interpolation_weights):
            feat = feat + w * base_feat[i:i + 1]
        Fccc = Fccc[0:1]
    else:
        feat = ma_module(content_f, style_f)
    # Interpolate between fully stylized and content-only features
    feat = feat * alpha + Fccc * (1 - alpha)
    return decoder(feat)
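A hedged usage sketch; `vgg`, `decoder`, `ma_module`, `feat_extractor`, and `device` are assumed to be set up elsewhere, and the tensors here are placeholders rather than values from the source.

import torch

with torch.no_grad():
    # content: (1, 3, H, W), style: (1, 3, H, W) image batches
    output = style_transfer(vgg, decoder, ma_module, content, style,
                            alpha=0.8)

    # Multi-style interpolation: four style images, weights summing to 1;
    # the content is repeated so feat_extractor sees a batch of four
    output = style_transfer(vgg, decoder, ma_module,
                            content.expand(4, -1, -1, -1), styles4,
                            interpolation_weights=[0.4, 0.3, 0.2, 0.1])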
Code example #6
for i in tqdm(range(args.max_iter)):
    adjust_learning_rate(optimizer, iteration_count=i)

    # Draw two independent content/style batches per iteration.
    # Note: tensor.size() returns (B, C, H, W), so name the dims that way.
    content_images = next(content_iter).to(device)
    style_images = next(style_iter).to(device)
    B, C, H, W = content_images.size()
    content_images1 = next(content_iter).to(device)
    style_images1 = next(style_iter).to(device)
    content_images1 = content_images1.expand(B, C, H, W)
    style_images1 = style_images1.expand(B, C, H, W)

    (style_feats, content_feats, style_feats1, content_feats1, Ics_feats,
     Ics1_feats, Ic1s_feats, Icc, Iss, Icc_feats,
     Iss_feats) = network(content_images, content_images1, style_images,
                          style_images1)

    # Content loss on the two deepest normalized feature maps
    loss_c = (calc_content_loss(normal(Ics_feats[-1]),
                                normal(content_feats[-1]))
              + calc_content_loss(normal(Ics_feats[-2]),
                                  normal(content_feats[-2])))

    # Style loss accumulated over all five feature levels
    loss_s = calc_style_loss(Ics_feats[0], style_feats[0])
    for j in range(1, 5):
        loss_s += calc_style_loss(Ics_feats[j], style_feats[j])

    # Disentanglement losses: both arguments are the same features passed
    # through shuffle, which is presumably stochastic, so the two calls
    # yield different permutations of the same tensor
    dis_loss_c = (calc_content_loss(normal(shuffle(Ic1s_feats[-1])),
                                    normal(shuffle(Ic1s_feats[-1])))
                  + calc_content_loss(normal(shuffle(Ic1s_feats[-2])),
                                      normal(shuffle(Ic1s_feats[-2]))))
    dis_loss_s = calc_style_loss(shuffle(Ics1_feats[0]),
                                 shuffle(Ics1_feats[0]))
    for j in range(1, 5):
        dis_loss_s += calc_style_loss(shuffle(Ics1_feats[j]),
                                      shuffle(Ics1_feats[j]))