def patch_step(self, x, x_tilde, is_dis=True):
    """One PatchGAN training step.

    When ``is_dis`` is True, returns the discriminator objective
    ``(w_dis, real_logits, gp)``; otherwise returns the generator
    adversarial loss ``(-mean(D_fake), fake_logits)``.
    """
    score_real, logits_real = self.PatchDiscriminator(x, classify=True)
    score_fake, logits_fake = self.PatchDiscriminator(x_tilde, classify=True)
    if not is_dis:
        # Generator side: push the discriminator's fake scores up.
        return -torch.mean(score_fake), logits_fake
    # Discriminator side: Wasserstein distance plus gradient penalty.
    wasserstein = torch.mean(score_real - score_fake)
    penalty = calculate_gradients_penalty(self.PatchDiscriminator, x, x_tilde)
    return wasserstein, logits_real, penalty
# --- Example #2 (scraped snippet separator; vote count: 0) ---
 def patch_discriminate_step(self, x, x_tilde, cal_gp=True):
     # w-distance
     D_real, real_logits = self.PatchDiscriminator(x, classify=True)
     D_fake, fake_logits = self.PatchDiscriminator(x_tilde, classify=True)
     w_dis = torch.mean(D_real - D_fake)
     if cal_gp:
         gp = calculate_gradients_penalty(self.PatchDiscriminator, x,
                                          x_tilde)
         return w_dis, real_logits, fake_logits, gp
     else:
         return w_dis, real_logits, fake_logits
# --- Example #3 (scraped snippet separator; vote count: 0) ---
 def latent_discriminate_step(self, enc_i_t, enc_i_tk, enc_i_prime, enc_j, is_dis=True):
     same_pair = torch.cat([enc_i_t, enc_i_tk], dim=1)
     diff_pair = torch.cat([enc_i_prime, enc_j], dim=1)
     if is_dis:
         same_val = self.LatentDiscriminator(same_pair)
         diff_val = self.LatentDiscriminator(diff_pair)
         w_dis = torch.mean(same_val - diff_val)
         gp = calculate_gradients_penalty(self.LatentDiscriminator, same_pair, diff_pair)
         return w_dis, gp
     else:
         diff_val = self.LatentDiscriminator(diff_pair)
         loss_adv = -torch.mean(diff_val)
         return loss_adv
# --- Example #4 (scraped snippet separator; vote count: 0) ---
            # NOTE(review): orphaned residue of an unresolved git merge — this is
            # the tail of a duplicate latent_discriminate_step whose `def` line
            # and opening statements were lost during the merge. Dead/unreachable
            # as written; resolve the merge and delete this fragment.
            w_dis = torch.mean(same_val - diff_val)
            gp = calculate_gradients_penalty(self.LatentDiscriminator, same_pair, diff_pair)
            return w_dis, gp
        else:
            diff_val = self.LatentDiscriminator(diff_pair)
            loss_adv = -torch.mean(diff_val)
            return loss_adv

    def patch_discriminate_step(self, x, x_tilde, cal_gp=True):
        """Patch-discriminator step: w-distance between real and fake patches.

        Args:
            x: batch of real inputs.
            x_tilde: batch of reconstructed/generated inputs.
            cal_gp: when True, also compute the gradient penalty term.

        Returns:
            (w_dis, real_logits, fake_logits) and, when cal_gp is True,
            the gradient penalty appended: (w_dis, real_logits, fake_logits, gp).
        """
        # w-distance
        D_real, real_logits = self.PatchDiscriminator(x, classify=True)
        D_fake, fake_logits = self.PatchDiscriminator(x_tilde, classify=True)
        w_dis = torch.mean(D_real - D_fake)
        if cal_gp:
            # Gradient penalty enforcing the discriminator's Lipschitz constraint.
            gp = calculate_gradients_penalty(self.PatchDiscriminator, x, x_tilde)
            return w_dis, real_logits, fake_logits, gp
        else:
            return w_dis, real_logits, fake_logits

    def gen_step(self, enc, c):
        """Produce the generated output for encoding ``enc`` conditioned on ``c``.

        The result is the decoder output plus the generator's residual.
        """
        decoded = self.Decoder(enc, c)
        residual = self.Generator(enc, c)
        return decoded + residual

    def clf_step(self, enc):
        """Return the speaker classifier's logits for encoding ``enc``."""
        return self.SpeakerClassifier(enc)

    def cal_loss(self, logits, y_true):
        # calculate loss 
        criterion = nn.CrossEntropyLoss()