Code example #1
                # discriminator.train()
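                # give the discriminator d_iter updates per enhancer step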
                for _ in range(args.d_iter):
                    d_optimizer.zero_grad()
                    disc_d_loss_dict = disc_d_loss(discriminator,
                                                   (real_data.detach(), ),
                                                   (fake_data.detach(), ),
                                                   backward=True,
                                                   opts=dis_opts)
                    d_optimizer.step()
                # discriminator.eval()

                e_loss = 0
                # add l1/l2 regularization
                if args.l1_alpha > 0:
                    l1_penalty = l1_regularization(enhancer, "weight")
                    e_loss = e_loss + args.l1_alpha * l1_penalty
                if args.l2_alpha > 0:
                    l2_penalty = l2_regularization(enhancer, "weight")
                    e_loss = e_loss + args.l2_alpha * l2_penalty
                if args.dist_alpha > 0:
                    # label_slices, real_data, fake_data = get_speech_slices(label, results, frame_number)
                    # get discriminative loss for generator
                    enhanced_logits, enhanced_feat = discriminator(
                        (enhanced_data, ))
                    real_logits, real_feat = discriminator(
                        (real_data.detach(), ))
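                    # logits for uniform random noise in [-1, 1)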
                    rand_logits, rand_feat = discriminator(
                        (torch.rand_like(real_data) * 2. - 1., ))
                    # dist_loss = (enhanced_logits - real_logits).pow(2.).mean()
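                    # least-squares objective: push enhanced logits toward the real label (1)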
                    dist_loss = (enhanced_logits - 1).pow(2.).mean()
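
Both snippets call l1_regularization and l2_regularization without defining them. A minimal sketch of what such helpers plausibly look like, assuming they sum a penalty over every parameter whose name contains the given substring ("weight" in the calls above); the exact behavior in the original codebase may differ:

    import torch
    import torch.nn as nn

    def l1_regularization(model: nn.Module, name_filter: str) -> torch.Tensor:
        # sum of |w| over parameters whose name contains name_filter
        penalty = torch.zeros((), device=next(model.parameters()).device)
        for name, param in model.named_parameters():
            if name_filter in name:
                penalty = penalty + param.abs().sum()
        return penalty

    def l2_regularization(model: nn.Module, name_filter: str) -> torch.Tensor:
        # sum of w^2 over parameters whose name contains name_filter
        penalty = torch.zeros((), device=next(model.parameters()).device)
        for name, param in model.named_parameters():
            if name_filter in name:
                penalty = penalty + param.pow(2.).sum()
        return penalty
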
Code example #2
                        # train discriminator
                        d_optimizer.zero_grad()
                        disc_d_loss_dict = disc_d_loss(discriminator, (real_data.detach(),),
                                                       (fake_data.detach(),), backward=True, opts=dis_opts)
                        d_optimizer.step()

                    # get discriminative loss for generator
                    disc_g_loss_dict = disc_g_loss(discriminator, (fake_data,), backward=False, opts=dis_opts)
                    g_loss = g_loss + disc_g_loss_dict["g_loss"] * dist_alpha

                # masked reconstruction loss, averaged over the batch
                recon_loss = reconstruction_loss(predict, target, batch_mask, backward=False, opts=None)["loss"] / BATCH_SIZE
                g_loss = g_loss + recon_loss

                # add l1/l2 regularization
                if args.l1_alpha > 0:
                    l1_penalty = l1_regularization(generator, "weight")
                    g_loss = g_loss + args.l1_alpha * l1_penalty
                if args.l2_alpha > 0:
                    l2_penalty = l2_regularization(generator, "weight")
                    g_loss = g_loss + args.l2_alpha * l2_penalty

                g_optimizer.zero_grad()
                g_loss.backward()
                if args.glc_alpha > 0:
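                    # pick the generator input matching the configured target type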
                    if args.target_type.lower() == "mapping_mag":
                        batch = feat.sqrt()
                    else:
                        batch = log_feat
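                    # accumulate the scaled gradient penalty on top of the g_loss gradients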
                    gp = calc_gradient_penalty(generator, batch, batch_mask)
                    (gp * args.glc_alpha).backward()
                g_optimizer.step()
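
disc_d_loss and disc_g_loss are likewise external. Judging from the least-squares term (enhanced_logits - 1).pow(2.).mean() in example #1 and the call sites above, an LSGAN-style pair along the following lines would fit; the "d_loss" key, the tuple-style inputs, and the internal backward pass are assumptions, not confirmed by the source:

    import torch

    def disc_d_loss(discriminator, real_inputs, fake_inputs, backward=False, opts=None):
        # LSGAN discriminator loss: real logits toward 1, fake logits toward 0
        real_logits, _ = discriminator(real_inputs)
        fake_logits, _ = discriminator(fake_inputs)
        d_loss = 0.5 * ((real_logits - 1.).pow(2.).mean() + fake_logits.pow(2.).mean())
        if backward:
            d_loss.backward()
        return {"d_loss": d_loss}

    def disc_g_loss(discriminator, fake_inputs, backward=False, opts=None):
        # LSGAN generator loss: fake logits pushed toward 1
        fake_logits, _ = discriminator(fake_inputs)
        g_loss = (fake_logits - 1.).pow(2.).mean()
        if backward:
            g_loss.backward()
        return {"g_loss": g_loss}

calc_gradient_penalty is also only called, never shown. Because its result is scaled by args.glc_alpha and back-propagated after g_loss.backward(), it must return a scalar that is still differentiable with respect to the generator parameters. One plausible sketch penalizes the generator's input-output gradient norm; the masking and the generator call signature here are guesses:

    def calc_gradient_penalty(generator, batch, batch_mask):
        # differentiate the masked generator output w.r.t. its input;
        # create_graph=True keeps the penalty differentiable for the later backward()
        batch = batch.detach().requires_grad_(True)
        output = generator(batch) * batch_mask
        grads = torch.autograd.grad(outputs=output.sum(), inputs=batch,
                                    create_graph=True)[0]
        return grads.pow(2.).mean()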