Example #1
                                "fake_logits": fake_logits.mean().item(),
                                "rand_logits": rand_logits.mean().item()
                            }, global_step // args.log_interval)
                        discriminator.train()

                if global_step % args.summary_interval == 0:
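                    # Log-compress the filterbank features; the 1e-8 floor avoids log(0).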
                    log_feat = torch.log(
                        train_dataloader.extract_fbank(feat.squeeze(1)) + 1e-8)
                    log_label = torch.log(
                        train_dataloader.extract_fbank(label.squeeze(1)) +
                        1e-8)
                    log_enhanced = torch.log(
                        train_dataloader.extract_fbank(predict.squeeze(1)) +
                        1e-8)
                    add_tf_image(writer, log_feat[:, :480, :],
                                 "train/noisy_feat", args.batch_size,
                                 args.batch_size // 8,
                                 global_step // args.summary_interval)
                    add_tf_image(writer, log_label[:, :480, :],
                                 "train/clean_feat", args.batch_size,
                                 args.batch_size // 8,
                                 global_step // args.summary_interval)
                    add_tf_image(writer, log_enhanced[:, :480, :],
                                 "train/enhanced_feat", args.batch_size,
                                 args.batch_size // 8,
                                 global_step // args.summary_interval)

                global_step += 1
                pbar.update(BATCH_SIZE)

        # Save checkpoint at `global_step`.
        logger.info("Save models at step %d" % global_step)
Example #2
                        writer.add_scalar(
                            "train/gradient_penalty",
                            disc_d_loss_dict["gradient_penalty"].item(),
                            global_step // args.log_interval)
                    # Log the min/max of the clean feature.
                    if args.target_type == "mapping_log_pow" and opts[
                            'compress_label']:
                        writer.add_scalars(
                            "train", {
                                "log_label_min": opts['log_label_min'],
                                "log_label_max": opts["log_label_max"],
                            }, global_step // args.log_interval)

                if global_step % args.summary_interval == 0:
                    add_tf_image(writer, log_feat[:, :480, :],
                                 "train/noisy_feat", 4, 1,
                                 global_step // args.summary_interval)
                    add_tf_image(writer, log_label[:, :480, :],
                                 "train/clean_feat", 4, 1,
                                 global_step // args.summary_interval)
                    add_tf_image(writer, log_enhanced[:, :480, :],
                                 "train/enhanced_feat", 4, 1,
                                 global_step // args.summary_interval)
                    if "mask" in target_type or "SigApp" in target_type:
                        add_tf_image(writer, predict[:, :480, :], "train/mask",
                                     4, 1,
                                     global_step // args.summary_interval)
                        add_tf_image(writer, target[:, :480, :],
                                     "train/ideal_mask", 4, 1,
                                     global_step // args.summary_interval)
                        writer.add_histogram(
                            "train/hist_predict_mask",
                            predict[0, :frame_number[0], :],
                            global_step // args.summary_interval)
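
Several of these excerpts log a `gradient_penalty` term out of
`disc_d_loss_dict`, but its computation is not shown. A minimal sketch of
the standard WGAN-GP penalty (Gulrajani et al., 2017), which may differ
from the project's actual implementation, looks like:

import torch

def gradient_penalty(discriminator, real_data, fake_data):
    # Random interpolation between real and fake samples
    # (the alpha shape assumes NCHW-style inputs).
    alpha = torch.rand(real_data.size(0), 1, 1, 1, device=real_data.device)
    mixed = (alpha * real_data + (1 - alpha) * fake_data).requires_grad_(True)
    scores = discriminator(mixed)
    # Gradient of the critic score w.r.t. the interpolated input.
    grads = torch.autograd.grad(outputs=scores, inputs=mixed,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True)[0]
    grad_norm = grads.reshape(grads.size(0), -1).norm(2, dim=1)
    # Penalize deviation of the gradient norm from 1.
    return ((grad_norm - 1) ** 2).mean()
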
Example #3
                writer.add_scalar('train/w_dis', disc_d_loss_dict["w_dis"].item(), global_step // args.log_interval)
                writer.add_scalar('train/real_data_min', real_data_min, global_step // args.log_interval)
                writer.add_scalar('train/real_data_max', real_data_max, global_step // args.log_interval)
                if "gp" in adversarial_loss:
                    writer.add_scalar("train/gradient_penalty", disc_d_loss_dict["gradient_penalty"].item(),
                                      global_step // args.log_interval)
                # Log the regularization terms.
                if args.l1_alpha > 0:
                    writer.add_scalar("train/l1_penalty", args.l1_alpha * disc_d_loss_dict["l1_penalty"].item(),
                                      global_step // args.log_interval)
                if args.l2_alpha > 0:
                    writer.add_scalar("train/l2_penalty", args.l2_alpha * disc_d_loss_dict["l2_penalty"].item(),
                                      global_step // args.log_interval)

            if global_step % args.summary_interval == 0:
                add_tf_image(writer, real_data.squeeze(1), "train/real_data", 64, 8,
                             global_step // args.summary_interval)
                add_tf_image(writer, fake_data.squeeze(1), "train/fake_data", 64, 8,
                             global_step // args.summary_interval)

            global_step += 1
            pbar.update(BATCH_SIZE)
        pbar.close()

        # Save checkpoint at `global_step`.
        logger.info("Save models at step %d" % global_step)
        save_check_point(generator, discriminator, g_optimizer, d_optimizer, global_step, model_name)
        logger.info("Done.")

        with torch.no_grad():
            logger.info("Complete train %d epochs, start to plot generated spectrogram." % epoch)
            generator.eval()
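
`save_check_point` is called in each excerpt but never defined here. A
plausible sketch (the checkpoint layout and file naming are assumptions):

import torch

def save_check_point(generator, discriminator, g_optimizer, d_optimizer,
                     global_step, model_name):
    # Bundle all training state into one file keyed by step.
    torch.save({
        "generator": generator.state_dict(),
        "discriminator": discriminator.state_dict(),
        "g_optimizer": g_optimizer.state_dict(),
        "d_optimizer": d_optimizer.state_dict(),
        "global_step": global_step,
    }, "%s-%d.pt" % (model_name, global_step))
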
Example #4
                g_optimizer.zero_grad()
                g_loss.backward()
                g_optimizer.step()

                # log and summary
                if global_step % args.log_interval == 0:
                    writer.add_scalar('train/recon_loss', recon_loss.item(), global_step // args.log_interval)
                    if adversarial_loss:
                        writer.add_scalar('train/d_real_loss', disc_d_loss_dict["real_loss"].item(),
                                          global_step // args.log_interval)
                        writer.add_scalar('train/d_fake_loss', disc_d_loss_dict["fake_loss"].item(),
                                          global_step // args.log_interval)
                        writer.add_scalar('train/g_loss', disc_g_loss_dict["g_loss"].item(),
                                          global_step // args.log_interval)
                if global_step % args.summary_interval == 0:
                    add_tf_image(writer, log_feat[:, :480, :], "train/noisy_feat", 4, 1, global_step // args.summary_interval)
                    add_tf_image(writer, log_label[:, :480, :], "train/clean_feat", 4, 1, global_step // args.summary_interval)
                    add_tf_image(writer, log_enhanced[:, :480, :], "train/enhanced_feat", 4, 1, global_step // args.summary_interval)
                    if "mask" in target_type:
                        add_tf_image(writer, predict[:, :480, :], "train/mask", 4, 1, global_step // args.summary_interval)
                        add_tf_image(writer, target[:, :480, :], "train/ideal_mask", 4, 1, global_step // args.summary_interval)
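                        # Histogram only the valid frames of the first utterance (padding excluded).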
                        writer.add_histogram("train/hist_predict_mask", predict[0, :frame_number[0], :], global_step // args.summary_interval)
                        writer.add_histogram("train/hist_ideal_mask", target[0, :frame_number[0], :], global_step // args.summary_interval)

                global_step += 1
                pbar.update(BATCH_SIZE)

        logger.info("Save models at step %d" % global_step)
        if adversarial_loss:
            save_check_point(generator, discriminator, g_optimizer, d_optimizer, global_step, model_name)
        else:
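
(The excerpt breaks off inside the non-adversarial save branch.) All of
these examples push image summaries through `add_tf_image`, whose
definition is also not shown. Judging only from the call sites, it takes
(writer, features, tag, n_images, n_per_row, step[, colorful=False]); a
hypothetical stand-in using torchvision could look like:

import torchvision

def add_tf_image(writer, feats, tag, n_images, n_per_row, step,
                 colorful=False):
    # feats: (batch, frames, bins); one grayscale tile per example.
    imgs = feats[:n_images].unsqueeze(1).float()
    imgs = (imgs - imgs.min()) / (imgs.max() - imgs.min() + 1e-8)
    # `colorful` presumably selects a colormap; omitted in this sketch.
    grid = torchvision.utils.make_grid(imgs, nrow=n_per_row)
    writer.add_image(tag, grid, step)
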
Example #5
                writer.add_scalar('train/w_dis', disc_d_loss_dict["w_dis"].item(), global_step // args.log_interval)
                writer.add_scalar('train/real_data_min', real_data_min, global_step // args.log_interval)
                writer.add_scalar('train/real_data_max', real_data_max, global_step // args.log_interval)
                if adversarial_loss == "wgangp":
                    writer.add_scalar("train/gradient_penalty", disc_d_loss_dict["gradient_penalty"].item(),
                                      global_step // args.log_interval)
                # Log the regularization terms.
                if args.l1_alpha > 0:
                    writer.add_scalar("train/l1_penalty", args.l1_alpha * disc_d_loss_dict["l1_penalty"].item(),
                                      global_step // args.log_interval)
                if args.l2_alpha > 0:
                    writer.add_scalar("train/l2_penalty", args.l2_alpha * disc_d_loss_dict["l2_penalty"].item(),
                                      global_step // args.log_interval)

            if global_step % args.summary_interval == 0:
                add_tf_image(writer, real_data.squeeze(1), "train/real_data", 64, 8, global_step // args.summary_interval)
                add_tf_image(writer, fake_data.squeeze(1), "train/fake_data", 64, 8, global_step // args.summary_interval)
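                # Collapse one-hot conditions to class indices, tiled to 40 bins to render as an image.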
                c_plot = torch.argmax(conditions, 2, True).expand(-1, -1, 40)
                add_tf_image(writer, c_plot, "train/conditions", 64, 8,
                             global_step // args.summary_interval, colorful=True)
            global_step += 1
            pbar.update(BATCH_SIZE)
        pbar.close()

        # Save checkpoint at `global_step`.
        logger.info("Save models at step %d" % global_step)
        save_check_point(generator, discriminator, g_optimizer, d_optimizer, global_step, model_name)
        logger.info("Done.")

        with torch.no_grad():
            logger.info("Complete train %d epochs, start to plot generated spectrogram." % epoch)