Example #1
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
# initialize_config and Trainer are project-specific helpers
# (see the initialize_config sketch after Example #2).

def main(config, resume):
    torch.manual_seed(config["seed"])
    np.random.seed(config["seed"])

    def pad_to_longest(batch):
        mixture_list = []
        clean_list = []
        names = []
        n_frames_list = []

        for mixture, clean, n_frames, name in batch:
            mixture_list.append(torch.tensor(mixture).reshape(-1, 1))
            clean_list.append(torch.tensor(clean).reshape(-1, 1))
            n_frames_list.append(n_frames)
            names.append(name)

        # pad_sequence takes a list of tensors shaped (L_i, 1) and returns a
        # (longest_len, batch, 1) tensor; squeezing the trailing dim and
        # permuting yields (batch, longest_len).
        mixture_list = pad_sequence(mixture_list).squeeze(2).permute(1, 0)
        clean_list = pad_sequence(clean_list).squeeze(2).permute(1, 0)

        return mixture_list, clean_list, n_frames_list, names

    train_dataset = initialize_config(config["train_dataset"])
    train_data_loader = DataLoader(
        shuffle=config["train_dataloader"]["shuffle"],
        dataset=train_dataset,
        batch_size=config["train_dataloader"]["batch_size"],
        num_workers=config["train_dataloader"]["num_workers"],
        collate_fn=pad_to_longest,
        drop_last=True)

    validation_dataset = initialize_config(config["validation_dataset"])
    valid_data_loader = DataLoader(
        dataset=validation_dataset,
        num_workers=config["validation_dataloader"]["num_workers"],
        batch_size=config["validation_dataloader"]["batch_size"],
        collate_fn=pad_to_longest,
        shuffle=config["validation_dataloader"]["shuffle"])

    model = initialize_config(config["model"])

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=config["optimizer"]["lr"],
                                 betas=(config["optimizer"]["beta1"], 0.999))

    loss_function = initialize_config(config["loss_function"])

    trainer = Trainer(config=config,
                      resume=resume,
                      model=model,
                      optimizer=optimizer,
                      loss_function=loss_function,
                      train_dataloader=train_data_loader,
                      validation_dataloader=valid_data_loader)

    trainer.train()
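For reference, pad_to_longest leans on torch.nn.utils.rnn.pad_sequence; a minimal sketch of the shape transformations with toy tensors (variable names are illustrative only):

import torch
from torch.nn.utils.rnn import pad_sequence

a = torch.ones(3, 1)                     # (L_1, 1)
b = torch.ones(5, 1)                     # (L_2, 1)
padded = pad_sequence([a, b])            # (5, 2, 1): (longest_len, batch, 1)
batch = padded.squeeze(2).permute(1, 0)  # (2, 5): (batch, longest_len)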
Example #2
import numpy as np
import torch
from torch.utils.data import DataLoader
# TrainDataset, TestDataset, initialize_config, and Trainer are
# project-specific.

def main(config, resume):
    """
    训练脚本的入口函数
    
    Notes:
        1. 加载数据集
        2. 初始化模型
        3. 设置优化器
        4. 选择损失函数
        5. 训练脚本 run

    Args:
        config (dict): 配置项
        resume (bool): 是否加载最近一次存储的模型断点
    """
    torch.manual_seed(config["seed"])
    np.random.seed(config["seed"])

    train_dataset = TrainDataset(
        mixture_dataset=config["train_dataset"]["mixture"],
        mask_dataset=config["train_dataset"]["clean"],
        limit=config["train_dataset"]["limit"],
        offset=config["train_dataset"]["offset"],
    )
    train_data_loader = DataLoader(
        dataset=train_dataset,
        batch_size=config["train_dataset"]["batch_size"],
        num_workers=config["train_dataset"]["num_workers"],
        shuffle=config["train_dataset"]["shuffle"])

    valid_dataset = TestDataset(
        mixture_dataset=config["valid_dataset"]["mixture"],
        clean_dataset=config["valid_dataset"]["clean"],
        limit=config["valid_dataset"]["limit"],
        offset=config["valid_dataset"]["offset"],
    )

    valid_data_loader = DataLoader(dataset=valid_dataset)

    model = initialize_config(config["model"])

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=config["optimizer"]["lr"])

    loss_function = initialize_config(config["loss_function"])

    trainer = Trainer(
        config=config,
        resume=resume,
        model=model,
        loss_function=loss_function,
        optim=optimizer,
        train_dl=train_data_loader,
        validation_dl=valid_data_loader,
    )

    trainer.train()
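Both examples build their model and loss through a project-specific initialize_config helper. A hypothetical sketch of the common pattern, assuming the config carries a dotted module path, a class name, and constructor arguments (the key names "module", "main", and "args" are assumptions, not confirmed by the source):

import importlib

def initialize_config(module_cfg):
    # Hypothetical layout: {"module": "models.unet", "main": "UNet", "args": {...}}
    module = importlib.import_module(module_cfg["module"])
    return getattr(module, module_cfg["main"])(**module_cfg["args"])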
Example #3
import json
import os
from pathlib import Path

import numpy as np
import tablib
import torch
from joblib import Parallel, delayed
from torch.utils.data import DataLoader
from tqdm import tqdm
# initialize_config, enhance_spectrum, Trainer, load_checkpoint, and the
# argparse `parser` are defined elsewhere in the project.

def main(config, epoch):
    root_dir = Path(config["save_location"]) / config["name"]
    checkpoints_dir = root_dir / "checkpoints"
    """============== 加载数据集 =============="""
    dataset = initialize_config(config["dataset"])
    collate_fn = lambda data_list: data_list
    dataloader = DataLoader(dataset=dataset,
                            batch_size=40,
                            num_workers=40,
                            collate_fn=collate_fn,
                            drop_last=True)
    """============== 加载模型断点(best,latest,通过数字指定) =============="""
    model = initialize_config(config["model"])
    device = torch.device("cpu")
    model = model.double()

    if epoch == "best":
        model_path = checkpoints_dir / "best_model.tar"
    elif epoch == "latest":
        model_path = checkpoints_dir / "latest_model.tar"
    else:
        model_path = checkpoints_dir / f"generator_{str(epoch).zfill(3)}.pth"

    model_checkpoint = torch.load(model_path.as_posix(), map_location=device)
    model_state_dict = model_checkpoint["model_state_dict_G"]
    print(f"Loading model checkpoint, epoch = {model_checkpoint['epoch']}")
    model.load_state_dict(model_state_dict)
    model.eval()
    """============== 增强语音 =============="""
    results_dir = root_dir / f"epoch_{epoch}_results"
    results_dir.mkdir(parents=False, exist_ok=True)
    spectrum_type = config["spectrum_type"]

    headers = ("Speech ID", "Noise type", "SNR",
               "STOI (clean vs. noisy)", "STOI (clean vs. enhanced)",
               "PESQ (clean vs. noisy)", "PESQ (clean vs. enhanced)",
               "STOI improvement", "PESQ improvement"
               )  # Column layout of the exported Excel file
    metrics_seq = []

    for i, data in enumerate(dataloader):
        # Each batch's results are gathered into a single list:
        # [(return_1, return_2, ..., return_40), ...]
        metrics = Parallel(n_jobs=40, require="sharedmem")(
            delayed(enhance_spectrum)(model, *data[j], spectrum_type)
            for j in range(dataloader.batch_size))

        metrics_seq += metrics
    """============== 存储结果 =============="""
    data = tablib.Dataset(*metrics_seq, headers=headers)
    metrics_save_dir = results_dir / f"epoch_{epoch}.xls"
    with open(metrics_save_dir.as_posix(), 'wb') as f:
        f.write(data.export('xls'))
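The metrics loop above dispatches one enhance_spectrum call per sample through joblib. A self-contained sketch of the same Parallel/delayed pattern with a toy function:

from joblib import Parallel, delayed

def square(x):
    return x * x

# require="sharedmem" selects the threading backend, letting all workers
# share the one in-memory model instead of pickling it per process.
results = Parallel(n_jobs=4, require="sharedmem")(
    delayed(square)(x) for x in range(8))
assert results == [0, 1, 4, 9, 16, 25, 36, 49]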
def main(config, resume):
    """
    训练脚本的入口函数

    Args:
        config (dict): 配置项
        resume (bool): 是否加载最近一次存储的模型断点
    """
    torch.manual_seed(config["seed"])
    np.random.seed(config["seed"])

    train_dataset = initialize_config(config["train_dataset"])
    validation_dataset = initialize_config(config["validation_dataset"])
    train_data_loader = DataLoader(
        dataset=train_dataset,
        batch_size=config["train_dataloader"]["batch_size"],
        num_workers=config["train_dataloader"]["num_workers"],
        shuffle=config["train_dataloader"]["shuffle"])

    collate_all_data = lambda data_list: data_list
    valid_data_loader = DataLoader(
        dataset=validation_dataset,
        batch_size=config["validation_dataloader"]["batch_size"],
        num_workers=config["validation_dataloader"]["num_workers"],
        collate_fn=collate_all_data)

    generator = initialize_config(config["generator_model"])
    discriminator = initialize_config(config["discriminator_model"])

    generator_optimizer = torch.optim.Adam(params=generator.parameters(),
                                           lr=config["optimizer"]["G_lr"])
    discriminator_optimizer = torch.optim.Adam(
        params=discriminator.parameters(),
        lr=config["optimizer"]["D_lr"],
        betas=(config["optimizer"]["beta1"], 0.999))

    loss_function = initialize_config(config["loss_function"])

    trainer = Trainer(
        config=config,
        resume=resume,
        G=generator,
        D=discriminator,
        optim_G=generator_optimizer,
        optim_D=discriminator_optimizer,
        loss_function=loss_function,
        train_dl=train_data_loader,
        validation_dl=valid_data_loader,
    )

    trainer.train()
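The enhancement entry point at the top of this example expects checkpoints carrying "epoch" and "model_state_dict_G" keys. A hypothetical writer producing that layout (the helper itself is an assumption; only the key names come from the loading code above):

import torch

def save_generator_checkpoint(generator, epoch, path):
    # Hypothetical: mirrors the keys read by torch.load in the
    # enhancement script above.
    torch.save({
        "epoch": epoch,
        "model_state_dict_G": generator.state_dict(),
    }, path)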
args = parser.parse_args()

"""
Preparation
"""
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
with open(args.config) as f:
    config = json.load(f)
model_checkpoint_path = args.model_checkpoint_path
output_dir = args.output_dir
assert os.path.exists(output_dir), "The output directory for enhanced audio must already exist."

"""
DataLoader
"""
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
dataloader = DataLoader(dataset=initialize_config(config["dataset"]), batch_size=1, num_workers=0)

"""
Model
"""
model = initialize_config(config["model"])
model.load_state_dict(load_checkpoint(model_checkpoint_path, device))
model.to(device)
model.eval()

"""
Enhancement
"""
sample_length = dataloader.dataset.sample_length
for mixture, name in tqdm(dataloader):
    assert len(name) == 1, "Only a batch size of 1 is supported in the enhancement stage."
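The enhancement loop is truncated here. As a rough guide, a sketch of fixed-length chunked inference consistent with the sample_length attribute above; the helper name, zero-padding strategy, and the model's (batch, channel, time) interface are all assumptions:

import torch
import torch.nn.functional as F

def enhance_in_chunks(model, mixture, sample_length):
    # Hypothetical helper: zero-pad the 1-D waveform to a multiple of
    # sample_length, enhance chunk by chunk, then trim the padding.
    n = mixture.numel()
    padding = (sample_length - n % sample_length) % sample_length
    padded = F.pad(mixture.reshape(1, 1, -1), (0, padding))
    chunks = torch.split(padded, sample_length, dim=-1)
    with torch.no_grad():
        enhanced = torch.cat([model(chunk) for chunk in chunks], dim=-1)
    return enhanced.reshape(-1)[:n]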