Пример #1
0
def main(config):
    """Build the training data loader from *config* and run Solver training."""
    # Let cuDNN benchmark conv algorithms — fast for fixed-size inputs.
    cudnn.benchmark = True

    # Assemble the data loader from the configured paths and parameters.
    loader = get_loader(config.data_dir, config.batch_size, config.len_crop)

    # Hand the loader to the Solver and start training.
    Solver(loader, config).train()
Пример #2
0
def main(config):
    """Verbose training entry point: build the loader, the Solver, and train."""
    # cuDNN autotuning speeds up fixed-size convolution workloads.
    cudnn.benchmark = True

    print('starting up')

    # Data loader built from the configured directory / batch / crop length.
    loader = get_loader(config.data_dir, config.batch_size, config.len_crop)

    print('data loaded')

    trainer = Solver(loader, config)

    print('solver loaded, training...')

    trainer.train()
Пример #3
0
def main(config):
    """Train (and periodically test) a Solver on the dataset chosen by config.

    Loads mel-spectrogram params from YAML, instantiates the dataset named by
    ``config.use_loader``, splits it 80/20 into train/test samplers (seeded so
    the split is reproducible), then alternates train/test iteration phases
    until ``config.num_iters``. Finally pickles the accumulated log list.

    Fixes vs. the original: removed a leftover ``pdb.set_trace()`` that halted
    unattended runs; set-based membership for the test-index computation
    (was an O(n^2) list scan); dropped a dead ``d_idx_list`` assignment.
    """
    # cuDNN autotuner picks the fastest conv algorithms for fixed input sizes.
    cudnn.benchmark = True
    # Fixed seed so the train/test split is reproducible across runs.
    random.seed(1)
    with open(config.spmel_dir + '/spmel_params.yaml') as f:
        spmel_params = yaml.load(f, Loader=yaml.FullLoader)

    # Select the dataset implementation requested by the config.
    if config.use_loader == 'PathSpecDataset':
        dataset = PathSpecDataset(config, spmel_params)
    elif config.use_loader == 'SpecChunksFromPkl':
        dataset = SpecChunksFromPkl(config, spmel_params)
    elif config.use_loader == 'VctkFromMeta':
        dataset = VctkFromMeta(config)
    else:
        raise NameError('use_loader string not valid')

    # 80% of the selected dataset goes to training.
    d_idx_list = list(range(len(dataset)))
    train_song_idxs = random.sample(d_idx_list, int(len(dataset) * 0.8))
    train_sampler = SubsetRandomSampler(train_song_idxs)

    if config.eval_all:
        # Test on the held-out 20% of *all three* datasets, concatenated.
        vctk = VctkFromMeta(config)
        medleydb = SpecChunksFromPkl(config, spmel_params)
        vocalset = PathSpecDataset(config, spmel_params)
        datasets = [vocalset, medleydb, vctk]
        print('Finished loading the datasets...')
        accum_ds_size = 0
        all_ds_test_idxs = []
        for ds in datasets:
            # Re-seeding every iteration reproduces the same per-dataset split
            # that the single-dataset branch above would have produced.
            random.seed(1)
            current_ds_size = len(ds)
            d_idx_list = list(range(current_ds_size))
            # Set membership makes the complement computation O(n).
            ds_train_idxs = set(random.sample(d_idx_list, int(current_ds_size * 0.8)))
            test_song_idxs = [x for x in d_idx_list if x not in ds_train_idxs]
            # Offset local indices into the concatenated dataset's index space.
            all_ds_test_idxs.extend(idx + accum_ds_size for idx in test_song_idxs)
            accum_ds_size += current_ds_size
        c_datasets = ConcatDataset(datasets)
        test_sampler = SubsetRandomSampler(all_ds_test_idxs)
        config.test_idxs = all_ds_test_idxs
        test_loader = DataLoader(c_datasets, batch_size=config.batch_size,
                                 sampler=test_sampler, shuffle=False, drop_last=True)
    else:
        # Held-out 20% of the single selected dataset.
        train_idx_set = set(train_song_idxs)
        test_song_idxs = [x for x in d_idx_list if x not in train_idx_set]
        config.test_idxs = test_song_idxs
        test_sampler = SubsetRandomSampler(test_song_idxs)
        test_loader = DataLoader(dataset, batch_size=config.batch_size,
                                 sampler=test_sampler, shuffle=False, drop_last=True)

    train_loader = DataLoader(dataset, batch_size=config.batch_size,
                              sampler=train_sampler, shuffle=False, drop_last=True)
    solver = Solver(train_loader, config, spmel_params)
    current_iter = solver.get_current_iters()
    log_list = []
    # NOTE(review): removed a leftover `pdb.set_trace()` here — it suspended
    # every run at this point, which is fatal for unattended training.
    while current_iter < config.num_iters:
        current_iter, log_list = solver.iterate(
            'train', train_loader, current_iter, config.train_iter, log_list)
        # The test phase runs for 20% as many iterations as the train phase.
        current_iter, log_list = solver.iterate(
            'test', test_loader, current_iter, int(config.train_iter * 0.2), log_list)
    solver.closeWriter()
    with open(config.data_dir + '/' + config.file_name + '/log_list.pkl', 'wb') as f:
        pickle.dump(log_list, f)
Пример #4
0
def main(config):
    """Build train/validation loaders from *config* and run Solver training."""
    # cuDNN autotuning — fast for fixed-size inputs.
    cudnn.benchmark = True

    # Training loader: shuffled each epoch.
    train_loader = get_loader(config.data_dir,
                              config.data_train_meta_path,
                              config.batch_size,
                              config.max_len,
                              shuffle=True)
    print(train_loader)
    print('len:', len(train_loader))

    # Validation loader. (Original note, translated: each audio clip's start
    # point is still random even for validation — ignored for now.)
    val_loader = get_loader(config.data_dir,
                            config.data_val_meta_path,
                            config.batch_size,
                            config.max_len,
                            shuffle=False)
    print(val_loader)
    print('len:', len(val_loader))

    # Train with both loaders attached to the Solver.
    Solver(train_loader, val_loader, config).train()
Пример #5
0
                          name="test")
    # Embed the test split with the encoder and log its equal-error-rate
    # (the log message itself names the metric "EER").
    test_scorer.extract_embeddings(Encoder)
    logging.info("EER on test set : " + str(test_scorer.compute_EER()) + " %")

    # Score the training split the same way.
    train_scorer = Scoring(train_loader,
                           loader.get_dataset("train"),
                           device,
                           name="train")
    train_scorer.extract_embeddings(Encoder)
    logging.info("EER on train set : " + str(train_scorer.compute_EER()) +
                 " %")

    # And the validation split.
    val_scorer = Scoring(val_loader,
                         loader.get_dataset("val"),
                         device,
                         name="val")
    val_scorer.extract_embeddings(Encoder)
    logging.info("EER on val set : " + str(val_scorer.compute_EER()) + " %")

    # Initiating Solver
    # The Solver receives both loaders plus all three scorers — presumably for
    # periodic evaluation during training; confirm against the Solver class.
    solver = Solver((train_loader, val_loader),
                    config,
                    Encoder,
                    scorers=(train_scorer, val_scorer, test_scorer))
    logging.info("solver initialized")

    # Training
    solver.train()

    logging.info("### Training Finished ###")
Пример #6
0
    # Embed the training split and log its equal-error-rate (EER).
    train_scorer.extract_embeddings(Encoder)
    logging.info("EER on train set : " + str(train_scorer.compute_EER()) +
                 " %")

    # Validation scorer is built with n_uttrs=1 — presumably one utterance per
    # speaker; verify against the Scoring class.
    val_scorer = Scoring(val_loader,
                         loader.get_dataset("val"),
                         device,
                         name="val",
                         n_uttrs=1)
    val_scorer.extract_embeddings(Encoder)
    logging.info("EER on val set : " + str(val_scorer.compute_EER()) + " %")

    # Initiating Solver
    # NOTE(review): this flag looks like it tells the Solver the model was
    # restored from a checkpoint — confirm against Solver's config handling.
    config["model"]["from_loading"] = True
    solver = Solver((train_loader, val_loader),
                    config,
                    Encoder,
                    scorers=(train_scorer, val_scorer, test_scorer))
    logging.info("solver initialized")
    # NOTE(review): the triple-quoted string below is commented-out code left
    # by the author (a no-op string expression); preserved as-is.
    """solver.tar_eval("train", 0.01)
    solver.tar_eval("val", 0.01)
    solver.tar_eval("eval", 0.01)

    solver.tar_eval("train", 0.0182)
    solver.tar_eval("val", 0.0229)
    solver.tar_eval("eval", 0.0432)
    """
    #solver.evaluation("train")
    #solver.evaluation("val")
    #solver.evaluation("eval")

    solver.tar_eval("train", -1)