def main():
    """Generate aligned waves for every stem present in all three input sets.

    Reads globals set up at module level (``arguments``); writes results via
    ``generate_aligned_wave`` workers in a multiprocessing pool.
    """
    pprint(vars(arguments))

    arguments.output.mkdir(exist_ok=True)
    save_arguments(arguments, arguments.output / 'arguments.json')

    path_feature1 = {
        Path(p).stem: Path(p)
        for p in glob.glob(arguments.input_feature_glob1)
    }
    path_feature2 = {
        Path(p).stem: Path(p)
        for p in glob.glob(arguments.input_feature_glob2)
    }
    path_indexes = {
        Path(p).stem: Path(p)
        for p in glob.glob(arguments.input_indexes)
    }

    # Intersect all three key sets. The original intersected only feature1
    # with indexes, so a stem missing from feature2 raised KeyError below;
    # this matches the three-way intersection used by the sibling scripts.
    fn_both_list = (
        set(path_feature1.keys())
        & set(path_feature2.keys())
        & set(path_indexes.keys())
    )

    pool = multiprocessing.Pool()
    generate = partial(
        generate_aligned_wave,
        sampling_rate=arguments.sampling_rate,
        frame_period=arguments.frame_period,
        alpha=arguments.alpha,
    )
    it = pool.imap(generate, ((path_feature1[fn], path_feature2[fn], path_indexes[fn])
                              for fn in fn_both_list))
    # Progress total must be the number of items actually iterated; the
    # original used len(path_feature1), overstating the bar when stems were
    # filtered out by the intersection.
    list(tqdm.tqdm(it, total=len(fn_both_list)))
def main():
    """Run ``generate_feature`` in parallel over every file matched by ``input_glob``."""
    arguments.output.mkdir(exist_ok=True)
    save_arguments(arguments, arguments.output / 'arguments.json')

    target_paths = [Path(matched) for matched in glob.glob(arguments.input_glob)]

    worker_pool = multiprocessing.Pool()
    results = worker_pool.imap(generate_feature, target_paths)
    # Drain the iterator through tqdm so a progress bar is shown.
    list(tqdm.tqdm(results, total=len(target_paths)))
def main():
    """Convert test waves using the voice-changer and super-resolution models.

    Builds an optional f0 converter, the acoustic converter, and the super
    resolution model from module-level configuration, then processes the
    dataset test split plus any extra waves in ``test_wave_dir``.
    """
    save_arguments(arguments, output / 'arguments.json')

    # f0 converter: a trained model takes precedence over statistics-based
    # conversion; otherwise no f0 conversion is applied.
    if f0_trans_model_dir is not None:
        model = _get_predictor_model_path(f0_trans_model_dir, f0_trans_model_iteration)
        f0_converter = AcousticConverter(create_config(f0_trans_config), model, gpu=gpu)
    elif input_statistics is not None:
        f0_converter = F0Converter(input_statistics=input_statistics,
                                   target_statistics=target_statistics)
    else:
        f0_converter = None

    # acoustic converter
    config = create_config(voice_changer_config)
    model = _get_predictor_model_path(voice_changer_model_dir, voice_changer_model_iteration)
    acoustic_converter = AcousticConverter(
        config,
        model,
        gpu=gpu,
        f0_converter=f0_converter,
        out_sampling_rate=arguments.out_sampling_rate,
    )
    print(f'Loaded acoustic converter model "{model}"')

    # super resolution
    sr_config = create_sr_config(super_resolution_config)
    super_resolution = SuperResolution(sr_config, super_resolution_model, gpu=gpu)
    print(f'Loaded super resolution model "{super_resolution_model}"')

    # dataset's test split. The original wrapped sorted() in a redundant
    # list() (sorted already returns a list) and materialized a throwaway
    # list inside it; a generator expression suffices.
    if not disable_dataset_test:
        input_paths = sorted(Path(p) for p in glob.glob(str(config.dataset.input_glob)))
        numpy.random.RandomState(config.dataset.seed).shuffle(input_paths)
        paths_test = input_paths[-config.dataset.num_test:]
    else:
        paths_test = []

    # extra test data
    if test_wave_dir is not None:
        paths_test += list(test_wave_dir.glob('*.wav'))

    process_partial = partial(process,
                              acoustic_converter=acoustic_converter,
                              super_resolution=super_resolution)
    if gpu is None:
        # Pool.map already returns a list, so the extra list() was redundant;
        # the context manager also terminates workers (the original leaked
        # the pool).
        with multiprocessing.Pool() as pool:
            pool.map(process_partial, paths_test)
    else:
        # Serial on GPU — presumably to keep a single device context; confirm.
        list(map(process_partial, paths_test))
def main():
    """Generate alignment indexes for wave pairs matched by sorted position.

    The two globs are paired element-by-element after sorting, so both must
    match the same number of files.
    """
    arguments.output.mkdir(exist_ok=True)
    save_arguments(arguments, arguments.output / 'arguments.json')

    paths1 = [Path(p) for p in sorted(glob.glob(arguments.input_wave_glob1))]
    paths2 = [Path(p) for p in sorted(glob.glob(arguments.input_wave_glob2))]
    # Raise explicitly instead of using assert, which is stripped under
    # `python -O` and would silently mis-pair files.
    if len(paths1) != len(paths2):
        raise ValueError(
            f'glob size mismatch: {len(paths1)} files vs {len(paths2)} files'
        )

    pool = multiprocessing.Pool()
    it = pool.imap(generate_align_indexes, zip(paths1, paths2))
    list(tqdm.tqdm(it, total=len(paths1)))
def main():
    """Generate alignment indexes for every stem present in both input globs."""
    arguments.output.mkdir(exist_ok=True)
    save_arguments(arguments, arguments.output / 'arguments.json')

    stem_to_path1 = {Path(found).stem: Path(found)
                     for found in glob.glob(arguments.input_glob1)}
    stem_to_path2 = {Path(found).stem: Path(found)
                     for found in glob.glob(arguments.input_glob2)}
    # Only stems that exist on both sides can be aligned.
    shared_stems = set(stem_to_path1.keys()) & set(stem_to_path2.keys())

    pool = multiprocessing.Pool()
    pair_iter = ((stem_to_path1[stem], stem_to_path2[stem]) for stem in shared_stems)
    progress = pool.imap(generate_align_indexes, pair_iter)
    list(tqdm.tqdm(progress, total=len(shared_stems)))
def main():
    """Convert every feature file matched by ``input_glob`` with the acoustic model."""
    arguments.output.mkdir(exist_ok=True)
    save_arguments(arguments, arguments.output / 'arguments.json')

    config = create_config(arguments.vc_config)
    acoustic_converter = AcousticConverter(config, arguments.vc_model, gpu=arguments.gpu)

    feature_paths = [Path(matched) for matched in glob.glob(arguments.input_glob)]

    worker_pool = multiprocessing.Pool()
    convert = partial(convert_feature, acoustic_converter=acoustic_converter)
    progress = worker_pool.imap(convert, feature_paths)
    # Drain through tqdm to display progress.
    list(tqdm.tqdm(progress, total=len(feature_paths)))
def main():
    """Run ``generate_file`` for every stem found in input, target and index globs."""
    output.mkdir(exist_ok=True)
    save_arguments(arguments, output / 'arguments.json')

    input_by_stem = {Path(found).stem: Path(found)
                     for found in glob.glob(str(input_glob))}
    target_by_stem = {Path(found).stem: Path(found)
                      for found in glob.glob(str(target_glob))}
    index_by_stem = {Path(found).stem: Path(found)
                     for found in glob.glob(str(aligned_index_glob))}

    # Process only stems present in all three collections.
    shared_stems = (
        set(input_by_stem.keys())
        & set(target_by_stem.keys())
        & set(index_by_stem.keys())
    )

    pool = multiprocessing.Pool()
    triples = ((input_by_stem[stem], target_by_stem[stem], index_by_stem[stem])
               for stem in shared_stems)
    progress = pool.imap(generate_file, triples)
    list(tqdm.tqdm(progress, total=len(shared_stems)))
def main():
    """Convert test waves with the acoustic converter model.

    Builds an optional statistics-based f0 converter and the acoustic
    converter from module-level configuration, then processes the dataset
    test split plus any extra waves in ``test_wave_dir``.
    """
    save_arguments(arguments, output_dir / 'arguments.json')

    # f0 converter from statistics, when provided.
    if input_statistics is not None:
        f0_converter = F0Converter(input_statistics=input_statistics,
                                   target_statistics=target_statistics)
    else:
        f0_converter = None

    # acoustic converter
    config = create_config(config_path)
    model = _get_predictor_model_path(model_dir, model_iteration)
    acoustic_converter = AcousticConverter(
        config,
        model,
        gpu=gpu,
        f0_converter=f0_converter,
        out_sampling_rate=output_sampling_rate,
    )
    print(f'Loaded acoustic converter model "{model}"')

    # dataset test split. sorted() already returns a list, so the original
    # list(sorted([...])) double wrapping is dropped, along with the
    # throwaway inner list.
    if not disable_dataset_test:
        input_paths = sorted(Path(p) for p in glob.glob(str(config.dataset.input_glob)))
        numpy.random.RandomState(config.dataset.seed).shuffle(input_paths)
        paths_test = input_paths[-config.dataset.num_test:]
    else:
        paths_test = []

    # additional test waves
    if test_wave_dir is not None:
        paths_test += list(test_wave_dir.glob('*.wav'))

    process_partial = partial(process, acoustic_converter=acoustic_converter)
    if gpu is None:
        # Pool.map already returns a list, so the extra list() was redundant;
        # the context manager also terminates workers (the original leaked
        # the pool).
        with multiprocessing.Pool() as pool:
            pool.map(process_partial, paths_test)
    else:
        # Serial on GPU — presumably to keep a single device context; confirm.
        list(map(process_partial, paths_test))