def test_acoustic(basic_corpus_dir, generated_dir):
    """Build a dictionary-less Corpus from the basic fixture and check that
    word pronunciations fall back to per-character spellings, while noise
    markers are excluded from the generated word list."""
    out_dir = os.path.join(generated_dir, 'acoustic')
    corpus = Corpus(basic_corpus_dir, out_dir)
    dictionary = no_dictionary(corpus, out_dir)
    corpus.initialize_corpus(dictionary)
    # Orthographic fallback: a word's first pronunciation is its letters.
    assert dictionary.words['should'][0][0] == tuple('should')
    # Noise markers must not leak into the word list.
    assert '<vocnoise>' not in dictionary.words
    # Apostrophes are stripped before spelling out.
    assert dictionary.words["here's"][0][0] == ('h', 'e', 'r', 'e', 's')
def test_vietnamese(vietnamese_corpus_dir, temp_dir):
    """Same dictionary-less fallback check as the acoustic test, but on a
    Vietnamese corpus to exercise non-ASCII orthography."""
    out_dir = os.path.join(temp_dir, 'vietnamese')
    corpus = Corpus(vietnamese_corpus_dir, out_dir)
    dictionary = no_dictionary(corpus, out_dir)
    corpus.initialize_corpus(dictionary)
    # Accented characters are kept as single pronunciation symbols.
    assert dictionary.words['chăn'][0][0] == ('c', 'h', 'ă', 'n')
    # Noise markers must not leak into the word list.
    assert '<vocnoise>' not in dictionary.words
    assert dictionary.words['tập'][0][0] == ('t', 'ậ', 'p')
def test_acoustic(basic_dir, generated_dir):
    """Run the older corpus pipeline (write / create_mfccs / setup_splits)
    without a dictionary and verify per-character fallback pronunciations."""
    out_dir = os.path.join(generated_dir, 'acoustic')
    corpus = Corpus(basic_dir, out_dir)
    corpus.write()
    corpus.create_mfccs()
    dictionary = no_dictionary(corpus, out_dir)
    corpus.setup_splits(dictionary)
    # Pronunciations here are lists of lists rather than nested tuples.
    assert dictionary.words['should'] == [list('should')]
    # Noise markers must not leak into the word list.
    assert '<vocnoise>' not in dictionary.words
    # Apostrophes are stripped before spelling out.
    assert dictionary.words["here's"] == [list('heres')]
def test_vietnamese(textgrid_directory, generated_dir):
    """Older-pipeline counterpart of the Vietnamese fallback test: the corpus
    lives under a 'vietnamese' subdirectory of the TextGrid fixture tree."""
    out_dir = os.path.join(generated_dir, 'vietnamese')
    corpus = Corpus(os.path.join(textgrid_directory, 'vietnamese'), out_dir)
    corpus.write()
    corpus.create_mfccs()
    dictionary = no_dictionary(corpus, out_dir)
    corpus.setup_splits(dictionary)
    # Accented characters stay single symbols; keep the expected lists
    # explicit to avoid any ambiguity about Unicode composition.
    assert dictionary.words['chăn'] == [['c', 'h', 'ă', 'n']]
    # Noise markers must not leak into the word list.
    assert '<vocnoise>' not in dictionary.words
    assert dictionary.words['tập'] == [['t', 'ậ', 'p']]
def align_corpus_no_dict(args):
    """Train and align a corpus without a pronunciation dictionary.

    Builds a character-fallback dictionary from the corpus orthography, then
    runs the monophone -> triphone -> speaker-adapted (fMLLR) training
    sequence, exporting TextGrids after each pass.

    Parameters
    ----------
    args : argparse.Namespace
        Expects at least ``corpus_directory``, ``output_directory``,
        ``temp_directory``, ``clean``, ``speaker_characters``, ``fast``,
        ``verbose`` and ``output_model_path``; ``num_jobs``, ``debug``,
        ``ignore_exceptions`` and ``quiet`` are optional.
    """
    if not args.temp_directory:
        temp_dir = TEMP_DIR
    else:
        temp_dir = os.path.expanduser(args.temp_directory)
    corpus_name = os.path.basename(args.corpus_directory)
    data_directory = os.path.join(temp_dir, corpus_name)
    if args.clean:
        # Wipe any previous run's intermediate and output files.
        shutil.rmtree(data_directory, ignore_errors=True)
        shutil.rmtree(args.output_directory, ignore_errors=True)
    os.makedirs(data_directory, exist_ok=True)
    os.makedirs(args.output_directory, exist_ok=True)
    # Resolve optional attributes once so Corpus and TrainableAligner see the
    # same values. The original read args.num_jobs/args.debug directly for the
    # aligner, which raised AttributeError whenever the getattr defaults used
    # for Corpus were actually needed.
    num_jobs = getattr(args, 'num_jobs', 3)
    debug = getattr(args, 'debug', False)
    corpus = Corpus(args.corpus_directory, data_directory,
                    args.speaker_characters,
                    num_jobs=num_jobs,
                    debug=debug,
                    ignore_exceptions=getattr(args, 'ignore_exceptions', False))
    print(corpus.speaker_utterance_info())
    dictionary = no_dictionary(corpus, data_directory)
    # '--fast' trades fewer realignment passes for speed.
    mono_params = {'align_often': not args.fast}
    tri_params = {'align_often': not args.fast}
    tri_fmllr_params = {'align_often': not args.fast}
    a = TrainableAligner(corpus, dictionary, args.output_directory,
                         temp_directory=data_directory,
                         mono_params=mono_params, tri_params=tri_params,
                         tri_fmllr_params=tri_fmllr_params,
                         num_jobs=num_jobs, debug=debug,
                         skip_input=getattr(args, 'quiet', False))
    a.verbose = args.verbose
    # Export after every training stage so partial results survive a crash.
    a.train_mono()
    a.export_textgrids()
    a.train_tri()
    a.export_textgrids()
    a.train_tri_fmllr()
    a.export_textgrids()
    if args.output_model_path is not None:
        a.save(args.output_model_path)
def align_corpus_no_dict(corpus_dir, output_directory, temp_dir, output_model_path, args):
    """Train and align a corpus without a pronunciation dictionary.

    Builds a character-fallback dictionary from the corpus orthography, runs
    the legacy corpus pipeline (write / create_mfccs / setup_splits), then the
    monophone -> triphone -> fMLLR training sequence, exporting TextGrids
    after each pass.

    Parameters
    ----------
    corpus_dir : str
        Directory containing the corpus sound files and transcriptions.
    output_directory : str
        Directory to export aligned TextGrids to.
    temp_dir : str or None
        Working directory root; falls back to TEMP_DIR when empty/None.
    output_model_path : str or None
        Where to save the trained model; skipped when None.
    args : argparse.Namespace
        Expects ``clean``, ``speaker_characters``, ``num_jobs``, ``fast``
        and ``verbose``.
    """
    # Treat None the same as '' — the original `temp_dir == ''` check let a
    # None value through to os.path.expanduser and raised TypeError.
    if not temp_dir:
        temp_dir = TEMP_DIR
    else:
        temp_dir = os.path.expanduser(temp_dir)
    corpus_name = os.path.basename(corpus_dir)
    data_directory = os.path.join(temp_dir, corpus_name)
    if args.clean:
        # Wipe any previous run's intermediate and output files.
        shutil.rmtree(data_directory, ignore_errors=True)
        shutil.rmtree(output_directory, ignore_errors=True)
    os.makedirs(data_directory, exist_ok=True)
    os.makedirs(output_directory, exist_ok=True)
    corpus = Corpus(corpus_dir, data_directory, args.speaker_characters,
                    num_jobs=args.num_jobs)
    print(corpus.speaker_utterance_info())
    dictionary = no_dictionary(corpus, data_directory)
    dictionary.write()
    corpus.write()
    corpus.create_mfccs()
    corpus.setup_splits(dictionary)
    # '--fast' trades fewer realignment passes for speed.
    mono_params = {'align_often': not args.fast}
    tri_params = {'align_often': not args.fast}
    tri_fmllr_params = {'align_often': not args.fast}
    a = TrainableAligner(corpus, dictionary, output_directory,
                         temp_directory=data_directory,
                         mono_params=mono_params, tri_params=tri_params,
                         tri_fmllr_params=tri_fmllr_params,
                         num_jobs=args.num_jobs)
    a.verbose = args.verbose
    # Export after every training stage so partial results survive a crash.
    a.train_mono()
    a.export_textgrids()
    a.train_tri()
    a.export_textgrids()
    a.train_tri_fmllr()
    a.export_textgrids()
    if output_model_path is not None:
        a.save(output_model_path)
def align_corpus_no_dict(corpus_dir, output_directory, temp_dir, output_model_path, args):
    """Align a corpus with no pronunciation dictionary.

    A fallback dictionary is generated from the corpus text, the legacy
    corpus pipeline is run, and three training passes (mono, tri, tri+fMLLR)
    are performed, each followed by a TextGrid export.

    Parameters
    ----------
    corpus_dir : str
        Directory containing the corpus sound files and transcriptions.
    output_directory : str
        Directory to export aligned TextGrids to.
    temp_dir : str or None
        Working directory root; falls back to TEMP_DIR when falsy.
    output_model_path : str or None
        Where to save the trained model; skipped when None.
    args : argparse.Namespace
        Expects ``clean``, ``speaker_characters``, ``num_jobs``, ``fast``
        and ``verbose``.
    """
    temp_dir = os.path.expanduser(temp_dir) if temp_dir else TEMP_DIR
    data_directory = os.path.join(temp_dir, os.path.basename(corpus_dir))
    if args.clean:
        # Remove leftovers from any previous run before recreating the dirs.
        for stale in (data_directory, output_directory):
            shutil.rmtree(stale, ignore_errors=True)
    for needed in (data_directory, output_directory):
        os.makedirs(needed, exist_ok=True)
    corpus = Corpus(corpus_dir, data_directory, args.speaker_characters,
                    num_jobs=args.num_jobs)
    print(corpus.speaker_utterance_info())
    dictionary = no_dictionary(corpus, data_directory)
    dictionary.write()
    corpus.write()
    corpus.create_mfccs()
    corpus.setup_splits(dictionary)
    # '--fast' disables the extra realignment passes in every stage.
    align_often = {'align_often': not args.fast}
    aligner = TrainableAligner(corpus, dictionary, output_directory,
                               temp_directory=data_directory,
                               mono_params=dict(align_often),
                               tri_params=dict(align_often),
                               tri_fmllr_params=dict(align_often),
                               num_jobs=args.num_jobs)
    aligner.verbose = args.verbose
    # Export after each stage so partial results are kept if a later one fails.
    aligner.train_mono()
    aligner.export_textgrids()
    aligner.train_tri()
    aligner.export_textgrids()
    aligner.train_tri_fmllr()
    aligner.export_textgrids()
    if output_model_path is not None:
        aligner.save(output_model_path)