def test_specaugment_2d_input_raises_error():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    feats = torch.from_numpy(cuts[0].load_features())
    tfnm = SpecAugment(p=1.0, time_warp_factor=10)
    with pytest.raises(AssertionError):
        augmented = tfnm(feats)
        assert (feats != augmented).any()
def libri_cut_set():
    cuts = CutSet.from_json("test/fixtures/libri/cuts.json")
    return CutSet.from_cuts([
        cuts[0],
        cuts[0].with_id("copy-1"),
        cuts[0].with_id("copy-2"),
        cuts[0].append(cuts[0]),
    ])
def test_collate_audio_padding():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    assert len(set(cut.num_samples for cut in cuts)) > 1
    correct_pad = max(cut.num_samples for cut in cuts)
    audio, audio_lens = collate_audio(cuts)
    assert audio.shape[-1] == correct_pad
    assert max(audio_lens).item() == correct_pad
def test_collate_feature_padding():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    assert len(set(cut.num_frames for cut in cuts)) > 1
    correct_pad = max(cut.num_frames for cut in cuts)
    features, features_lens = collate_features(cuts)
    assert features.shape[1] == correct_pad
    assert max(features_lens).item() == correct_pad
def test_collate_audio_padding_fault_tolerant_return_vals():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    assert len(set(cut.num_samples for cut in cuts)) > 1
    correct_pad = max(cut.num_samples for cut in cuts)
    audio, audio_lens, cuts_ok = collate_audio(cuts, fault_tolerant=True)
    assert len(cuts) == len(cuts_ok)
    assert audio.shape[-1] == correct_pad
    assert max(audio_lens).item() == correct_pad
def test_specaugment_batch(num_feature_masks, num_frame_masks):
    cuts = CutSet.from_json('test/fixtures/ljspeech/cuts.json')
    feats, feat_lens = collate_features(cuts)
    tfnm = SpecAugment(
        p=1.0,
        time_warp_factor=10,
        features_mask_size=5,
        frames_mask_size=20,
        num_feature_masks=num_feature_masks,
        num_frame_masks=num_frame_masks,
    )
    augmented = tfnm(feats)
    assert (feats != augmented).any()
def test_cut_set_serialization(cut_set, format, compressed): with NamedTemporaryFile(suffix=".gz" if compressed else "") as f: if format == "yaml": cut_set.to_yaml(f.name) restored = CutSet.from_yaml(f.name) if format == "json": cut_set.to_json(f.name) restored = CutSet.from_json(f.name) if format == "jsonl": cut_set.to_jsonl(f.name) restored = CutSet.from_jsonl(f.name) assert cut_set == restored
def test_collate_custom_numbers():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    expected_snrs = []
    for cut in cuts:
        expected_snrs.append(random.random() * 20)
        cut.snr = expected_snrs[-1]
    snrs = collate_custom_field(cuts, "snr")
    assert isinstance(snrs, torch.Tensor)
    assert snrs.dtype == torch.float32
    assert snrs.shape == (len(cuts),)
    for idx, snr in enumerate(expected_snrs):
        assert isclose(snrs[idx], snr, abs_tol=1e-5)
def test_collate_custom_temporal_array_ints(pad_value):
    CODEBOOK_SIZE = 512
    FRAME_SHIFT = 0.04
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    max_num_frames = max(seconds_to_frames(cut.duration, FRAME_SHIFT) for cut in cuts)
    with NamedTemporaryFile(suffix=".h5") as f, NumpyHdf5Writer(f.name) as writer:
        expected_codebook_indices = []
        for cut in cuts:
            expected_codebook_indices.append(
                np.random.randint(
                    CODEBOOK_SIZE,
                    size=(seconds_to_frames(cut.duration, FRAME_SHIFT),),
                ).astype(np.int16)
            )
            cut.codebook_indices = writer.store_array(
                cut.id,
                expected_codebook_indices[-1],
                frame_shift=FRAME_SHIFT,
                temporal_dim=0,
            )
        codebook_indices, codebook_indices_lens = collate_custom_field(
            cuts, "codebook_indices", pad_value=pad_value
        )
        assert isinstance(codebook_indices_lens, torch.Tensor)
        assert codebook_indices_lens.dtype == torch.int32
        assert codebook_indices_lens.shape == (len(cuts),)
        assert codebook_indices_lens.tolist() == [
            seconds_to_frames(c.duration, FRAME_SHIFT) for c in cuts
        ]
        assert isinstance(codebook_indices, torch.Tensor)
        assert codebook_indices.dtype == torch.int16
        assert codebook_indices.shape == (len(cuts), max_num_frames)
        for idx, cbidxs in enumerate(expected_codebook_indices):
            exp_len = cbidxs.shape[0]
            # PyTorch < 1.9.0 doesn't have an assert_equal function.
            np.testing.assert_equal(codebook_indices[idx, :exp_len].numpy(), cbidxs)
            expected_pad_value = 0 if pad_value is None else pad_value
            np.testing.assert_equal(codebook_indices[idx, exp_len:].numpy(), expected_pad_value)
def test_collate_custom_array():
    EMBEDDING_SIZE = 300
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    with NamedTemporaryFile(suffix=".h5") as f, NumpyHdf5Writer(f.name) as writer:
        expected_xvectors = []
        for cut in cuts:
            expected_xvectors.append(np.random.randn(EMBEDDING_SIZE).astype(np.float32))
            cut.xvector = writer.store_array(cut.id, expected_xvectors[-1])
        xvectors = collate_custom_field(cuts, "xvector")
        assert isinstance(xvectors, torch.Tensor)
        assert xvectors.dtype == torch.float32
        assert xvectors.shape == (len(cuts), EMBEDDING_SIZE)
        for idx, xvec in enumerate(expected_xvectors):
            torch.testing.assert_allclose(xvectors[idx], xvec)
def test_collate_custom_temporal_array_floats(pad_value):
    VOCAB_SIZE = 500
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    max_num_frames = max(cut.num_frames for cut in cuts)
    with NamedTemporaryFile(suffix=".h5") as f, NumpyHdf5Writer(f.name) as writer:
        expected_posteriors = []
        for cut in cuts:
            expected_posteriors.append(
                np.random.randn(cut.num_frames, VOCAB_SIZE).astype(np.float32)
            )
            cut.posterior = writer.store_array(
                cut.id,
                expected_posteriors[-1],
                frame_shift=cut.frame_shift,
                temporal_dim=0,
            )
        posteriors, posterior_lens = collate_custom_field(cuts, "posterior", pad_value=pad_value)
        assert isinstance(posterior_lens, torch.Tensor)
        assert posterior_lens.dtype == torch.int32
        assert posterior_lens.shape == (len(cuts),)
        assert posterior_lens.tolist() == [c.num_frames for c in cuts]
        assert isinstance(posteriors, torch.Tensor)
        assert posteriors.dtype == torch.float32
        assert posteriors.shape == (len(cuts), max_num_frames, VOCAB_SIZE)
        for idx, post in enumerate(expected_posteriors):
            exp_len = post.shape[0]
            torch.testing.assert_allclose(posteriors[idx, :exp_len], post)
            expected_pad_value = 0 if pad_value is None else pad_value
            torch.testing.assert_allclose(
                posteriors[idx, exp_len:],
                expected_pad_value * torch.ones_like(posteriors[idx, exp_len:]),
            )
def main():
    # load L, G, symbol_table
    lang_dir = 'data/lang_nosp'
    with open(lang_dir + '/L.fst.txt') as f:
        L = k2.Fsa.from_openfst(f.read(), acceptor=False)
    with open(lang_dir + '/G.fsa.txt') as f:
        G = k2.Fsa.from_openfst(f.read(), acceptor=True)
    with open(lang_dir + '/words.txt') as f:
        symbol_table = k2.SymbolTable.from_str(f.read())

    L = k2.arc_sort(L.invert_())
    G = k2.arc_sort(G)
    graph = k2.intersect(L, G)
    graph = k2.arc_sort(graph)

    # load dataset
    feature_dir = 'exp/data1'
    cuts_train = CutSet.from_json(feature_dir + '/cuts_train-clean-100.json.gz')
    cuts_dev = CutSet.from_json(feature_dir + '/cuts_dev-clean.json.gz')

    train = K2SpeechRecognitionIterableDataset(cuts_train, shuffle=True)
    validate = K2SpeechRecognitionIterableDataset(cuts_dev, shuffle=False)
    train_dl = torch.utils.data.DataLoader(train, batch_size=None, num_workers=1)
    valid_dl = torch.utils.data.DataLoader(validate, batch_size=None, num_workers=1)

    dir = 'exp'
    setup_logger('{}/log/log-train'.format(dir))

    if not torch.cuda.is_available():
        logging.error('No GPU detected!')
        sys.exit(-1)
    device_id = 0
    device = torch.device('cuda', device_id)
    model = Wav2Letter(num_classes=364, input_type='mfcc', num_features=40)
    model.to(device)

    learning_rate = 0.001
    start_epoch = 0
    num_epochs = 10
    best_objf = 100000
    best_epoch = start_epoch
    best_model_path = os.path.join(dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(dir, 'best-epoch-info')

    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=5e-4)
    # optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)

    for epoch in range(start_epoch, num_epochs):
        curr_learning_rate = learning_rate * pow(0.4, epoch)
        for param_group in optimizer.param_groups:
            param_group['lr'] = curr_learning_rate

        logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))
        objf = train_one_epoch(dataloader=train_dl,
                               valid_dataloader=valid_dl,
                               model=model,
                               device=device,
                               graph=graph,
                               symbols=symbol_table,
                               optimizer=optimizer,
                               current_epoch=epoch,
                               num_epochs=num_epochs)
        if objf < best_objf:
            best_objf = objf
            best_epoch = epoch
            save_checkpoint(filename=best_model_path, model=model, epoch=epoch,
                            learning_rate=curr_learning_rate, objf=objf)
            save_training_info(filename=best_epoch_info_filename,
                               model_path=best_model_path,
                               current_epoch=epoch,
                               learning_rate=curr_learning_rate,
                               objf=best_objf,
                               best_objf=best_objf,
                               best_epoch=best_epoch)

        # we always save the model for every epoch
        model_path = os.path.join(dir, 'epoch-{}.pt'.format(epoch))
        save_checkpoint(filename=model_path, model=model, epoch=epoch,
                        learning_rate=curr_learning_rate, objf=objf)
        epoch_info_filename = os.path.join(dir, 'epoch-{}-info'.format(epoch))
        save_training_info(filename=epoch_info_filename,
                           model_path=model_path,
                           current_epoch=epoch,
                           learning_rate=curr_learning_rate,
                           objf=objf,
                           best_objf=best_objf,
                           best_epoch=best_epoch)

    logging.warning('Done')
def main():
    exp_dir = Path('exp-lstm-adam')
    setup_logger('{}/log/log-decode'.format(exp_dir), log_level='debug')

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')

    ctc_topo = build_ctc_topo(list(phone_symbol_table._id2sym.keys()))
    ctc_topo = k2.arc_sort(ctc_topo)

    if not os.path.exists(lang_dir / 'LG.pt'):
        print("Loading L_disambig.fst.txt")
        with open(lang_dir / 'L_disambig.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
        print("Loading G.fsa.txt")
        with open(lang_dir / 'G.fsa.txt') as f:
            G = k2.Fsa.from_openfst(f.read(), acceptor=True)
        first_phone_disambig_id = find_first_disambig_symbol(phone_symbol_table)
        first_word_disambig_id = find_first_disambig_symbol(symbol_table)
        LG = compile_LG(L=L, G=G, ctc_topo=ctc_topo,
                        labels_disambig_id_start=first_phone_disambig_id,
                        aux_labels_disambig_id_start=first_word_disambig_id)
        torch.save(LG.as_dict(), lang_dir / 'LG.pt')
    else:
        print("Loading pre-compiled LG")
        d = torch.load(lang_dir / 'LG.pt')
        LG = k2.Fsa.from_dict(d)

    # load dataset
    feature_dir = Path('exp/data')
    print("About to get test cuts")
    cuts_test = CutSet.from_json(feature_dir / 'cuts_test.json.gz')

    print("About to create test dataset")
    test = K2SpeechRecognitionIterableDataset(cuts_test, max_frames=100000,
                                              shuffle=False, concat_cuts=False)
    print("About to create test dataloader")
    test_dl = torch.utils.data.DataLoader(test, batch_size=None, num_workers=1)

    # if not torch.cuda.is_available():
    #     logging.error('No GPU detected!')
    #     sys.exit(-1)

    print("About to load model")
    # Note: Use "export CUDA_VISIBLE_DEVICES=N" to setup device id to N
    # device = torch.device('cuda', 1)
    device = torch.device('cuda')
    model = TdnnLstm1b(num_features=40, num_classes=len(phone_symbol_table))

    checkpoint = os.path.join(exp_dir, 'epoch-9.pt')
    load_checkpoint(checkpoint, model)
    model.to(device)
    model.eval()

    print("convert LG to device")
    LG = LG.to(device)
    LG.aux_labels = k2.ragged.remove_values_eq(LG.aux_labels, 0)
    LG.requires_grad_(False)

    print("About to decode")
    results = decode(dataloader=test_dl, model=model, device=device,
                     LG=LG, symbols=symbol_table)
    s = ''
    results2 = []
    for ref, hyp in results:
        s += f'ref={ref}\n'
        s += f'hyp={hyp}\n'
        results2.append((list(''.join(ref)), list(''.join(hyp))))
    logging.info(s)

    # compute WER
    dists = [edit_distance(r, h) for r, h in results]
    dists2 = [edit_distance(r, h) for r, h in results2]
    errors = {
        key: sum(dist[key] for dist in dists)
        for key in ['sub', 'ins', 'del', 'total']
    }
    errors2 = {
        key: sum(dist[key] for dist in dists2)
        for key in ['sub', 'ins', 'del', 'total']
    }
    total_words = sum(len(ref) for ref, _ in results)
    total_chars = sum(len(ref) for ref, _ in results2)
    # Print Kaldi-like message:
    # %WER 8.20 [ 4459 / 54402, 695 ins, 427 del, 3337 sub ]
    logging.info(
        f'%WER {errors["total"] / total_words:.2%} '
        f'[{errors["total"]} / {total_words}, {errors["ins"]} ins, {errors["del"]} del, {errors["sub"]} sub ]'
    )
    logging.info(
        f'%WER {errors2["total"] / total_chars:.2%} '
        f'[{errors2["total"]} / {total_chars}, {errors2["ins"]} ins, {errors2["del"]} del, {errors2["sub"]} sub ]'
    )
def main():
    args = get_parser().parse_args()

    print('World size:', args.world_size, 'Rank:', args.local_rank)
    setup_dist(rank=args.local_rank, world_size=args.world_size)

    fix_random_seed(42)

    start_epoch = 0
    num_epochs = 10
    use_adam = True

    exp_dir = f'exp-lstm-adam-mmi-bigram-musan-dist'
    setup_logger('{}/log/log-train'.format(exp_dir), use_console=args.local_rank == 0)
    tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard') if args.local_rank == 0 else None

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')
    word_symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')

    logging.info("Loading L.fst")
    if (lang_dir / 'Linv.pt').exists():
        L_inv = k2.Fsa.from_dict(torch.load(lang_dir / 'Linv.pt'))
    else:
        with open(lang_dir / 'L.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
            L_inv = k2.arc_sort(L.invert_())
            torch.save(L_inv.as_dict(), lang_dir / 'Linv.pt')

    graph_compiler = MmiTrainingGraphCompiler(L_inv=L_inv,
                                              phones=phone_symbol_table,
                                              words=word_symbol_table)
    phone_ids = get_phone_symbols(phone_symbol_table)
    P = create_bigram_phone_lm(phone_ids)
    P.scores = torch.zeros_like(P.scores)

    # load dataset
    feature_dir = Path('exp/data')
    logging.info("About to get train cuts")
    cuts_train = CutSet.from_json(feature_dir / 'cuts_train-clean-100.json.gz')
    logging.info("About to get dev cuts")
    cuts_dev = CutSet.from_json(feature_dir / 'cuts_dev-clean.json.gz')
    logging.info("About to get Musan cuts")
    cuts_musan = CutSet.from_json(feature_dir / 'cuts_musan.json.gz')

    logging.info("About to create train dataset")
    transforms = [CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20))]
    if not args.bucketing_sampler:
        # We don't mix concatenating the cuts and bucketing
        # Here we insert concatenation before mixing so that the
        # noises from Musan are mixed onto almost-zero-energy
        # padding frames.
        transforms = [CutConcatenate()] + transforms
    train = K2SpeechRecognitionDataset(cuts_train, cut_transforms=transforms)
    if args.bucketing_sampler:
        logging.info('Using BucketingSampler.')
        train_sampler = BucketingSampler(cuts_train, max_frames=40000,
                                         shuffle=True, num_buckets=30)
    else:
        logging.info('Using regular sampler with cut concatenation.')
        train_sampler = SingleCutSampler(cuts_train, max_frames=30000, shuffle=True)
    logging.info("About to create train dataloader")
    train_dl = torch.utils.data.DataLoader(train, sampler=train_sampler,
                                           batch_size=None, num_workers=4)

    logging.info("About to create dev dataset")
    validate = K2SpeechRecognitionDataset(cuts_dev)
    # Note: we explicitly set world_size to 1 to disable the auto-detection of
    # distributed training inside the sampler. This way, every GPU will
    # perform the computation on the full dev set. It is a bit wasteful,
    # but unfortunately loss aggregation between multiple processes with
    # torch.distributed.all_reduce() tends to hang indefinitely inside
    # NCCL after ~3000 steps. With the current approach, we can still report
    # the loss on the full validation set.
    valid_sampler = SingleCutSampler(cuts_dev, max_frames=90000, world_size=1, rank=0)
    logging.info("About to create dev dataloader")
    valid_dl = torch.utils.data.DataLoader(validate, sampler=valid_sampler,
                                           batch_size=None, num_workers=1)

    if not torch.cuda.is_available():
        logging.error('No GPU detected!')
        sys.exit(-1)

    logging.info("About to create model")
    device_id = args.local_rank
    device = torch.device('cuda', device_id)
    model = TdnnLstm1b(
        num_features=40,
        num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
        subsampling_factor=3)
    model.P_scores = nn.Parameter(P.scores.clone(), requires_grad=True)

    model.to(device)
    describe(model)

    if use_adam:
        learning_rate = 1e-3
        weight_decay = 5e-4
        optimizer = optim.AdamW(model.parameters(), lr=learning_rate,
                                weight_decay=weight_decay)
        # Equivalent to the following in the epoch loop:
        #  if epoch > 6:
        #      curr_learning_rate *= 0.8
        lr_scheduler = optim.lr_scheduler.LambdaLR(
            optimizer, lambda ep: 1.0 if ep < 7 else 0.8**(ep - 6))
    else:
        learning_rate = 5e-5
        weight_decay = 1e-5
        momentum = 0.9
        lr_schedule_gamma = 0.7
        optimizer = optim.SGD(model.parameters(), lr=learning_rate,
                              momentum=momentum, weight_decay=weight_decay)
        lr_scheduler = optim.lr_scheduler.ExponentialLR(
            optimizer=optimizer, gamma=lr_schedule_gamma)

    best_objf = np.inf
    best_valid_objf = np.inf
    best_epoch = start_epoch
    best_model_path = os.path.join(exp_dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')
    global_batch_idx_train = 0  # for logging only

    if start_epoch > 0:
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))
        ckpt = load_checkpoint(filename=model_path, model=model,
                               optimizer=optimizer, scheduler=lr_scheduler)
        best_objf = ckpt['objf']
        best_valid_objf = ckpt['valid_objf']
        global_batch_idx_train = ckpt['global_batch_idx_train']
        logging.info(
            f"epoch = {ckpt['epoch']}, objf = {best_objf}, valid_objf = {best_valid_objf}"
        )

    if args.world_size > 1:
        logging.info(
            'Using DistributedDataParallel in training. '
            'The reported loss, num_frames, etc. for training steps include '
            'only the batches seen in the master process (the actual loss '
            'includes batches from all GPUs, and the actual num_frames is '
            f'approx. {args.world_size}x larger.')
        # For now do not sync BatchNorm across GPUs due to NCCL hanging in all_gather...
        # model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank)

    for epoch in range(start_epoch, num_epochs):
        train_sampler.set_epoch(epoch)

        # LR scheduler can hold multiple learning rates for multiple parameter groups;
        # For now we report just the first LR which we assume concerns most of the parameters.
        curr_learning_rate = lr_scheduler.get_last_lr()[0]
        if tb_writer is not None:
            tb_writer.add_scalar('train/learning_rate', curr_learning_rate, global_batch_idx_train)
            tb_writer.add_scalar('train/epoch', epoch, global_batch_idx_train)

        logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))
        objf, valid_objf, global_batch_idx_train = train_one_epoch(
            dataloader=train_dl,
            valid_dataloader=valid_dl,
            model=model,
            P=P,
            device=device,
            graph_compiler=graph_compiler,
            optimizer=optimizer,
            current_epoch=epoch,
            tb_writer=tb_writer,
            num_epochs=num_epochs,
            global_batch_idx_train=global_batch_idx_train,
        )

        lr_scheduler.step()

        # the lower, the better
        if valid_objf < best_valid_objf:
            best_valid_objf = valid_objf
            best_objf = objf
            best_epoch = epoch
            save_checkpoint(filename=best_model_path, model=model, optimizer=None,
                            scheduler=None, epoch=epoch,
                            learning_rate=curr_learning_rate, objf=objf,
                            local_rank=args.local_rank, valid_objf=valid_objf,
                            global_batch_idx_train=global_batch_idx_train)
            save_training_info(filename=best_epoch_info_filename,
                               model_path=best_model_path, current_epoch=epoch,
                               learning_rate=curr_learning_rate, objf=objf,
                               best_objf=best_objf, valid_objf=valid_objf,
                               best_valid_objf=best_valid_objf, best_epoch=best_epoch)

        # we always save the model for every epoch
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))
        save_checkpoint(filename=model_path, model=model, optimizer=optimizer,
                        scheduler=lr_scheduler, epoch=epoch,
                        learning_rate=curr_learning_rate, objf=objf,
                        local_rank=args.local_rank, valid_objf=valid_objf,
                        global_batch_idx_train=global_batch_idx_train)
        epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))
        save_training_info(filename=epoch_info_filename, model_path=model_path,
                           current_epoch=epoch, learning_rate=curr_learning_rate,
                           objf=objf, best_objf=best_objf, valid_objf=valid_objf,
                           best_valid_objf=best_valid_objf, best_epoch=best_epoch)

    logging.warning('Done')
    cleanup_dist()
def main():
    # load L, G, symbol_table
    lang_dir = 'data/lang_nosp'
    with open(lang_dir + '/words.txt') as f:
        symbol_table = k2.SymbolTable.from_str(f.read())

    ## This commented code created LG. We don't need that there.
    ## There were problems with disambiguation symbols; the G has
    ## disambiguation symbols which L.fst doesn't support.
    # if not os.path.exists(lang_dir + '/LG.pt'):
    #     print("Loading L.fst.txt")
    #     with open(lang_dir + '/L.fst.txt') as f:
    #         L = k2.Fsa.from_openfst(f.read(), acceptor=False)
    #     print("Loading G.fsa.txt")
    #     with open(lang_dir + '/G.fsa.txt') as f:
    #         G = k2.Fsa.from_openfst(f.read(), acceptor=True)
    #     print("Arc-sorting L...")
    #     L = k2.arc_sort(L.invert_())
    #     G = k2.arc_sort(G)
    #     print(k2.is_arc_sorted(k2.get_properties(L)))
    #     print(k2.is_arc_sorted(k2.get_properties(G)))
    #     print("Intersecting L and G")
    #     graph = k2.intersect(L, G)
    #     graph = k2.arc_sort(graph)
    #     print(k2.is_arc_sorted(k2.get_properties(graph)))
    #     torch.save(graph.as_dict(), lang_dir + '/LG.pt')
    # else:
    #     d = torch.load(lang_dir + '/LG.pt')
    #     print("Loading pre-prepared LG")
    #     graph = k2.Fsa.from_dict(d)

    print("Loading L.fst.txt")
    with open(lang_dir + '/L.fst.txt') as f:
        L = k2.Fsa.from_openfst(f.read(), acceptor=False)
    L = k2.arc_sort(L.invert_())

    # load dataset
    feature_dir = 'exp/data1'
    print("About to get train cuts")
    # cuts_train = CutSet.from_json(feature_dir +
    #                               '/cuts_train-clean-100.json.gz')
    cuts_train = CutSet.from_json(feature_dir + '/cuts_dev-clean.json.gz')
    print("About to get dev cuts")
    cuts_dev = CutSet.from_json(feature_dir + '/cuts_dev-clean.json.gz')

    print("About to create train dataset")
    train = K2SpeechRecognitionIterableDataset(cuts_train, max_frames=1000, shuffle=True)
    print("About to create dev dataset")
    validate = K2SpeechRecognitionIterableDataset(cuts_dev, max_frames=1000, shuffle=False)
    print("About to create train dataloader")
    train_dl = torch.utils.data.DataLoader(train, batch_size=None, num_workers=1)
    print("About to create dev dataloader")
    valid_dl = torch.utils.data.DataLoader(validate, batch_size=None, num_workers=1)

    exp_dir = 'exp'
    setup_logger('{}/log/log-train'.format(exp_dir))

    if not torch.cuda.is_available():
        logging.error('No GPU detected!')
        sys.exit(-1)

    print("About to create model")
    device_id = 0
    device = torch.device('cuda', device_id)
    model = Wav2Letter(num_classes=364, input_type='mfcc', num_features=40)
    model.to(device)

    learning_rate = 0.001
    start_epoch = 0
    num_epochs = 10
    best_objf = 100000
    best_epoch = start_epoch
    best_model_path = os.path.join(exp_dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')

    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=5e-4)
    # optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)

    for epoch in range(start_epoch, num_epochs):
        curr_learning_rate = learning_rate * pow(0.4, epoch)
        for param_group in optimizer.param_groups:
            param_group['lr'] = curr_learning_rate

        logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))
        objf = train_one_epoch(dataloader=train_dl,
                               valid_dataloader=valid_dl,
                               model=model,
                               device=device,
                               L=L,
                               symbols=symbol_table,
                               optimizer=optimizer,
                               current_epoch=epoch,
                               num_epochs=num_epochs)
        if objf < best_objf:
            best_objf = objf
            best_epoch = epoch
            save_checkpoint(filename=best_model_path, model=model, epoch=epoch,
                            learning_rate=curr_learning_rate, objf=objf)
            save_training_info(filename=best_epoch_info_filename,
                               model_path=best_model_path,
                               current_epoch=epoch,
                               learning_rate=curr_learning_rate,
                               objf=best_objf,
                               best_objf=best_objf,
                               best_epoch=best_epoch)

        # we always save the model for every epoch
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))
        save_checkpoint(filename=model_path, model=model, epoch=epoch,
                        learning_rate=curr_learning_rate, objf=objf)
        epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))
        save_training_info(filename=epoch_info_filename,
                           model_path=model_path,
                           current_epoch=epoch,
                           learning_rate=curr_learning_rate,
                           objf=objf,
                           best_objf=best_objf,
                           best_epoch=best_epoch)

    logging.warning('Done')
def test_collate_custom_attribute_missing():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    with pytest.raises(AttributeError):
        collate_custom_field(cuts, "nonexistent_attribute")
def test_collate_custom_temporal_array_ints(pad_direction):
    CODEBOOK_SIZE = 512
    FRAME_SHIFT = 0.04
    EXPECTED_PAD_VALUE = 0
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    max_num_frames = max(seconds_to_frames(cut.duration, FRAME_SHIFT) for cut in cuts)
    with NamedTemporaryFile(suffix=".h5") as f, NumpyHdf5Writer(f.name) as writer:
        expected_codebook_indices = []
        for cut in cuts:
            expected_codebook_indices.append(
                np.random.randint(
                    CODEBOOK_SIZE,
                    size=(seconds_to_frames(cut.duration, FRAME_SHIFT),),
                ).astype(np.int16)
            )
            cut.codebook_indices = writer.store_array(
                cut.id,
                expected_codebook_indices[-1],
                frame_shift=FRAME_SHIFT,
                temporal_dim=0,
            )
        codebook_indices, codebook_indices_lens = collate_custom_field(
            cuts, "codebook_indices", pad_direction=pad_direction
        )
        assert isinstance(codebook_indices_lens, torch.Tensor)
        assert codebook_indices_lens.dtype == torch.int32
        assert codebook_indices_lens.shape == (len(cuts),)
        assert codebook_indices_lens.tolist() == [
            seconds_to_frames(c.duration, FRAME_SHIFT) for c in cuts
        ]
        assert isinstance(codebook_indices, torch.Tensor)
        assert codebook_indices.dtype == torch.int64  # the dtype got promoted by default
        assert codebook_indices.shape == (len(cuts), max_num_frames)
        for idx, cbidxs in enumerate(expected_codebook_indices):
            exp_len = cbidxs.shape[0]
            # PyTorch < 1.9.0 doesn't have an assert_equal function.
            if pad_direction == "right":
                np.testing.assert_equal(codebook_indices[idx, :exp_len].numpy(), cbidxs)
                np.testing.assert_equal(codebook_indices[idx, exp_len:].numpy(), EXPECTED_PAD_VALUE)
            if pad_direction == "left":
                np.testing.assert_equal(codebook_indices[idx, -exp_len:].numpy(), cbidxs)
                np.testing.assert_equal(codebook_indices[idx, :-exp_len].numpy(), EXPECTED_PAD_VALUE)
            if pad_direction == "both":
                half = (max_num_frames - exp_len) // 2
                np.testing.assert_equal(codebook_indices[idx, :half].numpy(), EXPECTED_PAD_VALUE)
                np.testing.assert_equal(codebook_indices[idx, half:half + exp_len].numpy(), cbidxs)
                if half > 0:
                    # indexing like [idx, -0:] would return the whole array rather
                    # than an empty slice.
                    np.testing.assert_equal(codebook_indices[idx, -half:].numpy(), EXPECTED_PAD_VALUE)
def cut_set():
    return CutSet.from_json('test/fixtures/ljspeech/cuts.json')
def global_mvn():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    return GlobalMVN.from_cuts(cuts)
def main():
    args = get_parser().parse_args()

    epoch = args.epoch
    max_frames = args.max_frames
    avg = args.avg
    att_rate = args.att_rate

    # exp_dir = Path('/export/gpudisk2/data/hegc/audio_workspace/snowfall_aishell1/exp-transformer-noam-mmi-att-musan')
    exp_dir = Path('exp-transformer-noam-mmi-att-musan')
    setup_logger('{}/log/log-decode'.format(exp_dir), log_level='debug')

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')

    phone_ids = get_phone_symbols(phone_symbol_table)
    P = create_bigram_phone_lm(phone_ids)

    phone_ids_with_blank = [0] + phone_ids
    ctc_topo = k2.arc_sort(build_ctc_topo(phone_ids_with_blank))

    logging.debug("About to load model")
    # Note: Use "export CUDA_VISIBLE_DEVICES=N" to setup device id to N
    # device = torch.device('cuda', 1)
    device = torch.device('cuda')

    if att_rate != 0.0:
        num_decoder_layers = 6
    else:
        num_decoder_layers = 0

    model = Transformer(
        num_features=40,
        num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
        subsampling_factor=4,
        num_decoder_layers=num_decoder_layers)
    model.P_scores = torch.nn.Parameter(P.scores.clone(), requires_grad=False)

    if avg == 1:
        checkpoint = os.path.join(exp_dir, 'epoch-' + str(epoch - 1) + '.pt')
        load_checkpoint(checkpoint, model)
    else:
        checkpoints = [os.path.join(exp_dir, 'epoch-' + str(avg_epoch) + '.pt')
                       for avg_epoch in range(epoch - avg, epoch)]
        average_checkpoint(checkpoints, model)

    model.to(device)
    model.eval()

    assert P.requires_grad is False
    P.scores = model.P_scores.cpu()
    print_transition_probabilities(P, phone_symbol_table, phone_ids, filename='model_P_scores.txt')

    P.set_scores_stochastic_(model.P_scores)
    print_transition_probabilities(P, phone_symbol_table, phone_ids, filename='P_scores.txt')

    if not os.path.exists(lang_dir / 'LG.pt'):
        logging.debug("Loading L_disambig.fst.txt")
        with open(lang_dir / 'L_disambig.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
        logging.debug("Loading G.fst.txt")
        with open(lang_dir / 'G.fst.txt') as f:
            G = k2.Fsa.from_openfst(f.read(), acceptor=False)
        first_phone_disambig_id = find_first_disambig_symbol(phone_symbol_table)
        first_word_disambig_id = find_first_disambig_symbol(symbol_table)
        LG = compile_LG(L=L, G=G, ctc_topo=ctc_topo,
                        labels_disambig_id_start=first_phone_disambig_id,
                        aux_labels_disambig_id_start=first_word_disambig_id)
        torch.save(LG.as_dict(), lang_dir / 'LG.pt')
    else:
        logging.debug("Loading pre-compiled LG")
        d = torch.load(lang_dir / 'LG.pt')
        LG = k2.Fsa.from_dict(d)

    # load dataset
    # feature_dir = Path('/export/gpudisk2/data/hegc/audio_workspace/snowfall_aishell1/exp/data')
    feature_dir = Path('exp/data')
    logging.debug("About to get test cuts")
    cuts_test = CutSet.from_json(feature_dir / 'cuts_test.json.gz')

    logging.debug("About to create test dataset")
    test = K2SpeechRecognitionDataset(cuts_test)
    sampler = SingleCutSampler(cuts_test, max_frames=max_frames)
    logging.debug("About to create test dataloader")
    test_dl = torch.utils.data.DataLoader(test, batch_size=None, sampler=sampler, num_workers=1)

    # if not torch.cuda.is_available():
    #     logging.error('No GPU detected!')
    #     sys.exit(-1)

    logging.debug("convert LG to device")
    LG = LG.to(device)
    LG.aux_labels = k2.ragged.remove_values_eq(LG.aux_labels, 0)
    LG.requires_grad_(False)

    logging.debug("About to decode")
    results = decode(dataloader=test_dl, model=model, device=device,
                     LG=LG, symbols=symbol_table)
    s = ''
    results2 = []
    for ref, hyp in results:
        s += f'ref={ref}\n'
        s += f'hyp={hyp}\n'
        results2.append((list(''.join(ref)), list(''.join(hyp))))
    logging.info(s)

    # compute WER
    dists = [edit_distance(r, h) for r, h in results]
    dists2 = [edit_distance(r, h) for r, h in results2]
    errors = {
        key: sum(dist[key] for dist in dists)
        for key in ['sub', 'ins', 'del', 'total']
    }
    errors2 = {
        key: sum(dist[key] for dist in dists2)
        for key in ['sub', 'ins', 'del', 'total']
    }
    total_words = sum(len(ref) for ref, _ in results)
    total_chars = sum(len(ref) for ref, _ in results2)
    # Print Kaldi-like message:
    # %WER 8.20 [ 4459 / 54402, 695 ins, 427 del, 3337 sub ]
    logging.info(
        f'%WER {errors["total"] / total_words:.2%} '
        f'[{errors["total"]} / {total_words}, {errors["ins"]} ins, {errors["del"]} del, {errors["sub"]} sub ]'
    )
    logging.info(
        f'%WER {errors2["total"] / total_chars:.2%} '
        f'[{errors2["total"]} / {total_chars}, {errors2["ins"]} ins, {errors2["del"]} del, {errors2["sub"]} sub ]'
    )
def main():
    exp_dir = Path('exp-lstm-adam-mmi-mbr-musan')
    setup_logger('{}/log/log-decode'.format(exp_dir), log_level='debug')

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')

    phone_ids = get_phone_symbols(phone_symbol_table)
    P = create_bigram_phone_lm(phone_ids)

    phone_ids_with_blank = [0] + phone_ids
    ctc_topo = k2.arc_sort(build_ctc_topo(phone_ids_with_blank))

    logging.debug("About to load model")
    # Note: Use "export CUDA_VISIBLE_DEVICES=N" to setup device id to N
    # device = torch.device('cuda', 1)
    device = torch.device('cuda')
    model = TdnnLstm1b(
        num_features=40,
        num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
        subsampling_factor=3)
    model.P_scores = torch.nn.Parameter(P.scores.clone(), requires_grad=False)

    checkpoint = os.path.join(exp_dir, 'epoch-9.pt')
    load_checkpoint(checkpoint, model)
    model.to(device)
    model.eval()

    assert P.requires_grad is False
    P.scores = model.P_scores.cpu()
    print_transition_probabilities(P, phone_symbol_table, phone_ids, filename='model_P_scores.txt')

    P.set_scores_stochastic_(model.P_scores)
    print_transition_probabilities(P, phone_symbol_table, phone_ids, filename='P_scores.txt')

    if not os.path.exists(lang_dir / 'HLG.pt'):
        logging.debug("Loading L_disambig.fst.txt")
        with open(lang_dir / 'L_disambig.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
        logging.debug("Loading G.fst.txt")
        with open(lang_dir / 'G.fst.txt') as f:
            G = k2.Fsa.from_openfst(f.read(), acceptor=False)
        first_phone_disambig_id = find_first_disambig_symbol(phone_symbol_table)
        first_word_disambig_id = find_first_disambig_symbol(symbol_table)
        HLG = compile_HLG(L=L, G=G, H=ctc_topo,
                          labels_disambig_id_start=first_phone_disambig_id,
                          aux_labels_disambig_id_start=first_word_disambig_id)
        torch.save(HLG.as_dict(), lang_dir / 'HLG.pt')
    else:
        logging.debug("Loading pre-compiled HLG")
        d = torch.load(lang_dir / 'HLG.pt')
        HLG = k2.Fsa.from_dict(d)

    # load dataset
    feature_dir = Path('exp/data')
    logging.debug("About to get test cuts")
    cuts_test = CutSet.from_json(feature_dir / 'cuts_test-clean.json.gz')

    logging.info("About to create test dataset")
    test = K2SpeechRecognitionDataset(cuts_test)
    sampler = SingleCutSampler(cuts_test, max_frames=100000)
    logging.info("About to create test dataloader")
    test_dl = torch.utils.data.DataLoader(test, batch_size=None, sampler=sampler, num_workers=1)

    # if not torch.cuda.is_available():
    #     logging.error('No GPU detected!')
    #     sys.exit(-1)

    logging.debug("convert HLG to device")
    HLG = HLG.to(device)
    HLG.aux_labels = k2.ragged.remove_values_eq(HLG.aux_labels, 0)
    HLG.requires_grad_(False)

    logging.debug("About to decode")
    results = decode(dataloader=test_dl, model=model, device=device,
                     HLG=HLG, symbols=symbol_table)
    s = ''
    for ref, hyp in results:
        s += f'ref={ref}\n'
        s += f'hyp={hyp}\n'
    logging.info(s)

    # compute WER
    dists = [edit_distance(r, h) for r, h in results]
    errors = {
        key: sum(dist[key] for dist in dists)
        for key in ['sub', 'ins', 'del', 'total']
    }
    total_words = sum(len(ref) for ref, _ in results)
    # Print Kaldi-like message:
    # %WER 8.20 [ 4459 / 54402, 695 ins, 427 del, 3337 sub ]
    logging.info(
        f'%WER {errors["total"] / total_words:.2%} '
        f'[{errors["total"]} / {total_words}, {errors["ins"]} ins, {errors["del"]} del, {errors["sub"]} sub ]'
    )
def main():
    fix_random_seed(42)

    start_epoch = 0
    num_epochs = 8

    exp_dir = 'exp-lstm-adam-ctc-musan'
    setup_logger('{}/log/log-train'.format(exp_dir))
    tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard')

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')
    word_symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')

    logging.info("Loading L.fst")
    if (lang_dir / 'Linv.pt').exists():
        L_inv = k2.Fsa.from_dict(torch.load(lang_dir / 'Linv.pt'))
    else:
        with open(lang_dir / 'L.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
            L_inv = k2.arc_sort(L.invert_())
            torch.save(L_inv.as_dict(), lang_dir / 'Linv.pt')

    graph_compiler = CtcTrainingGraphCompiler(
        L_inv=L_inv,
        phones=phone_symbol_table,
        words=word_symbol_table
    )
    phone_ids = get_phone_symbols(phone_symbol_table)

    # load dataset
    feature_dir = Path('exp/data')
    logging.info("About to get train cuts")
    cuts_train = CutSet.from_json(feature_dir / 'cuts_train-clean-100.json.gz')
    logging.info("About to get dev cuts")
    cuts_dev = CutSet.from_json(feature_dir / 'cuts_dev-clean.json.gz')
    logging.info("About to get Musan cuts")
    cuts_musan = CutSet.from_json(feature_dir / 'cuts_musan.json.gz')

    logging.info("About to create train dataset")
    train = K2SpeechRecognitionDataset(
        cuts_train,
        cut_transforms=[
            CutConcatenate(),
            CutMix(
                cuts=cuts_musan,
                prob=0.5,
                snr=(10, 20)
            )
        ]
    )
    train_sampler = SingleCutSampler(
        cuts_train,
        max_frames=90000,
        shuffle=True,
    )
    logging.info("About to create train dataloader")
    train_dl = torch.utils.data.DataLoader(
        train,
        sampler=train_sampler,
        batch_size=None,
        num_workers=4
    )
    logging.info("About to create dev dataset")
    validate = K2SpeechRecognitionDataset(cuts_dev)
    valid_sampler = SingleCutSampler(cuts_dev, max_frames=90000)
    logging.info("About to create dev dataloader")
    valid_dl = torch.utils.data.DataLoader(
        validate,
        sampler=valid_sampler,
        batch_size=None,
        num_workers=1
    )

    if not torch.cuda.is_available():
        logging.error('No GPU detected!')
        sys.exit(-1)

    logging.info("About to create model")
    device_id = 0
    device = torch.device('cuda', device_id)
    model = TdnnLstm1b(
        num_features=40,
        num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
        subsampling_factor=3)

    model.to(device)
    describe(model)

    learning_rate = 1e-3
    optimizer = optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=5e-4)

    best_objf = np.inf
    best_valid_objf = np.inf
    best_epoch = start_epoch
    best_model_path = os.path.join(exp_dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')
    global_batch_idx_train = 0  # for logging only

    if start_epoch > 0:
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))
        ckpt = load_checkpoint(filename=model_path, model=model, optimizer=optimizer)
        best_objf = ckpt['objf']
        best_valid_objf = ckpt['valid_objf']
        global_batch_idx_train = ckpt['global_batch_idx_train']
        logging.info(f"epoch = {ckpt['epoch']}, objf = {best_objf}, valid_objf = {best_valid_objf}")

    for epoch in range(start_epoch, num_epochs):
        train_sampler.set_epoch(epoch)
        curr_learning_rate = 1e-3
        # curr_learning_rate = learning_rate * pow(0.4, epoch)
        # for param_group in optimizer.param_groups:
        #     param_group['lr'] = curr_learning_rate

        tb_writer.add_scalar('learning_rate', curr_learning_rate, epoch)

        logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))
        objf, valid_objf, global_batch_idx_train = train_one_epoch(
            dataloader=train_dl,
            valid_dataloader=valid_dl,
            model=model,
            device=device,
            graph_compiler=graph_compiler,
            optimizer=optimizer,
            current_epoch=epoch,
            tb_writer=tb_writer,
            num_epochs=num_epochs,
            global_batch_idx_train=global_batch_idx_train)

        # the lower, the better
        if valid_objf < best_valid_objf:
            best_valid_objf = valid_objf
            best_objf = objf
            best_epoch = epoch
            save_checkpoint(filename=best_model_path, model=model, epoch=epoch,
                            optimizer=None, scheduler=None,
                            learning_rate=curr_learning_rate, objf=objf,
                            valid_objf=valid_objf,
                            global_batch_idx_train=global_batch_idx_train)
            save_training_info(filename=best_epoch_info_filename,
                               model_path=best_model_path, current_epoch=epoch,
                               learning_rate=curr_learning_rate, objf=best_objf,
                               best_objf=best_objf, valid_objf=valid_objf,
                               best_valid_objf=best_valid_objf, best_epoch=best_epoch)

        # we always save the model for every epoch
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))
        save_checkpoint(filename=model_path, model=model, optimizer=optimizer,
                        scheduler=None, epoch=epoch,
                        learning_rate=curr_learning_rate, objf=objf,
                        valid_objf=valid_objf,
                        global_batch_idx_train=global_batch_idx_train)
        epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))
        save_training_info(filename=epoch_info_filename, model_path=model_path,
                           current_epoch=epoch, learning_rate=curr_learning_rate,
                           objf=objf, best_objf=best_objf, valid_objf=valid_objf,
                           best_valid_objf=best_valid_objf, best_epoch=best_epoch)

    logging.warning('Done')
def test_global_mvn_from_cuts():
    cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    stats1 = GlobalMVN.from_cuts(cuts)
    stats2 = GlobalMVN.from_cuts(cuts, max_cuts=1)
    assert isinstance(stats1, GlobalMVN)
    assert isinstance(stats2, GlobalMVN)
def main():
    parser = get_parser()
    args = parser.parse_args()

    model_type = args.model_type
    epoch = args.epoch
    avg = args.avg
    att_rate = args.att_rate
    num_paths = args.num_paths
    use_lm_rescoring = args.use_lm_rescoring
    use_whole_lattice = False
    if use_lm_rescoring and num_paths < 1:
        # It doesn't make sense to use n-best list for rescoring
        # when n is less than 1
        use_whole_lattice = True

    output_beam_size = args.output_beam_size

    exp_dir = Path('exp-' + model_type + '-mmi-att-sa-vgg-normlayer')
    setup_logger('{}/log/log-decode'.format(exp_dir), log_level='debug')

    logging.info(f'output_beam_size: {output_beam_size}')

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')

    phone_ids = get_phone_symbols(phone_symbol_table)

    phone_ids_with_blank = [0] + phone_ids
    ctc_topo = k2.arc_sort(build_ctc_topo(phone_ids_with_blank))

    logging.debug("About to load model")
    # Note: Use "export CUDA_VISIBLE_DEVICES=N" to setup device id to N
    # device = torch.device('cuda', 1)
    device = torch.device('cuda')

    if att_rate != 0.0:
        num_decoder_layers = 6
    else:
        num_decoder_layers = 0

    if model_type == "transformer":
        model = Transformer(
            num_features=40,
            nhead=args.nhead,
            d_model=args.attention_dim,
            num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
            subsampling_factor=4,
            num_decoder_layers=num_decoder_layers,
            vgg_frontend=args.vgg_frontend)
    elif model_type == "conformer":
        model = Conformer(
            num_features=40,
            nhead=args.nhead,
            d_model=args.attention_dim,
            num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
            subsampling_factor=4,
            num_decoder_layers=num_decoder_layers,
            vgg_frontend=args.vgg_frontend,
            is_espnet_structure=args.is_espnet_structure)
    elif model_type == "contextnet":
        model = ContextNet(num_features=40,
                           num_classes=len(phone_ids) + 1)  # +1 for the blank symbol
    else:
        raise NotImplementedError("Model of type " + str(model_type) + " is not implemented")

    if avg == 1:
        checkpoint = os.path.join(exp_dir, 'epoch-' + str(epoch - 1) + '.pt')
        load_checkpoint(checkpoint, model)
    else:
        checkpoints = [
            os.path.join(exp_dir, 'epoch-' + str(avg_epoch) + '.pt')
            for avg_epoch in range(epoch - avg, epoch)
        ]
        average_checkpoint(checkpoints, model)

    model.to(device)
    model.eval()

    if not os.path.exists(lang_dir / 'HLG.pt'):
        logging.debug("Loading L_disambig.fst.txt")
        with open(lang_dir / 'L_disambig.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
        logging.debug("Loading G.fst.txt")
        with open(lang_dir / 'G.fst.txt') as f:
            G = k2.Fsa.from_openfst(f.read(), acceptor=False)
        first_phone_disambig_id = find_first_disambig_symbol(phone_symbol_table)
        first_word_disambig_id = find_first_disambig_symbol(symbol_table)
        HLG = compile_HLG(L=L, G=G, H=ctc_topo,
                          labels_disambig_id_start=first_phone_disambig_id,
                          aux_labels_disambig_id_start=first_word_disambig_id)
        torch.save(HLG.as_dict(), lang_dir / 'HLG.pt')
    else:
        logging.debug("Loading pre-compiled HLG")
        d = torch.load(lang_dir / 'HLG.pt')
        HLG = k2.Fsa.from_dict(d)

    logging.debug('Decoding without LM rescoring')
    G = None
    if num_paths > 1:
        logging.debug(f'Use n-best list decoding, n is {num_paths}')
    else:
        logging.debug('Use 1-best decoding')

    logging.debug("convert HLG to device")
    HLG = HLG.to(device)
    HLG.aux_labels = k2.ragged.remove_values_eq(HLG.aux_labels, 0)
    HLG.requires_grad_(False)

    if not hasattr(HLG, 'lm_scores'):
        HLG.lm_scores = HLG.scores.clone()

    # load dataset
    feature_dir = Path('exp/data')
    logging.info("About to get test cuts")
    cuts_test = CutSet.from_json(feature_dir / 'cuts_test.json.gz')

    logging.info("About to create test dataset")
    test = K2SpeechRecognitionDataset(cuts_test)
    test_sampler = SingleCutSampler(cuts_test, max_frames=12000)
    logging.info("About to create test dataloader")
    test_dl = torch.utils.data.DataLoader(test, sampler=test_sampler, batch_size=None, num_workers=1)

    logging.info("About to decode")
    results = decode(dataloader=test_dl,
                     model=model,
                     HLG=HLG,
                     symbols=symbol_table,
                     num_paths=num_paths,
                     G=G,
                     use_whole_lattice=use_whole_lattice,
                     output_beam_size=output_beam_size)

    s = ''
    results2 = []
    for ref, hyp in results:
        s += f'ref={ref}\n'
        s += f'hyp={hyp}\n'
        results2.append((list(''.join(ref)), list(''.join(hyp))))
    logging.info(s)

    # compute WER
    dists = [edit_distance(r, h) for r, h in results]
    dists2 = [edit_distance(r, h) for r, h in results2]
    errors = {
        key: sum(dist[key] for dist in dists)
        for key in ['sub', 'ins', 'del', 'total']
    }
    errors2 = {
        key: sum(dist[key] for dist in dists2)
        for key in ['sub', 'ins', 'del', 'total']
    }
    total_words = sum(len(ref) for ref, _ in results)
    total_chars = sum(len(ref) for ref, _ in results2)
    # Print Kaldi-like message:
    # %WER 8.20 [ 4459 / 54402, 695 ins, 427 del, 3337 sub ]
    logging.info(
        f'%WER {errors["total"] / total_words:.2%} '
        f'[{errors["total"]} / {total_words}, {errors["ins"]} ins, {errors["del"]} del, {errors["sub"]} sub ]'
    )
    logging.info(
        f'%CER {errors2["total"] / total_chars:.2%} '
        f'[{errors2["total"]} / {total_chars}, {errors2["ins"]} ins, {errors2["del"]} del, {errors2["sub"]} sub ]'
    )
def main():
    fix_random_seed(42)

    exp_dir = 'exp-lstm-adam'
    setup_logger('{}/log/log-train'.format(exp_dir))
    tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard')

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')
    word_symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')

    logging.info("Loading L.fst")
    if (lang_dir / 'Linv.pt').exists():
        L_inv = k2.Fsa.from_dict(torch.load(lang_dir / 'Linv.pt'))
    else:
        with open(lang_dir / 'L.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
            L_inv = k2.arc_sort(L.invert_())
            torch.save(L_inv.as_dict(), lang_dir / 'Linv.pt')

    graph_compiler = CtcTrainingGraphCompiler(L_inv=L_inv,
                                              phones=phone_symbol_table,
                                              words=word_symbol_table,
                                              oov='<SPOKEN_NOISE>')

    # load dataset
    feature_dir = Path('exp/data')
    logging.info("About to get train cuts")
    cuts_train = CutSet.from_json(feature_dir / 'cuts_train.json.gz')
    logging.info("About to get dev cuts")
    cuts_dev = CutSet.from_json(feature_dir / 'cuts_dev.json.gz')

    logging.info("About to create train dataset")
    train = K2SpeechRecognitionIterableDataset(cuts_train, max_frames=90000, shuffle=True)
    logging.info("About to create dev dataset")
    validate = K2SpeechRecognitionIterableDataset(cuts_dev, max_frames=90000,
                                                  shuffle=False, concat_cuts=False)
    logging.info("About to create train dataloader")
    train_dl = torch.utils.data.DataLoader(train, batch_size=None, num_workers=4)
    logging.info("About to create dev dataloader")
    valid_dl = torch.utils.data.DataLoader(validate, batch_size=None, num_workers=1)

    if not torch.cuda.is_available():
        logging.error('No GPU detected!')
        sys.exit(-1)

    logging.info("About to create model")
    device_id = 0
    device = torch.device('cuda', device_id)
    model = TdnnLstm1b(num_features=40,
                       num_classes=len(phone_symbol_table),
                       subsampling_factor=3)

    learning_rate = 0.00001
    start_epoch = 0
    num_epochs = 10
    best_objf = np.inf
    best_epoch = start_epoch
    best_model_path = os.path.join(exp_dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')
    global_batch_idx_train = 0  # for logging only
    global_batch_idx_valid = 0  # for logging only

    if start_epoch > 0:
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))
        (epoch, learning_rate, objf) = load_checkpoint(filename=model_path, model=model)
        best_objf = objf
        logging.info("epoch = {}, objf = {}".format(epoch, objf))

    model.to(device)
    describe(model)

    # optimizer = optim.SGD(model.parameters(),
    #                       lr=learning_rate,
    #                       momentum=0.9,
    #                       weight_decay=5e-4)
    optimizer = optim.AdamW(
        model.parameters(),
        # lr=learning_rate,
        weight_decay=5e-4)

    for epoch in range(start_epoch, num_epochs):
        curr_learning_rate = 1e-3
        # curr_learning_rate = learning_rate * pow(0.4, epoch)
        # for param_group in optimizer.param_groups:
        #     param_group['lr'] = curr_learning_rate
        tb_writer.add_scalar('learning_rate', curr_learning_rate, epoch)

        logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))
        objf = train_one_epoch(dataloader=train_dl,
                               valid_dataloader=valid_dl,
                               model=model,
                               device=device,
                               graph_compiler=graph_compiler,
                               optimizer=optimizer,
                               current_epoch=epoch,
                               tb_writer=tb_writer,
                               num_epochs=num_epochs,
                               global_batch_idx_train=global_batch_idx_train,
                               global_batch_idx_valid=global_batch_idx_valid)

        # the lower, the better
        if objf < best_objf:
            best_objf = objf
            best_epoch = epoch
            save_checkpoint(filename=best_model_path, model=model, epoch=epoch,
                            learning_rate=curr_learning_rate, objf=objf)
            save_training_info(filename=best_epoch_info_filename,
                               model_path=best_model_path,
                               current_epoch=epoch,
                               learning_rate=curr_learning_rate,
                               objf=best_objf,
                               best_objf=best_objf,
                               best_epoch=best_epoch)

        # we always save the model for every epoch
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))
        save_checkpoint(filename=model_path, model=model, epoch=epoch,
                        learning_rate=curr_learning_rate, objf=objf)
        epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))
        save_training_info(filename=epoch_info_filename,
                           model_path=model_path,
                           current_epoch=epoch,
                           learning_rate=curr_learning_rate,
                           objf=objf,
                           best_objf=best_objf,
                           best_epoch=best_epoch)

    logging.warning('Done')
def main():
    if not torch.cuda.is_available():
        logging.error("No GPU detected!")
        sys.exit(-1)
    device_id = 0
    device = torch.device("cuda", device_id)

    # Reserve the GPU with a dummy variable
    reserve_variable = torch.ones(1).to(device)

    exp_dir = Path("exp-tl1a-adam-xent")
    setup_logger("{}/log/log-decode".format(exp_dir), log_level="debug")

    if not os.path.exists(exp_dir / "HCLG.pt"):
        logging.info("Preparing decoding graph")
        # sym_str = """
        # <eps> 0
        # silence 1
        # speech 2
        # """
        # symbol_table = k2.SymbolTable.from_str(sym_str)
        HCLG = prepare_decoding_graph(
            min_silence_duration=0.03,
            min_speech_duration=0.3,
            max_speech_duration=10.0,
        )
        # Arc sort the HCLG since it is needed for intersect
        logging.info("Sorting decoding graph by outgoing arcs")
        HCLG = k2.arc_sort(HCLG)
        # HCLG.symbols = symbol_table
        torch.save(HCLG.as_dict(), exp_dir / "HCLG.pt")
    else:
        logging.info("Loading pre-compiled decoding graph")
        d = torch.load(exp_dir / "HCLG.pt")
        HCLG = k2.Fsa.from_dict(d)

    # load dataset
    feature_dir = Path("exp/data")
    logging.info("About to get test cuts")
    cuts_test = CutSet.from_json(feature_dir / "cuts_test.json.gz")

    logging.info("About to create test dataset")
    test = K2VadDataset(cuts_test, return_cuts=True)
    sampler = SingleCutSampler(cuts_test, max_frames=100000)
    logging.info("About to create test dataloader")
    test_dl = torch.utils.data.DataLoader(test, batch_size=None, sampler=sampler, num_workers=1)

    logging.info("About to load model")
    model = TdnnLstm1a(
        num_features=80,
        num_classes=2,  # speech/silence
        subsampling_factor=1,
    )

    checkpoint = os.path.join(exp_dir, "best_model.pt")
    load_checkpoint(checkpoint, model)
    model.to(device)
    model.eval()

    logging.info("convert decoding graph to device")
    HCLG = HCLG.to(device)
    HCLG.requires_grad_(False)

    logging.info("About to decode")
    results = decode(dataloader=test_dl, model=model, device=device, HCLG=HCLG)

    # Compute frame-level accuracy and precision-recall metrics
    y_true = []
    y_pred = []
    for result in results:
        cut, ref, hyp = result
        y_true.append(ref)
        y_pred.append(hyp)
    y_true = torch.cat(y_true, dim=0).numpy()
    y_pred = torch.cat(y_pred, dim=0).numpy()
    logging.info("Results: \n{}".format(
        classification_report(y_true, y_pred, target_names=["silence", "speech"])))

    # Create output segments per recording
    create_and_write_segments(
        [result[0] for result in results],  # cuts
        [result[2] for result in results],  # outputs
        exp_dir / "segments",  # segments file
    )
def main():
    fix_random_seed(42)

    exp_dir = f'exp-lstm-adam-mmi-mbr-musan'
    setup_logger('{}/log/log-train'.format(exp_dir))
    tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard')

    if not torch.cuda.is_available():
        logging.warn('No GPU detected!')
        logging.warn('USE CPU (very slow)!')
        device = torch.device('cpu')
    else:
        logging.info('Use GPU')
        device_id = 0
        device = torch.device('cuda', device_id)

    # load L, G, symbol_table
    lang_dir = Path('data/lang_nosp')
    phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')
    word_symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')

    logging.info("Loading L.fst")
    if (lang_dir / 'Linv.pt').exists():
        logging.info('Loading precompiled L')
        L_inv = k2.Fsa.from_dict(torch.load(lang_dir / 'Linv.pt'))
    else:
        logging.info('Compiling L')
        with open(lang_dir / 'L.fst.txt') as f:
            L = k2.Fsa.from_openfst(f.read(), acceptor=False)
            L_inv = k2.arc_sort(L.invert_())
            torch.save(L_inv.as_dict(), lang_dir / 'Linv.pt')

    logging.info("Loading L_disambig.fst")
    if (lang_dir / 'L_disambig.pt').exists():
        logging.info('Loading precompiled L_disambig')
        L_disambig = k2.Fsa.from_dict(torch.load(lang_dir / 'L_disambig.pt'))
    else:
        logging.info('Compiling L_disambig')
        with open(lang_dir / 'L_disambig.fst.txt') as f:
            L_disambig = k2.Fsa.from_openfst(f.read(), acceptor=False)
            L_disambig = k2.arc_sort(L_disambig)
            torch.save(L_disambig.as_dict(), lang_dir / 'L_disambig.pt')

    logging.info("Loading G.fst")
    if (lang_dir / 'G_uni.pt').exists():
        logging.info('Loading precompiled G')
        G = k2.Fsa.from_dict(torch.load(lang_dir / 'G_uni.pt'))
    else:
        logging.info('Compiling G')
        with open(lang_dir / 'G_uni.fst.txt') as f:
            G = k2.Fsa.from_openfst(f.read(), acceptor=False)
            G = k2.arc_sort(G)
            torch.save(G.as_dict(), lang_dir / 'G_uni.pt')

    graph_compiler = MmiMbrTrainingGraphCompiler(L_inv=L_inv,
                                                 L_disambig=L_disambig,
                                                 G=G,
                                                 device=device,
                                                 phones=phone_symbol_table,
                                                 words=word_symbol_table)
    phone_ids = get_phone_symbols(phone_symbol_table)
    P = create_bigram_phone_lm(phone_ids)
    P.scores = torch.zeros_like(P.scores)

    # load dataset
    feature_dir = Path('exp/data')
    logging.info("About to get train cuts")
    cuts_train = CutSet.from_json(feature_dir / 'cuts_train-clean-100.json.gz')
    logging.info("About to get dev cuts")
    cuts_dev = CutSet.from_json(feature_dir / 'cuts_dev-clean.json.gz')
    logging.info("About to get Musan cuts")
    cuts_musan = CutSet.from_json(feature_dir / 'cuts_musan.json.gz')

    logging.info("About to create train dataset")
    train = K2SpeechRecognitionIterableDataset(cuts_train,
                                               max_frames=30000,
                                               shuffle=True,
                                               aug_cuts=cuts_musan,
                                               aug_prob=0.5,
                                               aug_snr=(10, 20))
    logging.info("About to create dev dataset")
    validate = K2SpeechRecognitionIterableDataset(cuts_dev,
                                                  max_frames=60000,
                                                  shuffle=False,
                                                  concat_cuts=False)
    logging.info("About to create train dataloader")
    train_dl = torch.utils.data.DataLoader(train, batch_size=None, num_workers=4)
    logging.info("About to create dev dataloader")
    valid_dl = torch.utils.data.DataLoader(validate, batch_size=None, num_workers=1)

    logging.info("About to create model")
    model = TdnnLstm1b(
        num_features=40,
        num_classes=len(phone_ids) + 1,  # +1 for the blank symbol
        subsampling_factor=3)
    model.P_scores = nn.Parameter(P.scores.clone(), requires_grad=True)

    start_epoch = 0
    num_epochs = 10
    best_objf = np.inf
    best_valid_objf = np.inf
    best_epoch = start_epoch
    best_model_path = os.path.join(exp_dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')
    global_batch_idx_train = 0  # for logging only
    use_adam = True

    if start_epoch > 0:
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))
        ckpt = load_checkpoint(filename=model_path, model=model)
        best_objf = ckpt['objf']
        best_valid_objf = ckpt['valid_objf']
        global_batch_idx_train = ckpt['global_batch_idx_train']
        logging.info(
            f"epoch = {ckpt['epoch']}, objf = {best_objf}, valid_objf = {best_valid_objf}"
        )

    model.to(device)
    describe(model)

    P = P.to(device)

    if use_adam:
        learning_rate = 1e-3
        weight_decay = 5e-4
        optimizer = optim.AdamW(model.parameters(),
                                lr=learning_rate,
                                weight_decay=weight_decay)
        # Equivalent to the following in the epoch loop:
        #  if epoch > 6:
        #      curr_learning_rate *= 0.8
        lr_scheduler = optim.lr_scheduler.LambdaLR(
            optimizer, lambda ep: 1.0 if ep < 7 else 0.8**(ep - 6))
    else:
        learning_rate = 5e-5
        weight_decay = 1e-5
        momentum = 0.9
        lr_schedule_gamma = 0.7
        optimizer = optim.SGD(model.parameters(),
                              lr=learning_rate,
                              momentum=momentum,
                              weight_decay=weight_decay)
        lr_scheduler = optim.lr_scheduler.ExponentialLR(
            optimizer=optimizer,
            gamma=lr_schedule_gamma,
            last_epoch=start_epoch - 1)

    for epoch in range(start_epoch, num_epochs):
        # LR scheduler can hold multiple learning rates for multiple parameter groups;
        # For now we report just the first LR which we assume concerns most of the parameters.
        curr_learning_rate = lr_scheduler.get_last_lr()[0]
        tb_writer.add_scalar('train/learning_rate', curr_learning_rate, global_batch_idx_train)
        tb_writer.add_scalar('train/epoch', epoch, global_batch_idx_train)

        logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))
        objf, valid_objf, global_batch_idx_train = train_one_epoch(
            dataloader=train_dl,
            valid_dataloader=valid_dl,
            model=model,
            P=P,
            device=device,
            graph_compiler=graph_compiler,
            optimizer=optimizer,
            current_epoch=epoch,
            tb_writer=tb_writer,
            num_epochs=num_epochs,
            global_batch_idx_train=global_batch_idx_train,
        )

        # the lower, the better
        if valid_objf < best_valid_objf:
            best_valid_objf = valid_objf
            best_objf = objf
            best_epoch = epoch
            save_checkpoint(filename=best_model_path, model=model, epoch=epoch,
                            learning_rate=curr_learning_rate, objf=objf,
                            valid_objf=valid_objf,
                            global_batch_idx_train=global_batch_idx_train)
            save_training_info(filename=best_epoch_info_filename,
                               model_path=best_model_path,
                               current_epoch=epoch,
                               learning_rate=curr_learning_rate,
                               objf=objf,
                               best_objf=best_objf,
                               valid_objf=valid_objf,
                               best_valid_objf=best_valid_objf,
                               best_epoch=best_epoch)

        # we always save the model for every epoch
        model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))
        save_checkpoint(filename=model_path, model=model, epoch=epoch,
                        learning_rate=curr_learning_rate, objf=objf,
                        valid_objf=valid_objf,
                        global_batch_idx_train=global_batch_idx_train)
        epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))
        save_training_info(filename=epoch_info_filename,
                           model_path=model_path,
                           current_epoch=epoch,
                           learning_rate=curr_learning_rate,
                           objf=objf,
                           best_objf=best_objf,
                           valid_objf=valid_objf,
                           best_valid_objf=best_valid_objf,
                           best_epoch=best_epoch)

        lr_scheduler.step()

    logging.warning('Done')
def test_specaugment_single():
    cuts = CutSet.from_json('test/fixtures/ljspeech/cuts.json')
    feats = torch.from_numpy(cuts[0].load_features())
    tfnm = SpecAugment(p=1.0, time_warp_factor=10)
    augmented = tfnm(feats)
    assert (feats != augmented).any()