def decode_score_test(prob, trans, outDir):
    trans.save(os.path.join(outDir, "ref.txt"))

    hmmFile = f"{args.expDir}/train_sat/final.mdl"
    HCLGFile = f"{args.expDir}/train_sat/graph/HCLG.{args.order}.fst"
    lexicons = exkaldi.load_lex(f"{args.expDir}/dict/lexicons.lex")
    phoneMap = exkaldi.load_list_table(
        f"{args.expDir}/dict/phones.48_to_39.map")

    # print("Decoding...")
    lat = exkaldi.decode.wfst.nn_decode(
        prob=prob,
        hmm=hmmFile,
        HCLGFile=HCLGFile,
        symbolTable=lexicons("words"),
        beam=args.beam,
        latBeam=args.latBeam,
        acwt=args.acwt,
        minActive=200,
        maxActive=7000,
    )

    # print("Score...")
    minWER = None
    for penalty in [0., 0.5, 1.0]:
        for LMWT in range(1, 15):
            newLat = lat.add_penalty(penalty)
            result = newLat.get_1best(lexicons("phones"),
                                      hmmFile,
                                      lmwt=LMWT,
                                      acwt=1,
                                      phoneLevel=True)
            result = exkaldi.hmm.transcription_from_int(
                result, lexicons("phones"))
            result = result.convert(phoneMap)

            fileName = f"{outDir}/penalty_{penalty}_lmwt_{LMWT}.txt"
            result.save(fileName)

            score = exkaldi.decode.score.wer(ref=trans,
                                             hyp=result,
                                             mode="present")
            if minWER is None or score.WER < minWER[0]:
                minWER = (score.WER, fileName)
            # print(f"{penalty} {LMWT}", score)

    with open(f"{outDir}/best_PER", "w") as fw:
        fw.write(f"{minWER[0]}% {minWER[1]}")

    return minWER[0]
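
# --- Hedged usage sketch (not part of the original recipe) ------------------
# decode_score_test expects a network-output probability archive and a
# phone-level (39-phone) reference transcription. Both paths below are
# illustrative assumptions: 'prob/test.ark' stands in for whatever the DNN
# step wrote, and 'phone_ref.txt' is a hypothetical phone-level reference.
def example_score_dnn_output():
    prob = exkaldi.load_index_table(f"{args.expDir}/train_dnn/prob/test.ark")
    trans = exkaldi.load_transcription(
        f"{args.expDir}/data/test/phone_ref.txt")  # hypothetical path
    bestPER = decode_score_test(
        prob, trans, outDir=f"{args.expDir}/train_dnn/decode_test")
    print(f"Best PER: {bestPER}%")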
def prepare_DNN_data():
    print("Start to prepare data for DNN training")
    assert os.path.isdir(f"{args.expDir}/train_sat"), \
        "Please run previous programs up to SAT training."

    # Lexicons and GMM-HMM model
    lexicons = exkaldi.load_lex(f"{args.expDir}/dict/lexicons.lex")
    hmm = f"{args.expDir}/train_sat/final.mdl"
    tree = f"{args.expDir}/train_sat/tree"

    for Name in ["train", "dev", "test"]:
        exkaldi.utils.make_dependent_dirs(
            f"{args.expDir}/train_dnn/data/{Name}", pathIsFile=False)

        # Make LDA feature
        print(f"Make LDA feature for '{Name}'")
        feat = exkaldi.load_feat(f"{args.expDir}/mfcc/{Name}/mfcc_cmvn.ark")
        feat = feat.splice(left=args.LDAsplice, right=args.LDAsplice)
        feat = exkaldi.transform_feat(
            feat, matFile=f"{args.expDir}/train_lda_mllt/trans.mat")

        # Compile the aligning graph
        print("Compile aligning graph")
        transInt = exkaldi.hmm.transcription_to_int(
            transcription=f"{args.expDir}/data/{Name}/text",
            symbolTable=lexicons("words"),
            unkSymbol=lexicons("oov"),
        )
        graphFile = exkaldi.decode.wfst.compile_align_graph(
            hmm,
            tree,
            transcription=transInt,
            LFile=f"{args.expDir}/dict/L.fst",
            outFile=f"{args.expDir}/train_dnn/data/{Name}/align_graph",
            lexicons=lexicons,
        )

        # Align the first time
        print("Align the first time")
        ali = exkaldi.decode.wfst.gmm_align(
            hmm,
            feat,
            alignGraphFile=graphFile,
            lexicons=lexicons,
        )

        # Estimate the fMLLR transform matrix
        print("Estimate fMLLR transform matrix")
        fmllrTransMat = exkaldi.hmm.estimate_fMLLR_matrix(
            aliOrLat=ali,
            lexicons=lexicons,
            aliHmm=hmm,
            feat=feat,
            spk2utt=f"{args.expDir}/data/{Name}/spk2utt",
        )
        fmllrTransMat.save(f"{args.expDir}/train_dnn/data/{Name}/trans.ark")

        # Transform feature
        print("Transform feature")
        feat = exkaldi.use_fmllr(
            feat,
            fmllrTransMat,
            utt2spk=f"{args.expDir}/data/{Name}/utt2spk",
        )

        # Align a second time with the new feature
        print("Align the second time")
        ali = exkaldi.decode.wfst.gmm_align(
            hmm,
            feat,
            alignGraphFile=graphFile,
            lexicons=lexicons,
        )

        # Save alignment and feature
        print("Save final fmllr feature and alignment")
        feat.save(f"{args.expDir}/train_dnn/data/{Name}/fmllr.ark")
        ali.save(f"{args.expDir}/train_dnn/data/{Name}/ali")

        # Transform the alignment into numpy targets
        print("Generate pdf ID and phone ID alignment")
        ali.to_numpy(
            aliType="pdfID",
            hmm=hmm).save(f"{args.expDir}/train_dnn/data/{Name}/pdfID.npy")
        ali.to_numpy(
            aliType="phoneID",
            hmm=hmm).save(f"{args.expDir}/train_dnn/data/{Name}/phoneID.npy")
        del ali

        # Compute CMVN for the fmllr feature
        print("Compute the CMVN for fmllr feature")
        cmvn = exkaldi.compute_cmvn_stats(
            feat, spk2utt=f"{args.expDir}/data/{Name}/spk2utt")
        cmvn.save(f"{args.expDir}/train_dnn/data/{Name}/cmvn_of_fmllr.ark")
        del cmvn
        del feat

        # Copy spk2utt, utt2spk and text files
        shutil.copyfile(f"{args.expDir}/data/{Name}/spk2utt",
                        f"{args.expDir}/train_dnn/data/{Name}/spk2utt")
        shutil.copyfile(f"{args.expDir}/data/{Name}/utt2spk",
                        f"{args.expDir}/train_dnn/data/{Name}/utt2spk")
        shutil.copyfile(f"{args.expDir}/data/{Name}/text",
                        f"{args.expDir}/train_dnn/data/{Name}/text")
        transInt.save(f"{args.expDir}/data/{Name}/text.int")

    print("Write feature and alignment dim information")
    dims = exkaldi.ListTable()

    feat = exkaldi.load_feat(f"{args.expDir}/train_dnn/data/test/fmllr.ark")
    dims["fmllr"] = feat.dim
    del feat

    hmm = exkaldi.hmm.load_hmm(f"{args.expDir}/train_sat/final.mdl")
    dims["phones"] = hmm.info.phones + 1
    dims["pdfs"] = hmm.info.pdfs
    del hmm

    dims.save(f"{args.expDir}/train_dnn/data/dims")
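
# --- Sketch: reading the saved dims table back (hedged) ---------------------
# A later training script needs the dimensions written above. This uses
# load_list_table, the same loader used for the phone map in
# decode_score_test; the assumption here is that values come back as strings
# from the text table, hence the int() casts.
def example_read_dims():
    dims = exkaldi.load_list_table(f"{args.expDir}/train_dnn/data/dims")
    featDim = int(dims["fmllr"])
    numPdfs = int(dims["pdfs"])
    print(f"fmllr dim: {featDim}, pdf classes: {numPdfs}")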
def prepare_LSTM_data():
    print("Start to prepare data for LSTM training")
    declare.is_dir(f"{args.expDir}/train_dnn/prob",
                   debug="Please run previous programs up to DNN training.")

    # Lexicons and GMM-HMM model
    lexicons = exkaldi.load_lex(f"{args.expDir}/dict/lexicons.lex")
    hmm = f"{args.expDir}/train_sat/final.mdl"
    tree = f"{args.expDir}/train_sat/tree"

    for Name in ["train", "dev", "test"]:
        exkaldi.utils.make_dependent_dirs(
            f"{args.expDir}/train_lstm/data/{Name}", pathIsFile=False)

        # Load feature
        print(f"Make LDA feature for '{Name}'")
        feat = exkaldi.load_feat(f"{args.expDir}/mfcc/{Name}/mfcc_cmvn.ark")
        feat = feat.splice(left=args.LDAsplice, right=args.LDAsplice)
        feat = exkaldi.transform_feat(
            feat, matFile=f"{args.expDir}/train_lda_mllt/trans.mat")

        # Load the probability for aligning
        # (the file is large, so we use an index table)
        prob = exkaldi.load_index_table(
            f"{args.expDir}/train_dnn/prob/{Name}.ark")

        # Copy the aligning graph from the DNN resources
        print("Copy aligning graph from DNN resources")
        shutil.copyfile(f"{args.expDir}/train_dnn/data/{Name}/align_graph",
                        f"{args.expDir}/train_lstm/data/{Name}/align_graph")

        # Align
        print("Align")
        ali = exkaldi.decode.wfst.nn_align(
            hmm,
            prob,
            alignGraphFile=f"{args.expDir}/train_lstm/data/{Name}/align_graph",
            lexicons=lexicons,
            outFile=f"{args.expDir}/train_lstm/data/{Name}/ali",
        )

        # Estimate the fMLLR transform matrix
        print("Estimate transform matrix")
        fmllrTransMat = exkaldi.hmm.estimate_fMLLR_matrix(
            aliOrLat=ali,
            lexicons=lexicons,
            aliHmm=hmm,
            feat=feat,
            spk2utt=f"{args.expDir}/data/{Name}/spk2utt",
            outFile=f"{args.expDir}/train_lstm/data/{Name}/trans.ark",
        )

        # Transform feature
        print("Transform feature")
        feat = exkaldi.use_fmllr(
            feat,
            fmllrTransMat,
            utt2spk=f"{args.expDir}/data/{Name}/utt2spk",
            outFile=f"{args.expDir}/train_lstm/data/{Name}/fmllr.ark",
        )

        # Transform the alignment. Because 'ali' is an index table object,
        # we need to fetch the alignment data in order to use the
        # 'to_numpy' method.
        ali = ali.fetch(arkType="ali")
        ali.to_numpy(
            aliType="pdfID",
            hmm=hmm).save(f"{args.expDir}/train_lstm/data/{Name}/pdfID.npy")
        ali.to_numpy(
            aliType="phoneID",
            hmm=hmm).save(f"{args.expDir}/train_lstm/data/{Name}/phoneID.npy")
        del ali

        # Compute CMVN for the fmllr feature
        cmvn = exkaldi.compute_cmvn_stats(
            feat,
            spk2utt=f"{args.expDir}/data/{Name}/spk2utt",
            outFile=f"{args.expDir}/train_lstm/data/{Name}/cmvn_of_fmllr.ark",
        )
        del cmvn
        del feat

        # Copy spk2utt, utt2spk and text files
        shutil.copyfile(f"{args.expDir}/data/{Name}/spk2utt",
                        f"{args.expDir}/train_lstm/data/{Name}/spk2utt")
        shutil.copyfile(f"{args.expDir}/data/{Name}/utt2spk",
                        f"{args.expDir}/train_lstm/data/{Name}/utt2spk")
        shutil.copyfile(f"{args.expDir}/data/{Name}/text",
                        f"{args.expDir}/train_lstm/data/{Name}/text")

    print("Write feature and alignment dim information")
    dims = exkaldi.ListTable()

    feat = exkaldi.load_feat(f"{args.expDir}/train_lstm/data/test/fmllr.ark")
    dims["fmllr"] = feat.dim
    del feat

    hmm = exkaldi.hmm.load_hmm(f"{args.expDir}/train_sat/final.mdl")
    dims["phones"] = hmm.info.phones + 1
    dims["pdfs"] = hmm.info.pdfs
    del hmm

    dims.save(f"{args.expDir}/train_lstm/data/dims")
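
# --- Sketch: consuming the prepared LSTM data (hedged) ----------------------
# How a training script might load the feature/target pair written above.
# Loading a .npy alignment archive with exkaldi.load_ali is an assumption
# here (the file was saved via ali.to_numpy(...).save(...)); adapt the call
# if your exkaldi version exposes a different loader.
def example_load_lstm_pair(Name="train"):
    feat = exkaldi.load_feat(f"{args.expDir}/train_lstm/data/{Name}/fmllr.ark")
    label = exkaldi.load_ali(f"{args.expDir}/train_lstm/data/{Name}/pdfID.npy")
    return feat, label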
def main():
    # ------------- Parse arguments from command line ----------------------
    # 1. Add a description of this program
    args.discribe(
        "This program is used to train the speaker-adaptive (SAT) triphone GMM-HMM model")
    # 2. Add options
    args.add("--expDir",
             abbr="-e",
             dtype=str,
             default="exp",
             discription="The data and output path of current experiment.")
    args.add("--splice",
             abbr="-c",
             dtype=int,
             default=3,
             discription="How many left-right frames to splice.")
    args.add("--numIters",
             abbr="-n",
             dtype=int,
             default=35,
             discription="How many iterations to train.")
    args.add("--maxIterInc",
             abbr="-m",
             dtype=int,
             default=25,
             discription="The final iteration of increasing gaussians.")
    args.add("--realignIter",
             abbr="-r",
             dtype=int,
             default=[10, 20, 30],
             discription="The iterations at which to realign the feature.")
    args.add("--fmllrIter",
             abbr="-f",
             dtype=int,
             default=[2, 4, 6, 12],
             discription="The iterations at which to estimate the fMLLR matrix.")
    args.add("--order",
             abbr="-o",
             dtype=int,
             default=6,
             discription="Which N-grams model to use.")
    args.add("--beam",
             abbr="-b",
             dtype=int,
             default=13,
             discription="Decode beam size.")
    args.add("--latBeam",
             abbr="-l",
             dtype=int,
             default=6,
             discription="Lattice beam size.")
    args.add("--acwt",
             abbr="-a",
             dtype=float,
             default=0.083333,
             discription="Acoustic model weight.")
    args.add(
        "--parallel",
        abbr="-p",
        dtype=int,
        default=4,
        minV=1,
        maxV=10,
        discription=
        "The number of parallel processes used to compute the features of the train dataset.")
    args.add("--skipTrain",
             abbr="-s",
             dtype=bool,
             default=False,
             discription="If True, skip training. Do decoding only.")
    # 3. Then start to parse arguments.
    args.parse()
    # 4. Take a backup of the arguments
    argsLogFile = os.path.join(args.expDir, "conf", "train_sat.args")
    args.save(argsLogFile)

    if not args.skipTrain:
        # ------------- Prepare feature and previous alignment for training ----------------------
        # 1. Load the feature for training
        print("Load MFCC+CMVN feature.")
        feat = exkaldi.load_index_table(
            os.path.join(args.expDir, "mfcc", "train", "mfcc_cmvn.ark"))
        print(f"Splice {args.splice} frames.")
        originalFeat = exkaldi.splice_feature(feat,
                                              left=args.splice,
                                              right=args.splice,
                                              outFile=os.path.join(
                                                  args.expDir, "train_sat",
                                                  "mfcc_cmvn_splice.ark"))
        print("Transform LDA feature")
        ldaFeat = exkaldi.transform_feat(
            feat=originalFeat,
            matFile=os.path.join(args.expDir, "train_lda_mllt", "trans.mat"),
            outFile=os.path.join(args.expDir, "train_sat", "lda_feat.ark"),
        )
        del originalFeat

        # 2. Load the previous alignment and lexicons
        ali = exkaldi.load_index_table(os.path.join(args.expDir,
                                                    "train_lda_mllt",
                                                    "*final.ali"),
                                       useSuffix="ark")
        lexicons = exkaldi.load_lex(
            os.path.join(args.expDir, "dict", "lexicons.lex"))

        # 3. Estimate the primary fMLLR transform matrix
        print("Estimate the primary fMLLR transform matrices")
        fmllrTransMat = exkaldi.hmm.estimate_fMLLR_matrix(
            aliOrLat=ali,
            lexicons=lexicons,
            aliHmm=os.path.join(args.expDir, "train_lda_mllt", "final.mdl"),
            feat=ldaFeat,
            spk2utt=os.path.join(args.expDir, "data", "train", "spk2utt"),
            outFile=os.path.join(args.expDir, "train_sat", "trans.ark"),
        )

        print("Transform feature")
        fmllrFeat = exkaldi.use_fmllr(
            ldaFeat,
            fmllrTransMat,
            utt2spk=os.path.join(args.expDir, "data", "train", "utt2spk"),
            outFile=os.path.join(args.expDir, "train_sat", "fmllr_feat.ark"),
        )

        # -------------- Build the decision tree ------------------------
        print("Start to build the decision tree")
        tree = exkaldi.hmm.DecisionTree(lexicons=lexicons,
                                        contextWidth=3,
                                        centralPosition=1)
        tree.train(
            feat=fmllrFeat,
            hmm=os.path.join(args.expDir, "train_lda_mllt", "final.mdl"),
            ali=ali,
            topoFile=os.path.join(args.expDir, "dict", "topo"),
            numLeaves=2500,
            tempDir=os.path.join(args.expDir, "train_sat"),
        )
        tree.save(os.path.join(args.expDir, "train_sat", "tree"))
        print("Build tree done.")
        del fmllrFeat

        # ------------- Start training ----------------------
        # 1. Initialize a triphone HMM object
        print("Initialize a triphone HMM object")
        model = exkaldi.hmm.TriphoneHMM(lexicons=lexicons)
        model.initialize(
            tree=tree,
            topoFile=os.path.join(args.expDir, "dict", "topo"),
            treeStatsFile=os.path.join(args.expDir, "train_sat",
                                       "treeStats.acc"),
        )
        print(f"Initialized a triphone HMM-GMM model: {model.info}.")

        # 2. Convert the previous alignment
        print("Transform the alignment")
        newAli = exkaldi.hmm.convert_alignment(
            ali=ali,
            originHmm=os.path.join(args.expDir, "train_lda_mllt",
                                   "final.mdl"),
            targetHmm=model,
            tree=tree,
            outFile=os.path.join(args.expDir, "train_sat", "initial.ali"),
        )

        # 3. Split data for parallel training
        transcription = exkaldi.load_transcription(
            os.path.join(args.expDir, "data", "train", "text"))
        transcription = transcription.sort()
        if args.parallel > 1:
            # Split the feature
            ldaFeat = ldaFeat.sort(by="utt").subset(chunks=args.parallel)
            # Split the transcription, alignment and fMLLR matrices
            # depending on the utterance IDs of each feature chunk
            tempTrans = []
            tempAli = []
            tempFmllrMat = []
            for f in ldaFeat:
                tempTrans.append(transcription.subset(keys=f.utts))
                tempAli.append(newAli.subset(keys=f.utts))
                spks = exkaldi.utt_to_spk(f.utts,
                                          utt2spk=os.path.join(
                                              args.expDir, "data", "train",
                                              "utt2spk"))
                tempFmllrMat.append(fmllrTransMat.subset(keys=spks))
            transcription = tempTrans
            newAli = tempAli
            fmllrTransMat = tempFmllrMat

        # 4. Train
        print("Train the triphone model")
        model.train(
            ldaFeat,
            transcription,
            os.path.join(args.expDir, "dict", "L.fst"),
            tree,
            tempDir=os.path.join(args.expDir, "train_sat"),
            initialAli=newAli,
            fmllrTransMat=fmllrTransMat,
            spk2utt=os.path.join(args.expDir, "data", "train", "spk2utt"),
            utt2spk=os.path.join(args.expDir, "data", "train", "utt2spk"),
            numIters=args.numIters,
            maxIterInc=args.maxIterInc,
            totgauss=15000,
            realignIter=args.realignIter,
            fmllrIter=args.fmllrIter,
            boostSilence=1.0,
            power=0.2,
            fmllrSilWt=0.0,
        )
        print(model.info)
        del ldaFeat
        del fmllrTransMat
        del newAli

    else:
        declare.is_file(os.path.join(args.expDir, "train_sat", "final.mdl"))
        declare.is_file(os.path.join(args.expDir, "train_sat", "tree"))
        model = exkaldi.load_hmm(
            os.path.join(args.expDir, "train_sat", "final.mdl"))
        tree = exkaldi.load_tree(
            os.path.join(args.expDir, "train_sat", "tree"))

    # ------------- Compile the WFST decoding graph and decode ----------------------
    # Make a WFST decoding graph
    make_WFST_graph(
        outDir=os.path.join(args.expDir, "train_sat", "graph"),
        hmm=model,
        tree=tree,
    )
    # Decode the test data
    GMM_decode_fmllr_and_score(
        outDir=os.path.join(args.expDir, "train_sat",
                            f"decode_{args.order}grams"),
        hmm=model,
        HCLGfile=os.path.join(args.expDir, "train_sat", "graph",
                              f"HCLG.{args.order}.fst"),
        tansformMatFile=os.path.join(args.expDir, "train_lda_mllt",
                                     "trans.mat"),
    )
def main():
    # ------------- Parse arguments from command line ----------------------
    # 1. Add a description of this program
    args.discribe("This program is used to train the monophone GMM-HMM model")
    # 2. Add options
    args.add("--expDir",
             abbr="-e",
             dtype=str,
             default="exp",
             discription="The data and output path of current experiment.")
    args.add("--delta",
             abbr="-d",
             dtype=int,
             default=2,
             discription="The order of deltas to add to the feature.")
    args.add("--numIters",
             abbr="-n",
             dtype=int,
             default=40,
             discription="How many iterations to train.")
    args.add("--maxIterInc",
             abbr="-m",
             dtype=int,
             default=30,
             discription="The final iteration of increasing gaussians.")
    args.add("--realignIter",
             abbr="-r",
             dtype=int,
             default=[
                 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26,
                 29, 32, 35, 38
             ],
             discription="The iterations at which to realign the feature.")
    args.add("--order",
             abbr="-o",
             dtype=int,
             default=6,
             minV=1,
             maxV=6,
             discription="Which N-grams model to use.")
    args.add("--beam",
             abbr="-b",
             dtype=int,
             default=13,
             discription="Decode beam size.")
    args.add("--latBeam",
             abbr="-l",
             dtype=int,
             default=6,
             discription="Lattice beam size.")
    args.add("--acwt",
             abbr="-a",
             dtype=float,
             default=0.083333,
             discription="Acoustic model weight.")
    args.add(
        "--parallel",
        abbr="-p",
        dtype=int,
        default=4,
        minV=1,
        maxV=10,
        discription=
        "The number of parallel processes used to compute the features of the train dataset.")
    args.add("--skipTrain",
             abbr="-s",
             dtype=bool,
             default=False,
             discription="If True, skip training. Do decoding only.")
    # 3. Then start to parse arguments.
    args.parse()
    # 4. Take a backup of the arguments
    args.print_args()  # print the arguments to the display
    argsLogFile = os.path.join(args.expDir, "conf", "train_mono.args")
    args.save(argsLogFile)

    if not args.skipTrain:
        # ------------- Prepare feature for training ----------------------
        # 1. Load the feature for training (we use the index table format)
        feat = exkaldi.load_index_table(
            os.path.join(args.expDir, "mfcc", "train", "mfcc_cmvn.ark"))
        print("Load MFCC+CMVN feature.")
        feat = exkaldi.add_delta(feat,
                                 order=args.delta,
                                 outFile=os.path.join(args.expDir,
                                                      "train_mono",
                                                      "mfcc_cmvn_delta.ark"))
        print(f"Add {args.delta}-order deltas.")
        # 2. Load the lexicon bank
        lexicons = exkaldi.load_lex(
            os.path.join(args.expDir, "dict", "lexicons.lex"))
        print("Restore lexicon bank.")

        # ------------- Start training ----------------------
        # 1. Initialize a monophone HMM object
        model = exkaldi.hmm.MonophoneHMM(lexicons=lexicons, name="mono")
        model.initialize(feat=feat,
                         topoFile=os.path.join(args.expDir, "dict", "topo"))
        print(f"Initialized a monophone HMM-GMM model: {model.info}.")

        # 2. Split data for parallel training
        transcription = exkaldi.load_transcription(
            os.path.join(args.expDir, "data", "train", "text"))
        transcription = transcription.sort()
        if args.parallel > 1:
            # Split the feature
            feat = feat.sort(by="utt").subset(chunks=args.parallel)
            # Split the transcription depending on the utterance IDs of
            # each feature chunk
            temp = []
            for f in feat:
                temp.append(transcription.subset(keys=f.utts))
            transcription = temp

        # 3. Train
        model.train(
            feat,
            transcription,
            LFile=os.path.join(args.expDir, "dict", "L.fst"),
            tempDir=os.path.join(args.expDir, "train_mono"),
            numIters=args.numIters,
            maxIterInc=args.maxIterInc,
            totgauss=1000,
            realignIter=args.realignIter,
            boostSilence=1.0,
        )
        print(model.info)

        # Save the tree
        model.tree.save(os.path.join(args.expDir, "train_mono", "tree"))
        print("Tree has been saved.")

        # 4. Realign with boost silence 1.25
        print("Realign the training feature (boost silence = 1.25)")
        trainGraphFiles = exkaldi.utils.list_files(
            os.path.join(args.expDir, "train_mono", "*train_graph"))
        model.align(
            feat,
            trainGraphFile=trainGraphFiles,  # train graphs were generated in the train step
            boostSilence=1.25,
            outFile=os.path.join(args.expDir, "train_mono", "final.ali"))
        del feat
        print("Save the new alignment done.")
        tree = model.tree

    else:
        declare.is_file(os.path.join(args.expDir, "train_mono", "final.mdl"))
        declare.is_file(os.path.join(args.expDir, "train_mono", "tree"))
        model = exkaldi.load_hmm(
            os.path.join(args.expDir, "train_mono", "final.mdl"))
        tree = exkaldi.load_tree(
            os.path.join(args.expDir, "train_mono", "tree"))

    # ------------- Compile the WFST decoding graph and decode ----------------------
    # Make a WFST decoding graph
    make_WFST_graph(
        outDir=os.path.join(args.expDir, "train_mono", "graph"),
        hmm=model,
        tree=tree,
    )
    # Decode the test data
    GMM_decode_mfcc_and_score(
        outDir=os.path.join(args.expDir, "train_mono",
                            f"decode_{args.order}grams"),
        hmm=model,
        HCLGfile=os.path.join(args.expDir, "train_mono", "graph",
                              f"HCLG.{args.order}.fst"),
    )
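
# --- Sketch: inspecting the trained monophone model (hedged) ----------------
# load_hmm and .info are used the same way in the skipTrain branch above;
# this standalone check simply reloads the result and prints its state
# (phone, pdf and gaussian counts).
def example_inspect_mono():
    model = exkaldi.load_hmm(
        os.path.join(args.expDir, "train_mono", "final.mdl"))
    print(model.info)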
def main():
    # ------------- Parse arguments from command line ----------------------
    # 1. Add a description of this program
    args.discribe(
        "This program is used to train the delta-feature triphone GMM-HMM model")
    # 2. Add options
    args.add("--expDir",
             abbr="-e",
             dtype=str,
             default="exp",
             discription="The data and output path of current experiment.")
    args.add("--delta",
             abbr="-d",
             dtype=int,
             default=2,
             discription="The order of deltas to add to the feature.")
    args.add("--numIters",
             abbr="-n",
             dtype=int,
             default=35,
             discription="How many iterations to train.")
    args.add("--maxIterInc",
             abbr="-m",
             dtype=int,
             default=25,
             discription="The final iteration of increasing gaussians.")
    args.add("--realignIter",
             abbr="-r",
             dtype=int,
             default=[10, 20, 30],
             discription="The iterations at which to realign the feature.")
    args.add("--order",
             abbr="-o",
             dtype=int,
             default=6,
             discription="Which N-grams model to use.")
    args.add("--beam",
             abbr="-b",
             dtype=int,
             default=13,
             discription="Decode beam size.")
    args.add("--latBeam",
             abbr="-l",
             dtype=int,
             default=6,
             discription="Lattice beam size.")
    args.add("--acwt",
             abbr="-a",
             dtype=float,
             default=0.083333,
             discription="Acoustic model weight.")
    args.add(
        "--parallel",
        abbr="-p",
        dtype=int,
        default=4,
        minV=1,
        maxV=10,
        discription=
        "The number of parallel processes used to compute the features of the train dataset.")
    args.add("--skipTrain",
             abbr="-s",
             dtype=bool,
             default=False,
             discription="If True, skip training. Do decoding only.")
    # 3. Then start to parse arguments.
    args.parse()
    # 4. Take a backup of the arguments
    argsLogFile = os.path.join(args.expDir, "conf", "train_delta.args")
    args.save(argsLogFile)

    if not args.skipTrain:
        # ------------- Prepare feature and previous alignment for training ----------------------
        # 1. Load the feature for training
        feat = exkaldi.load_index_table(
            os.path.join(args.expDir, "mfcc", "train", "mfcc_cmvn.ark"))
        print("Load MFCC+CMVN feature.")
        feat = exkaldi.add_delta(feat,
                                 order=args.delta,
                                 outFile=os.path.join(args.expDir,
                                                      "train_delta",
                                                      "mfcc_cmvn_delta.ark"))
        print(f"Add {args.delta}-order deltas.")
        # 2. Load the lexicon bank
        lexicons = exkaldi.load_lex(
            os.path.join(args.expDir, "dict", "lexicons.lex"))
        print("Restore lexicon bank.")
        # 3. Load the previous alignment
        ali = exkaldi.load_index_table(os.path.join(args.expDir,
                                                    "train_mono",
                                                    "*final.ali"),
                                       useSuffix="ark")

        # -------------- Build the decision tree ------------------------
        print("Start to build the decision tree")
        tree = exkaldi.hmm.DecisionTree(lexicons=lexicons,
                                        contextWidth=3,
                                        centralPosition=1)
        tree.train(
            feat=feat,
            hmm=os.path.join(args.expDir, "train_mono", "final.mdl"),
            ali=ali,
            topoFile=os.path.join(args.expDir, "dict", "topo"),
            numLeaves=2500,
            tempDir=os.path.join(args.expDir, "train_delta"),
        )
        print("Build tree done.")

        # ------------- Start training ----------------------
        # 1. Initialize a triphone HMM object
        model = exkaldi.hmm.TriphoneHMM(lexicons=lexicons, name="tri")
        model.initialize(
            tree=tree,
            topoFile=os.path.join(args.expDir, "dict", "topo"),
            treeStatsFile=os.path.join(args.expDir, "train_delta",
                                       "treeStats.acc"),
        )
        print(f"Initialized a triphone HMM-GMM model: {model.info}.")

        # 2. Convert the previous alignment
        print("Transform the alignment")
        newAli = exkaldi.hmm.convert_alignment(
            ali=ali,
            originHmm=os.path.join(args.expDir, "train_mono", "final.mdl"),
            targetHmm=model,
            tree=tree,
            outFile=os.path.join(args.expDir, "train_delta", "initial.ali"),
        )

        # 3. Split data for parallel training
        transcription = exkaldi.load_transcription(
            os.path.join(args.expDir, "data", "train", "text"))
        transcription = transcription.sort()
        if args.parallel > 1:
            # Split the feature
            feat = feat.sort(by="utt").subset(chunks=args.parallel)
            # Split the transcription and alignment depending on the
            # utterance IDs of each feature chunk
            tempTrans = []
            tempAli = []
            for f in feat:
                tempTrans.append(transcription.subset(keys=f.utts))
                tempAli.append(newAli.subset(keys=f.utts))
            transcription = tempTrans
            newAli = tempAli

        # 4. Train
        print("Train the triphone model")
        model.train(
            feat,
            transcription,
            os.path.join(args.expDir, "dict", "L.fst"),
            tree,
            tempDir=os.path.join(args.expDir, "train_delta"),
            initialAli=newAli,
            numIters=args.numIters,
            maxIterInc=args.maxIterInc,
            totgauss=15000,
            realignIter=args.realignIter,
            boostSilence=1.0,
        )
        print(model.info)

        # Save the tree
        model.tree.save(os.path.join(args.expDir, "train_delta", "tree"))
        print("Tree has been saved.")
        del feat

    else:
        declare.is_file(os.path.join(args.expDir, "train_delta", "final.mdl"))
        declare.is_file(os.path.join(args.expDir, "train_delta", "tree"))
        model = exkaldi.load_hmm(
            os.path.join(args.expDir, "train_delta", "final.mdl"))
        tree = exkaldi.load_tree(
            os.path.join(args.expDir, "train_delta", "tree"))

    # ------------- Compile the WFST decoding graph and decode ----------------------
    # Make a WFST decoding graph
    make_WFST_graph(
        outDir=os.path.join(args.expDir, "train_delta", "graph"),
        hmm=model,
        tree=tree,
    )
    # Decode the test data
    GMM_decode_mfcc_and_score(
        outDir=os.path.join(args.expDir, "train_delta",
                            f"decode_{args.order}grams"),
        hmm=model,
        HCLGfile=os.path.join(args.expDir, "train_delta", "graph",
                              f"HCLG.{args.order}.fst"),
    )
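
# --- Sketch: the parallel-split pattern in isolation ------------------------
# Both triphone scripts above use the same chunking idea: split the sorted
# feature table into N chunks, then subset every companion table by each
# chunk's utterance IDs so parallel workers see consistent data. The calls
# (sort, subset, .utts) are the ones used above; the function name is
# illustrative.
def example_parallel_split(feat, transcription, nChunks=4):
    chunks = feat.sort(by="utt").subset(chunks=nChunks)
    return [(c, transcription.subset(keys=c.utts)) for c in chunks]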
def main():
    # 1. Parse command line options.
    args.add("--order",
             abbr="-o",
             dtype=int,
             default=[2, 3, 4, 5, 6],
             minV=1,
             maxV=6,
             discription="The language model order.")
    args.add("--dataDir",
             abbr="-d",
             dtype=str,
             default="./exp",
             discription="The resource directory.")
    args.add("--exp",
             abbr="-e",
             dtype=str,
             default="./exp_lms",
             discription="Experiment output directory.")
    args.parse()

    # 2. Prepare the text and lexicon.
    declare.is_file(
        f"{args.dataDir}/data/train/text",
        debug=
        "There is no train text file available. Please run '01_prepare_data.py' to generate it."
    )
    declare.is_file(
        f"{args.dataDir}/data/test/text",
        debug=
        "There is no test text file available. Please run '01_prepare_data.py' to generate it."
    )
    declare.is_file(
        f"{args.dataDir}/dict/lexicons.lex",
        debug=
        "There is no lexicon file available. Please run '02_make_dict_and_LM.py' to generate it."
    )

    trainTrans = exkaldi.load_transcription(
        f"{args.dataDir}/data/train/text")
    lexicons = exkaldi.load_lex(f"{args.dataDir}/dict/lexicons.lex")
    testTrans = exkaldi.load_transcription(f"{args.dataDir}/data/test/text")

    # 3. Train LMs and compute perplexities.
    for o in args.order:
        for backend in ["sri", "ken"]:
            if backend == "sri":
                exkaldi.lm.train_ngrams_srilm(
                    lexicons,
                    order=o,
                    # If 'text' receives an exkaldi Transcription object,
                    # the utterance IDs are stripped automatically.
                    text=trainTrans,
                    outFile=os.path.join(args.exp,
                                         f"{backend}_{o}grams.arpa"),
                    config={"-wbdiscount": True},
                )
            else:
                exkaldi.lm.train_ngrams_kenlm(
                    lexicons,
                    order=o,
                    text=trainTrans,
                    outFile=os.path.join(args.exp,
                                         f"{backend}_{o}grams.arpa"),
                    config={
                        "--discount_fallback": True,
                        "-S": "20%"
                    },
                )
            exkaldi.lm.arpa_to_binary(
                arpaFile=os.path.join(args.exp, f"{backend}_{o}grams.arpa"),
                outFile=os.path.join(args.exp, f"{backend}_{o}grams.binary"),
            )
            model = exkaldi.load_ngrams(
                os.path.join(args.exp, f"{backend}_{o}grams.binary"))
            perScore = model.perplexity(testTrans)
            print(f"{o}-grams ({backend}) perplexity:", perScore)
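
# --- Sketch: picking the best LM programmatically (hedged) ------------------
# The loop above only prints each perplexity. To select a model instead,
# collect the numeric values first. 'results' is an assumed dict mapping
# (order, backend) to a float perplexity extracted from the score object;
# how to get that float depends on your exkaldi version.
def example_pick_best_lm(results):
    order, backend = min(results, key=results.get)
    print(f"Lowest perplexity: {backend} {order}-gram")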