Example 1
def prepare_DNN_data():
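    # Overview: for each of the train/dev/test subsets, build LDA+MLLT features,
    # align them with the SAT GMM-HMM, estimate an fMLLR transform, transform the
    # features and re-align, then save the fMLLR features, pdf-ID/phone-ID
    # alignments, CMVN statistics and data files needed for DNN training.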

    print("Start to prepare data for DNN training")
    assert os.path.isdir(f"{args.expDir}/train_sat"
                         ), "Please run previous programs up to SAT training."

    # Lexicons and GMM-HMM model
    lexicons = exkaldi.load_lex(f"{args.expDir}/dict/lexicons.lex")
    hmm = f"{args.expDir}/train_sat/final.mdl"
    tree = f"{args.expDir}/train_sat/tree"

    for Name in ["train", "dev", "test"]:

        exkaldi.utils.make_dependent_dirs(
            f"{args.expDir}/train_dnn/data/{Name}", pathIsFile=False)
        # Make LDA feature
        print(f"Make LDA feature for '{Name}'")
        feat = exkaldi.load_feat(f"{args.expDir}/mfcc/{Name}/mfcc_cmvn.ark")
        feat = feat.splice(left=args.LDAsplice, right=args.LDAsplice)
        feat = exkaldi.transform_feat(
            feat, matFile=f"{args.expDir}/train_lda_mllt/trans.mat")
        # Compile the aligning graph
        print(f"Compile aligning graph")
        transInt = exkaldi.hmm.transcription_to_int(
            transcription=f"{args.expDir}/data/{Name}/text",
            symbolTable=lexicons("words"),
            unkSymbol=lexicons("oov"),
        )
        graphFile = exkaldi.decode.wfst.compile_align_graph(
            hmm,
            tree,
            transcription=transInt,
            LFile=f"{args.expDir}/dict/L.fst",
            outFile=f"{args.expDir}/train_dnn/data/{Name}/align_graph",
            lexicons=lexicons,
        )
        # Align first time
        print(f"Align the first time")
        ali = exkaldi.decode.wfst.gmm_align(
            hmm,
            feat,
            alignGraphFile=graphFile,
            lexicons=lexicons,
        )
        # Estimate transform matrix
        print(f"Estimate fMLLR transform matrix")
        fmllrTransMat = exkaldi.hmm.estimate_fMLLR_matrix(
            aliOrLat=ali,
            lexicons=lexicons,
            aliHmm=hmm,
            feat=feat,
            spk2utt=f"{args.expDir}/data/{Name}/spk2utt",
        )
        fmllrTransMat.save(f"{args.expDir}/train_dnn/data/{Name}/trans.ark")
        # Transform feature
        print(f"Transform feature")
        feat = exkaldi.use_fmllr(
            feat,
            fmllrTransMat,
            utt2spk=f"{args.expDir}/data/{Name}/utt2spk",
        )
        # Align second time with new feature
        print(f"Align the second time")
        ali = exkaldi.decode.wfst.gmm_align(
            hmm,
            feat,
            alignGraphFile=graphFile,
            lexicons=lexicons,
        )
        # Save alignment and feature
        print(f"Save final fmllr feature and alignment")
        feat.save(f"{args.expDir}/train_dnn/data/{Name}/fmllr.ark")
        ali.save(f"{args.expDir}/train_dnn/data/{Name}/ali")
        # Transform alignment
        print(f"Generate pdf ID and phone ID alignment")
        ali.to_numpy(
            aliType="pdfID",
            hmm=hmm).save(f"{args.expDir}/train_dnn/data/{Name}/pdfID.npy")
        ali.to_numpy(
            aliType="phoneID",
            hmm=hmm).save(f"{args.expDir}/train_dnn/data/{Name}/phoneID.npy")
        del ali
        # Compute cmvn for fmllr feature
        print(f"Compute the CMVN for fmllr feature")
        cmvn = exkaldi.compute_cmvn_stats(
            feat, spk2utt=f"{args.expDir}/data/{Name}/spk2utt")
        cmvn.save(f"{args.expDir}/train_dnn/data/{Name}/cmvn_of_fmllr.ark")
        del cmvn
        del feat
        # Copy the spk2utt, utt2spk and text files
        shutil.copyfile(f"{args.expDir}/data/{Name}/spk2utt",
                        f"{args.expDir}/train_dnn/data/{Name}/spk2utt")
        shutil.copyfile(f"{args.expDir}/data/{Name}/utt2spk",
                        f"{args.expDir}/train_dnn/data/{Name}/utt2spk")
        shutil.copyfile(f"{args.expDir}/data/{Name}/text",
                        f"{args.expDir}/train_dnn/data/{Name}/text")
        transInt.save(f"{args.expDir}/data/{Name}/text.int")

    print("Write feature and alignment dim information")
    dims = exkaldi.ListTable()
    feat = exkaldi.load_feat(f"{args.expDir}/train_dnn/data/test/fmllr.ark")
    dims["fmllr"] = feat.dim
    del feat
    hmm = exkaldi.hmm.load_hmm(f"{args.expDir}/train_sat/final.mdl")
    dims["phones"] = hmm.info.phones + 1
    dims["pdfs"] = hmm.info.pdfs
    del hmm
    dims.save(f"{args.expDir}/train_dnn/data/dims")
Example 2
def prepare_LSTM_data():
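  # Overview: same pipeline as prepare_DNN_data() in Example 1, except that the
  # alignment is produced from the DNN output probabilities (nn_align) and the
  # aligning graphs are copied from the DNN stage instead of being recompiled.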

  print("Start to prepare data for LSTM training")
  declare.is_dir(f"{args.expDir}/train_dnn/prob", debug="Please run previous programs up to DNN training.")

  # Lexicons and GMM-HMM model
  lexicons = exkaldi.load_lex( f"{args.expDir}/dict/lexicons.lex" )
  hmm = f"{args.expDir}/train_sat/final.mdl"
  tree = f"{args.expDir}/train_sat/tree"

  for Name in ["train", "dev", "test"]:
    exkaldi.utils.make_dependent_dirs(f"{args.expDir}/train_lstm/data/{Name}", pathIsFile=False)
    # Load feature
    print(f"Make LDA feature for '{Name}'")
    feat = exkaldi.load_feat( f"{args.expDir}/mfcc/{Name}/mfcc_cmvn.ark" )
    feat = feat.splice(left=args.LDAsplice, right=args.LDAsplice)
    feat = exkaldi.transform_feat(feat, matFile=f"{args.expDir}/train_lda_mllt/trans.mat" )
    # Load probabilities for aligning (the file is large, so we use an index table).
    prob = exkaldi.load_index_table( f"{args.expDir}/train_dnn/prob/{Name}.ark" )
    # Copy the aligning graph built in the DNN stage
    print(f"Copy aligning graph from DNN resources")
    shutil.copyfile( f"{args.expDir}/train_dnn/data/{Name}/align_graph",
                    f"{args.expDir}/train_lstm/data/{Name}/align_graph"
                  )
    # Align
    print("Align")
    ali = exkaldi.decode.wfst.nn_align(
                                    hmm,
                                    prob,
                                    alignGraphFile=f"{args.expDir}/train_lstm/data/{Name}/align_graph", 
                                    lexicons=lexicons,
                                    outFile=f"{args.expDir}/train_lstm/data/{Name}/ali",
                                )
    # Estimate transform matrix
    print("Estimate transform matrix")
    fmllrTransMat = exkaldi.hmm.estimate_fMLLR_matrix(
                                aliOrLat=ali,
                                lexicons=lexicons,
                                aliHmm=hmm,
                                feat=feat,
                                spk2utt=f"{args.expDir}/data/{Name}/spk2utt",
                                outFile=f"{args.expDir}/train_lstm/data/{Name}/trans.ark",
                            )
    # Transform feature
    print("Transform matrix")
    feat = exkaldi.use_fmllr(
                        feat,
                        fmllrTransMat,
                        utt2spk=f"{args.expDir}/data/{Name}/utt2spk",
                        outFile=f"{args.expDir}/train_lstm/data/{Name}/fmllr.ark",
                    )
    # Transform alignment (because 'ali' is an index table object, we need to fetch the alignment data before calling the 'to_numpy' method).
    ali = ali.fetch(arkType="ali")
    ali.to_numpy(aliType="pdfID",hmm=hmm).save( f"{args.expDir}/train_lstm/data/{Name}/pdfID.npy" )
    ali.to_numpy(aliType="phoneID",hmm=hmm).save( f"{args.expDir}/train_lstm/data/{Name}/phoneID.npy" )
    del ali
    # Compute cmvn for fmllr feature
    cmvn = exkaldi.compute_cmvn_stats(
                                  feat, 
                                  spk2utt=f"{args.expDir}/data/{Name}/spk2utt",
                                  outFile=f"{args.expDir}/train_lstm/data/{Name}/cmvn_of_fmllr.ark",
                                )
    del cmvn
    del feat
    # Copy the spk2utt, utt2spk and text files
    shutil.copyfile( f"{args.expDir}/data/{Name}/spk2utt", f"{args.expDir}/train_lstm/data/{Name}/spk2utt")
    shutil.copyfile( f"{args.expDir}/data/{Name}/utt2spk", f"{args.expDir}/train_lstm/data/{Name}/utt2spk")
    shutil.copyfile( f"{args.expDir}/data/{Name}/text", f"{args.expDir}/train_lstm/data/{Name}/text" )

  print("Write feature and alignment dim information")
  dims = exkaldi.ListTable()
  feat = exkaldi.load_feat( f"{args.expDir}/train_lstm/data/test/fmllr.ark" ) 
  dims["fmllr"] = feat.dim
  del feat
  hmm = exkaldi.hmm.load_hmm( f"{args.expDir}/train_sat/final.mdl" )
  dims["phones"] = hmm.info.phones + 1
  dims["pdfs"] = hmm.info.pdfs
  del hmm
  dims.save( f"{args.expDir}/train_lstm/data/dims" )
Example 3
def GMM_decode_fmllr_and_score(outDir, hmm, HCLGfile, tansformMatFile=None):
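    # Overview: two-pass fMLLR decoding. Decode with the original feature, estimate
    # a primary fMLLR transform from that lattice, decode again with the transformed
    # feature, estimate a secondary transform, compose both transforms, rescore the
    # second lattice with the final feature, then score WER over a penalty/LMWT grid.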

    exkaldi.utils.make_dependent_dirs(outDir, pathIsFile=False)

    lexicons = exkaldi.decode.graph.load_lex(
        os.path.join("exp", "dict", "lexicons.lex"))
    print(f"Load test feature.")
    featFile = os.path.join("exp", "mfcc", "test", "mfcc_cmvn.ark")
    feat = exkaldi.load_feat(featFile)
    if tansformMatFile is None:
        print("Feature type is delta")
        feat = feat.add_delta(order=2)
        print("Add 2-order deltas.")
    else:
        print("Feature type is lda+mllt")
        feat = feat.splice(left=3, right=3)
        feat = exkaldi.transform_feat(feat, tansformMatFile)
        print("Transform feature")

    ## 1. Estimate the primary transform matrix from an alignment or lattice.
    ## We estimate it from a lattice, so we decode first.
    print("Decode the first time with original feature.")
    preLat = exkaldi.decode.wfst.gmm_decode(
        feat,
        hmm,
        HCLGfile,
        wordSymbolTable=lexicons("words"),
        beam=10,
        latBeam=6,
        acwt=0.083333,
        maxActive=2000,
    )
    preLat.save(os.path.join(outDir, "test_primary.lat"))

    print("Estimate the primary fMLLR transform matrix.")
    preTransMatrix = exkaldi.hmm.estimate_fMLLR_matrix(
        aliOrLat=preLat,
        lexicons=lexicons,
        aliHmm=hmm,
        feat=feat,
        adaHmm=None,
        silenceWeight=0.01,
        acwt=0.083333,
        spk2utt=os.path.join("exp", "data", "test", "spk2utt"),
    )
    del preLat
    ## 2. Transform the feature. We will use the new feature to estimate the secondary transform matrix from a lattice.
    print("Transform feature with primary matrix.")
    fmllrFeat = exkaldi.use_fmllr(
        feat,
        preTransMatrix,
        utt2spkFile=os.path.join("exp", "data", "test", "utt2spk"),
    )
    print("Decode the second time with primary fmllr feature.")
    secLat = exkaldi.decode.wfst.gmm_decode(
        fmllrFeat,
        hmm,
        HCLGfile,
        wordSymbolTable=lexicons("words"),
        beam=13,
        latBeam=6,
        acwt=0.083333,
        maxActive=7000,
        config={"--determinize-lattice": "false"},
    )
    print("Determinize secondary lattice.")
    thiLat = secLat.determinize(acwt=0.083333, beam=4)
    print("Estimate the secondary fMLLR transform matrix.")
    secTransMatrix = exkaldi.hmm.estimate_fMLLR_matrix(
        aliOrLat=thiLat,
        lexicons=lexicons,
        aliHmm=hmm,
        feat=fmllrFeat,
        adaHmm=None,
        silenceWeight=0.01,
        acwt=0.083333,
        spk2utt=os.path.join("exp", "data", "test", "spk2utt"),
    )
    del fmllrFeat
    del thiLat
    ## 3. Compose the primary and secondary matrices to get the final transform matrix.
    print("Compose the primary and secondary transform matrix.")
    finalTransMatrix = exkaldi.hmm.compose_transform_matrixs(
        matA=preTransMatrix,
        matB=secTransMatrix,
        bIsAffine=True,
    )
    finalTransMatrix.save(os.path.join(outDir, "trans.ark"))
    print("Transform feature with final matrix.")
    ## 4. Transform the feature with the final transform matrix and use it for rescoring.
    ## We rescore the lattice generated in the second step to obtain the final lattice.
    finalFmllrFeat = exkaldi.use_fmllr(
        feat,
        finalTransMatrix,
        utt2spkFile=os.path.join("exp", "data", "test", "utt2spk"),
    )
    del finalTransMatrix
    print("Rescore secondary lattice.")
    lat = secLat.am_rescore(
        hmm=hmm,
        feat=finalFmllrFeat,
    )
    print("Determinize secondary lattice.")
    lat = lat.determinize(acwt=0.083333, beam=6)
    lat.save(os.path.join(outDir, "test.lat"))
    print("Generate lattice done.")

    phoneMapFile = os.path.join("exp", "dict", "phones.48_to_39.map")
    phoneMap = exkaldi.ListTable(name="48-39").load(phoneMapFile)
    refText = exkaldi.load_trans(os.path.join("exp", "data", "test",
                                              "text")).convert(phoneMap, None)
    refText.save(os.path.join(outDir, "ref.txt"))
    print("Generate reference text done.")

    print("Now score:")
    bestWER = (1000, 0, 0)
    bestResult = None
    for penalty in [0., 0.5, 1.0]:
        for LMWT in range(1, 11):
            # Add penalty
            newLat = lat.add_penalty(penalty)
            # Get 1-best result (word-level)
            result = newLat.get_1best(lexicons("words"),
                                      hmm,
                                      lmwt=LMWT,
                                      acwt=1)
            # Transform from int value format to text format
            result = exkaldi.hmm.transcription_from_int(
                result, lexicons("words"))
            # Transform 48-phones to 39-phones
            result = result.convert(phoneMap, None)
            # Compute WER
            score = exkaldi.decode.score.wer(ref=refText,
                                             hyp=result,
                                             mode="present")
            if score.WER < bestWER[0]:
                bestResult = result
                bestWER = (score.WER, penalty, LMWT)
            print(f"Penalty: {penalty}, LMWT: {LMWT}, WER: {score.WER}%")
    print("Score done. Save the best result.")
    bestResult.save(os.path.join(outDir, "hyp.txt"))
    with open(os.path.join(outDir, "best_WER"), "w") as fw:
        fw.write(f"WER {bestWER[0]}, penalty {bestWER[1]}, LMWT {bestWER[2]}")
Example 4
def main():

    # ------------- Parse arguments from command line ----------------------
    # 1. Add a description of this program
    args.discribe("This program is used to train a triphone GMM-HMM model")
    # 2. Add options
    args.add("--expDir",
             abbr="-e",
             dtype=str,
             default="exp",
             discription="The data and output path of current experiment.")
    args.add("--splice",
             abbr="-c",
             dtype=int,
             default=3,
             discription="How many left-right frames to splice.")
    args.add("--numIters",
             abbr="-n",
             dtype=int,
             default=35,
             discription="How many iterations to train.")
    args.add("--maxIterInc",
             abbr="-m",
             dtype=int,
             default=25,
             discription="The final iteration of increasing gaussians.")
    args.add("--realignIter",
             abbr="-r",
             dtype=int,
             default=[10, 20, 30],
             discription="The iteration to realign feature.")
    args.add("--fmllrIter",
             abbr="-f",
             dtype=int,
             default=[2, 4, 6, 12],
             discription="The iteration to estimate fmllr matrix.")
    args.add("--order",
             abbr="-o",
             dtype=int,
             default=6,
             discription="Which N-grams model to use.")
    args.add("--beam",
             abbr="-b",
             dtype=int,
             default=13,
             discription="Decode beam size.")
    args.add("--latBeam",
             abbr="-l",
             dtype=int,
             default=6,
             discription="Lattice beam size.")
    args.add("--acwt",
             abbr="-a",
             dtype=float,
             default=0.083333,
             discription="Acoustic model weight.")
    args.add(
        "--parallel",
        abbr="-p",
        dtype=int,
        default=4,
        minV=1,
        maxV=10,
        discription=
        "The number of parallel process to compute feature of train dataset.")
    args.add("--skipTrain",
             abbr="-s",
             dtype=bool,
             default=False,
             discription="If True, skip training. Do decoding only.")
    # 3. Then start to parse arguments.
    args.parse()
    # 4. Take a backup of arguments
    argsLogFile = os.path.join(args.expDir, "conf", "train_sat.args")
    args.save(argsLogFile)

    if not args.skipTrain:
        # ------------- Prepare feature and previous alignment for training ----------------------
        # 1. Load the feature for training
        print(f"Load MFCC+CMVN feature.")
        feat = exkaldi.load_index_table(
            os.path.join(args.expDir, "mfcc", "train", "mfcc_cmvn.ark"))
        print(f"Splice {args.splice} frames.")
        originalFeat = exkaldi.splice_feature(feat,
                                              left=args.splice,
                                              right=args.splice,
                                              outFile=os.path.join(
                                                  args.expDir, "train_delta",
                                                  "mfcc_cmvn_splice.ark"))
        print(f"Transform LDA feature")
        ldaFeat = exkaldi.transform_feat(
            feat=originalFeat,
            matFile=os.path.join(args.expDir, "train_lda_mllt", "trans.mat"),
            outFile=os.path.join(args.expDir, "train_sat", "lda_feat.ark"),
        )
        del originalFeat
        # 2. Load previous alignment and lexicons
        ali = exkaldi.load_index_table(os.path.join(args.expDir,
                                                    "train_lda_mllt",
                                                    "*final.ali"),
                                       useSuffix="ark")
        lexicons = exkaldi.load_lex(
            os.path.join(args.expDir, "dict", "lexicons.lex"))
        # 3. Estimate the primary fMLLR transform matrix
        print("Estiminate the primary fMLLR transform matrixs")
        fmllrTransMat = exkaldi.hmm.estimate_fMLLR_matrix(
            aliOrLat=ali,
            lexicons=lexicons,
            aliHmm=os.path.join(args.expDir, "train_lda_mllt", "final.mdl"),
            feat=ldaFeat,
            spk2utt=os.path.join(args.expDir, "data", "train", "spk2utt"),
            outFile=os.path.join(args.expDir, "train_sat", "trans.ark"),
        )
        print("Transform feature")
        fmllrFeat = exkaldi.use_fmllr(
            ldaFeat,
            fmllrTransMat,
            utt2spk=os.path.join("exp", "data", "train", "utt2spk"),
            outFile=os.path.join(args.expDir, "train_sat", "fmllr_feat.ark"),
        )

        # -------------- Build the decision tree ------------------------
        print("Start build a tree")
        tree = exkaldi.hmm.DecisionTree(lexicons=lexicons,
                                        contextWidth=3,
                                        centralPosition=1)
        tree.train(
            feat=fmllrFeat,
            hmm=os.path.join(args.expDir, "train_lda_mllt", "final.mdl"),
            ali=ali,
            topoFile=os.path.join(args.expDir, "dict", "topo"),
            numLeaves=2500,
            tempDir=os.path.join(args.expDir, "train_sat"),
        )
        tree.save(os.path.join(args.expDir, "train_sat", "tree"))
        print(f"Build tree done.")
        del fmllrFeat

        # ------------- Start training ----------------------
        # 1. Initialize a triphone HMM object
        print("Initialize a triphone HMM object")
        model = exkaldi.hmm.TriphoneHMM(lexicons=lexicons)
        model.initialize(
            tree=tree,
            topoFile=os.path.join(args.expDir, "dict", "topo"),
            treeStatsFile=os.path.join(args.expDir, "train_sat",
                                       "treeStats.acc"),
        )
        print(f"Initialized a monophone HMM-GMM model: {model.info}.")

        # 2. Convert the previous alignment
        print(f"Transform the alignment")
        newAli = exkaldi.hmm.convert_alignment(
            ali=ali,
            originHmm=os.path.join(args.expDir, "train_lda_mllt", "final.mdl"),
            targetHmm=model,
            tree=tree,
            outFile=os.path.join(args.expDir, "train_sat", "initial.ali"),
        )

        # 3. Split data for parallel training
        transcription = exkaldi.load_transcription(
            os.path.join(args.expDir, "data", "train", "text"))
        transcription = transcription.sort()

        if args.parallel > 1:
            # split feature
            ldaFeat = ldaFeat.sort(by="utt").subset(chunks=args.parallel)
            # split transcription, alignment and fMLLR matrices by the utterance IDs of each feature chunk
            tempTrans = []
            tempAli = []
            tempFmllrMat = []
            for f in ldaFeat:
                tempTrans.append(transcription.subset(keys=f.utts))
                tempAli.append(newAli.subset(keys=f.utts))
                spks = exkaldi.utt_to_spk(f.utts,
                                          utt2spk=os.path.join(
                                              args.expDir, "data", "train",
                                              "utt2spk"))
                tempFmllrMat.append(fmllrTransMat.subset(keys=spks))
            transcription = tempTrans
            newAli = tempAli
            fmllrTransMat = tempFmllrMat

        # 4. Train
        print("Train the triphone model")
        model.train(
            ldaFeat,
            transcription,
            os.path.join(args.expDir, "dict", "L.fst"),
            tree,
            tempDir=os.path.join(args.expDir, "train_sat"),
            initialAli=newAli,
            fmllrTransMat=fmllrTransMat,
            spk2utt=os.path.join(args.expDir, "data", "train", "spk2utt"),
            utt2spk=os.path.join(args.expDir, "data", "train", "utt2spk"),
            numIters=args.numIters,
            maxIterInc=args.maxIterInc,
            totgauss=15000,
            realignIter=args.realignIter,
            fmllrIter=args.fmllrIter,
            boostSilence=1.0,
            power=0.2,
            fmllrSilWt=0.0,
        )
        print(model.info)
        del ldaFeat
        del fmllrTransMat
        del newAli

    else:
        declare.is_file(os.path.join(args.expDir, "train_sat", "final.mdl"))
        declare.is_file(os.path.join(args.expDir, "train_sat", "tree"))
        model = exkaldi.load_hmm(
            os.path.join(args.expDir, "train_sat", "final.mdl"))
        tree = exkaldi.load_tree(os.path.join(args.expDir, "train_sat",
                                              "tree"))

    # ------------- Compile the WFST decoding graph and decode ----------------------
    # Make a WFST decoding graph
    make_WFST_graph(
        outDir=os.path.join(args.expDir, "train_sat", "graph"),
        hmm=model,
        tree=tree,
    )
    # Decode test data
    GMM_decode_fmllr_and_score(
        outDir=os.path.join(args.expDir, "train_sat",
                            f"decode_{args.order}grams"),
        hmm=model,
        HCLGfile=os.path.join(args.expDir, "train_sat", "graph",
                              f"HCLG.{args.order}.fst"),
        tansformMatFile=os.path.join(args.expDir, "train_lda_mllt",
                                     "trans.mat"),
    )
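
A hedged sketch of how this script might be finished off; the `__main__` guard and the post-run check are assumptions about how the recipe is invoked, and the reload uses the same `exkaldi.load_hmm` call as the skipTrain branch above (paths assume the default --expDir of "exp"):

if __name__ == "__main__":
    main()
    # Assumed post-run check: reload the final SAT model and print its summary.
    finalModel = exkaldi.load_hmm(os.path.join("exp", "train_sat", "final.mdl"))
    print("Trained SAT model:", finalModel.info)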
Example 5
def GMM_decode_mfcc_and_score(outDir, hmm, HCLGfile, tansformMatFile=None):

    exkaldi.utils.make_dependent_dirs(outDir, pathIsFile=False)

    lexicons = exkaldi.decode.graph.load_lex(
        os.path.join("exp", "dict", "lexicons.lex"))
    print(f"Load test feature.")
    featFile = os.path.join("exp", "mfcc", "test", "mfcc_cmvn.ark")
    feat = exkaldi.load_feat(featFile)
    if tansformMatFile is None:
        print("Feature type is delta")
        feat = feat.add_delta(order=2)
        print("Add 2-order deltas.")
    else:
        print("Feature type is lda+mllt")
        feat = feat.splice(left=3, right=3)
        feat = exkaldi.transform_feat(feat, tansformMatFile)
        print("Transform feature")

    print("Start to decode")
    lat = exkaldi.decode.wfst.gmm_decode(feat,
                                         hmm,
                                         HCLGfile,
                                         wordSymbolTable=lexicons("words"),
                                         beam=13,
                                         latBeam=6,
                                         acwt=0.083333)
    lat.save(os.path.join(outDir, "test.lat"))
    print(f"Generate lattice done.")

    phoneMapFile = os.path.join("exp", "dict", "phones.48_to_39.map")
    phoneMap = exkaldi.ListTable(name="48-39").load(phoneMapFile)
    refText = exkaldi.load_trans(os.path.join("exp", "data", "test",
                                              "text")).convert(phoneMap, None)
    refText.save(os.path.join(outDir, "ref.txt"))
    print("Generate reference text done.")

    print("Now score:")
    bestWER = (1000, 0, 0)
    bestResult = None
    for penalty in [0., 0.5, 1.0]:
        for LMWT in range(1, 11):
            # Add penalty
            newLat = lat.add_penalty(penalty)
            # Get 1-best result (word-level)
            result = newLat.get_1best(lexicons("words"),
                                      hmm,
                                      lmwt=LMWT,
                                      acwt=1)
            # Transform from int value format to text format
            result = exkaldi.hmm.transcription_from_int(
                result, lexicons("words"))
            # Transform 48-phones to 39-phones
            result = result.convert(phoneMap, None)
            # Compute WER
            score = exkaldi.decode.score.wer(ref=refText,
                                             hyp=result,
                                             mode="present")
            if score.WER < bestWER[0]:
                bestResult = result
                bestWER = (score.WER, penalty, LMWT)
            print(f"Penalty: {penalty}, LMWT: {LMWT}, WER: {score.WER}%")
    print("Score done. Save the best result.")
    bestResult.save(os.path.join(outDir, "hyp.txt"))
    with open(os.path.join(outDir, "best_WER"), "w") as fw:
        fw.write(f"WER {bestWER[0]}, penalty {bestWER[1]}, LMWT {bestWER[2]}")