# NOTE(review): fragment of a larger loop body — `ScoreList`, `Result10`/`Result15`/
# `Result20`, `ResultClean`, `logProb`, `gamma`, `score_ind`, `fName_ind`, `hypList`,
# `BestHypo` and the loop index `i` are all defined above this view.
# For each utterance i it fuses an acoustic-model score with a language-model log
# probability (gamma-weighted sum) for three candidate decoders (10/15/20), then
# picks the hypothesis with the *minimum* fused score (assumes lower = better —
# TODO confirm sign convention, since log-probabilities are typically maximized),
# rewrites the utterance file path into a "(utt-id)" suffix, and finally writes all
# best hypotheses to MDC_Result_Score.txt and scores them with word_align.pl.
ScoreList.append( gamma * float(Result10[i][score_ind]) + (1 - gamma) * float(logProb[i][0])) # if bestScoreInd = 1 ScoreList.append( gamma * float(Result15[i][score_ind]) + (1 - gamma) * float(logProb[i][1])) # if bestScoreInd = 2 ScoreList.append( gamma * float(Result20[i][score_ind]) + (1 - gamma) * float(logProb[i][2])) # if bestScoreInd = 3 bestScoreInd = ScoreList.index(min(ScoreList)) # Best Hypothesis UttId = ResultClean[i][fName_ind] UttId = UttId[::-1].replace("/", "-", 1)[::-1] UttId = UttId.split('/', 2)[-1] BestHypo.append(hypList[bestScoreInd] + ' (' + UttId + ')\n') #print best_utt MDC_Hyp = outDir + "MDC_Result_Score.txt" print("\n Writing MDC results in " + MDC_Hyp) dump.TextWrite(BestHypo, MDC_Hyp) print 'Finish, now Calculating Error Rate, please wait \n' RefFile = BaseDir + "RefClean.txt" out_File = outDir + "Aligned_MDC_Score_WERReslts.txt" perl_script = subprocess.Popen( ["perl", "./word_align.pl", '-silent', MDC_Hyp, RefFile, out_File]) perl_script.wait()
# NOTE(review): fragment — the leading `}` closes a `FinalResult = {...}` dict literal
# that starts outside this view, and the per-file statements (lattice writes, i/k
# counters) belong to an enclosing per-utterance loop whose header is not visible.
# Per file: stores the result dict, writes the pocketsphinx decode lattice in both
# native (.lat) and HTK (.htk) formats under `outLattice` (slashes in the file name
# mapped to dashes), and prints a '*' every 17 files as a crude progress bar
# (`progress` is computed but no longer displayed — the write lines are commented out).
# After the loop: dumps hypotheses (.txt) and all results (.csv) for `currentModel`,
# then scores the hypotheses against RefClean.txt with the external word_align.pl.
} ListOfFinalResults.append(FinalResult) #print 'Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob LatticeFile = outLattice + fNameOnly.replace("/", '-') #print 'LatticeFile: ' + LatticeFile decoder.get_lattice().write(LatticeFile + '.lat') decoder.get_lattice().write_htk(LatticeFile + '.htk') i = i + 1 k = k + 1 if (k == 17): k = 0 sys.stdout.write('*') progress = 100 * i / TotalNoOfFiles #sys.stdout.write("Progress: %d%% \r" % (progress) ) #sys.stdout.write("Input SNR: %d" % (snr) +" AM: "+ AM +" File: " + fNameOnly +" Progress: %d%% \r" % (progress) ) sys.stdout.flush() # Running perl WER test print "\n" dump.TextWrite(HypText, outDir + currentModel + ".txt") dump.CSVDictWrite(ListOfFinalResults, outDir + "/All_" + currentModel + ".csv") hypFile = outDir + currentModel + ".txt" RefFile = BaseDir + "RefClean.txt" out_File = outDir + "WERReslts_" + currentModel + ".txt" print 'Finish, now Calculating Error Rate, please wait \n' perl_script = subprocess.Popen( ["perl", "./word_align.pl", '-silent', hypFile, RefFile, out_File]) perl_script.wait() print '\n'
# Collect per-utterance decode results from the forward (Score1) and backward
# (Score2) pocketsphinx log files and dump them to <IVorOOV>_Scores.csv.
# `getScore`, `getHypotheses`, `getFileName` and `dump` are defined elsewhere
# in this project.
ForScore1 = BaseDir + "MultiWUW-1-1_" + IVorOOV + "_Score1.log"
ForScore2 = BaseDir + "MultiWUW-1-1_" + IVorOOV + "_Score2.log"
outResult = BaseDir + IVorOOV + "_Scores.csv"

FinalResults = {}
ListOfFinalResults = []

print("Getting results of input " + ForScore1)
Score1 = getScore(ForScore1)
# Hypotheses are only valid when taken from the forward decode log (score1);
# the reversed-feature pass does not yield usable word strings.
Hypothesis = getHypotheses(ForScore1)
filesNames = getFileName(ForScore1)
Score2 = getScore(ForScore2)

print("Length of Score1: " + str(len(Score1)))
print("Length of Score2: " + str(len(Score2)))

# One CSV row per backward-pass score; the parallel forward lists are indexed
# with the same position (an IndexError here would signal mismatched logs).
for idx, backwardScore in enumerate(Score2):
    FinalResults = {
        "Name": filesNames[idx],
        "Hyp": Hypothesis[idx],
        "Score1": Score1[idx],
        "Score2": backwardScore
    }
    ListOfFinalResults.append(FinalResults)

dump.CSVDictWrite(ListOfFinalResults, outResult)
# Configure the pocketsphinx decoder (acoustic model, LM, dictionary) and run a
# forward + backward (reversed-feature) decode over every utterance listed in
# IV_FileIds, collecting both scores per file into IVResults.csv.
# `config`, `Decoder`, `psDecode`, `dump` and the *Dir/*DIR constants are
# defined earlier in this file / project.
config.set_string('-hmm', MODELDIR)
config.set_string('-lm', path.join(BASEDIR, 'etc/MultiWUW.lm'))
config.set_string('-dict', path.join(BASEDIR, 'etc/MultiWUW.dic'))
config.set_string('-logfn', '/dev/null')  # suppress pocketsphinx's own logging

# Decode streaming data.
decoder = Decoder(config)

""" For IV get forward """
FinalResult = {}
ListOfFinalResult = []
with open(IV_FileIds) as fi:
    line = fi.readline().strip('\n')
    while (line):
        # Same utterance id, two feature files: normal and time-reversed MFCCs.
        fileNameForward = BASEDIR + ForwardFeatDir + line + '.mfc'
        fileNameBackward = BASEDIR + ReverseFeatDir + line + '.mfc'
        score1 = psDecode(fileNameForward)
        score2 = psDecode(fileNameBackward)
        FinalResult = {"FileName": line, "Score1": score1, "Score2": score2}
        ListOfFinalResult.append(FinalResult)
        """ CONTINUE """
        line = fi.readline().strip('\n')

# BUG FIX: the original called dump.CSVDictWrite(BASEDIR + "IVResults.csv")
# without the data argument, so the collected results were never passed.
# Every other CSVDictWrite call in this project is (list_of_dicts, path).
dump.CSVDictWrite(ListOfFinalResult, BASEDIR + "IVResults.csv")
#!/usr/bin/python2.7
# Build a reference transcription file (Reference.txt) for WER scoring from a
# pocketsphinx batch-decode log: every line that FOLLOWS an
# 'INFO: batch.c(762):' marker carries a reference utterance whose trailing
# file path is rewritten into a word_align-style "(utt-id)" suffix.
import StoreResults as dump

ExpName = "WSJ"
TotalNoOfFiles = 35
BaseDir = "/Users/Azhar/Desktop/MDC_Experiments/" + ExpName + "/"
inputFile = BaseDir + "Clean-1-1.log"
outFile = BaseDir + "Reference.txt"

outString = []
# NOTE(review): the original also held an unused `open(outFile, 'w')` handle
# here; dropped as dead code — dump.TextWrite is assumed to open outFile for
# writing itself (it does everywhere else in this project).
with open(inputFile, 'r') as inFile:
    lines = inFile.readlines()

flag = False  # True means: current line is the one right after the marker
for line in lines:
    if (flag):
        # Strip the leading path component, keep the first token as the id.
        uttID = line.split('/', 1)[-1]
        uttID = "(" + uttID.split(' ', 1)[0] + ")"
        uttID = uttID.replace('/', '-')
        # Map the _1 suffix to _5 — presumably selects a different SNR/feature
        # variant of the same utterance; TODO confirm against the hyp files.
        uttID = uttID.replace('_1', '_5')
        line = line.split('(', 1)[0] + uttID + '\n'
        outString.append(line)
        flag = False
    if (line.find('INFO: batch.c(762):') != -1):
        flag = True

print(outString)
dump.TextWrite(outString, outFile)