def record(self, button):
    """Record a 5-second test utterance and extract its features.

    Prompts for a speaker name, records ``Test/<name>.wav``, then runs the
    MFCC -> VAD -> CMVN pipeline and writes ``feat/Test/<name>.htk``.
    When the triggering button reads "Live Detection", emotion detection
    is run immediately on the new recording.

    Parameters:
        button: the Qt button that fired this slot; its text decides
            whether live detection follows the recording.
    """
    text, ok = QtWidgets.QInputDialog().getText(
        self, "Emotion Recognition", "Enter Your Name:")
    if ok and text:
        self.speaker = text
        print("Speaker: " + self.speaker)
        wFile = "Test/" + self.speaker + ".wav"
        start_record(self, wFile, 5, self.state)
        # BUG FIX: the original assigned to a local `state`, so the status
        # text was never updated before repaint().
        self.state = "File saved as " + self.speaker
        self.repaint()
        print('File saved as ' + self.speaker + ".wav")
        self.add_files()
        fFile = "feat/Test/" + self.speaker + ".htk"
        try:
            # MFCC feature extraction subroutine
            f, E, fs = mfcc(wFile, self.winlen, self.ovrlen,
                            self.pre_coef, self.nfilter, self.nftt)
            # VAD part
            if self.opts == 1:
                # Energy-threshold based VAD
                f = vad_thr(f, E)
            elif self.opts == 0:
                # Plug in the VAD labels generated by the rVAD matlab tool.
                # NOTE(review): placeholder path — must point at the real
                # label file before this branch is usable.
                l = numpy.loadtxt('..corresponding vad label file')
                if (len(f) - len(l)) == 1:
                    # 1-[end-frame] correction [matlab/python]
                    l = numpy.append(l, l[-1:, ])
                elif (len(f) - len(l)) == -1:
                    l = numpy.delete(l, -1)
                if (len(l) == len(f)) and (len(numpy.argwhere(l == 1)) != 0):
                    idx = numpy.where(l == 1)
                    f = f[idx]
                else:
                    print("mismatch frames between: label and feature files or no voice-frame in VAD")
                    exit()
            # Zero mean / unit variance normalization after VAD
            f = cmvn(f)
            # Write the VAD+normalized features, creating the feature
            # directory on first use.
            if not os.path.exists(os.path.dirname(fFile)):
                os.makedirs(os.path.dirname(fFile))
            writehtk(fFile, f, 0.01)
        except Exception:
            # Best-effort: report the failing file pair but keep the GUI
            # alive.  Narrowed from a bare `except:`, which also swallowed
            # the SystemExit raised by exit() in the mismatch branch above.
            print("Fail1..%s ---> %s\n" % (wFile, fFile))
        if button.text() == "Live Detection":
            self.test_emotion(name=self.speaker)
    else:
        self.state = "Not valid Name"
        self.repaint()
    return
# NOTE(review): orphaned one-line fragment — it duplicates the VAD label
# alignment / CMVN / writehtk pipeline already present in record(), and ends
# with a dangling `except:` and `if(decision=="YES"):` whose enclosing `try:`
# and `def` are not visible in this file view.  It cannot be reformatted or
# rewritten without that missing context; presumably a bad merge/paste —
# TODO confirm against version control and delete or restore the full body.
if (len(f) - len(l)) ==1: #1-[end-frame] correction [matlab/python] l= numpy.append(l,l[-1:,]) elif (len(f) -len(l)) == -1: l=numpy.delete(l,-1) if (len(l) == len(f)) and (len(numpy.argwhere(l==1)) !=0): idx=numpy.where(l==1) f=f[idx] else: print "mismatch frames between: label and feature files or no voice-frame in VAD" exit() # Zero mean unit variance normalize after VAD f=cmvn(f) #write the VAD+normalized features in file if not os.path.exists(os.path.dirname(fFile)): # create director for the feature file os.makedirs(os.path.dirname(fFile)) #print("%s --> %s\n" %(wFile,fFile)) writehtk(fFile, f , 0.01) except: print("Fail ..%s ---> %s\n" %(wFile, fFile)) decision=test_decision(fFile,'DEC3_Tau10.0',ubm_mu,ubm_cov, ubm_w) if(decision=="YES"):
def test_emotion(self, name):
    """Run emotion detection and ask the user to confirm by voice.

    Detects the emotion of the selected (or named) test utterance, plays
    the corresponding prompt, records a 3-second yes/no answer, runs the
    same MFCC -> VAD -> CMVN pipeline on it, and plays the emotion clip
    when the spoken decision is "YES".

    Parameters:
        name: speaker name whose feature file should be tested; when
            falsy, the currently selected item in the file list is used.
    """
    if name:
        value = name
    else:
        # Renamed from `file`, which shadowed the builtin.
        item = self.files.currentItem()
        print(item.text())
        value = item.text().split(".")[0]
    fFile = "feat/Test/" + value + ".htk"
    self.emotion = test(fFile, self.Tardest, self.ubm_mu, self.ubm_cov, self.ubm_w)
    self.result = "Detected as: "
    self.repaint()
    start_play(self.emotion + "_dec.wav")
    self.state = "Say Yes or No"
    self.repaint()
    wFile = "Test_dec.wav"
    start_record(self, wFile, 3, self.state)
    # NOTE(review): yields "feat/Test/Test_dec.wav.htk" (the ".wav" is kept
    # in the feature name) — looks intentional since test_decision reads the
    # same path, but confirm.
    fFile = "feat/Test/" + wFile + ".htk"
    try:
        # MFCC feature extraction subroutine
        f, E, fs = mfcc(wFile, self.winlen, self.ovrlen,
                        self.pre_coef, self.nfilter, self.nftt)
        # VAD part
        if self.opts == 1:
            # Energy-threshold based VAD
            f = vad_thr(f, E)
        elif self.opts == 0:
            # Plug in the VAD labels generated by the rVAD matlab tool.
            # NOTE(review): placeholder path — must point at the real
            # label file before this branch is usable.
            l = numpy.loadtxt('..corresponding vad label file')
            if (len(f) - len(l)) == 1:
                # 1-[end-frame] correction [matlab/python]
                l = numpy.append(l, l[-1:, ])
            elif (len(f) - len(l)) == -1:
                l = numpy.delete(l, -1)
            if (len(l) == len(f)) and (len(numpy.argwhere(l == 1)) != 0):
                idx = numpy.where(l == 1)
                f = f[idx]
            else:
                print("mismatch frames between: label and feature files or no voice-frame in VAD")
                exit()
        # Zero mean / unit variance normalization after VAD
        f = cmvn(f)
        # Write the VAD+normalized features, creating the feature
        # directory on first use.
        if not os.path.exists(os.path.dirname(fFile)):
            os.makedirs(os.path.dirname(fFile))
        writehtk(fFile, f, 0.01)
    except Exception:
        # Best-effort: report the failing file pair but keep the GUI
        # alive.  Narrowed from a bare `except:`, which also swallowed
        # the SystemExit raised by exit() in the mismatch branch above.
        print("Fail ..%s ---> %s\n" % (wFile, fFile))
    decision = test_decision(fFile, 'DEC3_Tau10.0', self.ubm_mu, self.ubm_cov, self.ubm_w)
    if decision == "YES":
        start_play(self.emotion + ".wav")
    self.result = ""
    self.emotion = ""
    self.state = ""
    self.repaint()
    return
# NOTE(review): orphaned one-line fragment — another duplicate of the VAD
# label alignment / CMVN / writehtk pipeline, with an `except:` whose
# opening `try:` is not visible and a trailing `emotion = test(...)` whose
# enclosing `def` lies outside this file view.  Cannot be reformatted or
# rewritten without the missing context; presumably a bad merge/paste —
# TODO confirm against version control and delete or restore the full body.
#[Pluggin the VAD label generated by rVAD matlab] if (len(f) - len(l)) == 1: #1-[end-frame] correction [matlab/python] l = numpy.append(l, l[-1:, ]) elif (len(f) - len(l)) == -1: l = numpy.delete(l, -1) if (len(l) == len(f)) and (len(numpy.argwhere(l == 1)) != 0): idx = numpy.where(l == 1) f = f[idx] else: print "mismatch frames between: label and feature files or no voice-frame in VAD" exit() # Zero mean unit variance normalize after VAD f = cmvn(f) #write the VAD+normalized features in file if not os.path.exists( os.path.dirname(fFile)): # create director for the feature file os.makedirs(os.path.dirname(fFile)) #print("%s --> %s\n" %(wFile,fFile)) writehtk(fFile, f, 0.01) except: print("Fail ..%s ---> %s\n" % (wFile, fFile)) emotion = test(fFile, Tardest, ubm_mu, ubm_cov, ubm_w)