def analyze_with_plot(filename):
    '''Analyze a single voice file and plot its volume, pitch and
    low-band (below 250 Hz) energy, marking detected speech segments.

    Example: python analyze 'myfile'

    Top subplot: volume with red (start) / blue (end) markers per
    detected speech segment. Middle: pitch. Bottom: energyBelow250.
    '''
    m = Voice()
    m.analyze(filename)
    # Time axis in seconds for the per-frame series.
    t = np.linspace(0, len(m.y) * 1.0 / m.fs, len(m.frames))
    # Seconds per frame: converts frame indices into seconds.
    mul = len(m.y) * 1.0 / m.fs / len(m.frames)
    pl.subplot(311)
    pl.plot(t, m.volume)
    print(m.speech_segment)
    print(mul)
    vmax = max(m.volume)  # hoisted: was recomputed on every loop iteration
    for s in m.speech_segment:
        print('%s %s' % (s[0] * mul, s[1] * mul))
        # Vertical marker lines: red = segment start, blue = segment end.
        pl.plot([s[0] * mul, s[0] * mul], [0, vmax], color='red')
        pl.plot([s[1] * mul, s[1] * mul], [0, vmax], color='blue')
    pl.subplot(312)
    pl.plot(t, m.pitch)
    pl.subplot(313)
    pl.plot(t, m.energyBelow250)
    pl.show()
    # NOTE: a stray trailing ''' followed pl.show() in the original; it
    # opened an unterminated string literal and has been removed.
def analyze(filename):
    '''Analyze a voice file and print its mood prediction.

    Example: python analyze 'myfile'

    *filename* should be a full path. Prediction uses the SVM model and
    scale files generated by the train.py script; their locations are
    taken from configuration, so only the file (or directory) to predict
    needs to be supplied.

    Prints detailed information and the prediction result on success,
    or a failure message on any error.
    '''
    m = Voice()
    try:
        m.analyze(filename)
        m.calFeatures()
        m.mood_predict()
        m.callInfo()
        m.report()
        print('Analyze success')
    except Exception as e:
        # Top-level CLI boundary: report the failure rather than crash.
        print('Analyze failed:' + repr(e))
def train(diretory=config.root_dir):
    '''Train an SVM on all files under the 'luyin' directory.

    The directory holds two subdirectories, 'normal' and 'abnormal'
    (note: parameter name 'diretory' [sic] is kept for interface
    compatibility; it is currently unused — paths come from config).

    Generates three files:
      (1) scale          : scaling parameters ('svm-scale -s' output)
      (2) dataset_scaled : the scaled training dataset
      (3) model          : the trained SVM model
    '''
    dataset = config.dataset
    dataset_scaled = config.dataset_scaled
    scale = config.scale
    # Normal recordings are labelled +1, abnormal ones -1.
    _learn_directory(config.normal, dataset, '+1')
    _learn_directory(config.abnormal, dataset, '-1')
    # NOTE(review): shell-string command; safe only because all paths
    # come from trusted config, not user input.
    os.system('svm-scale -s %s %s > %s' % (scale, dataset, dataset_scaled))
    y, x = svm_read_problem(dataset_scaled)
    m = svm_train(y, x)
    svm_save_model(config.model, m)


def _learn_directory(directory, dataset, label):
    '''Analyze every file in *directory* and append its features to
    *dataset* under *label* ('+1' or '-1').

    *directory* is expected to end with a path separator, matching the
    original string-concatenation convention used by config paths.
    '''
    for f in os.listdir(directory):
        path = directory + f
        voice = Voice()
        voice.analyze(path)
        voice.calFeatures()
        voice.learn(dataset, label)
def train(diretory=config.root_dir):
    '''Train an SVM over every recording in the 'luyin' directory.

    NOTE(review): this duplicates an earlier `train` definition in this
    file; being defined later, this is the binding that takes effect.

    The directory contains two subdirectories, 'normal' and 'abnormal'.
    Running this produces three files:
      (1) scale          : scaling parameters ('svm-scale -s' output)
      (2) dataset_scaled : the scaled dataset used for training
      (3) model          : the trained SVM model file
    '''
    dataset = config.dataset
    dataset_scaled = config.dataset_scaled
    scale = config.scale
    # Normal samples first (label +1), then abnormal (label -1) —
    # same processing order as the original back-to-back loops.
    for label, folder in (('+1', config.normal), ('-1', config.abnormal)):
        for name in os.listdir(folder):
            voice = Voice()
            voice.analyze(folder + name)
            voice.calFeatures()
            voice.learn(dataset, label)
    os.system('svm-scale -s %s %s > %s' % (scale, dataset, dataset_scaled))
    labels, features = svm_read_problem(dataset_scaled)
    svm_save_model(config.model, svm_train(labels, features))
from voice import Voice
import pylab as pl

# Quick visual check of a single sample recording: analyze it and plot
# volume, zero-crossing rate and autocorrelation in three subplots.
m = Voice()
m.analyze('../luyin/moni-2.wav')
print(m.speech_segment)
pl.subplot(311)
pl.plot(m.volume)
pl.subplot(312)
pl.plot(m.zcr)
pl.subplot(313)
pl.plot(m.acf)
pl.show()
# Disabled follow-up steps (originally parked inside a bare '''...'''
# string literal, which had no runtime effect; kept here as comments):
# m.calFeatures()
# m.mood_predict()
# m.callInfo()
# m.report()