def provideGuidance(self):
    reachDestination = False
    calculatePath = True
    sayPathRoute = True
    speaker = AudioFeedback()

    while not reachDestination:
        # (Re)compute the shortest path from the start node to the destination.
        if calculatePath:
            path = myMap.SSSP(startNode, destNode)
            calculatePath = False

        # Announce the full route once; the path list is stored destination-first,
        # so the nodes are read out in reverse order.
        if sayPathRoute:
            routeSpeechInfo = ''
            for i in range(len(path)):
                # print in reverse order
                print path[len(path) - 1 - i], mapinfo['map'][path[len(path) - 1 - i] - 1]['nodeName']
                if i == 0:
                    firstNode = 'the path route starts from %s ' % mapinfo['map'][path[len(path) - 1 - i] - 1]['nodeName']
                    routeSpeechInfo = routeSpeechInfo + firstNode
                else:
                    nextNode = 'followed by %s ' % mapinfo['map'][path[len(path) - 1 - i] - 1]['nodeName']
                    routeSpeechInfo = routeSpeechInfo + nextNode
            routeSpeechInfo = routeSpeechInfo + '\n'
            speaker.threadedFeedback(routeSpeechInfo)
            sayPathRoute = False

        # Start from the first checkpoint on the route.
        reachCheckPoint = True
        currentCheckPoint = path.pop()
        pos_x = mapinfo['map'][currentCheckPoint - 1]['x']
        pos_y = mapinfo['map'][currentCheckPoint - 1]['y']
        print "pos_x = ", pos_x, " pos_y = ", pos_y

        # Walk the remaining checkpoints one by one.
        while path:
            if reachCheckPoint:
                reachCheckPoint = False
                nextCheckPoint = path.pop()
                if not path:
                    reachDestination = True
                print nextCheckPoint, mapinfo['map'][nextCheckPoint - 1]['nodeName']
            try:
                #start_time = time.time()
                #timing_interval = 1
                #count_time = 0
                while not reachCheckPoint:
                    #time.sleep(start_time + count_time * timing_interval - time.time())
                    reachCheckPoint, pos_x, pos_y, detourCheckPoint = self.provideDirections(
                        nextCheckPoint, currentCheckPoint, pos_x, pos_y, speaker)
                    if reachCheckPoint:
                        currentCheckPoint = nextCheckPoint
                    if detourCheckPoint:
                        # The user has strayed off the route: clear the path and
                        # trigger a recalculation on the next outer iteration.
                        print "detour!"
                        calculatePath = True
                        path[:] = []
                        sayPathRoute = True
                        break
                    #count_time = count_time + 1
            except Exception:
                print "INVALID DISTANCE!"

    if (not path) and reachDestination:
        sayDestinationReached = 'destination reached!\n'
        speaker.threadedFeedback(sayDestinationReached)
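# The guidance loop above assumes self.provideDirections(...) returns a four-tuple
# (reachCheckPoint, pos_x, pos_y, detourCheckPoint).  The real method is defined
# elsewhere in the module; the stub below is only a minimal sketch of that contract,
# with a hypothetical arrival test, so the control flow above is easier to follow.
def provideDirections(self, nextCheckPoint, currentCheckPoint, pos_x, pos_y, speaker):
    node = mapinfo['map'][nextCheckPoint - 1]
    # Hypothetical arrival test: treat the checkpoint as reached when the current
    # position is within one map unit of it (the real method also speaks turn
    # directions through `speaker`).
    reachCheckPoint = abs(node['x'] - pos_x) + abs(node['y'] - pos_y) < 1.0
    detourCheckPoint = False  # set to True when the user has strayed off the route
    return reachCheckPoint, pos_x, pos_y, detourCheckPoint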
# Likely imports for this excerpt (the exact pocketsphinx import path depends on
# how the bindings were built; AudioFeedback is the espeak wrapper from espeak_cls).
import os
from os import path
import wave
import pyaudio
from pocketsphinx import Decoder
from espeak_cls import AudioFeedback


class SpeechRecognition(object):
    # Create a decoder with a certain model
    def __init__(self):
        MODELDIR = "cmusphinx-code/pocketsphinx/model"
        config = Decoder.default_config()
        config.set_string("-hmm", path.join(MODELDIR, "hmm/en_US/hub4wsj_sc_8kadapt"))
        config.set_string("-lm", path.join(MODELDIR, "lm/en_US/numadaptation.dmp"))
        config.set_string("-dict", path.join(MODELDIR, "lm/en_US/numadaptation.dic"))
        self.decoder = Decoder(config)
        self.speechCue = AudioFeedback()

    def Decode(self, wavfile):
        # Decode a recorded WAV file, skipping its 44-byte header.
        wavFile = open(wavfile, "rb")
        wavFile.seek(44)
        self.decoder.decode_raw(wavFile)
        try:
            result = self.decoder.hyp().hypstr
            os.remove(wavfile)
            return result
        except AttributeError:
            print("No speech detected, please try again")
            os.remove(wavfile)

    def speechRecognise(self):
        # Record a short utterance from the microphone, save it to a WAV file
        # and return the decoded text (None if nothing was recognised).
        Chunk = 1024
        Format = pyaudio.paInt16
        Channels = 1
        Rate = 16000
        Duration = 3
        cue = "Start"
        for x in range(1):
            fn = "o" + str(x) + ".wav"
            p = pyaudio.PyAudio()
            self.speechCue.threadedFeedback(cue)
            stream = p.open(format=Format, channels=Channels, rate=Rate,
                            input=True, frames_per_buffer=Chunk)
            print("* recording")
            frames = []
            for i in range(0, int(Rate / Chunk * Duration)):
                data = stream.read(Chunk)
                frames.append(data)
            print("* done recording")
            stream.stop_stream()
            stream.close()
            p.terminate()
            wf = wave.open(fn, "wb")
            wf.setnchannels(Channels)
            wf.setsampwidth(p.get_sample_size(Format))
            wf.setframerate(Rate)
            wf.writeframes(b"".join(frames))
            wf.close()
            wavfile = fn
            Recognised = self.Decode(wavfile)
            print Recognised
            return Recognised
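# Minimal usage sketch for the blocking recogniser above (assumes the model
# files under MODELDIR exist and a microphone is available).  speechRecognise()
# returns the decoded string, or None when no speech was detected.
if __name__ == '__main__':
    recogniser = SpeechRecognition()
    result = recogniser.speechRecognise()
    if result:
        print "You said:", result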
# Queue-based variant of the recogniser: speechRecognise() takes a queue and
# puts the decoded text on it instead of returning it, so it can run on a
# worker thread.  It relies on the same imports as the version above.
class SpeechRecognition(object):
    # Create a decoder with a certain model
    def __init__(self):
        MODELDIR = "cmusphinx-code/pocketsphinx/model"
        config = Decoder.default_config()
        config.set_string('-hmm', path.join(MODELDIR, 'hmm/en_US/hub4wsj_sc_8k_adapt'))
        config.set_string('-lm', path.join(MODELDIR, 'lm/en_US/num_adapt.dmp'))
        config.set_string('-dict', path.join(MODELDIR, 'lm/en_US/num_adapt.dic'))
        self.decoder = Decoder(config)
        self.speechCue = AudioFeedback()

    def Decode(self, wavfile):
        # Decode a recorded WAV file, skipping its 44-byte header.
        wavFile = open(wavfile, 'rb')
        wavFile.seek(44)
        self.decoder.decode_raw(wavFile)
        try:
            result = self.decoder.hyp().hypstr
            os.remove(wavfile)
            return result
        except AttributeError:
            print("No speech detected, please try again")
            os.remove(wavfile)

    def speechRecognise(self, recog_sample):
        # Record a short utterance, decode it and put the result on recog_sample.
        Chunk = 1024
        Format = pyaudio.paInt16
        Channels = 1
        Rate = 16000
        Duration = 4
        cue = 'Start'
        for x in range(1):
            fn = "o" + str(x) + ".wav"
            p = pyaudio.PyAudio()
            self.speechCue.threadedFeedback(cue)
            stream = p.open(format=Format, channels=Channels, rate=Rate,
                            input=True, frames_per_buffer=Chunk)
            print("* recording")
            frames = []
            for i in range(0, int(Rate / Chunk * Duration)):
                data = stream.read(Chunk)
                frames.append(data)
            print("* done recording")
            stream.stop_stream()
            stream.close()
            p.terminate()
            wf = wave.open(fn, 'wb')
            wf.setnchannels(Channels)
            wf.setsampwidth(p.get_sample_size(Format))
            wf.setframerate(Rate)
            wf.writeframes(b''.join(frames))
            wf.close()
            wavfile = fn
            Recognised = self.Decode(wavfile)
            print Recognised
            #return Recognised
            print 'putting the input in queue'
            recog_sample.put(Recognised)
            print 'end of speech recogniser'
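# Hypothetical usage of the queue-based variant: run the recogniser on a worker
# thread and collect the result from the shared queue.
import threading
import Queue  # Python 2 module name; it is `queue` in Python 3

recog_sample = Queue.Queue()
recogniser = SpeechRecognition()
worker = threading.Thread(target=recogniser.speechRecognise, args=(recog_sample,))
worker.start()
worker.join()
print "Recognised:", recog_sample.get()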
import os
import time

from espeak_cls import AudioFeedback

output = AudioFeedback()

string = "Hello Chew Yi Xiu "
output.threadedFeedback(string)
time.sleep(2)

while True:
    string = "Yanqing likes someone"
    output.threadedFeedback(string)
    time.sleep(2)
    string = "Angela has tin tin"
    output.threadedFeedback(string)
    time.sleep(2)
    string = "It's bob the builder"
    output.threadedFeedback(string)
    time.sleep(2)
    string = "Can we fix it?"
    output.threadedFeedback(string)
    time.sleep(2)
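# The espeak_cls module itself is not listed here.  The sketch below shows one
# way an AudioFeedback wrapper with a non-blocking threadedFeedback() method
# could be implemented, assuming the `espeak` command-line tool is installed;
# the project's actual implementation may differ.
import subprocess
import threading

class AudioFeedback(object):
    def threadedFeedback(self, text):
        # Speak on a daemon thread so the caller is never blocked.
        t = threading.Thread(target=self._speak, args=(text,))
        t.daemon = True
        t.start()

    def _speak(self, text):
        subprocess.call(['espeak', text])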