def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                             MUSIC=False):
    """
        Records until a second of silence or times out after 12 seconds

        Returns a list of the matching options or None
    """
    RATE = 16000
    CHUNK = 1024
    LISTEN_TIME = 12

    # check if no threshold provided
    if THRESHOLD is None:
        THRESHOLD = self.fetchThreshold()

    self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

    # prepare recording stream
    stream = self._audio.open(format=pyaudio.paInt16,
                              channels=1,
                              rate=RATE,
                              input=True,
                              frames_per_buffer=CHUNK)

    frames = []
    # increasing the range results in a longer pause after the command
    lastN = [THRESHOLD * 1.2 for i in range(30)]

    for i in range(0, round(RATE / CHUNK * LISTEN_TIME)):
        data = stream.read(CHUNK)
        frames.append(data)
        score = self.getScore(data)

        lastN.pop(0)
        lastN.append(score)

        average = sum(lastN) / float(len(lastN))

        # TODO: 0.8 should not be a MAGIC NUMBER!
        if average < THRESHOLD * 0.8:
            break

    self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

    # save the audio data
    stream.stop_stream()
    stream.close()

    with tempfile.SpooledTemporaryFile(mode='w+b') as f:
        wav_fp = wave.open(f, 'wb')
        wav_fp.setnchannels(1)
        wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
        wav_fp.setframerate(RATE)
        wav_fp.writeframes(b''.join(frames))
        wav_fp.close()
        f.seek(0)
        return self.active_stt_engine.transcribe(f)
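# getScore() is called above but not defined in this snippet. A minimal
# sketch of an RMS-based scorer over 16-bit mono frames (an assumption
# about its behaviour, not necessarily this repo's exact implementation):
import audioop

def getScore(self, data):
    # root-mean-square amplitude of the chunk; width=2 matches paInt16
    rms = audioop.rms(data, 2)
    return rms / 3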
def testJoke(self):
    query = "Tell me a joke."
    inputs = ["Who's there?", "Random response"]
    outputs = self.runConversation(query, inputs, Joke)
    self.assertEqual(len(outputs), 3)
    allJokes = open(jasperpath.data('text', 'JOKES.txt'), 'r').read()
    self.assertTrue(outputs[2] in allJokes)
def _compile_vocabulary(self, phrases):
    prefix = 'jasper'
    tmpdir = tempfile.mkdtemp()

    lexicon_file = jasperpath.data('julius-stt', 'VoxForge.tgz')
    lexicon_archive_member = 'VoxForge/VoxForgeDict'
    profile_path = jasperpath.config('profile.yml')
    if os.path.exists(profile_path):
        with open(profile_path, 'r') as f:
            profile = yaml.safe_load(f)
            if 'julius' in profile:
                if 'lexicon' in profile['julius']:
                    lexicon_file = profile['julius']['lexicon']
                if 'lexicon_archive_member' in profile['julius']:
                    lexicon_archive_member = \
                        profile['julius']['lexicon_archive_member']

    lexicon = JuliusVocabulary.VoxForgeLexicon(lexicon_file,
                                               lexicon_archive_member)

    # Create grammar file
    tmp_grammar_file = os.path.join(tmpdir,
                                    os.extsep.join([prefix, 'grammar']))
    with open(tmp_grammar_file, 'w') as f:
        grammar = self._get_grammar(phrases)
        for definition in grammar.pop('S'):
            f.write("%s: %s\n" % ('S', ' '.join(definition)))
        for name, definitions in grammar.items():
            for definition in definitions:
                f.write("%s: %s\n" % (name, ' '.join(definition)))

    # Create voca file
    tmp_voca_file = os.path.join(tmpdir, os.extsep.join([prefix, 'voca']))
    with open(tmp_voca_file, 'w') as f:
        for category, words in self._get_word_defs(lexicon,
                                                   phrases).items():
            f.write("%% %s\n" % category)
            for word, phoneme in words:
                f.write("%s\t\t\t%s\n" % (word, phoneme))

    # mkdfa.pl
    olddir = os.getcwd()
    os.chdir(tmpdir)
    cmd = ['mkdfa.pl', str(prefix)]
    with tempfile.SpooledTemporaryFile() as out_f:
        subprocess.call(cmd, stdout=out_f, stderr=out_f)
        out_f.seek(0)
        for line in out_f.read().splitlines():
            line = line.strip()
            if line:
                self._logger.debug(line)
    os.chdir(olddir)

    tmp_dfa_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dfa']))
    tmp_dict_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dict']))
    shutil.move(tmp_dfa_file, self.dfa_file)
    shutil.move(tmp_dict_file, self.dict_file)

    shutil.rmtree(tmpdir)
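# A quick illustration of how the grammar dict above is flattened into
# Julius grammar-file lines. The toy grammar below is hypothetical; the
# real one comes from self._get_grammar(phrases):
def write_grammar_lines(grammar):
    lines = []
    # the start symbol S is written first, then the remaining rules
    for definition in grammar.pop('S'):
        lines.append("S: %s" % ' '.join(definition))
    for name, definitions in grammar.items():
        for definition in definitions:
            lines.append("%s: %s" % (name, ' '.join(definition)))
    return lines

# write_grammar_lines({'S': [['NS_B', 'WORD_LOOP', 'NS_E']],
#                      'WORD_LOOP': [['WORD_LOOP', 'WORD'], ['WORD']]})
# -> ['S: NS_B WORD_LOOP NS_E',
#     'WORD_LOOP: WORD_LOOP WORD',
#     'WORD_LOOP: WORD']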
def get_playlists(filename=jasperpath.data('text', 'playlists.txt')):
    """Return the playlists.

    Extracts the playlists from a text file, one playlist name per line.

    Arguments:
        filename {string} -- Filename to extract the playlists from.
    """
    with open(filename, "r") as pl_file:
        playlists = [line.replace('\n', '') for line in pl_file.readlines()]
    return playlists
def handle(text, mic, profile):
    home = expanduser("~")
    WINDOW_NAME = "Face Detective"
    cascPath = jasperpath.data('cascade',
                               'haarcascade_frontalface_default.xml')
    faceCascade = cv2.CascadeClassifier(cascPath)
    video_capture = cv2.VideoCapture(0)
    cv2.namedWindow(WINDOW_NAME, cv2.CV_WINDOW_AUTOSIZE)

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.3,
                                             minNeighbors=5,
                                             minSize=(30, 30),
                                             flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
        print "Number Of Faces Detected {0}!".format(len(faces))

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Display the resulting frame
        cv2.startWindowThread()
        cv2.imshow(WINDOW_NAME, frame)

        k = cv2.waitKey(1)
        if k == 27:  # wait for ESC key to exit
            break
        elif len(faces) > 0:  # if any faces were detected, save an image
            photoname = home + "/out/%s.png" % datetime.now().strftime(
                "%Y%m%d-%H%M%S")
            print photoname
            cv2.imwrite(photoname, frame)
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.waitKey(1)
    cv2.destroyAllWindows()
    cv2.waitKey(1)
    mic.say("Face detected")
def handle(text, mic, profile):
    home = expanduser("~")
    # WINDOW_NAME = "Face Detective"
    cascPath = jasperpath.data('cascade',
                               'haarcascade_frontalface_default.xml')
    faceCascade = cv2.CascadeClassifier(cascPath)
    video_capture = cv2.VideoCapture(0)
    # cv2.namedWindow(WINDOW_NAME, cv2.CV_WINDOW_AUTOSIZE)

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.3,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        print "Number Of Faces Detected {0}!".format(len(faces))

        # Draw a rectangle around the faces
        # for (x, y, w, h) in faces:
        #     cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

        # Display the resulting frame
        # cv2.startWindowThread()
        # cv2.imshow(WINDOW_NAME, frame)

        # k = cv2.waitKey(1)
        # if k == 27:  # wait for ESC key to exit
        #     break
        if len(faces) > 0:  # if any faces were detected, save an image
            photoname = home + "/out/%s.png" % datetime.now().strftime(
                "%Y%m%d-%H%M%S")
            print photoname
            cv2.imwrite(photoname, frame)
            break

    # When everything is done, release the capture
    video_capture.release()
    # cv2.waitKey(1)
    # cv2.destroyAllWindows()
    # cv2.waitKey(1)
    mic.say("Face detected")
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases.
    """
    phrases = []

    with open(jasperpath.data('keyword_phrases'), mode="r") as f:
        for line in f:
            phrase = line.strip()
            if phrase:
                phrases.append(phrase)

    return phrases
def getRandomJoke(filename=jasperpath.data('text', 'JOKES.txt')):
    jokes = []
    start = ""
    end = ""
    with open(filename, "r") as jokeFile:
        for line in jokeFile.readlines():
            line = line.replace("\n", "")

            if start == "":
                start = line
                continue
            if end == "":
                end = line
                continue

            # third line of each triplet acts as a separator: flush the pair
            jokes.append((start, end))
            start = ""
            end = ""

    # flush the final pair, which has no trailing separator line
    jokes.append((start, end))

    joke = random.choice(jokes)
    return joke
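# The parser above expects the jokes file to hold a setup line, a
# punchline line, and then a separator line that is read and discarded.
# A compact sketch of the same pairing logic over an in-memory list,
# handy for seeing the expected file format:
def pair_lines(lines):
    pairs, pending = [], []
    for line in lines:
        if len(pending) < 2:
            pending.append(line)
        else:
            # third line of each triplet is the separator; flush the pair
            pairs.append(tuple(pending))
            pending = []
    if pending:
        pairs.append(tuple(pending))
    return pairs

# pair_lines(["Knock knock.", "Who's there?", "",
#             "Why did the chicken cross the road?",
#             "To get to the other side."])
# -> [("Knock knock.", "Who's there?"),
#     ("Why did the chicken cross the road?", "To get to the other side.")]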
def getRandomMusic(self, filename=jasperpath.data('text', 'YOUTUBE.txt')):
    musics = []
    start = ""
    end = ""
    with open(filename, "r") as youtubeFile:
        for line in youtubeFile.readlines():
            line = line.replace("\n", "")

            if start == "":
                start = line
                continue
            if end == "":
                end = line
                continue

            # same triplet format as JOKES.txt: flush the pair, drop the
            # separator line
            musics.append((start, end))
            start = ""
            end = ""

    musics.append((start, end))

    music = random.choice(musics)
    return music
def getNotifications(mic, latestRetweet, latestMention, latestDirectMessage,
                     api):
    try:
        latestRetweets = []
        latestRetweetsID = []
        latestDirectMessages = []
        latestDirectMessagesID = []
        latestMentions = []
        latestMentionsID = []
        mentions = api.mentions_timeline()
        retweets = api.retweets_of_me()
        directMessages = api.direct_messages()

        for mention in mentions:
            if mention.id > latestMention:
                latestMentions.append(mention)
                latestMentionsID.append(mention.id)
        for retweet in retweets:
            if retweet.id > latestRetweet:
                latestRetweets.append(retweet)
                latestRetweetsID.append(retweet.id)
        for directMessage in directMessages:
            if directMessage.id > latestDirectMessage:
                latestDirectMessages.append(directMessage)
                latestDirectMessagesID.append(directMessage.id)

        if len(latestRetweets) > 0:
            mic.say("Latest Retweets are")
            for retweetFinal in latestRetweets:
                mic.say(retweetFinal.text + " by " +
                        retweetFinal.user.screen_name)
            latestRetweetsID.sort()
            latestRetweet = latestRetweetsID[-1]
            retweetsIDFile = open(
                jasperpath.data('twitter', 'retweetsIDFile.txt'), 'w')
            retweetsIDFile.write(str(latestRetweet))
            retweetsIDFile.close()
        else:
            mic.say("You have no retweets")

        if len(latestMentions) > 0:
            mic.say("Latest Mentions are")
            for mentionFinal in latestMentions:
                mic.say(mentionFinal.text + " from " +
                        mentionFinal.user.screen_name)
            latestMentionsID.sort()
            latestMention = latestMentionsID[-1]
            mentionIDFile = open(
                jasperpath.data('twitter', 'mentionIDFile.txt'), 'w')
            mentionIDFile.write(str(latestMention))
            mentionIDFile.close()
        else:
            mic.say("You have no mentions")

        if len(latestDirectMessages) > 0:
            mic.say("Latest Direct Messages are")
            for directMessageFinal in latestDirectMessages:
                mic.say(directMessageFinal.text + " from " +
                        directMessageFinal.user.screen_name)
            latestDirectMessagesID.sort()
            latestDirectMessage = latestDirectMessagesID[-1]
            directMessageIDFile = open(
                jasperpath.data('twitter', 'directMessageID.txt'), 'w')
            directMessageIDFile.write(str(latestDirectMessage))
            directMessageIDFile.close()
        else:
            mic.say("You have no Direct Messages")
    except TweepError as e:
        error = e.reason[0]['message']
        code = e.reason[0]['code']
        mic.say(error + ' Failed to get Notifications.')
        return
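# The three "latest ID" files above share one read/update pattern. A
# hedged sketch of that pattern as a standalone helper (save_latest_id
# is hypothetical and not part of this module):
def save_latest_id(basename, ids):
    # persist the newest ID so the next run only reports newer items
    ids.sort()
    with open(jasperpath.data('twitter', basename), 'w') as f:
        f.write(str(ids[-1]))
    return ids[-1]

# e.g. latestRetweet = save_latest_id('retweetsIDFile.txt', latestRetweetsID)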
def handle(text, mic, profile):
    consumer_key = profile['twitter']["TW_CONSUMER_KEY"]
    consumer_secret = profile['twitter']["TW_CONSUMER_SECRET"]
    access_token = profile['twitter']["TW_ACCESS_TOKEN"]
    access_token_secret = profile['twitter']["TW_ACCESS_TOKEN_SECRET"]
    woeid = int(profile['twitter']["WOEID"])

    try:
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = API(auth)
        myTwitterID = api.me().id
    except Exception as e:
        print e
        mic.say("Connection Error. Twitter service is offline.")
        return

    directMessages = api.direct_messages(count=1)
    latestRetweet = 0
    latestMention = 0
    latestDirectMessage = 0

    try:
        directMessageIDFile = open(
            jasperpath.data('twitter', 'directMessageID.txt'), 'r')
        directMessageID = directMessageIDFile.readline()
        latestDirectMessage = int(directMessageID)
        directMessageIDFile.close()
    except IOError:
        if len(directMessages) > 0:
            for directMessage in directMessages:
                latestDirectMessage = directMessage.id
            directMessageIDFile = open(
                jasperpath.data('twitter', 'directMessageID.txt'), 'w')
            directMessageIDFile.write(str(latestDirectMessage))
            directMessageIDFile.close()

    mentions = api.mentions_timeline(count=1)
    try:
        mentionIDFile = open(jasperpath.data('twitter', 'mentionIDFile.txt'),
                             'r')
        latestMentionID = mentionIDFile.readline()
        latestMention = int(latestMentionID)
        mentionIDFile.close()
    except IOError:
        if len(mentions) > 0:
            mentionIDFile = open(
                jasperpath.data('twitter', 'mentionIDFile.txt'), 'w')
            for mention in mentions:
                latestMention = mention.id
                mentionIDFile.write(str(latestMention))
            mentionIDFile.close()

    retweets = api.retweets_of_me(count=1)
    try:
        retweetsIDFile = open(jasperpath.data('twitter',
                                              'retweetsIDFile.txt'), 'r')
        retweetsID = retweetsIDFile.readline()
        latestRetweet = int(retweetsID)
        retweetsIDFile.close()
    except IOError:
        if len(retweets) > 0:
            retweetsIDFile = open(
                jasperpath.data('twitter', 'retweetsIDFile.txt'), 'w')
            for retweet in retweets:
                latestRetweet = retweet.id
                retweetsIDFile.write(str(latestRetweet))
            retweetsIDFile.close()

    if bool(re.search(r'\bTWEET\b', text, re.IGNORECASE)):
        sendTweet(mic, api)
    if bool(re.search(r'\bNOTIFICATIONS\b', text, re.IGNORECASE)):
        getNotifications(mic, latestRetweet, latestMention,
                         latestDirectMessage, api)
    if bool(re.search(r'\bTRENDING\b', text, re.IGNORECASE)):
        getWhatsTrending(mic, api, woeid)
    if bool(re.search(r'\bTWEETS\b', text, re.IGNORECASE)):
        getPublicTweets(mic, api)
def listenVoice(self, ACTIVE=True, PERSONA=''):
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    CHUNK_DURATION_MS = 30  # supports 10, 20 and 30 (ms)
    PADDING_DURATION_MS = 1500  # 1.5 s endpoint judgement window
    CHUNK_SIZE = int(RATE * CHUNK_DURATION_MS / 1000)  # chunk to read
    NUM_PADDING_CHUNKS = int(PADDING_DURATION_MS / CHUNK_DURATION_MS)
    NUM_WINDOW_CHUNKS = int(400 / CHUNK_DURATION_MS)  # 400ms/30ms
    NUM_WINDOW_CHUNKS_END = NUM_WINDOW_CHUNKS * 2

    global leaveRecord, gotOneSentence

    if leaveRecord:
        if os.path.exists(jasperpath.tjbot('shine.led.js')):
            os.system("node " + jasperpath.tjbot('shine.led.js') + " off")
        return None

    # prepare recording stream
    stream = self._audio.open(format=FORMAT,
                              channels=CHANNELS,
                              rate=RATE,
                              input=True,
                              start=False,
                              frames_per_buffer=CHUNK_SIZE)

    vad = webrtcvad.Vad(1)
    signal.signal(signal.SIGINT, handle_int)

    if not ACTIVE:
        if os.path.exists(jasperpath.tjbot('shine.led.js')):
            os.system("node " + jasperpath.tjbot('shine.led.js') + " white")
        stream.start_stream()

    raw_data = array('h')
    start_point = 0
    end_point = 0

    # loop for passive listening
    while not leaveRecord:
        gotOneSentence = False
        if ACTIVE:
            self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))
        else:
            self.passive_stt_engine.utt_start()
            # Process buffered voice data
            if end_point > 0:
                raw_data.reverse()
                for index in range(end_point - CHUNK_SIZE * 20):
                    raw_data.pop()
                raw_data.reverse()
                print("* process buffered voice data...")
                transcribed = self.passive_stt_engine.utt_transcribe(
                    raw_data)
                # if voice trigger is included in results, return directly
                if any(PERSONA in phrase for phrase in transcribed):
                    if os.path.exists(jasperpath.tjbot('shine.led.js')):
                        os.system("node " +
                                  jasperpath.tjbot('shine.led.js') + " off")
                    self.passive_stt_engine.utt_end()
                    stream.stop_stream()
                    stream.close()
                    return transcribed

        ring_buffer = collections.deque(maxlen=NUM_PADDING_CHUNKS)
        triggered = False
        ring_buffer_flags = [0] * NUM_WINDOW_CHUNKS
        ring_buffer_index = 0
        ring_buffer_flags_end = [0] * NUM_WINDOW_CHUNKS_END
        ring_buffer_index_end = 0
        index = 0
        start_point = 0
        end_point = 0
        StartTime = time.time()
        print("* recording: ")
        raw_data = array('h')

        if ACTIVE:
            if os.path.exists(jasperpath.tjbot('shine.led.js')):
                os.system("node " + jasperpath.tjbot('shine.led.js') +
                          " blue")
            stream.start_stream()

        # stop recording when EOS is detected
        while not gotOneSentence and not leaveRecord:
            chunk = stream.read(CHUNK_SIZE, exception_on_overflow=False)

            if not ACTIVE:
                transcribed = self.passive_stt_engine.utt_transcribe(chunk)
                if any(PERSONA in phrase for phrase in transcribed):
                    triggered = False
                    gotOneSentence = True
                    end_point = index
                    break

            # add WangS
            raw_data.extend(array('h', chunk))
            index += CHUNK_SIZE
            TimeUse = time.time() - StartTime

            active = vad.is_speech(chunk, RATE)
            if ACTIVE:
                sys.stdout.write('I' if active else '_')
            ring_buffer_flags[ring_buffer_index] = 1 if active else 0
            ring_buffer_index += 1
            ring_buffer_index %= NUM_WINDOW_CHUNKS
            ring_buffer_flags_end[ring_buffer_index_end] = \
                1 if active else 0
            ring_buffer_index_end += 1
            ring_buffer_index_end %= NUM_WINDOW_CHUNKS_END

            # start point detection
            if not triggered:
                ring_buffer.append(chunk)
                num_voiced = sum(ring_buffer_flags)
                if num_voiced > 0.8 * NUM_WINDOW_CHUNKS:
                    if ACTIVE:
                        sys.stdout.write('[OPEN]')
                    triggered = True
                    start_point = index - CHUNK_SIZE * 20  # start point
                    # voiced_frames.extend(ring_buffer)
                    ring_buffer.clear()
            # end point detection
            else:
                # voiced_frames.append(chunk)
                ring_buffer.append(chunk)
                num_unvoiced = NUM_WINDOW_CHUNKS_END \
                    - sum(ring_buffer_flags_end)
                if num_unvoiced > 0.90 * NUM_WINDOW_CHUNKS_END \
                        or TimeUse > 10:
                    if ACTIVE:
                        sys.stdout.write('[CLOSE]')
                    triggered = False
                    gotOneSentence = True
                    end_point = index

        sys.stdout.flush()
        sys.stdout.write('\n')

        # result processing for passive and active listening respectively
        print("* done recording")
        if leaveRecord:
            if os.path.exists(jasperpath.tjbot('shine.led.js')):
                os.system("node " + jasperpath.tjbot('shine.led.js') +
                          " off")
            break

        if ACTIVE:
            if os.path.exists(jasperpath.tjbot('shine.led.js')):
                os.system("node " + jasperpath.tjbot('shine.led.js') +
                          " off")
            self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

            # write to file
            raw_data.reverse()
            for index in range(start_point):
                raw_data.pop()
            raw_data.reverse()
            raw_data = normalize(raw_data)

            stream.stop_stream()
            stream.close()

            # save the audio data
            with tempfile.SpooledTemporaryFile(mode='w+b') as f:
                wav_fp = wave.open(f, 'wb')
                wav_fp.setnchannels(1)
                wav_fp.setsampwidth(
                    pyaudio.get_sample_size(pyaudio.paInt16))
                wav_fp.setframerate(RATE)
                wav_fp.writeframes(raw_data)
                wav_fp.close()
                f.seek(0)
                return self.active_stt_engine.transcribe(f)
        else:
            # read one more chunk at EOS
            chunk = stream.read(CHUNK_SIZE, exception_on_overflow=False)
            transcribed = self.passive_stt_engine.utt_transcribe(chunk)
            self.passive_stt_engine.utt_end()
            # if voice trigger is included in results, return directly
            if any(PERSONA in phrase for phrase in transcribed):
                if os.path.exists(jasperpath.tjbot('shine.led.js')):
                    os.system("node " + jasperpath.tjbot('shine.led.js') +
                              " off")
                stream.stop_stream()
                stream.close()
                return transcribed
            # if voice trigger is not included in results, start another
            # cycle

    # exit
    if ACTIVE:
        stream.close()
    return None
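# The endpointing in listenVoice() reduces to a few constants; checking
# the arithmetic (RATE is assumed to be the module-level 16000 used by
# the other recording code in this collection):
RATE = 16000
CHUNK_DURATION_MS = 30
CHUNK_SIZE = int(RATE * CHUNK_DURATION_MS / 1000)  # 480 samples per chunk
NUM_WINDOW_CHUNKS = int(400 / CHUNK_DURATION_MS)   # 13 chunks ~ 400 ms
NUM_WINDOW_CHUNKS_END = NUM_WINDOW_CHUNKS * 2      # 26 chunks ~ 800 ms
# Speech "opens" once more than 80% of the ~400 ms window is voiced, and
# "closes" once more than 90% of the ~800 ms window is unvoiced (or
# after the 10 s TimeUse cutoff).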
def setUp(self):
    self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
    self.time_clip = jasperpath.data('audio', 'time.wav')

    self.passive_stt_engine = stt.PocketSphinxSTT.get_passive_instance()
    self.active_stt_engine = stt.PocketSphinxSTT.get_active_instance()
def getRandomJoke():
    filename = jasperpath.data("chuck.txt")
    with open(filename, "r") as f:
        return random.choice(f.readlines()).strip()
def getCommands():
    dir = jasperpath.data('wts-chor')
    cmds = []
    for song in os.listdir(dir):
        # escape spaces and parentheses so the path survives the shell
        escaped = song.replace(" ", "\\ ") \
                      .replace("(", "\\(") \
                      .replace(")", "\\)")
        cmds.append("mpg321 " + os.path.join(dir, escaped))
    return cmds
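# The manual escaping above only covers spaces and parentheses. A sketch
# of the same command list using pipes.quote (shlex.quote on Python 3),
# which escapes any shell metacharacter; getCommandsQuoted is an
# illustrative alternative, not the module's current behaviour:
import os
import pipes

def getCommandsQuoted(dir):
    return ["mpg321 " + pipes.quote(os.path.join(dir, song))
            for song in os.listdir(dir)]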
def setUp(self):
    self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
    self.weather_zh_clip = jasperpath.data('audio', 'weather_zh.wav')

    self.passive_stt_engine = stt.PocketSphinxSTT.get_passive_instance()
    self.active_stt_engine = stt.BaiduSTT.get_active_instance()
def listenVoice(self, ACTIVE=True):
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    CHUNK_DURATION_MS = 30  # supports 10, 20 and 30 (ms)
    PADDING_DURATION_MS = 1500  # 1.5 s endpoint judgement window
    CHUNK_SIZE = int(RATE * CHUNK_DURATION_MS / 1000)  # chunk to read
    NUM_PADDING_CHUNKS = int(PADDING_DURATION_MS / CHUNK_DURATION_MS)
    NUM_WINDOW_CHUNKS = int(400 / CHUNK_DURATION_MS)  # 400ms/30ms
    NUM_WINDOW_CHUNKS_END = NUM_WINDOW_CHUNKS * 2

    global leaveRecord, gotOneSentence

    vad = webrtcvad.Vad(1)

    if ACTIVE:
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

    # prepare recording stream
    stream = self._audio.open(format=FORMAT,
                              channels=CHANNELS,
                              rate=RATE,
                              input=True,
                              start=False,
                              frames_per_buffer=CHUNK_SIZE)

    signal.signal(signal.SIGINT, handle_int)

    while not leaveRecord:
        ring_buffer = collections.deque(maxlen=NUM_PADDING_CHUNKS)
        triggered = False
        ring_buffer_flags = [0] * NUM_WINDOW_CHUNKS
        ring_buffer_index = 0
        ring_buffer_flags_end = [0] * NUM_WINDOW_CHUNKS_END
        ring_buffer_index_end = 0

        raw_data = array('h')
        index = 0
        start_point = 0
        StartTime = time.time()
        print("* recording: ")
        stream.start_stream()

        while not gotOneSentence and not leaveRecord:
            chunk = stream.read(CHUNK_SIZE)
            # add WangS
            raw_data.extend(array('h', chunk))
            index += CHUNK_SIZE
            TimeUse = time.time() - StartTime

            active = vad.is_speech(chunk, RATE)
            if ACTIVE:
                sys.stdout.write('I' if active else '_')
            ring_buffer_flags[ring_buffer_index] = 1 if active else 0
            ring_buffer_index += 1
            ring_buffer_index %= NUM_WINDOW_CHUNKS
            ring_buffer_flags_end[ring_buffer_index_end] = \
                1 if active else 0
            ring_buffer_index_end += 1
            ring_buffer_index_end %= NUM_WINDOW_CHUNKS_END

            # start point detection
            if not triggered:
                ring_buffer.append(chunk)
                num_voiced = sum(ring_buffer_flags)
                if num_voiced > 0.8 * NUM_WINDOW_CHUNKS:
                    sys.stdout.write('[OPEN]')
                    triggered = True
                    start_point = index - CHUNK_SIZE * 20  # start point
                    # voiced_frames.extend(ring_buffer)
                    ring_buffer.clear()
            # end point detection
            else:
                # voiced_frames.append(chunk)
                ring_buffer.append(chunk)
                num_unvoiced = NUM_WINDOW_CHUNKS_END \
                    - sum(ring_buffer_flags_end)
                if num_unvoiced > 0.90 * NUM_WINDOW_CHUNKS_END \
                        or TimeUse > 10:
                    sys.stdout.write('[CLOSE]')
                    triggered = False
                    gotOneSentence = True

        sys.stdout.flush()
        sys.stdout.write('\n')
        print("* done recording")
        gotOneSentence = False

        if ACTIVE:
            self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        # write to file
        raw_data.reverse()
        for index in range(start_point):
            raw_data.pop()
        raw_data.reverse()
        raw_data = normalize(raw_data)

        stream.stop_stream()
        stream.close()

        # save the audio data
        with tempfile.SpooledTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(raw_data)
            wav_fp.close()
            f.seek(0)
            if ACTIVE:
                return self.active_stt_engine.transcribe(f)
            else:
                return self.passive_stt_engine.transcribe(f)

    leaveRecord = True
    # exit
    stream.close()
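# normalize() is called above but not defined in this snippet. A minimal
# sketch matching the widely copied webrtcvad recording example, which
# scales the 16-bit samples to a fixed peak volume (an assumption about
# this repo's version, not a confirmed implementation):
from array import array

def normalize(snd_data, maximum=32767):
    # scale every sample by one factor so the loudest sample hits the peak
    times = float(maximum) / max(abs(i) for i in snd_data)
    r = array('h')
    for i in snd_data:
        r.append(int(i * times))
    return r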
def getRandomSong(format):
    dir = jasperpath.data('daniel', format)
    return os.path.join(dir, random.choice(os.listdir(dir)))