Example #1
def handle(self, text, mic, profile):
    GPIO.output(12, 1)

    randomfile = random.choice(os.listdir(jasperpath.data("audio/com/")))
    rfile = jasperpath.data('audio/com/' + randomfile)
    mic.speaker.play(rfile)
    GPIO.output(12, 0)
Example #2
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds

            Returns a list of the matching options or None
        """

        RATE = 16000
        CHUNK = 1024
        LISTEN_TIME = 12

        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # prepare recording stream
        stream = self._audio.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        frames = []
        # increasing the range # results in longer pause after command generation
        lastN = [THRESHOLD * 1.2 for i in range(30)]

        for i in range(0, RATE / CHUNK * LISTEN_TIME):

            data = stream.read(CHUNK)
            frames.append(data)
            score = self.getScore(data)

            lastN.pop(0)
            lastN.append(score)

            average = sum(lastN) / float(len(lastN))

            # TODO: 0.8 should not be a MAGIC NUMBER!
            if average < THRESHOLD * 0.8:
                break

        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        # save the audio data
        stream.stop_stream()
        stream.close()

        with tempfile.SpooledTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(''.join(frames))
            wav_fp.close()
            f.seek(0)
            mode = TranscriptionMode.MUSIC if MUSIC else TranscriptionMode.NORMAL
            transcribed = self.active_stt_engine.transcribe(f, mode=mode)
        return transcribed
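The loop above gates on getScore, which is not included in this snippet. A minimal sketch, assuming it returns the RMS energy of one raw 16-bit chunk via the standard-library audioop module (the /3 scaling is a guess, not confirmed by the snippet):

    def getScore(self, data):
        # RMS amplitude of one chunk of signed 16-bit mono samples;
        # louder input yields a higher score (sketch, assumptions above)
        import audioop
        rms = audioop.rms(data, 2)  # width: 2 bytes per sample
        return rms / 3              # assumed scaling factor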
Example #3
    def getpic():
        global ball
        # pick a random image from the data directory and scale it
        randomfile = random.choice(os.listdir(jasperpath.data("img/")))
        rfile = jasperpath.data('img/' + randomfile)
        ball = pygame.image.load(rfile)
        ball = pygame.transform.smoothscale(ball, imsize)
Example #4
def handle(self, text, mic, profile):
    GPIO.output(12, 1)
    randomfile = random.choice(os.listdir(jasperpath.data("audio/bender/")))
    rfile = jasperpath.data('audio/bender/' + randomfile)
    self.blitimg(image, size, black, x, y)
    mic.speaker.play(rfile)
    GPIO.output(12, 0)
Example #5
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                                 MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds

            Returns a list of the matching options or None
        """

        RATE = 16000
        CHUNK = 1024
        LISTEN_TIME = 12

        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # prepare recording stream
        stream = self._audio.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        frames = []
        # increasing the range # results in longer pause after command
        # generation
        lastN = [THRESHOLD * 1.2 for i in range(30)]

        for i in range(0, int(RATE / CHUNK * LISTEN_TIME)):

            data = stream.read(CHUNK)
            frames.append(data)
            score = self.getScore(data)

            lastN.pop(0)
            lastN.append(score)

            average = sum(lastN) / float(len(lastN))

            # TODO: 0.8 should not be a MAGIC NUMBER!
            if average < THRESHOLD * 0.8:
                break

        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        # save the audio data
        stream.stop_stream()
        stream.close()

        with tempfile.SpooledTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(b''.join(frames))
            wav_fp.close()
            f.seek(0)
            return self.active_stt_engine.transcribe(f)
Example #6
    def setUp(self):
        self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
        self.time_clip = jasperpath.data('audio', 'time.wav')

        from stt import PocketSphinxSTT
        self.passive_stt_engine = PocketSphinxSTT.get_passive_instance()
        self.active_stt_engine = PocketSphinxSTT.get_active_instance()
Example #7
    def setUp(self):
        self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
        self.time_clip = jasperpath.data('audio', 'time.wav')

        from stt import PocketSphinxSTT
        self.passive_stt_engine = PocketSphinxSTT.get_passive_instance()
        self.active_stt_engine = PocketSphinxSTT.get_active_instance()
Example #8
def handle(self, text, mic, profile):
    GPIO.output(12, 1)

    randomfile = random.choice(os.listdir(jasperpath.data("audio/danger/")))
    rfile = jasperpath.data('audio/danger/' + randomfile)
    mic.speaker.play(rfile)
    GPIO.output(12, 0)
Example #9
    def __init__(
            self,
            lmd=jasperpath.config("languagemodel.lm"),
            dictd=jasperpath.config("dictionary.dic"),
            lmd_persona=jasperpath.data("languagemodel_persona.lm"),
            dictd_persona=jasperpath.data("dictionary_persona.dic"),
            lmd_music=None,
            dictd_music=None,
            hmm_dir="/usr/local/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k"
    ):
        """
        Initiates the pocketsphinx instance.

        Arguments:
        lmd -- filename of the full language model
        dictd -- filename of the full dictionary (.dic)
        lmd_persona -- filename of the 'Persona' language model (containing,
                       e.g., 'Jasper')
        dictd_persona -- filename of the 'Persona' dictionary (.dic)
        """

        self._logger = logging.getLogger(__name__)

        # quirky bug where the first import sometimes fails, so retry once
        try:
            import pocketsphinx as ps
        except ImportError:
            import pocketsphinx as ps

        self._logfiles = {}
        with tempfile.NamedTemporaryFile(prefix='psdecoder_music_',
                                         suffix='.log',
                                         delete=False) as f:
            self._logfiles[TranscriptionMode.MUSIC] = f.name
        with tempfile.NamedTemporaryFile(prefix='psdecoder_keyword_',
                                         suffix='.log',
                                         delete=False) as f:
            self._logfiles[TranscriptionMode.KEYWORD] = f.name
        with tempfile.NamedTemporaryFile(prefix='psdecoder_normal_',
                                         suffix='.log',
                                         delete=False) as f:
            self._logfiles[TranscriptionMode.NORMAL] = f.name

        self._decoders = {}
        if lmd_music and dictd_music:
            self._decoders[TranscriptionMode.MUSIC] = ps.Decoder(
                hmm=hmm_dir,
                lm=lmd_music,
                dict=dictd_music,
                logfn=self._logfiles[TranscriptionMode.MUSIC])
        self._decoders[TranscriptionMode.KEYWORD] = ps.Decoder(
            hmm=hmm_dir,
            lm=lmd_persona,
            dict=dictd_persona,
            logfn=self._logfiles[TranscriptionMode.KEYWORD])
        self._decoders[TranscriptionMode.NORMAL] = ps.Decoder(
            hmm=hmm_dir,
            lm=lmd,
            dict=dictd,
            logfn=self._logfiles[TranscriptionMode.NORMAL])
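For context, a transcribe method consuming these decoders could look like the sketch below. It assumes the newer pocketsphinx-python API (start_utt/process_raw/end_utt/hyp) and a standard 44-byte WAV header; the example itself shows neither:

    def transcribe(self, fp, mode=TranscriptionMode.NORMAL):
        # Skip the 44-byte WAV header, then feed raw PCM to the decoder
        # selected for this transcription mode (sketch)
        fp.seek(44)
        data = fp.read()
        decoder = self._decoders[mode]
        decoder.start_utt()
        decoder.process_raw(data, False, True)
        decoder.end_utt()
        hyp = decoder.hyp()
        return [hyp.hypstr] if hyp is not None else []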
Example #10
    def make_a_request(self):
        self.speaker.play_wav_file(jasperpath.data('audio', 'beep_hi.wav'))
        audio_data = self.mic.listen(4)
        text = self.speech_recognizer.transcribe(audio_data)
        print("text : " + text)
        self.speaker.play_wav_file(jasperpath.data('audio', 'beep_lo.wav'))

        return text
Example #11
def handle(self, text, mic, profile):
    """
        Takes a picture with the camera, displays it on screen, and
        emails it to a preconfigured address.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    stuff = os.listdir(jasperpath.data('img/'))
    count = len(stuff)
    count += 1
    cam.start()
    pic = cam.get_image()
    pygame.image.save(pic, jasperpath.data('img/pic%s.jpg' % count))
    self.pygm.blitimg("pic%s.jpg" % count, size, black, x, y)

    mic.say("%s" % message)
##    pic(self)
    cam.stop()
    time.sleep(5)

    fromaddr = "*****@*****.**"
    #toaddr = "*****@*****.**"
    #toaddr = "*****@*****.**"
    toaddr = "*****@*****.**"

    sub = "test"
    msg = MIMEMultipart()
    pasw = "Garfield76"
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = "pic"
     
    body = "blah blah"
     
    msg.attach(MIMEText(body, 'plain'))
     
    filename = ("pic%s.jpg" %count)
    attachment = open((jasperpath.data('img/pic%s.jpg' %count)), "rb")
     
    part = MIMEBase('application', 'octet-stream')
    part.set_payload((attachment).read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
     
    msg.attach(part)
     
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(fromaddr, pasw)
    text = msg.as_string()
    server.sendmail(fromaddr, toaddr, text)
    server.quit()
Example #12
    def activeListen(self,
                     threshold=None,
                     timeout=12,
                     audio_file=None,
                     music=False):
        """
            Records until a second of silence or times out after 12 seconds
        """

        # user can request pre-recorded sound
        if audio_file:
            if not os.path.exists(audio_file):
                return None
            transcribe_file = audio_file
        else:
            # check if no threshold provided
            if not threshold:
                threshold = self.ar.get_threshold()

            self.speaker.play(jasperpath.data("audio", "beep_hi.wav"))

            frames = []

            # increasing the range # results in longer pause after command
            # generation
            lastN = [threshold * 1.2 for i in range(30)]

            for data in self.ar.record_audio_data(timeout):
                frames.append(data)
                score = self.ar.get_score(data)

                lastN.pop(0)
                lastN.append(score)

                average = sum(lastN) / float(len(lastN))

                # TODO: 0.8 should not be a MAGIC NUMBER!
                if average < threshold * 0.8:
                    break

            self.speaker.play(jasperpath.data("audio", "beep_lo.wav"))

            # Save recorded data as .wav file
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
                self.ar.save_audio(f, frames)
                transcribe_file = tmpfile_path = f.name

        # Transcribe the .wav file
        transcribed = self.active_stt_engine.transcribe(transcribe_file, music)

        if not audio_file:
            # Remove the temporary .wav file afterwards
            os.remove(tmpfile_path)

        return transcribed
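save_audio is not shown here; presumably it wraps the wave-writing boilerplate that Examples 2 and 5 inline. A sketch under that assumption (the 16 kHz rate is carried over from those examples):

    def save_audio(self, fp, frames):
        # Write raw 16-bit mono PCM frames out as a WAV file (sketch)
        wav_fp = wave.open(fp, 'wb')
        wav_fp.setnchannels(1)
        wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
        wav_fp.setframerate(16000)
        wav_fp.writeframes(b''.join(frames))
        wav_fp.close()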
Example #13
    def activeListen(self, threshold=None, timeout=12, audio_file=None, music=False):
        """
            Records until a second of silence or times out after 12 seconds
        """

        # user can request pre-recorded sound
        if audio_file:
            if not os.path.exists(audio_file):
                return None
            transcribe_file = audio_file
        else:
            # check if no threshold provided
            if not threshold:
                threshold = self.ar.get_threshold()

            self.speaker.play(jasperpath.data("audio", "beep_hi.wav"))

            frames = []

            # increasing the range # results in longer pause after command
            # generation
            lastN = [threshold * 1.2 for i in range(30)]

            for data in self.ar.record_audio_data(timeout):
                frames.append(data)
                score = self.ar.get_score(data)

                lastN.pop(0)
                lastN.append(score)

                average = sum(lastN) / float(len(lastN))

                # TODO: 0.8 should not be a MAGIC NUMBER!
                if average < threshold * 0.8:
                    break

            self.speaker.play(jasperpath.data("audio", "beep_lo.wav"))

            # Save recorded data as .wav file
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
                self.ar.save_audio(f, frames)
                transcribe_file = tmpfile_path = f.name

        # Transcribe the .wav file
        transcribed = self.active_stt_engine.transcribe(transcribe_file, music)
        
        if not audio_file:
            # Remove the temporary .wav file afterwards
            os.remove(tmpfile_path)

        return transcribed
Example #14
    def __init__(self, lmd=jasperpath.config("languagemodel.lm"),
                 dictd=jasperpath.config("dictionary.dic"),
                 lmd_persona=jasperpath.data("languagemodel_persona.lm"),
                 dictd_persona=jasperpath.data("dictionary_persona.dic"),
                 lmd_music=None, dictd_music=None,
                 hmm_dir="/usr/local/share/pocketsphinx/model/hmm/en_US/" +
                         "hub4wsj_sc_8k"):
        """
        Initiates the pocketsphinx instance.

        Arguments:
        lmd -- filename of the full language model
        dictd -- filename of the full dictionary (.dic)
        lmd_persona -- filename of the 'Persona' language model (containing,
                       e.g., 'Jasper')
        dictd_persona -- filename of the 'Persona' dictionary (.dic)
        """

        self._logger = logging.getLogger(__name__)

        # quirky bug where the first import sometimes fails, so retry once
        try:
            import pocketsphinx as ps
        except ImportError:
            import pocketsphinx as ps

        self._logfiles = {}
        with tempfile.NamedTemporaryFile(prefix='psdecoder_music_',
                                         suffix='.log', delete=False) as f:
            self._logfiles[TranscriptionMode.MUSIC] = f.name
        with tempfile.NamedTemporaryFile(prefix='psdecoder_keyword_',
                                         suffix='.log', delete=False) as f:
            self._logfiles[TranscriptionMode.KEYWORD] = f.name
        with tempfile.NamedTemporaryFile(prefix='psdecoder_normal_',
                                         suffix='.log', delete=False) as f:
            self._logfiles[TranscriptionMode.NORMAL] = f.name

        self._decoders = {}
        if lmd_music and dictd_music:
            self._decoders[TranscriptionMode.MUSIC] = \
                ps.Decoder(hmm=hmm_dir, lm=lmd_music, dict=dictd_music,
                           logfn=self._logfiles[TranscriptionMode.MUSIC])
        self._decoders[TranscriptionMode.KEYWORD] = \
            ps.Decoder(hmm=hmm_dir, lm=lmd_persona, dict=dictd_persona,
                       logfn=self._logfiles[TranscriptionMode.KEYWORD])
        self._decoders[TranscriptionMode.NORMAL] = \
            ps.Decoder(hmm=hmm_dir, lm=lmd, dict=dictd,
                       logfn=self._logfiles[TranscriptionMode.NORMAL])
Example #15
def handle(text, mic, profile):
    """
        Reports that the user has unclear or unusable input.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    # Respond with random soundboard file
    responses = os.listdir(jasperpath.data('audio', 'soundboards'))
    response = random.choice(responses)

    mic.speaker.play(jasperpath.data('audio', 'soundboards', response))
Example #16
    def _compile_vocabulary(self, phrases):
        prefix = 'jasper'
        tmpdir = tempfile.mkdtemp()

        lexicon_file = jasperpath.data('julius-stt', 'VoxForge.tgz')
        lexicon_archive_member = 'VoxForge/VoxForgeDict'
        profile_path = jasperpath.config('profile.yml')
        if os.path.exists(profile_path):
            with open(profile_path, 'r') as f:
                profile = yaml.safe_load(f)
                if 'julius' in profile:
                    if 'lexicon' in profile['julius']:
                        lexicon_file = profile['julius']['lexicon']
                    if 'lexicon_archive_member' in profile['julius']:
                        lexicon_archive_member = \
                            profile['julius']['lexicon_archive_member']

        lexicon = JuliusVocabulary.VoxForgeLexicon(lexicon_file,
                                                   lexicon_archive_member)

        # Create grammar file
        tmp_grammar_file = os.path.join(tmpdir,
                                        os.extsep.join([prefix, 'grammar']))
        with open(tmp_grammar_file, 'w') as f:
            grammar = self._get_grammar(phrases)
            for definition in grammar.pop('S'):
                f.write("%s: %s\n" % ('S', ' '.join(definition)))
            for name, definitions in grammar.items():
                for definition in definitions:
                    f.write("%s: %s\n" % (name, ' '.join(definition)))

        # Create voca file
        tmp_voca_file = os.path.join(tmpdir, os.extsep.join([prefix, 'voca']))
        with open(tmp_voca_file, 'w') as f:
            for category, words in self._get_word_defs(lexicon,
                                                       phrases).items():
                f.write("%% %s\n" % category)
                for word, phoneme in words:
                    f.write("%s\t\t\t%s\n" % (word, phoneme))

        # mkdfa.pl
        olddir = os.getcwd()
        os.chdir(tmpdir)
        cmd = ['mkdfa.pl', str(prefix)]
        with tempfile.SpooledTemporaryFile() as out_f:
            subprocess.call(cmd, stdout=out_f, stderr=out_f)
            out_f.seek(0)
            for line in out_f.read().splitlines():
                line = line.strip()
                if line:
                    self._logger.debug(line)
        os.chdir(olddir)

        tmp_dfa_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dfa']))
        tmp_dict_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dict']))
        shutil.move(tmp_dfa_file, self.dfa_file)
        shutil.move(tmp_dict_file, self.dict_file)

        shutil.rmtree(tmpdir)
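Given the write statements above, the generated pair follows Julius's grammar-kit text format. An illustrative example (the category and word entries are invented; the real content comes from _get_grammar and _get_word_defs):

    jasper.grammar:
        S: NS_B PHRASE NS_E
        PHRASE: WORD_0

    jasper.voca:
        % WORD_0
        JASPER          jh ae s p er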
Example #17
    def _compile_vocabulary(self, phrases):
        prefix = 'jasper'
        tmpdir = tempfile.mkdtemp()

        lexicon_file = jasperpath.data('julius-stt', 'VoxForge.tgz')
        lexicon_archive_member = 'VoxForge/VoxForgeDict'
        profile_path = jasperpath.config('profile.yml')
        if os.path.exists(profile_path):
            with open(profile_path, 'r') as f:
                profile = yaml.safe_load(f)
                if 'julius' in profile:
                    if 'lexicon' in profile['julius']:
                        lexicon_file = profile['julius']['lexicon']
                    if 'lexicon_archive_member' in profile['julius']:
                        lexicon_archive_member = \
                            profile['julius']['lexicon_archive_member']

        lexicon = JuliusVocabulary.VoxForgeLexicon(lexicon_file,
                                                   lexicon_archive_member)

        # Create grammar file
        tmp_grammar_file = os.path.join(tmpdir,
                                        os.extsep.join([prefix, 'grammar']))
        with open(tmp_grammar_file, 'w') as f:
            grammar = self._get_grammar(phrases)
            for definition in grammar.pop('S'):
                f.write("%s: %s\n" % ('S', ' '.join(definition)))
            for name, definitions in grammar.items():
                for definition in definitions:
                    f.write("%s: %s\n" % (name, ' '.join(definition)))

        # Create voca file
        tmp_voca_file = os.path.join(tmpdir, os.extsep.join([prefix, 'voca']))
        with open(tmp_voca_file, 'w') as f:
            for category, words in self._get_word_defs(lexicon,
                                                       phrases).items():
                f.write("%% %s\n" % category)
                for word, phoneme in words:
                    f.write("%s\t\t\t%s\n" % (word, phoneme))

        # mkdfa.pl
        olddir = os.getcwd()
        os.chdir(tmpdir)
        cmd = ['mkdfa.pl', str(prefix)]
        with tempfile.SpooledTemporaryFile() as out_f:
            subprocess.call(cmd, stdout=out_f, stderr=out_f)
            out_f.seek(0)
            for line in out_f.read().splitlines():
                line = line.strip()
                if line:
                    self._logger.debug(line)
        os.chdir(olddir)

        tmp_dfa_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dfa']))
        tmp_dict_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dict']))
        shutil.move(tmp_dfa_file, self.dfa_file)
        shutil.move(tmp_dict_file, self.dict_file)

        shutil.rmtree(tmpdir)
Example #18
    def blitjimg(self, ix, iy, image, size, imgdeg):
        self.img = pygame.image.load(jasperpath.data('img/%s' % image))
        self.img = pygame.transform.smoothscale(self.img, size)
        self.img = pygame.transform.rotate(self.img, imgdeg)
        self.imgc = self.img.get_rect()
        self.imgc.center = (ix, iy)
        self.background.blit(self.img, self.imgc)
        self.screen.blit(self.background, (x, y))
Example #19
    def testJoke(self):
        from modules import Joke

        query = "Tell me a joke."
        inputs = ["Who's there?", "Random response"]
        outputs = self.runConversation(query, inputs, Joke)
        self.assertEqual(len(outputs), 3)
        allJokes = open(jasperpath.data('text', 'JOKES.txt'), 'r').read()
        self.assertTrue(outputs[2] in allJokes)
Example #20
    def say_if_cached(self, phrase):
        # Check if the file is cached
        cache_filename = jasperpath.data('cache', '%s.wav' %
                                         hashlib.md5(phrase).hexdigest())
        if os.path.isfile(cache_filename):
            self.play(cache_filename)
            return True
        else:
            return False
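A typical caller would try the cache first and synthesize on a miss; a hypothetical sketch (synthesize_to is an invented helper, not shown anywhere in these examples):

    def say(self, phrase):
        if not self.say_if_cached(phrase):
            cache_filename = jasperpath.data('cache', '%s.wav' %
                                             hashlib.md5(phrase).hexdigest())
            self.synthesize_to(phrase, cache_filename)  # hypothetical TTS hook
            self.play(cache_filename)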
Example #21
    def testJoke(self):
        from modules import Joke

        query = "Tell me a joke."
        inputs = ["Who's there?", "Random response"]
        outputs = self.runConversation(query, inputs, Joke)
        self.assertEqual(len(outputs), 3)
        allJokes = open(jasperpath.data('text', 'JOKES.txt'), 'r').read()
        self.assertTrue(outputs[2] in allJokes)
Example #22
    def blitimg2(self, image, size, x, y):

        self.img = pygame.image.load(jasperpath.data('img/%s' % image))
        self.img = pygame.transform.smoothscale(self.img, size)
        self.imgc = self.img.get_rect()
        self.imgc.center = self.background.get_rect().center
        self.background.blit(self.img, self.imgc)
        self.screen.blit(self.background, (x, y))
        pygame.display.flip()
Example #23
def get_all_phrases():
    phrases = []
    with open(jasperpath.data('all_phrases'), mode="r") as f:
        for line in f:
            phrase = line.strip()
            if phrase:
                phrases.append(phrase)

    return sorted(list(set(phrases)))
Example #24
    def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds
            Returns the first matching string or None
        """
        self._logger.info("#### Active Listen Start..... ##### ")
  
        self._logger.info("Play beep_hi.wav")        
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        if self.active_stt_engine.has_mic()==True:
            transcribed = self.active_stt_engine.transcribe(None)
        else:
            transcribed = self.activeListenToAllOptions(THRESHOLD, LISTEN, MUSIC)

        self._logger.info("Play beep_lo.wav")        
        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        self._logger.info("#### Active Listen End..... ##### ")  

        return transcribed
Example #25
    def __init__(self,
                 username='',
                 password='',
                 whitelist=[],
                 camera='',
                 snd_capture='',
                 snd_playback=''):
        self.quit = False
        self.whitelist = whitelist
        callbacks = {
            'call_state_changed': self.call_state_changed,
        }

        # Configure the linphone core
        logging.basicConfig(level=logging.INFO)
        signal.signal(signal.SIGINT, self.signal_handler)
        # linphone.set_log_handler(self.log_handler)
        self.core = linphone.Core.new(callbacks, None, None)
        self.core.max_calls = 1
        self.core.echo_cancellation_enabled = False
        self.core.video_capture_enabled = True
        self.core.video_display_enabled = False
        self.core.stun_server = 'stun.linphone.org'
        self.core.ring = "../static/audio/telephone-ring-04.wav"
        self.core.ringback = jasperpath.data('audio', 'phone_ringing.wav')
        self.core.root_ca = "/usr/local/etc/openssl/cert.pem"
        # self.core.ice_enabled = True
        if len(camera):
            self.core.video_device = camera
        if len(snd_capture):
            self.core.capture_device = snd_capture
        if len(snd_playback):
            self.core.playback_device = snd_playback

        # Only enable PCMU and PCMA audio codecs
        # for codec in self.core.audio_codecs:
        #   if codec.mime_type == "PCMA" or codec.mime_type == "PCMU":
        #     self.core.enable_payload_type(codec, True)
        #   else:
        #     self.core.enable_payload_type(codec, False)

        # Only enable VP8 video codec
        for codec in self.core.video_codecs:
            if codec.mime_type == "VP8":
                self.core.enable_payload_type(codec, True)
                # profile-level-id=42801F
        #   else:
        #     self.core.enable_payload_type(codec, False)

        self.configure_sip_account(username, password)
Example #26
    def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds
            Returns the first matching string or None
        """
        self._logger.info("#### Active Listen Start..... ##### ")

        self._logger.info("Play beep_hi.wav")
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        if self.active_stt_engine.has_mic():
            transcribed = self.active_stt_engine.transcribe(None)
        else:
            transcribed = self.activeListenToAllOptions(
                THRESHOLD, LISTEN, MUSIC)

        self._logger.info("Play beep_lo.wav")
        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        self._logger.info("#### Active Listen End..... ##### ")

        return transcribed
Example #27
    def blitimg(self, image, size, color, x, y):
        global img
        global imgc
        self.background = pygame.Surface(screen.get_size())
        self.background = self.background.convert()
        self.background.fill(color)
        self.img = pygame.image.load(jasperpath.data('img/%s' % image))
        self.img = pygame.transform.smoothscale(self.img, size)
        self.imgc = self.img.get_rect()
        self.imgc.center = self.background.get_rect().center
        self.background.blit(self.img, self.imgc)
        self.screen.blit(self.background, (x, y))
        pygame.display.flip()
Example #28
    def blitimg(self, image, size, color, x, y):
        global img
        global imgc
        self.background = pygame.Surface(screen.get_size())
        self.background = self.background.convert()
        self.background.fill(color)
        self.img = pygame.image.load(jasperpath.data('img/%s' % image))
        self.img = pygame.transform.smoothscale(self.img, size)
        self.imgc = self.img.get_rect()
        self.imgc.center = self.background.get_rect().center
        self.background.blit(self.img, self.imgc)
        self.screen.blit(self.background, (x, y))
        pygame.display.flip()
Example #29
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by saying a message
        and playing a sound clip.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    mic.say("%s" % message)
    mic.speaker.play(jasperpath.data('audio/bender/', 'ststart.wav'))
Example #30
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by saying a message
        and playing a sound clip.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    mic.say("%s" % message)
    mic.speaker.play(jasperpath.data('audio/bender/', 'ststart.wav'))
Example #31
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by saying a message
        and launching the boot.sh script.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    mic.say("%s" % message)
    self.pygm.on = False
    os.system(jasperpath.data('boot.sh &'))
Example #32
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by saying a message
        and launching the boot.sh script.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    mic.say("%s" % message)
    self.pygm.on = False
    os.system(jasperpath.data('boot.sh &'))
Example #33
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases.
    """
    phrases = []

    with open(jasperpath.data('keyword_phrases'), mode="r") as f:
        for line in f:
            phrase = line.strip()
            if phrase:
                phrases.append(phrase)

    return phrases
Example #34
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases.
    """
    phrases = []

    with open(jasperpath.data('keyword_phrases'), mode="r") as f:
        for line in f:
            phrase = line.strip()
            if phrase:
                phrases.append(phrase)

    return phrases
Example #35
    def blittxtimgam(self, txt, txts, color, tx, ty, ix, iy, image, size,
                     imgdeg, txtdeg, bcolor):
        self.img = pygame.image.load(jasperpath.data('img/%s' % image))
        self.img = pygame.transform.smoothscale(self.img, size)
        self.img = pygame.transform.rotate(self.img, imgdeg)
        self.imgc = self.img.get_rect()
        self.imgc.center = (ix, iy)
        self.background.fill(bcolor)
        self.background.blit(self.img, self.imgc)
        self.font = pygame.font.Font(None, txts)
        self.txt = self.font.render("%s" % txt, True, color)
        self.txt = pygame.transform.rotate(self.txt, txtdeg)
        self.textx = self.txt.get_rect()
        self.textx.center = (tx, ty)
        self.background.blit(self.txt, self.textx)
        self.screen.blit(self.background, (0, 0))
        pygame.display.flip()
Example #36
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                                 MUSIC=False):
        """
            Records until a second of silence or times out after 3 seconds

            Returns a list of the matching options or None
        """

        RATE = 16000
        CHUNK = 4096
        LISTEN_TIME = 3

        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # prepare recording stream
        stream = self._audio.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        frames = []
        # increasing the range # results in longer pause after command
        # generation
        lastN = [THRESHOLD * 1.2 for i in range(30)]

        for i in range(0, RATE / CHUNK * LISTEN_TIME):

            try:
                data = stream.read(CHUNK)
            except IOError as e:
                print "silently ignoring IOError", e
                continue
            frames.append(data)
            score = self.getScore(data)

            lastN.pop(0)
            lastN.append(score)

            average = sum(lastN) / float(len(lastN))

            # TODO: 0.8 should not be a MAGIC NUMBER!
            if average < THRESHOLD * 0.8:
                break
Example #37
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases.
    """
    phrases = []
    keyword_phrases_file = os.path.join(jasperpath.data(), 'language',
                                        l10n.macsen_language,
                                        'keyword_phrases')
    print "Opening keyword_phrases file %s" % keyword_phrases_file
    with open(keyword_phrases_file, mode="r") as f:
        for line in f:
            phrase = line.strip()
            if phrase:
                phrases.append(phrase)

    return phrases
Example #38
def handle(self, text, mic, profile):

##    mic.say("are you sure?")
##    response = mic.activeListen()
##    if yes(response):
    done = False
    mic.speaker.play(jasperpath.data("audio/term/t4.wav"))

    while done == False:
        self.blitimg(image, size, black, x, y)
        #br = self.img.get_rect()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                done = True
        pressed = pygame.key.get_pressed()

        if pressed[pygame.K_DOWN]:
            done = True
        #time.sleep(0.01)
    self.blitimg(image2, size, black, x, y)
    mic.say("self destruct sequence Activated")

    for i in range(5):
        blink()

    time.sleep(1)
    for i in range(10, 0, -1):
        self.blittxt(i, 400, white, black)
        mic.say(str(i))
        time.sleep(1)

    self.blitimg(image3, size, black, x, y)
    mic.say("boom!")
    time.sleep(1)
    self.blitimg(image4, size, black, x, y)
    for i in range(5):
        blink2()

    GPIO.output(27, 1)
Example #39
def handle(self, text, mic, profile):

    ##    mic.say("are you sure?")
    ##    response = mic.activeListen()
    ##    if yes(response):
    done = False
    mic.speaker.play(jasperpath.data("audio/term/t4.wav"))

    while done == False:
        self.blitimg(image, size, black, x, y)
        #br = self.img.get_rect()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                done = True
        pressed = pygame.key.get_pressed()

        if pressed[pygame.K_DOWN]:
            done = True
        #time.sleep(0.01)
    self.blitimg(image2, size, black, x, y)
    mic.say("self destruct sequence Activated")

    for i in range(5):
        blink()

    time.sleep(1)
    for i in range(10, 0, -1):
        self.blittxt(i, 400, white, black)
        mic.say(str(i))
        time.sleep(1)

    self.blitimg(image3, size, black, x, y)
    mic.say("boom!")
    time.sleep(1)
    self.blitimg(image4, size, black, x, y)
    for i in range(5):
        blink2()

    GPIO.output(27, 1)
Example #40
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by announcing and
        performing a system shutdown.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    import subprocess

    messages = ["shutting down"]

    message = random.choice(messages)
    GPIO.output(12, 1)
    self.blitimg(image, size, black, x, y)
    mic.speaker.play(jasperpath.data('audio/hal/cantalow.wav'))
    GPIO.output(12, 0)

    mic.say(message)
    subprocess.call(["sudo", "shutdown", "now"])
Example #41
def getRandomJoke(filename=jasperpath.data('text', 'JOKES.txt')):
    jokeFile = open(filename, "r")
    jokes = []
    start = ""
    end = ""
    for line in jokeFile.readlines():
        line = line.replace("\n", "")

        if start == "":
            start = line
            continue

        if end == "":
            end = line
            continue

        jokes.append((start, end))
        start = ""
        end = ""

    jokeFile.close()
    # only keep the final pair if the file did not end on a separator
    if start and end:
        jokes.append((start, end))
    joke = random.choice(jokes)
    return joke
Example #42
def getRandomJoke(filename=jasperpath.data('text', 'JOKES.txt')):
    jokeFile = open(filename, "r")
    jokes = []
    start = ""
    end = ""
    for line in jokeFile.readlines():
        line = line.replace("\n", "")

        if start == "":
            start = line
            continue

        if end == "":
            end = line
            continue

        jokes.append((start, end))
        start = ""
        end = ""

    jokeFile.close()
    if start and end:
        jokes.append((start, end))
    joke = random.choice(jokes)
    return joke
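testJoke in Examples 19 and 21 expects three spoken outputs, which fits a knock-knock exchange built on these (start, end) pairs. A minimal sketch of such a handler (hypothetical control flow, not the module's actual code):

def handle(text, mic, profile):
    joke = getRandomJoke()
    mic.say("Knock knock")
    mic.activeListen()      # user: "Who's there?"
    mic.say(joke[0])
    mic.activeListen()      # user: "<start> who?"
    mic.say(joke[1])        # punchline; the third output checked by testJoke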
Example #43
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by relaying the
        meaning of life.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    done = False

    self.pygm.background.fill(black)
    self.pygm.screen.blit(self.pygm.background, (0, 0))
    pygame.display.flip()
    mic.say("%s" % message)

    while done == False:
        for event2 in pygame.event.get():
            if event2.type == pygame.QUIT:
                done = True
            if event2.type == pygame.MOUSEBUTTONDOWN:
                done = True
        rf = random.choice(os.listdir(jasperpath.data("img/")))
        for i in range(30):
            for event2 in pygame.event.get():
                if event2.type == pygame.QUIT:
                    done = True
                if event2.type == pygame.MOUSEBUTTONDOWN:
                    done = True
            ix = random.randint(0, 480)
            iy = random.randint(0, 320)
            self.pygm.blitjimg(ix, iy, rf, size, 0)
            pygame.display.flip()
            time.sleep(.5)
Example #44
    def setUp(self):
        self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
        self.time_clip = jasperpath.data('audio', 'time.wav')

        from stt import PocketSphinxSTT
        self.stt = PocketSphinxSTT()
Example #45
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                                 MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds
            Returns a list of the matching options or None
        """

        RATE = 16000
        CHUNK = 1024
        LISTEN_TIME = 12

        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()
        self._logger.warning("playing hi")
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # prepare recording stream
        #stream = self._audio.open(format=pyaudio.paInt16,
        #                          channels=1,
        #                          rate=RATE,
        #                          input=True,
        #                          frames_per_buffer=CHUNK)
        #
        #frames = []
        # increasing the range # results in longer pause after command
        # generation
        #lastN = [THRESHOLD * 1.2 for i in range(30)]

        #for i in range(0, RATE / CHUNK * LISTEN_TIME):

        #    data = stream.read(CHUNK)
        #    frames.append(data)
        #    score = self.getScore(data)
        #
        #    lastN.pop(0)
        #    lastN.append(score)
        #
        #    average = sum(lastN) / float(len(lastN))
        #
        #    # TODO: 0.8 should not be a MAGIC NUMBER!
        #    if average < THRESHOLD * 0.8:
        #        break

        wit.init()
        response = wit.voice_query_auto(self.token)
        wit.close()

        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
        self._logger.warning("playing lo")

        # save the audio data
        #stream.stop_stream()
        #stream.close()

        #with tempfile.SpooledTemporaryFile(mode='w+b') as f:
        #    wav_fp = wave.open(f, 'wb')
        #    wav_fp.setnchannels(1)
        #    wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
        #    wav_fp.setframerate(RATE)
        #    wav_fp.writeframes(''.join(frames))
        #    wav_fp.close()
        #    f.seek(0)
        #return json.dumps(self.active_stt_engine.transcribe(f))
        return response
Example #46
def zip2():
    zip2 = pygame.image.load(jasperpath.data('img/zip2.png'))
    zip2 = pygame.transform.smoothscale(zip2, size)
    screen.fill(black)
    screen.blit(zip2, (x, y))
    pygame.display.flip()
Example #47
def nu():
    nu = pygame.image.load(jasperpath.data('img/nu.png'))
    nu = pygame.transform.smoothscale(nu, size)
    screen.fill(black)
    screen.blit(nu, (x, y))
    pygame.display.flip()
Example #48
        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        # prepare recording stream
        try:
            stream = self._audio.open(format=pyaudio.paInt16,
                                      channels=1,
                                      rate=RATE,
                                      input=True,
                                      frames_per_buffer=CHUNK)
        except Exception as e:
            self._logger.error(e)
            return None

        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        frames = []
        # increasing the range # results in longer pause after command
        # generation
        lastN = [THRESHOLD * 1.2 for i in range(30)]

        for i in range(0, RATE / CHUNK * LISTEN_TIME):
            try:
                data = stream.read(CHUNK)
                frames.append(data)
                score = self.getScore(data)

                lastN.pop(0)
                lastN.append(score)
Example #49
def handle(self, text, mic, profile):

    mic.say("are you sure?")
    response = mic.activeListen()
    if yes(response):
        done = False
        mic.speaker.play(jasperpath.data("audio/term/t4.wav"))

        while done == False:
            self.blitimg(image, size, black, x, y)
            br = self.img.get_rect()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    done = True
                if (event.type == pygame.MOUSEBUTTONDOWN and
                        event.button == 1):
                    pos = pygame.mouse.get_pos()
                    if br.collidepoint(pos):
                        done = True
            pressed = pygame.key.get_pressed()

            if pressed[pygame.K_DOWN]:
                done = True
            time.sleep(0.01)
        self.blitimg(image2, size, black, x, y)
        mic.say("self destruct sequence Activated")

        for i in range(5):
            blink()

        time.sleep(1)

        # countdown; the last three numbers display in red
        for i in range(10, 0, -1):
            color = red if i <= 3 else white
            blittxt(i, 120, color, black)
            mic.say(str(i))
            time.sleep(1)

        self.blitimg(image3, size, black, x, y)
        mic.say("boom!")
        time.sleep(1)
        self.blitimg(image4, size, black, x, y)
        for i in range(21):
            blink2()
        GPIO.output(27, 1)
    elif no(response):
        mic.say("pussy!")
Example #50
    def activeListenToAllOptions(self,
                                 THRESHOLD=None,
                                 LISTEN=True,
                                 MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds

            Returns a list of the matching options or None
        """

        cls = self.__class__
        LISTEN_TIME = 12

        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        #wait_count = 0
        #while not self.phone.ptt_pressed() and wait_count < 120:
        #    wait_count += 1
        #    time.sleep(0.1)
        #    if self.phone.on_hook():
        #        raise phone.Hangup()

        #if not self.phone.ptt_pressed():
        #    return ['',]

        cls.lock.acquire()

        stream = None
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        try:
            # prepare recording stream
            stream = self._audio.open(format=pyaudio.paInt16,
                                      channels=1,
                                      rate=self.RATE,
                                      input=True,
                                      input_device_index=self._audio_dev,
                                      frames_per_buffer=self.CHUNK)

            frames = []
            # increasing the range # results in longer pause after command
            # generation
            lastN = [THRESHOLD * 1.2 for i in range(int(30720 / self.CHUNK))]
            # States:
            # 0 -- before utterance
            # 1 -- during utterance
            # 2 -- after utterance
            state = 0
            utterances = 0
            post_utterance_frames = 0
            silence_frames_threshold = int(0.25 * self.RATE /
                                           self.CHUNK)  # 1/4 of a second
            self._dial_stack = []

            for i in range(0, self.RATE / self.CHUNK * LISTEN_TIME):
                if self.phone.on_hook():
                    raise phone.Hangup()

                # Only works with pyaudio 0.2.11 and up
                data = stream.read(self.CHUNK, False)
                # data = stream.read(CHUNK)
                frames.append(data)
                score = self.getScore(data)

                lastN.pop(0)
                lastN.append(score)

                average = sum(lastN) / float(len(lastN))

                # TODO: find appropriate threshold multiplier
                if average > THRESHOLD * 1.25:
                    if state != 1:
                        self._logger.debug('Begin utterance')
                        utterances += 1
                        state = 1
                elif state > 0 and average < THRESHOLD * 0.8:
                    if state != 2:
                        self._logger.debug('End utterance')
                    post_utterance_frames += 1
                    state = 2
                if (state == 2 and
                        post_utterance_frames >= silence_frames_threshold):
                    self._logger.debug('Enough post-utterance silence')
                    break
                elif self.phone.has_dial_stack() and \
                     self.phone.time_since_last_dial() > self.dial_timeout:
                    self._dial_stack = self.phone.dial_stack()
                    self._logger.debug('Dialed number timeout')
                    break

            self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
            if self.phone.has_dial_stack():
                self._dial_stack += self.phone.dial_stack()
            if self._dial_stack:
                self._logger.info('Got dialed number {}'.format(''.join(
                    self._dial_stack)))

        # save the audio data
        finally:
            if stream is not None:
                stream.stop_stream()
                stream.close()

            cls.lock.release()

        with tempfile.NamedTemporaryFile(mode='w+b',
                                         suffix='.wav',
                                         delete=not self.keep_files) as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(self.RATE)
            wav_fp.writeframes(''.join(frames))
            wav_fp.close()
            f.seek(0)
            if self.RATE == self.TARGET_RATE:
                self._logger.debug('No resample necessary')
                candidates = self.active_stt_engine.transcribe(f)
                if self._echo:
                    self.speaker.play(f.name)
                if self.keep_files:
                    self.last_file_recorded = f.name
            else:
                resampled_file = resample(f.name, self.TARGET_RATE)
                f_prime = open(resampled_file, 'rb')
                candidates = self.active_stt_engine.transcribe(f_prime)
                f_prime.close()
                if self._echo:
                    self.speaker.play(resampled_file)
                if self.keep_files:
                    self.last_file_recorded = resampled_file
                    os.remove(f.name)
                else:
                    os.remove(resampled_file)

            candidates = ['' if c is None else c for c in candidates]
            if candidates:
                self._logger.info('Got the following possible transcriptions:')
                for c in candidates:
                    self._logger.info(c)
            # f.close()
            return candidates
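The resample helper called above is not included in the example. A plausible standard-library sketch (an assumption, not the codebase's actual implementation; assumes mono 16-bit input):

def resample(in_path, target_rate):
    # Rate-convert a mono 16-bit WAV using audioop.ratecv (sketch)
    import audioop
    import wave

    in_fp = wave.open(in_path, 'rb')
    in_rate = in_fp.getframerate()
    frames = in_fp.readframes(in_fp.getnframes())
    in_fp.close()

    converted, _ = audioop.ratecv(frames, 2, 1, in_rate, target_rate, None)

    out_path = in_path + '.resampled.wav'
    out_fp = wave.open(out_path, 'wb')
    out_fp.setnchannels(1)
    out_fp.setsampwidth(2)
    out_fp.setframerate(target_rate)
    out_fp.writeframes(converted)
    out_fp.close()
    return out_path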