Exemplo n.º 1
0
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds.

            Arguments:
            THRESHOLD -- ambient noise score threshold; measured via
                         fetchThreshold() when not supplied
            LISTEN -- unused here; kept for interface compatibility
            MUSIC -- when True, transcribe in MUSIC mode instead of NORMAL

            Returns a list of the matching options or None
        """

        RATE = 16000      # sample rate in Hz
        CHUNK = 1024      # frames per buffer read
        LISTEN_TIME = 12  # hard timeout in seconds

        # check if no threshold provided (identity test, not ==, per PEP 8)
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # prepare recording stream
        stream = self._audio.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        frames = []
        # increasing the range # results in longer pause after command generation
        lastN = [THRESHOLD * 1.2 for i in range(30)]

        # floor division keeps the loop count an int on Python 3 as well
        for i in range(0, RATE // CHUNK * LISTEN_TIME):

            data = stream.read(CHUNK)
            frames.append(data)
            score = self.getScore(data)

            # sliding window of the last 30 scores
            lastN.pop(0)
            lastN.append(score)

            average = sum(lastN) / float(len(lastN))

            # TODO: 0.8 should not be a MAGIC NUMBER!
            # stop once the rolling average drops below the silence threshold
            if average < THRESHOLD * 0.8:
                break

        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        # save the audio data
        stream.stop_stream()
        stream.close()

        with tempfile.SpooledTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(''.join(frames))
            wav_fp.close()
            f.seek(0)
            mode = TranscriptionMode.MUSIC if MUSIC else TranscriptionMode.NORMAL
            transcribed = self.active_stt_engine.transcribe(f, mode=mode)
        return transcribed
Exemplo n.º 2
0
    def getpic():
        """Load a random image from the img data dir into the global `ball`,
        scaled to `imsize`."""
        # BUG FIX: `global` must precede the assignment; the original
        # declared it after assigning `ball`, which is a SyntaxWarning on
        # Python 2 and an outright error on Python 3.
        global ball

        randomfile = random.choice(os.listdir(jasperpath.data("img/")))
        rfile = jasperpath.data('img/' + randomfile)
        ball = pygame.image.load(rfile)
        ball = pygame.transform.smoothscale(ball, imsize)
Exemplo n.º 3
0
def handle(self, text, mic, profile):
    """Blink the LED on GPIO pin 12 while showing the image and playing a
    randomly chosen Bender sound clip from the audio/bender data dir."""
    GPIO.output(12, 1)
    clip = random.choice(os.listdir(jasperpath.data("audio/bender/")))
    clip_path = jasperpath.data('audio/bender/' + clip)
    self.blitimg(image, size, black, x, y)
    mic.speaker.play(clip_path)
    GPIO.output(12, 0)
Exemplo n.º 4
0
def handle(self, text, mic, profile):
    """Blink the LED on GPIO pin 12 while playing a randomly chosen sound
    clip from the audio/com data dir."""
    GPIO.output(12, 1)
    clip = random.choice(os.listdir(jasperpath.data("audio/com/")))
    clip_path = jasperpath.data('audio/com/' + clip)
    mic.speaker.play(clip_path)
    GPIO.output(12, 0)
Exemplo n.º 5
0
    def setUp(self):
        """Prepare audio fixture paths and PocketSphinx STT engine
        instances for the tests."""
        from stt import PocketSphinxSTT

        self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
        self.time_clip = jasperpath.data('audio', 'time.wav')
        self.passive_stt_engine = PocketSphinxSTT.get_passive_instance()
        self.active_stt_engine = PocketSphinxSTT.get_active_instance()
Exemplo n.º 6
0
    def make_a_request(self):
        """Beep, record four seconds from the microphone, transcribe the
        recording, beep again, and return the transcribed text."""
        self.speaker.play_wav_file(jasperpath.data('audio', 'beep_hi.wav'))
        recorded = self.mic.listen(4)
        transcription = self.speech_recognizer.transcribe(recorded)
        print("text : " + transcription)
        self.speaker.play_wav_file(jasperpath.data('audio', 'beep_lo.wav'))
        return transcription
Exemplo n.º 7
0
def handle(self, text, mic, profile):
    """
        Takes a webcam picture, displays it, speaks a message, and then
        e-mails the picture as an attachment.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    # next free picture index: one past the current number of files in img/
    stuff = os.listdir(jasperpath.data('img/'))
    count = len(stuff)
    count += 1

    cam.start()
    pic = cam.get_image()
    pygame.image.save(pic, (jasperpath.data('img/pic%s.jpg' % count)))
    self.pygm.blitimg(("pic%s.jpg" % count), size, black, x, y)

    mic.say("%s" % message)
    cam.stop()
    time.sleep(5)

    fromaddr = "[email protected]"
    toaddr = "[email protected]"

    msg = MIMEMultipart()
    # SECURITY: credentials must not be hard-coded in source -- move this
    # into the user profile or an environment variable.
    pasw = "Garfield76"
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = "pic"

    body = "blah blah"
    msg.attach(MIMEText(body, 'plain'))

    filename = "pic%s.jpg" % count
    # context manager so the attachment file handle is always closed
    # (the original leaked it)
    with open(jasperpath.data('img/pic%s.jpg' % count), "rb") as attachment:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition',
                    "attachment; filename= %s" % filename)
    msg.attach(part)

    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login(fromaddr, pasw)
        server.sendmail(fromaddr, toaddr, msg.as_string())
    finally:
        # close the SMTP session even if login/send fails
        server.quit()
Exemplo n.º 8
0
    def activeListen(self, threshold=None, timeout=12, audio_file=None, music=False):
        """
            Records until a second of silence or times out after ``timeout``
            seconds (12 by default), then transcribes the recording.

            Arguments:
            threshold -- silence score threshold; measured from ambient
                         noise when not supplied
            timeout -- maximum recording length in seconds
            audio_file -- optional pre-recorded file to transcribe instead
                          of recording from the microphone
            music -- passed through to the STT engine's transcribe call

            Returns the transcription, or None if audio_file does not exist.
        """

        # user can request pre-recorded sound
        if audio_file:
            if not os.path.exists(audio_file):
                return None
            transcribe_file = audio_file
        else:
            # check if no threshold provided
            if not threshold:
                threshold = self.ar.get_threshold()

            self.speaker.play(jasperpath.data("audio", "beep_hi.wav"))

            frames = []

            # increasing the range # results in longer pause after command
            # generation
            lastN = [threshold * 1.2 for i in range(30)]

            # BUG FIX: the original iterated record_audio_data(seconds), but
            # `seconds` was never defined -- the `timeout` parameter was meant.
            for data in self.ar.record_audio_data(timeout):
                frames.append(data)
                score = self.ar.get_score(data)

                # sliding window of the most recent scores
                lastN.pop(0)
                lastN.append(score)

                average = sum(lastN) / float(len(lastN))

                # TODO: 0.8 should not be a MAGIC NUMBER!
                if average < threshold * 0.8:
                    break

            self.speaker.play(jasperpath.data("audio", "beep_lo.wav"))

            # Save recorded data as .wav file
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
                self.ar.save_audio(f, frames)
                transcribe_file = tmpfile_path = f.name

        # Transcribe the .wav file
        transcribed = self.active_stt_engine.transcribe(transcribe_file, music)

        if not audio_file:
            # Remove the temporary .wav file afterwards
            os.remove(tmpfile_path)

        return transcribed
Exemplo n.º 9
0
    def __init__(self, lmd=jasperpath.config("languagemodel.lm"),
                 dictd=jasperpath.config("dictionary.dic"),
                 lmd_persona=jasperpath.data("languagemodel_persona.lm"),
                 dictd_persona=jasperpath.data("dictionary_persona.dic"),
                 lmd_music=None, dictd_music=None,
                 hmm_dir="/usr/local/share/pocketsphinx/model/hmm/en_US/" +
                         "hub4wsj_sc_8k"):
        """
        Initiates the pocketsphinx instance.

        Arguments:
        speaker -- handles platform-independent audio output
        lmd -- filename of the full language model
        dictd -- filename of the full dictionary (.dic)
        lmd_persona -- filename of the 'Persona' language model (containing,
                       e.g., 'Jasper')
        dictd_persona -- filename of the 'Persona' dictionary (.dic)
        lmd_music -- optional music language model; the MUSIC decoder is
                     only created when both lmd_music and dictd_music are set
        dictd_music -- optional music dictionary (.dic)
        hmm_dir -- directory of the acoustic model
        """

        self._logger = logging.getLogger(__name__)

        # quirky bug where first import doesn't work -- retry once.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        try:
            import pocketsphinx as ps
        except Exception:
            import pocketsphinx as ps

        # one decoder log file per transcription mode (was three copy-pasted
        # with-blocks in the original)
        self._logfiles = {}
        for mode, log_prefix in ((TranscriptionMode.MUSIC, 'psdecoder_music_'),
                                 (TranscriptionMode.KEYWORD,
                                  'psdecoder_keyword_'),
                                 (TranscriptionMode.NORMAL,
                                  'psdecoder_normal_')):
            with tempfile.NamedTemporaryFile(prefix=log_prefix, suffix='.log',
                                             delete=False) as f:
                self._logfiles[mode] = f.name

        self._decoders = {}
        if lmd_music and dictd_music:
            self._decoders[TranscriptionMode.MUSIC] = \
                ps.Decoder(hmm=hmm_dir, lm=lmd_music, dict=dictd_music,
                           logfn=self._logfiles[TranscriptionMode.MUSIC])
        self._decoders[TranscriptionMode.KEYWORD] = \
            ps.Decoder(hmm=hmm_dir, lm=lmd_persona, dict=dictd_persona,
                       logfn=self._logfiles[TranscriptionMode.KEYWORD])
        self._decoders[TranscriptionMode.NORMAL] = \
            ps.Decoder(hmm=hmm_dir, lm=lmd, dict=dictd,
                       logfn=self._logfiles[TranscriptionMode.NORMAL])
Exemplo n.º 10
0
def handle(text, mic, profile):
    """
        Reports that the user has unclear or unusable input.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    # Respond with random soundboard file
    clips = os.listdir(jasperpath.data('audio', 'soundboards'))
    chosen = random.choice(clips)
    mic.speaker.play(jasperpath.data('audio', 'soundboards', chosen))
Exemplo n.º 11
0
    def _compile_vocabulary(self, phrases):
        """Compile *phrases* into Julius DFA/dict files.

        Builds a grammar and voca file in a temp dir from the phrases and a
        VoxForge lexicon (overridable via the 'julius' section of
        profile.yml), runs mkdfa.pl on them, and moves the results to
        self.dfa_file / self.dict_file.
        """
        prefix = 'jasper'
        tmpdir = tempfile.mkdtemp()

        lexicon_file = jasperpath.data('julius-stt', 'VoxForge.tgz')
        lexicon_archive_member = 'VoxForge/VoxForgeDict'
        # the user profile may point at a different lexicon archive
        profile_path = jasperpath.config('profile.yml')
        if os.path.exists(profile_path):
            with open(profile_path, 'r') as f:
                profile = yaml.safe_load(f)
                if 'julius' in profile:
                    if 'lexicon' in profile['julius']:
                        lexicon_file = profile['julius']['lexicon']
                    if 'lexicon_archive_member' in profile['julius']:
                        lexicon_archive_member = \
                            profile['julius']['lexicon_archive_member']

        lexicon = JuliusVocabulary.VoxForgeLexicon(lexicon_file,
                                                   lexicon_archive_member)

        # Create grammar file
        tmp_grammar_file = os.path.join(tmpdir,
                                        os.extsep.join([prefix, 'grammar']))
        with open(tmp_grammar_file, 'w') as f:
            grammar = self._get_grammar(phrases)
            # the start symbol S must be written first
            for definition in grammar.pop('S'):
                f.write("%s: %s\n" % ('S', ' '.join(definition)))
            for name, definitions in grammar.items():
                for definition in definitions:
                    f.write("%s: %s\n" % (name, ' '.join(definition)))

        # Create voca file
        tmp_voca_file = os.path.join(tmpdir, os.extsep.join([prefix, 'voca']))
        with open(tmp_voca_file, 'w') as f:
            for category, words in self._get_word_defs(lexicon,
                                                       phrases).items():
                f.write("%% %s\n" % category)
                for word, phoneme in words:
                    f.write("%s\t\t\t%s\n" % (word, phoneme))

        # mkdfa.pl
        # mkdfa.pl works relative to the cwd, so chdir into the temp dir
        olddir = os.getcwd()
        os.chdir(tmpdir)
        cmd = ['mkdfa.pl', str(prefix)]
        with tempfile.SpooledTemporaryFile() as out_f:
            subprocess.call(cmd, stdout=out_f, stderr=out_f)
            out_f.seek(0)
            # forward the tool's output to the debug log
            for line in out_f.read().splitlines():
                line = line.strip()
                if line:
                    self._logger.debug(line)
        os.chdir(olddir)

        tmp_dfa_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dfa']))
        tmp_dict_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dict']))
        shutil.move(tmp_dfa_file, self.dfa_file)
        shutil.move(tmp_dict_file, self.dict_file)

        shutil.rmtree(tmpdir)
Exemplo n.º 12
0
 def say_if_cached(self, phrase):
     """Play a cached WAV rendering of *phrase* if one exists.

     Returns True when the cached file was found and played, else False.
     """
     cache_filename = jasperpath.data(
         'cache', '%s.wav' % hashlib.md5(phrase).hexdigest())
     if not os.path.isfile(cache_filename):
         return False
     self.play(cache_filename)
     return True
Exemplo n.º 13
0
    def testJoke(self):
        """The Joke module should respond three times and finish with a
        joke from the bundled JOKES.txt."""
        from modules import Joke

        query = "Tell me a joke."
        inputs = ["Who's there?", "Random response"]
        outputs = self.runConversation(query, inputs, Joke)
        self.assertEqual(len(outputs), 3)
        # close the jokes file deterministically instead of leaking the
        # handle (the original used a bare open() with no close)
        with open(jasperpath.data('text', 'JOKES.txt'), 'r') as f:
            allJokes = f.read()
        self.assertTrue(outputs[2] in allJokes)
Exemplo n.º 14
0
    def blitimg2(self, image, size, x, y):
        """Load img/<image>, scale it to *size*, centre it on the
        background, and present the frame at (x, y)."""
        surface = pygame.image.load(jasperpath.data('img/%s' % image))
        surface = pygame.transform.smoothscale(surface, size)
        rect = surface.get_rect()
        rect.center = self.background.get_rect().center
        # keep the same instance attributes the original exposed
        self.img = surface
        self.imgc = rect
        self.background.blit(surface, rect)
        self.screen.blit(self.background, (x, y))
        pygame.display.flip()
Exemplo n.º 15
0
    def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds
            Returns the first matching string or None
        """
        self._logger.info("#### Active Listen Start..... ##### ")

        self._logger.info("Play beep_hi.wav")
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # idiomatic truth test instead of comparing against True
        if self.active_stt_engine.has_mic():
            transcribed = self.active_stt_engine.transcribe(None)
        else:
            transcribed = self.activeListenToAllOptions(THRESHOLD, LISTEN, MUSIC)

        self._logger.info("Play beep_lo.wav")
        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))

        self._logger.info("#### Active Listen End..... ##### ")

        return transcribed
Exemplo n.º 16
0
 def blitimg(self, image, size, color, x, y):
     """Build a fresh background filled with *color*, blit img/<image>
     scaled to *size* and centred on it, then show it on screen at (x, y)."""
     # NOTE: the original declared `global img` / `global imgc`, but only
     # the instance attributes self.img / self.imgc were ever assigned, so
     # the declarations were dead and have been removed.
     self.background = pygame.Surface(screen.get_size())
     self.background = self.background.convert()
     self.background.fill(color)
     self.img = pygame.image.load(jasperpath.data('img/%s' % image))
     self.img = pygame.transform.smoothscale(self.img, size)
     self.imgc = self.img.get_rect()
     self.imgc.center = self.background.get_rect().center
     self.background.blit(self.img, self.imgc)
     self.screen.blit(self.background, (x, y))
     pygame.display.flip()
Exemplo n.º 17
0
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by relaying the
        meaning of life.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    mic.say("%s" %message)
    mic.speaker.play(jasperpath.data('audio/bender/', 'ststart.wav'))
Exemplo n.º 18
0
def handle(self, text, mic, profile):
    """
        Responds to user-input, typically speech text, by relaying the
        meaning of life.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """

    mic.say("%s" %message)
    self.pygm.on = False
    os.system(jasperpath.data('boot.sh &'))
Exemplo n.º 19
0
 def blittxtimgam(self, txt, txts, color, tx, ty, ix, iy, image, size, imgdeg, txtdeg, bcolor):
     """Draw a rotated image and a rotated text label on the background,
     then present the frame.

     txt/txts/color/tx/ty -- label text, font size, colour, and centre.
     image/size/imgdeg/ix/iy -- img/ filename, scale, rotation, centre.
     txtdeg -- label rotation in degrees; bcolor -- background fill.
     """
     sprite = pygame.image.load(jasperpath.data('img/%s' % image))
     sprite = pygame.transform.smoothscale(sprite, size)
     sprite = pygame.transform.rotate(sprite, imgdeg)
     sprite_rect = sprite.get_rect()
     sprite_rect.center = (ix, iy)
     self.img = sprite
     self.imgc = sprite_rect
     self.background.fill(bcolor)
     self.background.blit(sprite, sprite_rect)
     self.font = pygame.font.Font(None, txts)
     label = self.font.render("%s" % txt, True, color)
     label = pygame.transform.rotate(label, txtdeg)
     label_rect = label.get_rect()
     label_rect.center = (tx, ty)
     self.txt = label
     self.textx = label_rect
     self.background.blit(label, label_rect)
     self.screen.blit(self.background, (0, 0))
     pygame.display.flip()
Exemplo n.º 20
0
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases.
    """
    with open(jasperpath.data('keyword_phrases'), mode="r") as f:
        return [stripped
                for stripped in (line.strip() for line in f)
                if stripped]
Exemplo n.º 21
0
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                                 MUSIC=False):
        """
            Records until a second of silence or times out after 3 seconds

            Returns a list of the matching options or None
        """

        RATE = 16000
        CHUNK = 4096
        LISTEN_TIME = 3

        # check if no threshold provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # prepare recording stream
        stream = self._audio.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        frames = []
        # increasing the range # results in longer pause after command
        # generation
        lastN = [THRESHOLD * 1.2 for i in range(30)]

        for i in range(0, RATE / CHUNK * LISTEN_TIME):

            try:
		data = stream.read(CHUNK)
            except IOError, e:
                print "silently ignoring IOError", e
            frames.append(data)
            score = self.getScore(data)

            lastN.pop(0)
            lastN.append(score)

            average = sum(lastN) / float(len(lastN))

            # TODO: 0.8 should not be a MAGIC NUMBER!
            if average < THRESHOLD * 0.8:
                break
Exemplo n.º 22
0
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases.
    """
    phrases = []
    keyword_phrases_file = os.path.join(jasperpath.data(), 'language', l10n.macsen_language, 'keyword_phrases') 
    print "Opening keyword_phrases file %s" % keyword_phrases_file
    with open(keyword_phrases_file, mode="r") as f:
        for line in f:
            phrase = line.strip()
            if phrase:
                phrases.append(phrase)

    return phrases
Exemplo n.º 23
0
def handle(self, text, mic, profile):
    """Run the 'self destruct' gag: show the armed image until the user
    clicks, presses the down arrow, or quits; then count down from 10,
    'detonate', and raise GPIO pin 27."""
    done = False
    mic.speaker.play(jasperpath.data("audio/term/t4.wav"))

    # wait for quit, a mouse click, or the down-arrow key
    while not done:
        # dead `a = self.blitimg(...)` assignment and the stray bare `a`
        # expression from the original removed -- the call alone suffices
        self.blitimg(image, size, black, x, y)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                done = True
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_DOWN]:
            done = True

    self.blitimg(image2, size, black, x, y)
    mic.say("self destruct sequence Activated")

    for i in range(5):
        blink()

    time.sleep(1)
    # spoken countdown from 10 to 1
    for i in range(10, 0, -1):
        self.blittxt(i, 400, white, black)
        mic.say(str(i))
        time.sleep(1)

    self.blitimg(image3, size, black, x, y)
    mic.say("boom!")
    time.sleep(1)
    self.blitimg(image4, size, black, x, y)
    for i in range(5):
        blink2()

    GPIO.output(27, 1)
Exemplo n.º 24
0
def handle(self, text, mic, profile):
    """
        Announces shutdown, plays a HAL sound clip while lighting the LED
        on GPIO pin 12, and then powers the machine off.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    # BUG FIX: the import used to precede the docstring, which turned the
    # string above into a dead expression rather than a real docstring
    import subprocess

    messages = ["shutting down"]
    message = random.choice(messages)

    GPIO.output(12, 1)
    self.blitimg(image, size, black, x, y)
    mic.speaker.play(jasperpath.data('audio/hal/cantalow.wav'))
    GPIO.output(12, 0)

    mic.say(message)
    subprocess.call(["sudo", "shutdown", "now"])
Exemplo n.º 25
0
def getRandomJoke(filename=None):
    """Return a random (setup, punchline) pair parsed from *filename*.

    The jokes file alternates a setup line, a punchline line, and a blank
    separator line. Defaults to the bundled text/JOKES.txt.
    """
    if filename is None:
        # resolved lazily so importing this module does not require the
        # data directory (the original evaluated jasperpath.data() at
        # definition time); the default path is unchanged
        filename = jasperpath.data('text', 'JOKES.txt')

    jokes = []
    start = ""
    end = ""
    # close the file deterministically (the original leaked the handle)
    with open(filename, "r") as jokeFile:
        for line in jokeFile:
            line = line.replace("\n", "")

            if start == "":
                start = line
                continue

            if end == "":
                end = line
                continue

            # separator line reached: record the pair and reset
            jokes.append((start, end))
            start = ""
            end = ""

    # flush a trailing joke, but only if one was actually read -- the
    # original unconditionally appended ("", "") whenever the file ended
    # with a separator line
    if start or end:
        jokes.append((start, end))

    return random.choice(jokes)
Exemplo n.º 26
0
def handle(self, text, mic, profile):
    """
        Fills the screen with black, speaks a message, then repeatedly
        blits a randomly chosen image from the img data dir at random
        positions until the window is quit or clicked.

        Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    done = False

    self.pygm.background.fill(black)
    # NOTE(review): this blits self.background (on self, not self.pygm) --
    # presumably self.pygm.background was intended; confirm against the
    # pygm helper object before changing.
    self.pygm.screen.blit(self.background, (0, 0))
    pygame.display.flip()
    mic.say("%s" %message)

    while done == False:
        for event2 in pygame.event.get():
            if event2.type == pygame.QUIT:
                done = True
            if event2.type == pygame.MOUSEBUTTONDOWN:
                done = True
        # pick a fresh random image for each outer pass
        rf = random.choice(os.listdir(jasperpath.data("img/")))
        # blit it at 30 random positions, polling events between frames so
        # a quit/click is not missed during the half-second sleeps
        for i in range(30):
            for event2 in pygame.event.get():
                if event2.type == pygame.QUIT:
                    done = True
                if event2.type == pygame.MOUSEBUTTONDOWN:
                    done = True
            ix = random.randint(0,480)
            iy = random.randint(0,320)
            self.pygm.blitjimg(ix,iy,rf,size,0)
            pygame.display.flip()
            time.sleep(.5)
Exemplo n.º 27
0
def handle(self, text, mic, profile):
    """Ask the user for confirmation, then run the self-destruct gag:
    wait for a click on the image (or down arrow / quit), count down from
    10, 'detonate', and raise GPIO pin 27."""
    mic.say("are you sure?")
    response = mic.activeListen()
    if yes(response):
        done = False
        mic.speaker.play(jasperpath.data("audio/term/t4.wav"))

        # show the armed image until quit, a left-click on it, or down arrow
        while not done:
            self.blitimg(image, size, black, x, y)
            br = self.img.get_rect()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    done = True
                if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    pos = pygame.mouse.get_pos()
                    if br.collidepoint(pos):
                        done = True
            pressed = pygame.key.get_pressed()
            if pressed[pygame.K_DOWN]:
                done = True
            time.sleep(0.01)

        self.blitimg(image2, size, black, x, y)
        mic.say("self destruct sequence Activated")

        # was five copy-pasted blink() calls
        for _ in range(5):
            blink()

        time.sleep(1)

        # spoken countdown 10..1; the last three numbers are drawn in red
        # (was ten copy-pasted blittxt/say/sleep stanzas)
        for i in range(10, 0, -1):
            blittxt(i, 120, red if i <= 3 else white, black)
            mic.say(str(i))
            time.sleep(1)

        self.blitimg(image3, size, black, x, y)
        mic.say("boom!")
        time.sleep(1)
        self.blitimg(image4, size, black, x, y)

        # was twenty-one copy-pasted blink2() calls
        for _ in range(21):
            blink2()

        GPIO.output(27, 1)
    elif no(response):
        mic.say("pussy!")
Exemplo n.º 28
0
    def setUp(self):
        """Prepare WAV fixture paths and a PocketSphinx STT instance."""
        from stt import PocketSphinxSTT

        self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
        self.time_clip = jasperpath.data('audio', 'time.wav')
        self.stt = PocketSphinxSTT()
Exemplo n.º 29
0
    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                                 MUSIC=False):
        """
            Plays the start beep, delegates recording and transcription to
            the wit.ai client, plays the end beep, and returns wit's
            response. (The local PyAudio recording path that this method
            replaced has been removed; it lived on only as commented-out
            code in the original.)
        """
        RATE = 16000
        CHUNK = 1024
        LISTEN_TIME = 12

        # measure the ambient threshold when none was supplied
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold()

        self._logger.warning("playing hi")
        self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

        # recording and transcription are handled entirely by wit.ai
        wit.init()
        response = wit.voice_query_auto(self.token)
        wit.close()

        self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
        self._logger.warning("playing lo")

        return response
Exemplo n.º 30
0
def zip2():
    """Show img/zip2.png scaled to `size` at (x, y) on a black screen."""
    sprite = pygame.image.load(jasperpath.data('img/zip2.png'))
    sprite = pygame.transform.smoothscale(sprite, size)
    screen.fill(black)
    screen.blit(sprite, (x, y))
    pygame.display.flip()