Example #1
    def move_front(self):
        if get_env_value('DEVICE') == 'PI':
            from lib.PiControls import PiControls
            pi = PiControls()
            pi.move_forward()
        else:
            print("sorry! you don't have pi")
Example #2
    def turn_on_flash(self):
        if get_env_value('DEVICE') == 'PI':
            from lib.PiControls import PiControls
            pi = PiControls()
            pi.flash_white()
        else:
            print("sorry! you don't have pi")
Example #3
    def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()

        # Log every capture-capable input device so a valid device index
        # can be chosen for AUDIO_DEVICE_INDEX2.
        info = self._audio_interface.get_host_api_info_by_index(0)
        numdevices = info.get('deviceCount')
        for i in range(numdevices):
            device = self._audio_interface.get_device_info_by_host_api_device_index(0, i)
            if device.get('maxInputChannels') > 0:
                print("Input Device id", i, "-", device.get('name'))

        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1,
            rate=self._rate,
            input=True,
            frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
            # Cast to int in case the environment helper returns the index as a string.
            input_device_index=int(get_env_value('AUDIO_DEVICE_INDEX2')))

        self.closed = False

        return self
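This __enter__ relies on two MicrophoneStream methods the example does not show: _fill_buffer, registered above as the PyAudio stream callback, and generator(), which Example #7 later consumes. A minimal sketch of both, following the pattern of Google's streaming-recognition sample and assuming __init__ created an internal queue as self._buff = queue.Queue() (the attribute name and the queue import are assumptions):

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        # PyAudio invokes this on its own thread; just enqueue the raw chunk.
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        # Yield accumulated audio chunks until the stream is closed.
        while not self.closed:
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Drain anything else already buffered so each request stays large.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)

A matching __exit__ would typically set self.closed = True and push None into the queue so the generator terminates cleanly.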
Example #4
    def listen_speech(self):
        # list the available microphones so a valid device index can be chosen
        for index, name in enumerate(sr.Microphone.list_microphone_names()):
            print("Microphone with name \"{1}\" found for `Microphone(device_index={0})`".format(index, name))
        # obtain audio from the microphone
        r = sr.Recognizer()
        try:
            with sr.Microphone(device_index=int(get_env_value('AUDIO_DEVICE_INDEX1'))) as source:
                r.adjust_for_ambient_noise(source, duration=1)
                print("Say something!")
                if get_env_value('DEVICE') == 'PI':
                    from lib.PiControls import PiControls
                    pi = PiControls()
                    pi.flash_red()
                else:
                    print("sorry! can't blink red you don't have pi")
                audio = r.listen(source, phrase_time_limit=2)
        except Exception as e:
            print(e)
            # Without captured audio there is nothing to recognize below.
            return
        if get_env_value('DEVICE') == 'PI':
            from lib.PiControls import PiControls
            pi = PiControls()
            pi.flash_yellow()
        else:
            print("sorry! can't blink yellow you don't have pi")
        print("converting")

        # recognize speech using Google Speech Recognition
        try:
            # for testing purposes, we're just using the default API key
            # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
            # instead of `r.recognize_google(audio)`
            text = r.recognize_google(audio)
            print("Speech Recognition thinks you said " + text)
            self.ai.get_reply_as_speech(text)
        except sr.UnknownValueError:
            print("Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print("Could not request results from Speech Recognition service; {0}".format(e))
        if get_env_value('DEVICE') == 'PI':
            from lib.PiControls import PiControls
            pi = PiControls()
            pi.flash_blue()
        else:
            print("sorry! can't blink blue you don't have pi")
Example #5
    def __del__(self):
        if get_env_value('DEVICE') == 'PI':
            from lib.PiControls import PiControls
            pi = PiControls()
            pi.no_led_flash()
        else:
            print("sorry! can't turn off the LEDs, you don't have pi")
        print(PrintColors.WARNING + "killing ngrok")
        self.process.kill()
Example #6
    def __start_avconv(self):
        stream_url = self.__get_stream_url()
        # Capture /dev/video0 plus the configured PulseAudio source and push
        # an FLV stream (h264 video, mp3 audio) to the streaming endpoint.
        LiveStream.process = subprocess.Popen([
            'avconv', '-f', 'video4linux2', '-r', '10', '-i', '/dev/video0',
            '-f', 'pulse', '-ac', '1', '-i',
            get_env_value('AUDIO_DEVICE'), '-vcodec', 'h264', '-preset',
            'medium', '-acodec', 'mp3', '-ar', '44100', '-threads', '1',
            '-qscale', '3', '-b:a', '128k', '-b:v', '500k', '-minrate', '500k',
            '-g', '60', '-pix_fmt', 'yuv420p', '-f', 'flv', stream_url
        ], stdout=subprocess.PIPE)
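The __get_stream_url helper is not shown. One plausible sketch, assuming the ingest endpoint and stream key also come from the environment (STREAM_URL and STREAM_KEY are hypothetical names, not confirmed by the source):

    def __get_stream_url(self):
        # Hypothetical: compose the RTMP target from two environment values.
        return '{}/{}'.format(get_env_value('STREAM_URL'),
                              get_env_value('STREAM_KEY'))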
Example #7
    def stream_speach(self):
        try:
            if get_env_value('DEVICE') == 'PI':
                from lib.PiControls import PiControls
                pi = PiControls()
                pi.flash_blue()
            else:
                print("sorry! can't blink blue you don't have pi")

            print('live speech recognition started')
            print(threading.enumerate())
            # See http://g.co/cloud/speech/docs/languages
            # for a list of supported languages.
            language_code = 'en-US'  # a BCP-47 language tag
            credentials = service_account.Credentials.from_service_account_file(
                'google-cloud.json')
            client = speech.SpeechClient(credentials=credentials)
            config = types.RecognitionConfig(
                encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
                sample_rate_hertz=RATE,
                language_code=language_code)
            streaming_config = types.StreamingRecognitionConfig(
                config=config,
                interim_results=True)

            with MicrophoneStream(RATE, CHUNK) as stream:
                audio_generator = stream.generator()
                requests = (types.StreamingRecognizeRequest(audio_content=content)
                            for content in audio_generator)

                responses = client.streaming_recognize(streaming_config, requests)

                # Now, put the transcription responses to use.
                self.listen_print_loop(responses)
        except Exception as e:
            # Keep the recognizer alive: log the error and restart the stream.
            print('exception occurred:', e)
            self.stream_speach()
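listen_print_loop is also not shown in this example. A minimal sketch of how the streaming responses might be consumed, loosely following Google's streaming-recognition sample; handing the final transcript to self.ai (as listen_speech() does in Example #4) is an assumption:

    def listen_print_loop(self, responses):
        for response in responses:
            if not response.results:
                continue
            # The first result is the most likely hypothesis for this chunk.
            result = response.results[0]
            if not result.alternatives:
                continue
            transcript = result.alternatives[0].transcript
            if result.is_final:
                print(transcript)
                # Assumption: reply to the final transcript, as in listen_speech().
                self.ai.get_reply_as_speech(transcript)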