Example 1
def main():
    """Get access token, play speech file, convert to text then text to synthesized speech"""
    logger.info('getting an access_token')
    oauth = AccessToken(url=config.URL_OAUTH, app_key=config.APP_KEY,
                        app_secret=config.APP_SECRET, scopes=config.APP_SCOPES,
                        grant_type=config.APP_GRANT_TYPE, new=False)
    #access_token = oauth.post()
    access_token = oauth.access_token

    logger.info('access_token= %s', access_token)

    if len(sys.argv) == 2:
        #file path given as a command-line argument
        audio_file = sys.argv[1]
    else:
        #no argument, so use the default audio file
        audio_file = config.AUDIO_FILE_DEFAULT
    audio_type = get_content_type(audio_file)

    #invoke a system command to play the source audio file
    os.system('aplay ' + audio_file)
    logger.info('speechToText with audio file= %s audio_type= %s', audio_file, audio_type)

    with open(audio_file, 'rb') as f:
        audio_data = f.read()

    stt = SpeechToText(audio_data=audio_data, content_type=audio_type, access_token=access_token,
                       headers=None, url=config.URL_SPEECH, producer=None)
    stt.post()

    logger.info('number of transcriptions= %s', stt.number_of_transcriptions)
    for index in range(stt.number_of_transcriptions):
        logger.info('transcription %s: %s', index, stt.transcription[index])

    #use the transcription as text input for textToSpeech API request
    say_this_text = 'You said. ' + stt.transcription[0]
    logger.info('say_this_text= %s', say_this_text)

    tts = TextToSpeech(text=say_this_text, accept='audio/x-wav', content_type='text/plain',
                       access_token=access_token, headers=None,
                       url=config.URL_TTS)

    audio_data = tts.post()
    tts_file = config.AUDIO_FILE_TTS
    with open(tts_file, 'wb') as f:
        f.write(audio_data)

    #invoke a system command to play the tts audio file
    os.system('aplay ' + tts_file)
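Examples 1 and 8 pass a local audio file path to get_content_type, whose implementation is not shown here. A minimal sketch of such a helper, assuming it only guesses the MIME type from the file extension (the code below is illustrative, not the project's actual implementation):

import mimetypes

def get_content_type(file_path):
    """Guess a MIME type such as 'audio/x-wav' from the file extension (hypothetical helper)."""
    content_type, _ = mimetypes.guess_type(file_path)
    return content_type or 'application/octet-stream'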
Example 2
    def _get_formatted_response_text(self):
        response = self.last_response
        ct = get_content_type(response)
        txt = response.text
        try:
            if ct == 'application/json':
                j = response.json()
                txt = json.dumps(j, indent=2)
            elif ct in {'text/xml', 'application/xml'}:
                root = etree.fromstring(response.text)
                txt = etree.tostring(root,
                                     encoding='unicode',
                                     pretty_print=True)
            elif ct == 'text/html':  # TODO: Add css path filters
                root = html.fromstring(response.text)
                txt = etree.tostring(root,
                                     encoding='unicode',
                                     pretty_print=True)
            elif not response.text:
                txt = 'Empty Response'
        except Exception as e:
            log.warning('Failed to parse %s response: %s', ct, e)
            txt = 'Failed to parse response.'

        return txt
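Examples 2, 3, 4, 6 and 7 call get_content_type on a requests.Response rather than a file path. A minimal sketch, assuming the helper simply strips parameters such as charset from the Content-Type header (again illustrative, not the project's actual implementation):

def get_content_type(response):
    """Return the bare MIME type of a requests.Response, e.g. 'application/json' (hypothetical helper)."""
    header = response.headers.get('Content-Type', '')
    return header.split(';')[0].strip().lower()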
Example 3
    def update_webview(self, response: requests.Response):
        """Loads the webview, or show error message if webkit unavailable."""
        ct = get_content_type(response)
        if not (response.request.method == 'GET' and response.ok
                and ct == 'text/html'):
            # self.response_webview.try_close()
            return

        # TODO: Enable running of javascript
        self.response_webview.load_html(response.text)
Example 4
    def _highlight_syntax(self, txt: str):
        lang_id = get_language_for_mime_type(
            get_content_type(self.last_response))
        buf: GtkSource.Buffer = self.response_text.get_buffer()
        if lang_id == 'html':
            lang_id = 'xml'  # Full HTML highlighting is very slow; it freezes the UI.

        # Disable highlighting for files with really long lines
        if any(len(line) > 5000 for line in StringIO(txt)):
            lang_id = 'text'

        lang = self.lang_manager.get_language(lang_id)
        current_lang: GtkSource.Language = buf.get_language()
        if not current_lang or current_lang.get_id() != lang_id:
            buf.set_language(lang)
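get_language_for_mime_type is also not shown; presumably it maps a MIME type to a GtkSource language id. A possible sketch, assuming a plain lookup table with 'text' as the fallback (illustrative only):

def get_language_for_mime_type(mime_type):
    """Map a MIME type to a GtkSource language id, falling back to plain text (hypothetical helper)."""
    mapping = {
        'application/json': 'json',
        'text/html': 'html',
        'text/xml': 'xml',
        'application/xml': 'xml',
    }
    return mapping.get(mime_type, 'text')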
Example 5
    def set(self, instance, value, **kwargs):
        #just in case an old file is still around
        self.field.unset(instance, **kwargs)
        
        putils = getToolByName(instance, 'plone_utils')
        portal = getToolByName(instance, "portal_url").getPortalObject()
        storage_directory = putils.normalizeString(getattr(instance, 'title', 'no-title'))

        ftp = FTP(flash_conf.ftp_address)
        ftp.login(flash_conf.ftp_user, flash_conf.ftp_password)
        ftp.cwd(flash_conf.ftp_media_directory)
        ftp.set_pasv(flash_conf.ftp_use_passive_mode)
        
        if storage_directory not in ftp.nlst():
            ftp.mkd(storage_directory)
        
        ftp.cwd(storage_directory)
        
        media_list = ftp.nlst()
        has_media_set = True
        folder = current_media = False
        filename = value.filename
        if getattr(instance, 'streaming_url', None):
            folder, current_media = instance.streaming_url.split("/")
        else:
            has_media_set = False
         
        #check if already has a file set
        if has_media_set:
            #always just delete the old file...
            if folder == storage_directory and current_media in media_list:
                ftp.delete(current_media)
                media_list.remove(current_media)
            
        #check if video of same name exists on server already
        new_filename = filename
        count = 1
        while new_filename in media_list:
            new_filename = filename[:-4] + "-" + str(count) + "." + filename[-3:]
            count += 1
        
        ftp.storbinary("STOR %s" % new_filename, value)
        
        setattr(aq_base(instance), 'streaming_url', storage_directory + "/" + new_filename)
        setattr(aq_base(instance), 'original_content_type', get_content_type(value))
        instance._p_changed = 1 #this is what makes the change persistent
Example 6
    def _on_response_filter_changed(self, entry: Gtk.SearchEntry):
        filter_text = entry.get_text()
        if filter_text == '':
            self._set_response_text()
            return

        ct = get_content_type(self.last_response)
        try:
            if ct == 'application/json':
                path_expr = jsonpath_rw.parse(filter_text)
                j = self.last_response.json()
                match_text = json.dumps(
                    [match.value for match in path_expr.find(j)],
                    indent=4) or 'No matches found'
                self.response_text.get_buffer().set_text(match_text)
            elif ct in {'text/xml', 'application/xml'}:
                root = etree.fromstring(self.last_response.text)
                matches = root.xpath(filter_text)
                matches_root = etree.Element('matches')
                for m in matches:
                    matches_root.append(m)

                matches_html = etree.tostring(matches_root,
                                              encoding='unicode',
                                              pretty_print=True)
                self.response_text.get_buffer().set_text(matches_html)
            elif ct == 'text/html':
                root = html.fromstring(self.last_response.text)
                matches = root.xpath(filter_text)
                matches_root = etree.Element('matches')
                for m in matches:
                    matches_root.append(m)

                matches_html = etree.tostring(matches_root,
                                              encoding='unicode',
                                              pretty_print=True)
                self.response_text.get_buffer().set_text(matches_html)
            else:
                log.warning(
                    'Got unexpected content type %s when filtering response.',
                    ct)
        except Exception as e:
            log.debug('Failed to filter response: %s', e)
Example 7
    def _populate_response_text_context_menu(self, view: Gtk.TextView,
                                             popup: Gtk.Widget):
        if type(popup) is not Gtk.Menu:
            return

        menu: Gtk.Menu = popup

        word_wrap_toggle: Gtk.MenuItem = Gtk.MenuItem.new_with_label(
            'Toggle word wrap')
        word_wrap_toggle.connect('activate', self._word_wrap_toggle_clicked)
        menu.append(word_wrap_toggle)

        ct = get_content_type(self.last_response)
        if self.last_response and ct in {
                'application/json', 'text/html', 'text/xml', 'application/xml'
        }:
            show_filter_toggle: Gtk.MenuItem = Gtk.MenuItem.new_with_label(
                'Show response filter')
            show_filter_toggle.connect('activate',
                                       self._show_filter_toggle_clicked)
            menu.append(show_filter_toggle)

        menu.show_all()
Example 8
def main():
    """Demo: Get access_token, capture audio, convert to text, act on text, play response."""
    #initialize so the cleanup handler can safely test them if an early error occurs
    audio_capture = None
    speechToText_thread = None
    led = None
    try:
        logger.info('\n')
        #for demo we will control an on board PI led - visible feedback
        led = LEDThread()
        led.start()
        logger.info('LED control thread has started')

        oauth = AccessToken(url=config.URL_OAUTH, app_key=config.APP_KEY,
                            app_secret=config.APP_SECRET, scopes=config.APP_SCOPES,
                            grant_type=config.APP_GRANT_TYPE, new=False)

        access_token = oauth.access_token

        #simple loop on keyboard entry - replace with command line args, external stimuli, etc.
        while True:
            c = raw_input('--> enter any key to start audio capture (q to quit): ')
            logger.info('--> %s', c)
            if c == 'q':
                led.stop()
                exit()
            audio_stream = deque()
            audio_capture = None
            audio_type = CAPTURE_AUDIO_TYPE

            file_provided = False
            if len(sys.argv) == 2:
                #a file was provided on the command line
                file_provided = True
                #for illustration purpose using our capture thread with a file
                logger.info('audio_stream is a file')
                #audio_stream = 'static' + os.sep + 'homeBy6.wav'
                audio_stream = sys.argv[1]
                audio_type = get_content_type(audio_stream)

                audio_capture = AudioCapture(audio_stream)
                audio_capture.daemon = USE_DAEMON
                audio_capture.start()
                audio_capture.join()
                audio_stream = audio_capture.audio_stream
            else:
                logger.info('audio stream is audio input')
                #setup and start audio capture thread
                audio_capture = AudioCapture(audio_stream)
                audio_capture.daemon = USE_DAEMON
                audio_capture.start()
                logger.info('\n\n------------------------ > SPEAK NOW <-------------------')


            speechToText_thread = SpeechToTextThread(audio_stream=audio_stream,
                                                     access_token=access_token,
                                                     audio_type=audio_type, producer=audio_capture)
            speechToText_thread.daemon = USE_DAEMON
            speechToText_thread.start()

            #wait for speechToText_thread to complete
            speechToText_thread.join(timeout=240)

            transcription = speechToText_thread.api.transcription
            logger.info('transcription=  %s', transcription)

            interactive = transcription_processor(str(transcription), led)

            if interactive is None:
                logger.info('command unknown or silence')
                interactive = 'I do not understand the command'
            else:
                logger.info('command is known: %s', interactive)

            #convert command response text to speech and play it
            tts = TextToSpeech(interactive, 'audio/x-wav', 'text/plain', access_token, None,
                               config.URL_TTS)
            #tts.headers['X-Arg: VoiceName'] = 'Mike'
            #tts.headers['X-Arg: Tempo'] = 0
            audio_data = tts.post()
            audio_play(audio_output(), audio_data)
            if file_provided:
                #only a single pass when a file was provided; the raise drops into the cleanup handler below
                raise Exception


    except Exception:
        if audio_capture and audio_capture.isAlive():
            audio_capture.stop()

        if speechToText_thread and speechToText_thread.isAlive():
            speechToText_thread.stop()

        if led and led.isAlive():
            led.stop()
Example 9
def add(file, name, rotate):
    print("Method", "facerec_service.add")

    content_type = utils.get_content_type(file)
    file_type = content_type[0]
    file_extension = content_type[1]

    if file_type not in ['image', 'video']:
        return json.dumps({'result': 'error', 'message': 'Invalid File!'})

    datetime_format = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = "add_{}_{}_1.{}".format(name, datetime_format,
                                       file_extension).lower()
    file.save(os.path.join(IMAGE_UPLOAD_DIR, filename))
    file = os.path.join(IMAGE_UPLOAD_DIR, filename)
    utils.create_directory(os.path.join(IMAGE_ORIGINAL_DIR, name))

    probe = ffmpeg.probe(file)
    video_stream = next(
        (stream
         for stream in probe['streams'] if stream['codec_type'] == 'video'),
        None)
    if video_stream and 'rotate' in video_stream.get('tags', {}):
        rotate = video_stream['tags']['rotate']

    filename_list = []
    if file_type == 'image':
        image = cv2.imread(file)

        if rotate is not None:
            image = utils.rotate_cv2_image(image, rotate)

        filename = name + "/" + "{}_{}.{}".format(name, datetime_format,
                                                  file_extension).lower()
        filename_list.append(filename)
        utils.save_cv2_image(IMAGE_ORIGINAL_DIR, filename, image)

    else:
        cap = cv2.VideoCapture(file)
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        frame_count = min(frame_count, ADD_FRAMES_PER_VIDEO)
        frame_no = 0
        count = 1

        while frame_no < frame_count:
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
            ret, frame = cap.read()
            if ret:
                if rotate is not None:
                    frame = utils.rotate_cv2_image(frame, rotate)

                filename = name + "/" + "{}_{}_{}.jpg".format(
                    name, datetime_format, count).lower()
                filename_list.append(filename)
                utils.save_cv2_image(IMAGE_ORIGINAL_DIR, filename, frame)
                count += 1

            frame_no += 1

        #release the video capture once the sample frames have been saved
        cap.release()

    if OCI_STORAGE_SYNC:
        pool = multiprocessing.pool.ThreadPool(processes=3)
        pool.apply_async(oci_utils.upload_to_object_storage,
                         args=[
                             config, IMAGE_ORIGINAL_DIR,
                             OCI_STORAGE_BUCKET_NAME, filename_list
                         ])
        pool.close()

    return json.dumps({'result': 'success', 'message': 'File uploaded!'})
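Unlike the earlier examples, Example 9 expects utils.get_content_type to return a pair such as ('image', 'jpg'). A minimal sketch, assuming the uploaded object is a Werkzeug/Flask FileStorage with mimetype and filename attributes (an assumption, not confirmed by the source):

import os

def get_content_type(upload):
    """Return (major MIME type, file extension) for an uploaded file, e.g. ('image', 'jpg') (hypothetical helper)."""
    major = (upload.mimetype or '').split('/')[0]                  # 'image', 'video', ...
    extension = os.path.splitext(upload.filename)[1].lstrip('.')   # 'jpg', 'mp4', ...
    return major, extension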