Example #1
import argparse
import threading
import time

from aiy.board import Board
from aiy.voice.audio import AudioFormat, play_wav, record_file


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    with Board() as board:
        print('Press button to start recording.')
        board.button.wait_for_press()

        done = threading.Event()
        board.button.when_pressed = done.set

        def wait():
            start = time.monotonic()
            while not done.is_set():
                duration = time.monotonic() - start
                print('Recording: %.02f seconds [Press button to stop]' %
                      duration)
                time.sleep(0.5)

        form = AudioFormat(sample_rate_hz=16000,
                           num_channels=1,
                           bytes_per_sample=2)
        record_file(form, filename=args.filename, wait=wait, filetype='wav')
        print('Press button to play recorded sound.')
        board.button.wait_for_press()

        print('Playing...')
        play_wav(args.filename)
        print('Done.')


if __name__ == '__main__':
    main()
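record_file blocks until the wait callback returns. When chunk-level access to the audio is needed instead (for streaming recognition, say), aiy.voice.audio also provides Recorder, which Example #5 below imports as well. A minimal sketch, assuming Recorder.record() yields raw PCM chunks and Recorder.done() stops the stream:

from aiy.voice.audio import AudioFormat, Recorder

fmt = AudioFormat(sample_rate_hz=16000, num_channels=1, bytes_per_sample=2)

recorder = Recorder()
chunks = []
# Collect ~2 seconds of audio: 20 chunks of 0.1 s each.
for i, chunk in enumerate(recorder.record(fmt, chunk_duration_sec=0.1)):
    chunks.append(chunk)
    if i >= 19:
        recorder.done()  # ask the generator to stop after the current chunk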
Example #2
import argparse
import threading

from aiy.board import Board
from aiy.voice.audio import AudioFormat, record_file


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    with Board() as board:
        done = threading.Event()

        form = AudioFormat(sample_rate_hz=16000,
                           num_channels=1,
                           bytes_per_sample=2)
        # interval_predicate is defined elsewhere in the original source;
        # it blocks until the recording interval elapses (see the sketch
        # after this example).
        record_file(form,
                    filename=args.filename,
                    wait=interval_predicate,
                    filetype='wav')
        print('[EQUIBOX] Recorded Interval')
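interval_predicate is not part of this excerpt. Judging by record_file's contract (the wait callable blocks until recording should stop), a hypothetical stand-in could look like this; the name, interval length, and polling step are assumptions:

import time

def interval_predicate(interval_secs=5.0, poll_secs=0.1):
    # Block until the fixed recording interval has elapsed; record_file
    # stops capturing once this callable returns.
    start = time.monotonic()
    while time.monotonic() - start < interval_secs:
        time.sleep(poll_secs)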
Example #3
import io
import logging
import time

from google.cloud import speech
from google.cloud.speech import enums, types

from aiy.board import Board
from aiy.voice.audio import AudioFormat, record_file


def voice_to_text(filename='recording.wav'):
    client = speech.SpeechClient()

    with Board() as board:
        # logging.debug('Press button to start recording.')
        # board.button.wait_for_press()

        # done = threading.Event()
        # board.button.when_pressed = done.set

        def wait():
            start = time.monotonic()
            duration = 0
            while duration < 7:  # was: while not done.is_set():
                duration = time.monotonic() - start
                logging.debug('Recording: %.02f seconds' % duration)
                time.sleep(0.5)

        fmt = AudioFormat(sample_rate_hz=44100,
                          num_channels=1,
                          bytes_per_sample=2)
        record_file(fmt, filename=filename, wait=wait, filetype='wav')
        #logging.debug('Press button to play recorded sound.')
        #board.button.wait_for_press()

        # logging.debug('Playing...')
        # play_wav(filename)
        # logging.debug('Done.')

        # Use the Cloud Speech-to-Text API to transcribe the recording.
        with io.open(filename, 'rb') as audio_file:
            content = audio_file.read()
            audio = types.RecognitionAudio(content=content)

        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            language_code='en-US')
        response = client.recognize(config, audio)

        msg = ""
        for result in response.results:
            msg += result.alternatives[0].transcript
            logging.debug('Transcript: {}'.format(
                result.alternatives[0].transcript))
        return msg
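This example targets the pre-1.0 google-cloud-speech client; the enums and types modules were removed in 2.0. For reference, a sketch of the same request against the current library, where sample_rate_hertz must match the 44100 Hz recording above:

from google.cloud import speech

def transcribe(filename='recording.wav'):
    client = speech.SpeechClient()
    with open(filename, 'rb') as audio_file:
        audio = speech.RecognitionAudio(content=audio_file.read())
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=44100,
        language_code='en-US')
    response = client.recognize(config=config, audio=audio)
    # Concatenate the top alternative of each result, as above.
    return ''.join(result.alternatives[0].transcript
                   for result in response.results)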
Example #4
import array
import logging
import math

from google.assistant.embedded.v1alpha2 import embedded_assistant_pb2

from aiy.assistant import auth_helpers, device_helpers
from aiy.board import Led
from aiy.voice.audio import AudioFormat, Recorder, BytesPlayer

logger = logging.getLogger(__name__)

ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
END_OF_UTTERANCE = embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.DialogStateOut.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.DialogStateOut.CLOSE_MICROPHONE
PLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
AUDIO_SAMPLE_RATE_HZ = 16000
AUDIO_FORMAT = AudioFormat(sample_rate_hz=AUDIO_SAMPLE_RATE_HZ,
                           num_channels=1,
                           bytes_per_sample=2)

def _normalize_audio_buffer(buf, volume_percentage, sample_width=2):
    """Scales 16-bit PCM samples by an exponential volume curve."""
    assert sample_width == 2
    scale = math.pow(2, 1.0 * volume_percentage / 100) - 1
    arr = array.array('h', buf)
    for i in range(0, len(arr)):
        arr[i] = int(arr[i] * scale)
    return arr.tobytes()

# https://developers.google.com/assistant/sdk/reference/rpc/
class AssistantServiceClient:
    def __init__(self, language_code='en-US', volume_percentage=100):
        self._volume_percentage = volume_percentage  # Mutable state.
        self._conversation_state = None              # Mutable state.
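The scale factor in _normalize_audio_buffer is 2^(volume/100) - 1, so 100% leaves samples unchanged (scale 1.0), 0% silences them (scale 0), and intermediate volumes follow an exponential curve. A quick check of the curve at 50%:

import array

buf = array.array('h', [1000, -1000]).tobytes()
out = array.array('h', _normalize_audio_buffer(buf, 50))
print(list(out))  # [414, -414]: scale = 2**0.5 - 1 ≈ 0.414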
Example #5
import requests
import http.client
import urllib.request
import json
import io
from scipy import spatial
from collections import OrderedDict
import socket
import struct
import online_T2C_trans
import re

from aiy.board import Board
from aiy.voice.audio import AudioFormat, play_wav, record_file, Recorder

Lab = AudioFormat(sample_rate_hz=16000, num_channels=1, bytes_per_sample=2)

data_list = []  # raw data
cos_list = []  # vectors for cosine computation
symptom_list = []  # symptom array
sym_name = []  # symptom names (for the consultation)
yesno = []  # yes/no answers
score = [0] * 9  # disease scores
patient = [0] * 39
sym_list = []  # current symptoms
sym_nolist = []  # symptoms ruled out
ill_list = []  # currently possible diseases
sym_count = 0  # symptom count
ill_sym_num = [6, 4, 5, 10, 4, 8, 9, 6, 10]  # number of symptoms per disease
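The scipy.spatial import and cos_list suggest the diagnosis loop scores diseases by cosine similarity between symptom vectors. A minimal sketch with hypothetical vectors; only the spatial.distance.cosine call comes from the import above:

from scipy import spatial

patient_vec = [1, 0, 1, 0, 0]  # hypothetical patient symptom indicators
disease_vec = [1, 1, 1, 0, 0]  # hypothetical reference pattern for one disease
# scipy returns cosine *distance*; similarity is its complement.
similarity = 1 - spatial.distance.cosine(patient_vec, disease_vec)
print('cosine similarity: %.3f' % similarity)  # 0.816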