Example #1
0
 def __init__(self, recognizer):
     """Wire a streaming recognizer to a microphone source.

     The microphone chunk size and sample rate are taken from KaldiConfig
     so captured frames match what the recognizer expects.
     """
     self.recognizer = recognizer
     self.session_running = False
     self.mic = MicrophoneRecorder(chunkSize=KaldiConfig.chunk_length,
                                   rate=KaldiConfig.samp_freq)
Example #2
0
def testTandem():
    """Manual smoke test: record speech until the auto-stop condition, then play it back."""
    from ProjectUtils.Microphone import MicrophoneRecorder, AUTO_DURATION_LIMIT

    mic = MicrophoneRecorder()
    out = Player()

    print("Please speak")
    recording = mic.recordAuto(mode=AUTO_DURATION_LIMIT, threshold=5)
    out.playAudio(recording)
Example #3
0
class PyAudioHelper:
    """Bridges a MicrophoneRecorder audio stream to a streaming recognizer.

    A PyAudio callback forwards every captured chunk to the recognizer;
    intermediate and final transcripts are printed, and each final
    utterance is archived as a WAV file under ./records/.
    """

    def __init__(self, recognizer):
        self.recognizer = recognizer
        self.session_running = False
        # Chunk size / sample rate must match what the recognizer expects.
        self.mic = MicrophoneRecorder(
            chunkSize=KaldiConfig.chunk_length,
            rate=KaldiConfig.samp_freq,
        )

    @staticmethod
    def _cleanText(text):
        """Return stripped text, or None when there is nothing to report."""
        if text is None:
            return None
        text = text.strip()
        return text or None

    def streamCallback(self, in_data, frame_count, time_info, status):
        # PyAudio stream callback: forward each chunk to the recognizer and
        # keep the stream alive only while the session flag is set.
        self.recognizer.sendChunk(in_data)
        return in_data, pyaudio.paContinue if self.session_running else pyaudio.paComplete

    def handleIntermediate(self, text):
        """Print a partial (still-changing) transcript, skipping blanks."""
        text = self._cleanText(text)
        if text is None:
            return
        print(text)
        sys.stdout.flush()

    def handleFinal(self, text):
        """Print a final transcript and archive the utterance to a WAV file."""
        text = self._cleanText(text)
        if text is None:
            return
        print(f"— {text}")
        sys.stdout.flush()
        # Bug fix: str(datetime.now()) contains ':' and spaces, which are
        # invalid in Windows filenames — use a filesystem-safe timestamp.
        stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # Context manager guarantees the file is closed even if a write fails.
        with wave.open(f"./records/record{stamp}.wav", 'wb') as file:
            file.setnchannels(1)
            file.setsampwidth(self.mic.getSampleSize())
            file.setframerate(KaldiConfig.samp_freq)
            file.writeframes(bytearray(self.recognizer.record))

    def startStream(self):
        """Begin a session: async microphone capture plus the recognizer's blocking loop."""
        self.session_running = True
        # The microphone's startStream launches asynchronous listening.
        self.mic.startStream(callback=self.streamCallback)
        # recognizer.start runs a blocking loop.  Starting the recognizer is
        # slow and occupies a whole port, so each recognizer start/stop must
        # correspond to an entire session; within one session the context is
        # reset between sentences (i.e. on sufficiently long pauses).
        self.recognizer.start()

    def stopStream(self):
        """End the session and release the recognizer and the microphone stream."""
        self.session_running = False
        self.recognizer.stop()
        self.mic.stopStream()
Example #4
0
async def main():
    """Consume AUDIO_CHUNK messages from the manager websocket and run
    speaker identification on the accumulated voice buffer.

    Reconnects forever: on every exit path (refused connection, unexpected
    error, clean socket close) it sleeps RECONNECT_TIMEOUT and schedules a
    fresh instance of itself as a new task.
    """
    global voice

    try:
        print("Trying to connect...", end=' ', flush=True)

        async with aiohttp.ClientSession() as session, session.ws_connect(CFG.MGR_WS_URI) as mgr:
            print("connected", flush=True)

            # Ask the manager to forward raw audio chunks to this client.
            await mgr.send_bytes(Message(type_=Message.SUBSCRIBE, data=Message.AUDIO_CHUNK).dumps())

            async for ws_msg in mgr:
                if ws_msg.type == aiohttp.WSMsgType.BINARY:
                    message: Message = Message.loads(ws_msg.data)

                    if message.type == Message.AUDIO_CHUNK:
                        # Append signed 16-bit samples to the rolling buffer.
                        chunk = array("h", message.data)
                        voice.extend(chunk)

                        # Identify only once at least sec_threshold seconds
                        # of audio have been buffered.
                        if len(voice) // rate >= sec_threshold:
                            voice = MicrophoneRecorder.trim(voice, 500)
                            voice = MicrophoneRecorder.normalize(voice, 16384)

                            # Too little remains after trimming silence —
                            # keep buffering instead of identifying.
                            if len(voice) < len(chunk):
                                continue

                            voice = MicrophoneRecorder.convertToWAVFile(voice, 2, 16000)
                            name, _ = identifier.identifyViaFile(voice, unknownThreshold=0.25)

                            # Reset the buffer for the next utterance.
                            voice = array("h")
                            print(name)

                            # NOTE(review): type_ is RECOGNIZED_FACE_ROI even
                            # though this is voice identification — confirm
                            # this message type is intentional.
                            output_message = Message(
                                data=name,
                                type_=Message.RECOGNIZED_FACE_ROI,
                                device_id=message.device_id
                            )

                            # Fire-and-forget send so the receive loop keeps up.
                            asyncio.get_event_loop().create_task(mgr.send_bytes(output_message.dumps()))

                elif ws_msg.type == aiohttp.WSMsgType.ERROR:
                    print('ws connection closed with exception %s' % mgr.exception())

    except ConnectionRefusedError:
        print("refused", flush=True)

    except KeyboardInterrupt:
        return

    except Exception as e:
        print(f"Unexpected exception: {e}", flush=True)

    # Reschedule as a fresh task rather than looping in place, so each
    # reconnect attempt starts from a clean coroutine.
    await asyncio.sleep(CFG.RECONNECT_TIMEOUT)
    asyncio.get_event_loop().create_task(main())
	def __init__(self, address: tuple, chunkSize=4096):
		"""Prepare a microphone source and a TCP socket for streaming audio.

		The socket is created here but not yet connected; `address` is the
		(host, port) pair to connect to later.
		"""
		self.microphone = MicrophoneRecorder()
		self.address = address
		self.chunkSize = chunkSize
		# Plain TCP stream socket; the connection is established elsewhere.
		self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Example #6
0
    def __init__(self,
                 sdkMode=False,
                 key=AzureCredentials.SUBSCRIPTION_KEY,
                 region=AzureCredentials.SERVICE_REGION,
                 language=AzureConfig.LANG_RUS,
                 responseFormat=AzureConfig.RESPONSE_DETAILED):
        """Set up Azure credentials, response format, and a microphone input.

        When `sdkMode` is true an SDK recognizer is created eagerly;
        otherwise `recognizer` stays None and the REST endpoints from
        AzureConfig are used instead.
        """
        super().__init__(language=language)

        self.key = key
        self.region = region
        self.format = responseFormat
        self.microphone = MicrophoneRecorder()

        # Eager SDK recognizer only when explicitly requested.
        if sdkMode:
            self.recognizer = self._initRecognizer(self.key, self.region)
        else:
            self.recognizer = None

        self.REST = {
            "base": AzureConfig.REST_BASE_URL,
            "path": AzureConfig.REST_PATH,
        }
	def __init__(self):
		"""Create the microphone recorder this instance reads audio from."""
		self.microphone = MicrophoneRecorder()