Example #1
    def test_topics(self):
        """Check get_ methods for topics"""
        siteId = "testSiteId"
        requestId = "testRequestId"
        intentName = "testIntent"
        wakewordId = "testWakeWord"

        # AudioFrame
        self.assertTrue(AudioFrame.is_topic(AudioFrame.topic(siteId=siteId)))
        self.assertEqual(
            AudioFrame.get_siteId(AudioFrame.topic(siteId=siteId)), siteId)

        # AudioPlayBytes
        self.assertTrue(
            AudioPlayBytes.is_topic(
                AudioPlayBytes.topic(siteId=siteId, requestId=requestId)))
        self.assertEqual(
            AudioPlayBytes.get_siteId(
                AudioPlayBytes.topic(siteId=siteId, requestId=requestId)),
            siteId,
        )
        self.assertEqual(
            AudioPlayBytes.get_requestId(
                AudioPlayBytes.topic(siteId=siteId, requestId=requestId)),
            requestId,
        )

        # AudioPlayFinished
        self.assertTrue(
            AudioPlayFinished.is_topic(AudioPlayFinished.topic(siteId=siteId)))
        self.assertEqual(
            AudioPlayFinished.get_siteId(
                AudioPlayFinished.topic(siteId=siteId)), siteId)

        # NluIntent
        self.assertTrue(
            NluIntent.is_topic(NluIntent.topic(intentName=intentName)))
        self.assertEqual(
            NluIntent.get_intentName(NluIntent.topic(intentName=intentName)),
            intentName)

        # HotwordDetected
        self.assertTrue(
            HotwordDetected.is_topic(
                HotwordDetected.topic(wakewordId=wakewordId)))
        self.assertEqual(
            HotwordDetected.get_wakewordId(
                HotwordDetected.topic(wakewordId=wakewordId)),
            wakewordId,
        )
Example #2
    def __init__(
        self,
        client,
        transcriber: Transcriber,
        siteIds: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        make_recorder: typing.Optional[
            typing.Callable[[], VoiceCommandRecorder]] = None,
    ):
        self.client = client
        self.transcriber = transcriber
        self.siteIds = siteIds or []
        self.enabled = enabled

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels

        # No timeout
        self.make_recorder = make_recorder or (
            lambda: WebRtcVadRecorder(max_seconds=None))

        # WAV buffers for each session
        self.session_recorders: typing.Dict[
            str, VoiceCommandRecorder] = defaultdict(VoiceCommandRecorder)

        # Topic to listen for WAV chunks on
        self.audioframe_topics: typing.List[str] = []
        for siteId in self.siteIds:
            self.audioframe_topics.append(AudioFrame.topic(siteId=siteId))

        self.first_audio: bool = True
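The default factory above builds a WebRtcVadRecorder with no maximum recording length. A minimal sketch of a custom factory that could be passed in as make_recorder instead, assuming WebRtcVadRecorder comes from the rhasspy-silence package (the 20-second cap is an arbitrary illustrative value):

from rhasspysilence import WebRtcVadRecorder

def make_capped_recorder() -> WebRtcVadRecorder:
    # Stop a voice command after at most 20 seconds instead of never timing out
    return WebRtcVadRecorder(max_seconds=20)

# passed to the constructor as make_recorder=make_capped_recorder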
Example #3
    def on_connect(self, client, userdata, flags, rc):
        """Connected to MQTT broker."""
        try:
            topics = [HotwordToggleOn.topic(), HotwordToggleOff.topic()]

            if self.audioframe_topics:
                # Specific siteIds
                topics.extend(self.audioframe_topics)
            else:
                # All siteIds
                topics.append(AudioFrame.topic(siteId="#"))

            for topic in topics:
                self.client.subscribe(topic)
                _LOGGER.debug("Subscribed to %s", topic)
        except Exception:
            _LOGGER.exception("on_connect")
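This handler uses the paho-mqtt 1.x callback signature (client, userdata, flags, rc). A minimal, self-contained wiring sketch with a stand-in service class (DemoHermesService is illustrative, not from the original source; real services also register an on_message handler and keep more state):

import paho.mqtt.client as mqtt
from rhasspyhermes.wake import HotwordToggleOff, HotwordToggleOn

class DemoHermesService:
    """Stand-in service showing how on_connect is attached."""

    def __init__(self, client):
        self.client = client

    def on_connect(self, client, userdata, flags, rc):
        for topic in [HotwordToggleOn.topic(), HotwordToggleOff.topic()]:
            self.client.subscribe(topic)

mqtt_client = mqtt.Client()
service = DemoHermesService(mqtt_client)
mqtt_client.on_connect = service.on_connect
mqtt_client.connect("localhost", 1883)
mqtt_client.loop_forever()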
Example #4
    def __init__(
        self,
        client,
        record_command: str,
        sample_rate: int,
        sample_width: int,
        channels: int,
        chunk_size: int = 2048,
        siteId: str = "default",
    ):
        self.client = client
        self.record_command = record_command
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels
        self.chunk_size = chunk_size
        self.siteId = siteId

        self.audioframe_topic: str = AudioFrame.topic(siteId=self.siteId)
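A minimal sketch of how raw PCM produced by record_command is typically wrapped in a WAV container and published on the audioframe_topic computed above (the method name publish_chunk and its placement are assumptions, not from the original source):

    # Sketch only; assumes "import io" and "import wave" at module level.
    def publish_chunk(self, chunk: bytes) -> None:
        """Wrap one raw PCM chunk as WAV and publish it as an AudioFrame."""
        with io.BytesIO() as wav_buffer:
            wav_file = wave.open(wav_buffer, "wb")
            with wav_file:
                wav_file.setframerate(self.sample_rate)
                wav_file.setsampwidth(self.sample_width)
                wav_file.setnchannels(self.channels)
                wav_file.writeframes(chunk)

            self.client.publish(self.audioframe_topic, wav_buffer.getvalue())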
Example #5
    def __init__(
        self,
        client,
        transcriber_factory: typing.Callable[[], Transcriber],
        siteIds: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        recorder_factory: typing.Optional[
            typing.Callable[[], VoiceCommandRecorder]
        ] = None,
        session_result_timeout: float = 1,
    ):
        self.client = client
        self.transcriber_factory = transcriber_factory
        self.siteIds = siteIds or []
        self.enabled = enabled

        # Seconds to wait for a result from transcriber thread
        self.session_result_timeout = session_result_timeout

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels

        # No timeout on silence detection
        def make_webrtcvad():
            return WebRtcVadRecorder(max_seconds=None)

        self.recorder_factory = recorder_factory or make_webrtcvad

        # WAV buffers for each session
        self.sessions: typing.Dict[str, TranscriberInfo] = {}
        self.free_transcribers: typing.List[TranscriberInfo] = []

        # Topic to listen for WAV chunks on
        self.audioframe_topics: typing.List[str] = []
        for siteId in self.siteIds:
            self.audioframe_topics.append(AudioFrame.topic(siteId=siteId))

        self.first_audio: bool = True
Example #6
    def __init__(
        self,
        client,
        porcupine: typing.Any,
        model_ids: typing.List[str],
        wakeword_ids: typing.List[str],
        sensitivities: typing.List[float],
        siteIds: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
    ):
        self.client = client
        self.porcupine = porcupine
        self.wakeword_ids = wakeword_ids
        self.model_ids = model_ids
        self.sensitivities = sensitivities
        self.siteIds = siteIds or []
        self.enabled = enabled

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels

        self.chunk_size = self.porcupine.frame_length * 2
        self.chunk_format = "h" * self.porcupine.frame_length

        # Topics to listen for WAV chunks on
        self.audioframe_topics: typing.List[str] = []
        for siteId in self.siteIds:
            self.audioframe_topics.append(AudioFrame.topic(siteId=siteId))

        self.first_audio: bool = True

        self.audio_buffer = bytes()
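A minimal sketch of how chunk_size, chunk_format, and audio_buffer are used to feed fixed-size frames of 16-bit samples to Porcupine (the method name is an assumption, and porcupine.process() is assumed to return the detected keyword index or -1, as in the pvporcupine API):

    # Sketch only; assumes "import struct" at module level.
    def process_buffer(self) -> None:
        while len(self.audio_buffer) >= self.chunk_size:
            frame = self.audio_buffer[:self.chunk_size]
            self.audio_buffer = self.audio_buffer[self.chunk_size:]

            # frame_length 16-bit samples per Porcupine frame (2 bytes each)
            unpacked = struct.unpack_from(self.chunk_format, frame)
            keyword_index = self.porcupine.process(unpacked)
            if keyword_index >= 0:
                wakeword_id = self.wakeword_ids[keyword_index]
                # A HotwordDetected message would be published here on
                # HotwordDetected.topic(wakewordId=wakeword_id)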
Example #7
site_id = "testSiteId"  # assumed module-level constant; the value is illustrative

def test_audio_frame():
    """Test AudioFrame."""
    assert AudioFrame.is_topic(AudioFrame.topic(site_id=site_id))
    assert AudioFrame.get_site_id(AudioFrame.topic(site_id=site_id)) == site_id