class AudioTester(object):
    def __init__(self, samp_rate):
        print()  # Pad debug messages
        self.ww_recognizer = RecognizerLoop().create_mycroft_recognizer(
            samp_rate, 'en-us')
        self.listener = ResponsiveRecognizer(self.ww_recognizer)
        print()

    def test_audio(self, file_name):
        source = FileMockMicrophone(file_name)
        ee = pyee.EventEmitter()

        class SharedData:
            times_found = 0

        def on_found_wake_word():
            SharedData.times_found += 1

        ee.on('recognizer_loop:record_begin', on_found_wake_word)

        try:
            while True:
                self.listener.listen(source, ee)
        except EOFError:
            pass

        return SharedData.times_found
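
A minimal usage sketch for the AudioTester above, assuming it lives in the same test module as FileMockMicrophone and the mycroft recognizer imports; the WAV path and 16000 Hz sample rate are placeholders, not part of the original snippet:

# Hypothetical driver for AudioTester; file path and sample rate are illustrative.
if __name__ == '__main__':
    tester = AudioTester(16000)
    hits = tester.test_audio('hey_mycroft_sample.wav')
    print('wake word detected %d time(s)' % hits)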

    def testMaxAudioWithBaselineShift(self):
        low_base = b"".join([b"\x10\x00\x01\x00"] * 100)
        higher_base = b"".join([b"\x01\x00\x00\x01"] * 100)

        source = MockSource()

        for i in range(100):
            source.stream.inject(low_base)

        source.stream.inject(higher_base)
        recognizer = ResponsiveRecognizer(None)

        sec_per_buffer = float(source.CHUNK) / (source.SAMPLE_RATE *
                                                source.SAMPLE_WIDTH)

        test_seconds = 30.0
        while test_seconds > 0:
            test_seconds -= sec_per_buffer
            data = source.stream.read(source.CHUNK)
            energy = recognizer.calc_energy(data, source.SAMPLE_WIDTH)
            recognizer.adjust_threshold(energy, sec_per_buffer)

        higher_base_energy = audioop.rms(higher_base, source.SAMPLE_WIDTH)
        # After recalibration (because of max audio length), the new threshold
        # should settle at roughly 1.5 * higher_base_energy.
        delta_below_threshold = (
            recognizer.energy_threshold - higher_base_energy)
        min_delta = higher_base_energy * .5
        assert abs(delta_below_threshold - min_delta) < 1
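
For reference, the quantities compared in the assertion above reduce to plain RMS values over the raw sample bytes. A standalone sketch of that arithmetic, assuming 2-byte (16-bit) samples for the width the test takes from source.SAMPLE_WIDTH; only audioop is used, no recognizer state:

import audioop

SAMPLE_WIDTH = 2  # assumed 16-bit samples; the test reads this from MockSource

low_base = b"\x10\x00\x01\x00" * 100      # quieter baseline: samples 16, 1
higher_base = b"\x01\x00\x00\x01" * 100   # louder baseline: samples 1, 256

low_energy = audioop.rms(low_base, SAMPLE_WIDTH)
higher_energy = audioop.rms(higher_base, SAMPLE_WIDTH)

# The assertion expects the adapted threshold to settle near
# 1.5 * higher_energy, i.e. threshold - higher_energy ~= 0.5 * higher_energy.
print(low_energy, higher_energy, 1.5 * higher_energy)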

class AudioTester(object):
    def __init__(self, samp_rate):
        print()  # Pad debug messages
        self.ww_recognizer = LocalRecognizer(samp_rate, 'en-us')
        self.listener = ResponsiveRecognizer(self.ww_recognizer)
        print()
        speech_logger.setLevel(100)  # Disables logging to clean output

    def test_audio(self, file_name):
        source = FileMockMicrophone(file_name)
        ee = pyee.EventEmitter()

        class SharedData:
            times_found = 0

        def on_found_wake_word():
            SharedData.times_found += 1

        ee.on('recognizer_loop:record_begin', on_found_wake_word)

        try:
            while True:
                self.listener.listen(source, ee)
        except EOFError:
            pass

        return SharedData.times_found

    def __init__(self, samp_rate):
        print()  # Pad debug messages
        self.ww_recognizer = RecognizerLoop().create_mycroft_recognizer(
            samp_rate, 'en-us')
        self.listener = ResponsiveRecognizer(self.ww_recognizer)
        print()
        speech_logger.setLevel(100)  # Disables logging to clean output
Example #5
File: listener.py Project: lakst/mykt
    def _load_config(self):
        """
            Load configuration parameters from configuration
        """
        config = Configuration.get()
        self.config_core = config
        self._config_hash = hash(str(config))
        self.lang = config.get('lang')
        self.config = config.get('listener')
        rate = self.config.get('sample_rate')
        device_index = self.config.get('device_index')

        self.microphone = MutableMicrophone(device_index,
                                            rate,
                                            mute=self.mute_calls > 0)
        # FIXME - channels are not being used
        self.microphone.CHANNELS = self.config.get('channels')
        self.wakeword_recognizer = self.create_wake_word_recognizer()
        # TODO - localization
        self.wakeup_recognizer = self.create_wakeup_recognizer()
        self.responsive_recognizer = ResponsiveRecognizer(
            self.wakeword_recognizer)
        self.state = RecognizerLoopState()
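
A sketch of the configuration shape _load_config consumes; the key names come from the .get() calls above, while the values are illustrative placeholders rather than the project's shipped defaults:

# Illustrative only: keys match _load_config above, values are placeholders.
example_config = {
    'lang': 'en-us',
    'listener': {
        'sample_rate': 16000,
        'device_index': None,   # None -> system default input device
        'channels': 1,
    },
}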
Example #6
    def __init__(self, samp_rate):
        print()  # Pad debug messages
        self.ww_recognizer = RecognizerLoop().create_wake_word_recognizer()
        self.listener = ResponsiveRecognizer(self.ww_recognizer)
        self.listener.config['confirm_listening'] = False
        print()
Example #7
    def __init__(self, samp_rate):
        print()  # Pad debug messages
        self.ww_recognizer = RecognizerLoop().create_mycroft_recognizer(
            samp_rate, 'en-us')
        self.listener = ResponsiveRecognizer(self.ww_recognizer)
        print()
Example #8
    def __init__(self, samp_rate):
        self.ww_recognizer = LocalRecognizer(samp_rate, 'en-us')
        self.listener = ResponsiveRecognizer(self.ww_recognizer)
        speech_logger.setLevel(100)  # Disables logging to clean output