    def testMaxAudioWithBaselineShift(self):
        low_base = b"".join([b"\x10\x00\x01\x00"] * 100)
        higher_base = b"".join([b"\x01\x00\x00\x01"] * 100)

        source = MockSource()
        for i in xrange(100):
            source.stream.inject(low_base)

        source.stream.inject(higher_base)
        recognizer = Recognizer()
        recognizer.listen(source)
        higher_base_energy = audioop.rms(higher_base, 2)
        # after recalibration (forced by the max audio length) the new threshold
        # should settle at roughly 1.5 * higher_base_energy
        delta_below_threshold = recognizer.energy_threshold - higher_base_energy
        min_delta = higher_base_energy * .5
        assert abs(delta_below_threshold - min_delta) < 1
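
The test above relies on a MockSource helper that is not shown in these snippets. A minimal sketch of what such a fixture could look like, assuming speech_recognition-style AudioSource attributes (SAMPLE_RATE, SAMPLE_WIDTH, CHUNK) and a queue-backed stream whose inject() pre-loads chunks for later read() calls; the class names and attribute values here are illustrative assumptions, not the project's actual test fixture:

import collections

class MockStream(object):
    """Queue-backed stream the test can pre-load with raw audio chunks."""
    def __init__(self):
        self.chunks = collections.deque()

    def inject(self, data):
        # queue a chunk of raw 16-bit audio for a later read()
        self.chunks.append(data)

    def read(self, size):
        # hand back the next injected chunk, or silence once the queue is empty
        return self.chunks.popleft() if self.chunks else b"\x00" * size

class MockSource(object):
    """Stand-in for a speech_recognition.AudioSource with a controllable stream."""
    def __init__(self):
        self.stream = MockStream()
        self.SAMPLE_RATE = 16000
        self.SAMPLE_WIDTH = 2  # 16-bit samples, matching audioop.rms(..., 2)
        self.CHUNK = 1024

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass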
Example #3
    def __init__(self,
                 channels=int(speech_config.get('channels')),
                 sample_rate=int(speech_config.get('sample_rate')),
                 device_index=None,
                 lang=core_config.get('lang')):
        pyee.EventEmitter.__init__(self)
        self.microphone = MutableMicrophone(sample_rate=sample_rate,
                                            device_index=device_index)

        # FIXME - channels are not being used
        self.microphone.CHANNELS = channels
        self.mycroft_recognizer = LocalRecognizer(sample_rate, lang)
        # TODO - localization
        self.wakeup_recognizer = LocalRecognizer(sample_rate, lang, "wake up")
        self.remote_recognizer = Recognizer()
        self.state = RecognizerLoopState()
Example #4
    def __init__(self,
                 channels=int(speech_config.get('channels')),
                 sample_rate=int(speech_config.get('sample_rate')),
                 device_index=None,
                 lang=core_config.get('lang')):
        pyee.EventEmitter.__init__(self)
        self.microphone = MutableMicrophone(sample_rate=sample_rate,
                                            device_index=device_index)
        self.microphone.CHANNELS = channels
        self.ww_recognizer = wakeword_recognizer.create_recognizer(
            samprate=sample_rate, lang=lang)
        self.wakeup_recognizer = wakeword_recognizer.create_recognizer(
            samprate=sample_rate, lang=lang,
            keyphrase="wake up mycroft")  # TODO - localization
        self.remote_recognizer = Recognizer()
        basedir = os.path.dirname(__file__)
        self.wakeup_words = read_stripped_lines(
            os.path.join(basedir, 'model', lang, 'WakeUpWord.voc'))
        self.wakeup_prefixes = read_stripped_lines(
            os.path.join(basedir, 'model', lang, 'PrefixWakeUp.voc'))
        self.state = RecognizerLoopState()
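
Both constructors inherit from pyee.EventEmitter, so callers typically register handlers on the loop and then drive it. The sketch below is illustrative only: the class name RecognizerLoop, the event name 'recognizer_loop:utterance', and the run() method are assumptions, since the snippets above show only __init__.

# A minimal usage sketch. RecognizerLoop, run() and the event name are
# assumptions for illustration; only __init__ appears in the snippets above.
def handle_utterance(event):
    # handler signature depends on what the loop actually emits
    print("utterance event:", event)

loop = RecognizerLoop()  # defaults come from speech_config / core_config
loop.on('recognizer_loop:utterance', handle_utterance)
loop.run()  # assumed blocking dispatch loop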