Example #1
    def __init__(self):
        self.input_engine = STTEngine(
            pause_threshold=SPEECH_RECOGNITION['pause_threshold'],
            energy_theshold=SPEECH_RECOGNITION['energy_threshold'],
            ambient_duration=SPEECH_RECOGNITION['ambient_duration'],
            dynamic_energy_threshold=SPEECH_RECOGNITION[
                'dynamic_energy_threshold'],
            sr=sr) if GENERAL_SETTINGS['user_voice_input'] else TTTEngine()

        self.console_manager = ConsoleManager(log_settings=ROOT_LOG_CONF)
        self.output_engine = TTSEngine(
            console_manager=self.console_manager,
            speech_response_enabled=GENERAL_SETTINGS['response_in_speech'])
        self.response_creator = ResponseCreator()

        self.skill_analyzer = SkillAnalyzer(
            weight_measure=TfidfVectorizer,
            similarity_measure=cosine_similarity,
            args=ANALYZER['args'],
            skills_=SKILLS,
            sensitivity=ANALYZER['sensitivity'])

        self.skill_controller = SkillController(
            settings_=GENERAL_SETTINGS,
            input_engine=self.input_engine,
            analyzer=self.skill_analyzer,
            control_skills=CONTROL_SKILLS,
        )
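
The engine selection above reads module-level configuration dictionaries. A minimal sketch of what SPEECH_RECOGNITION and GENERAL_SETTINGS might contain, assuming only the key names from the example; the values below are hypothetical placeholders, not the project's real defaults:

# Hypothetical configuration values; only the key names come from the example above.
SPEECH_RECOGNITION = {
    'pause_threshold': 0.5,           # seconds of silence that end a phrase
    'energy_threshold': 3000,         # minimum audio energy treated as speech
    'ambient_duration': 1,            # seconds used to calibrate for ambient noise
    'dynamic_energy_threshold': True,
}

GENERAL_SETTINGS = {
    'user_voice_input': True,         # False selects TTTEngine (typed input) instead
    'response_in_speech': False,
}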
Example #2
    def __init__(self, settings_):
        self.settings = settings_
        self.input_engine = STTEngine(
            pause_threshold=self.settings.SPEECH_RECOGNITION.get(
                'pause_threshold'),
            energy_theshold=self.settings.SPEECH_RECOGNITION.get(
                'energy_threshold'),
            ambient_duration=self.settings.SPEECH_RECOGNITION.get(
                'ambient_duration'),
            dynamic_energy_threshold=self.settings.SPEECH_RECOGNITION.get(
                'dynamic_energy_threshold'),
            sr=sr) if self.settings.GENERAL_SETTINGS.get(
                'input_mode') == InputMode.VOICE.value else TTTEngine()

        self.console_manager = ConsoleManager(
            log_settings=self.settings.ROOT_LOG_CONF)
        self.output_engine = TTSEngine(
            console_manager=self.console_manager,
            speech_response_enabled=self.settings.GENERAL_SETTINGS.get(
                'response_in_speech'))
        self.response_creator = ResponseCreator()

        self.skill_analyzer = SkillAnalyzer(
            weight_measure=TfidfVectorizer,
            similarity_measure=cosine_similarity,
            args=self.settings.SKILL_ANALYZER.get('args'),
            skills_=SKILLS,
            sensitivity=self.settings.SKILL_ANALYZER.get('sensitivity'))
Example #3
class Processor:
    def __init__(self):
        self.input_engine = STTEngine(
            pause_threshold=SPEECH_RECOGNITION['pause_threshold'],
            energy_theshold=SPEECH_RECOGNITION['energy_threshold'],
            ambient_duration=SPEECH_RECOGNITION['ambient_duration'],
            dynamic_energy_threshold=SPEECH_RECOGNITION[
                'dynamic_energy_threshold'],
            sr=sr) if GENERAL_SETTINGS['user_voice_input'] else TTTEngine()

        self.console_manager = ConsoleManager(log_settings=ROOT_LOG_CONF)
        self.output_engine = TTSEngine(
            console_manager=self.console_manager,
            speech_response_enabled=GENERAL_SETTINGS['response_in_speech'])
        self.response_creator = ResponseCreator()

        self.skill_analyzer = SkillAnalyzer(
            weight_measure=TfidfVectorizer,
            similarity_measure=cosine_similarity,
            args=ANALYZER['args'],
            skills_=SKILLS,
            sensitivity=ANALYZER['sensitivity'])

        self.skill_controller = SkillController(
            settings_=GENERAL_SETTINGS,
            input_engine=self.input_engine,
            analyzer=self.skill_analyzer,
            control_skills=CONTROL_SKILLS,
        )

    def run(self):
        start_up()
        while True:
            self.skill_controller.wake_up_check()
            if self.skill_controller.is_assistant_enabled:  # check whether the assistant has woken up
                self._process()

    def _process(self):
        self.skill_controller.get_transcript()
        self.skill_controller.get_skills()
        if self.skill_controller.to_execute:
            response = self.response_creator.create_positive_response(
                self.skill_controller.latest_voice_transcript)
        else:
            response = self.response_creator.create_negative_response(
                self.skill_controller.latest_voice_transcript)

        self.output_engine.assistant_response(response)
        self.skill_controller.execute()
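
A minimal, hypothetical entry point for the Processor above; it assumes Processor and its configuration modules are importable and is only an illustrative sketch:

# Hypothetical entry point for Example #3's Processor.
if __name__ == '__main__':
    processor = Processor()
    processor.run()  # blocks: wake-up check loop, then transcript -> skill -> response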
Example #4
class Processor:
    def __init__(self):
        self.input_engine = SPEECH_ENGINES[SPEECH_RECOGNITION['recognizer']]()

        self.console_manager = ConsoleManager(log_settings=ROOT_LOG_CONF)
        self.output_engine = TTSEngine(
            console_manager=self.console_manager,
            speech_response_enabled=GENERAL_SETTINGS['response_in_speech'])
        self.response_creator = ResponseCreator()

        self.skill_analyzer = SkillAnalyzer(
            weight_measure=TfidfVectorizer,
            similarity_measure=cosine_similarity,
            args=ANALYZER['args'],
            skills_=SKILLS,
            sensitivity=ANALYZER['sensitivity'])

        self.skill_controller = SkillController(
            settings_=GENERAL_SETTINGS,
            input_engine=self.input_engine,
            analyzer=self.skill_analyzer,
            control_skills=CONTROL_SKILLS,
        )

    def run(self):
        start_up()
        keyboard.add_hotkey(GENERAL_SETTINGS['wake_up_hotkey'], self._process)
        keyboard.wait()

    def _process(self):
        print('Assistant has woken up')
        self.skill_controller.get_transcript()
        self.skill_controller.get_skills()
        if self.skill_controller.to_execute:
            response = self.response_creator.create_positive_response(
                self.skill_controller.latest_voice_transcript)
        else:
            response = self.response_creator.create_negative_response(
                self.skill_controller.latest_voice_transcript)

        self.output_engine.assistant_response(response)
        self.skill_controller.execute()
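
Example #4 resolves the input engine through a SPEECH_ENGINES registry keyed by SPEECH_RECOGNITION['recognizer']. A sketch of how such a mapping could look, reusing the engine classes from Examples #1-#3; the key names 'google' and 'text' are assumptions:

# Hypothetical registry: maps a recognizer name to a zero-argument factory
# that builds the corresponding input engine.
SPEECH_ENGINES = {
    'google': lambda: STTEngine(
        pause_threshold=SPEECH_RECOGNITION['pause_threshold'],
        energy_theshold=SPEECH_RECOGNITION['energy_threshold'],
        ambient_duration=SPEECH_RECOGNITION['ambient_duration'],
        dynamic_energy_threshold=SPEECH_RECOGNITION['dynamic_energy_threshold'],
        sr=sr),
    'text': TTTEngine,
}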
Example #5
    def __init__(self):
        self.input_engine = SPEECH_ENGINES[SPEECH_RECOGNITION['recognizer']]()

        self.console_manager = ConsoleManager(log_settings=ROOT_LOG_CONF)
        self.output_engine = TTSEngine(
            console_manager=self.console_manager,
            speech_response_enabled=GENERAL_SETTINGS['response_in_speech'])
        self.response_creator = ResponseCreator()

        self.skill_analyzer = SkillAnalyzer(
            weight_measure=TfidfVectorizer,
            similarity_measure=cosine_similarity,
            args=ANALYZER['args'],
            skills_=SKILLS,
            sensitivity=ANALYZER['sensitivity'])

        self.skill_controller = SkillController(
            settings_=GENERAL_SETTINGS,
            input_engine=self.input_engine,
            analyzer=self.skill_analyzer,
            control_skills=CONTROL_SKILLS,
        )
Example #6
class AssistantSkill:
    first_activation = True
    console_manager = ConsoleManager(log_settings=ROOT_LOG_CONF)
    tts_engine = TTSEngine(
        console_manager=console_manager,
        speech_response_enabled=GENERAL_SETTINGS['response_in_speech'])

    @classmethod
    def response(cls, text):
        cls.tts_engine.assistant_response(text)

    @classmethod
    def _extract_tags(cls, voice_transcript, skill_tags):
        transcript_words = voice_transcript.split()
        return set(transcript_words).intersection(skill_tags)
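
A concrete skill built on the AssistantSkill base above might look like the following sketch; the class name, tags, and response wording are hypothetical, and only response() and _extract_tags() come from the example:

# Hypothetical skill using the AssistantSkill base class from Example #6.
import datetime


class TimeSkill(AssistantSkill):

    @classmethod
    def tell_the_time(cls, voice_transcript, skill_tags=('time', 'clock'), **kwargs):
        # Only respond if the transcript actually mentions one of the skill's tags.
        if cls._extract_tags(voice_transcript, skill_tags):
            now = datetime.datetime.now().strftime('%H:%M')
            cls.response('The time is ' + now)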
Example #7
class Processor:
    def __init__(self, settings_):
        self.settings = settings_
        self.input_engine = STTEngine(
            pause_threshold=self.settings.SPEECH_RECOGNITION.get(
                'pause_threshold'),
            energy_theshold=self.settings.SPEECH_RECOGNITION.get(
                'energy_threshold'),
            ambient_duration=self.settings.SPEECH_RECOGNITION.get(
                'ambient_duration'),
            dynamic_energy_threshold=self.settings.SPEECH_RECOGNITION.get(
                'dynamic_energy_threshold'),
            sr=sr) if self.settings.GENERAL_SETTINGS.get(
                'input_mode') == InputMode.VOICE.value else TTTEngine()

        self.console_manager = ConsoleManager(
            log_settings=self.settings.ROOT_LOG_CONF)
        self.output_engine = TTSEngine(
            console_manager=self.console_manager,
            speech_response_enabled=self.settings.GENERAL_SETTINGS.get(
                'response_in_speech'))
        self.response_creator = ResponseCreator()

        self.skill_analyzer = SkillAnalyzer(
            weight_measure=TfidfVectorizer,
            similarity_measure=cosine_similarity,
            args=self.settings.SKILL_ANALYZER.get('args'),
            skills_=SKILLS,
            sensitivity=self.settings.SKILL_ANALYZER.get('sensitivity'))

    def run(self):
        self._trapped_until_assistant_is_enabled()

        transcript = self.input_engine.recognize_input()
        skill_to_execute = self._extract_skill(transcript)
        response = self.response_creator.create_positive_response(transcript) if skill_to_execute \
            else self.response_creator.create_negative_response(transcript)

        self.output_engine.assistant_response(response)
        self._execute_skill(skill_to_execute)

    def _execute_skill(self, skill):
        if skill:
            try:
                skill_method = skill.get('skill').get('skill')
                logging.debug('Executing skill {0}'.format(skill))
                skill_method(**skill)
            except Exception as e:
                logging.debug(
                    "Skill execution failed with error: {0}".format(e))

    def _trapped_until_assistant_is_enabled(self):
        if self.settings.GENERAL_SETTINGS.get(
                'input_mode') == InputMode.VOICE.value:
            while not ExecutionState.is_ready_to_execute():
                voice_transcript = self.input_engine.recognize_input()
                transcript_words = voice_transcript.split()
                enable_tag = set(transcript_words).intersection(
                    CONTROL_SKILLS.get('enable_assistant').get('tags'))

                if enable_tag:
                    CONTROL_SKILLS.get('enable_assistant').get('skill')()
                    ExecutionState.update()

    def _extract_skill(self, transcript):
        skill = self.skill_analyzer.extract(transcript)
        if skill:
            return {'voice_transcript': transcript, 'skill': skill}
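
Example #7's run() handles a single transcript, so a caller would typically invoke it in a loop. A hypothetical driver is sketched below; the jarvis.settings import path is an assumption for illustration:

# Hypothetical driver for Example #7's Processor; the settings module path is assumed.
from jarvis import settings

if __name__ == '__main__':
    processor = Processor(settings_=settings)
    while True:
        processor.run()  # wait for wake-up in voice mode, then process one request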