def _generate_audio_file(self):
    """
    Callback used by TTSModule: synthesize ``self.words`` with the
    eSpeak binary and write the resulting audio file on the disk.

    .. raises:: FailToLoadSoundFile
    """
    # each eSpeak flag is glued to its value as a single argv token
    flag_values = [
        ('-v', self.voice_and_variant),
        ('-s', self.speed),
        ('-a', self.amplitude),
        ('-p', self.pitch),
        ('-w', self.file_path),
    ]
    command = [self.espeak_exec_path]
    command.extend(flag + value for flag, value in flag_values)
    command.append(self.words)

    # generate the file with eSpeak
    subprocess.call(command, stderr=sys.stderr)
    Utils.print_info(
        "Text to Speech Engine: audio file generated by eSpeak")
def load_events(self):
    """
    Register a scheduler job for every "event" signal declared by the
    synapses received at startup.
    """
    cron_fields = ("year", "month", "day", "week", "day_of_week",
                   "hour", "minute", "second")
    for synapse in self.list_synapses_with_event:
        # a synapse may carry several event signals, so scan them all
        for signal in synapse.signals:
            # only "event" signals are scheduled here
            if signal.name != "event":
                continue
            trigger_kwargs = {
                field: self.get_parameter_from_dict(field, signal.parameters)
                for field in cron_fields
            }
            my_cron = CronTrigger(**trigger_kwargs)
            Utils.print_info(
                "Add synapse name \"%s\" to the scheduler: %s"
                % (synapse.name, my_cron))
            self.scheduler.add_job(self.run_synapse_by_name, my_cron,
                                   args=[synapse.name])
def _generate_audio_file(self):
    """
    Callback used by TTSModule: download the synthesized audio from the
    Google TTS endpoint and write it on the disk.

    .. raises:: FailToLoadSoundFile
    """
    # Prepare payload
    payload = self.get_payload()

    # getting the audio
    r = requests.get(TTS_URL, params=payload, stream=True,
                     timeout=TTS_TIMEOUT_SEC)
    content_type = r.headers['Content-Type']
    logger.debug("Googletts : Trying to get url: %s response code: %s and content-type: %s",
                 r.url, r.status_code, content_type)

    # Verify the response status code and the response content type
    response_ok = (r.status_code == requests.codes.ok
                   and content_type == TTS_CONTENT_TYPE)
    if not response_ok:
        raise FailToLoadSoundFile("Googletts : Fail while trying to remotely access the audio file")

    # OK we get the audio we can write the sound file
    FileManager.write_in_file(self.file_path, r.content)
    Utils.print_info("Text to Speech Engine: audio file generated by Google")
def __init__(self, **kwargs):
    """
    Base constructor for signals: remember which concrete signal class
    was instantiated and load the brain.
    """
    super(SignalModule, self).__init__()
    # get the child who called the class
    self.signal_name = type(self).__name__
    Utils.print_info('Init Signal :' + self.signal_name)
    self.brain = BrainLoader().brain
def _generate_audio_file(self):
    """
    Callback used by TTSModule: request a WAV synthesis from the Watson
    TTS service and write the returned audio on the disk.
    """
    # Prepare payload
    payload = self.get_payload()
    headers = {"Content-Type": "application/json", "Accept": "audio/wav"}
    url = "%s/synthesize?voice=%s" % (TTS_URL, self.voice)
    credentials = HTTPBasicAuth(self.username, self.password)

    response = requests.post(url, auth=credentials, headers=headers, json=payload)
    logger.debug("[Watson TTS] status code: %s" % response.status_code)

    if response.status_code != 200:
        # synthesis failed; nothing is written to disk
        logger.debug("[Watson TTS] Fail to get audio. Header: %s" % response.headers)
        return

    # OK we get the audio we can write the sound file
    FileManager.write_in_file(self.file_path, response.content)
    Utils.print_info(
        "Text to Speech Engine: audio file generated by Watson")
def __init__(self, **kwargs):
    """
    Event signal manager.

    Builds a background scheduler, collects every synapse that declares
    an "event" signal and registers a scheduler job for each of them.

    :param kwargs: forwarded to the SignalModule parent constructor
    """
    super(Event, self).__init__(**kwargs)
    # Thread name must be a string; the original passed the class object
    # itself, which makes the thread show up as "<class '...Event'>"
    # in logs and debuggers.
    Thread.__init__(self, name=self.__class__.__name__)
    Utils.print_info('[Event] Starting manager')
    self.scheduler = BackgroundScheduler()
    self.list_synapses_with_event = list(
        super(Event, self).get_list_synapse())
    self.load_events()
def __init__(self, **kwargs):
    """
    MQTT subscriber signal manager.

    Collects every synapse that declares an MQTT signal and initializes
    the connection attributes used later when subscribing.

    :param kwargs: forwarded to the SignalModule parent constructor
    """
    super(Mqtt_subscriber, self).__init__(**kwargs)
    # Thread name must be a string; the original passed the class object
    # itself, which makes the thread show up as "<class '...>" in logs.
    Thread.__init__(self, name=self.__class__.__name__)
    Utils.print_info('[Mqtt_subscriber] Starting manager')
    # variables
    self.list_synapses_with_mqtt = list(
        super(Mqtt_subscriber, self).get_list_synapse())
    self.broker_ip = None
    self.topic = None
    self.json_message = False
def run_synapse_by_name(synapse_name):
    """
    Launch the synapse whose name matches ``synapse_name``.

    :param synapse_name: name of the synapse to run
    """
    Utils.print_info("[Event] triggered, running synapse: %s" % synapse_name)
    # a fresh brain is loaded for every trigger
    brain = BrainLoader().brain
    SynapseLauncher.start_synapse_by_list_name([synapse_name], brain=brain)
def signal_handler(signal, frame):
    """
    Handle the Ctrl+C keyboard interrupt: print a farewell message and
    terminate the process with a zero exit status.

    :param signal: signal number received
    :param frame: current execution frame
    """
    Utils.print_warning("\nBrain.ai service stopped\n")
    sys.exit(0)
def sphinx_callback(self, recognizer, audio):
    """
    Called from the background listening thread: transcribe the captured
    audio with CMU Sphinx and forward the result to the analyser.
    On any recognition failure the analyser is still invoked with
    ``audio_to_text=None`` so that listening resumes.
    """
    try:
        transcription = recognizer.recognize_sphinx(audio,
                                                    language=self.language,
                                                    keyword_entries=self.keyword_entries,
                                                    grammar=self.grammar_file)
        Utils.print_info("Speech to Text Engine: CMU Sphinx thinks you said %s" % transcription)
        self._analyse_audio(transcription)
    except sr.UnknownValueError:
        Utils.print_warning("Speech to Text Engine: CMU Sphinx could not understand audio")
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except sr.RequestError as e:
        Utils.print_danger("Speech to Text Engine: could not request results from CMU Sphinx; {0}".format(e))
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except AssertionError:
        Utils.print_warning("Speech to Text Engine: no audio caught from microphone")
        self._analyse_audio(audio_to_text=None)
def bing_callback(self, recognizer, audio):
    """
    Called from the background listening thread: transcribe the captured
    audio with the Bing speech service and forward the result to the
    analyser. On any recognition failure the analyser is still invoked
    with ``audio_to_text=None`` so that listening resumes.
    """
    try:
        transcription = recognizer.recognize_bing(audio,
                                                  key=self.key,
                                                  language=self.language,
                                                  show_all=self.show_all)
        Utils.print_info("Speech to Text Engine: Bing thinks you said %s" % transcription)
        self._analyse_audio(transcription)
    except sr.UnknownValueError:
        Utils.print_warning("Speech to Text Engine: Bing could not understand audio")
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except sr.RequestError as e:
        Utils.print_danger("Speech to Text Engine: could not request results from Bing; {0}".format(e))
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except AssertionError:
        Utils.print_warning("Speech to Text Engine: no audio caught from microphone")
        self._analyse_audio(audio_to_text=None)
def wit_callback(self, recognizer, audio):
    """
    Called from the background listening thread: transcribe the captured
    audio with the Wit.ai service and forward the result to the
    analyser. On any recognition failure the analyser is still invoked
    with ``audio_to_text=None`` so that listening resumes.
    """
    try:
        transcription = recognizer.recognize_wit(audio,
                                                 key=self.key,
                                                 show_all=self.show_all)
        Utils.print_info("Speech to Text Engine: Wit.ai thinks you said %s" % transcription)
        self._analyse_audio(transcription)
    except sr.UnknownValueError:
        Utils.print_warning("Speech to Text Engine: Wit.ai could not understand audio")
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except sr.RequestError as e:
        Utils.print_danger("Speech to Text Engine: could not request results from Wit.ai; {0}".format(e))
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except AssertionError:
        Utils.print_warning("Speech to Text Engine: no audio caught from microphone")
        self._analyse_audio(audio_to_text=None)
def apiai_callback(self, recognizer, audio):
    """
    Called from the background listening thread: transcribe the captured
    audio with the Api.ai service and forward the result to the
    analyser. On any recognition failure the analyser is still invoked
    with ``audio_to_text=None`` so that listening resumes.

    :param recognizer: speech_recognition Recognizer instance
    :param audio: captured AudioData to transcribe
    """
    try:
        transcription = recognizer.recognize_api(audio,
                                                 client_access_token=self.key,
                                                 language=self.language,
                                                 session_id=self.session_id,
                                                 show_all=self.show_all)
        Utils.print_info("Speech to Text Engine: Api.ai thinks you said %s" % transcription)
        self._analyse_audio(transcription)
    except sr.UnknownValueError as e:
        Utils.print_warning("Speech to Text Engine: Api.ai could not understand audio; {0}".format(e))
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except sr.RequestError as e:
        Utils.print_danger("Speech to Text Engine: could not request results from Api.ai; {0}".format(e))
        # callback anyway, we need to listen again for a new order
        self._analyse_audio(audio_to_text=None)
    except AssertionError:
        Utils.print_warning("Speech to Text Engine: no audio caught from microphone")
        self._analyse_audio(audio_to_text=None)
def start_rest_api(settings, brain):
    """
    Start the Rest API if asked in the user settings.

    :param settings: loaded Settings object
    :param brain: loaded Brain object
    """
    # run the api only if the user wants it
    if not settings.rest_api.active:
        Utils.print_info("REST API: inactive")
        return

    Utils.print_info("REST API: listening on port %s" % settings.rest_api.port)
    app = Flask(__name__)
    flask_api = FlaskAPI(app=app,
                         port=settings.rest_api.port,
                         brain=brain,
                         allowed_cors_origin=settings.rest_api.allowed_cors_origin)
    # daemon thread: do not block process shutdown on the API
    flask_api.daemon = True
    flask_api.start()
def _generate_audio_file(self):
    """
    Callback used by TTSModule: synthesize ``self.words`` with the
    pico2wave binary, optionally resample the result with sox, and
    store it at ``self.file_path``.

    .. raises:: FailToLoadSoundFile
    """
    if self.path is None:
        # we try to get the path from the env
        self.path = self._get_pico_path()
    if self.path is None:
        # still unknown: fall back to the usual install location
        self.path = "/usr/bin/pico2wave"

    # pico2wave needs that the file path ends with .wav
    tmp_path = self.file_path + ".wav"
    final_command = [self.path,
                     "-l=%s" % self.language,
                     "-w=%s" % tmp_path,
                     self.words]
    logger.debug("[Pico2wave] command: %s" % final_command)

    # generate the file with pico2wav
    subprocess.call(final_command)

    # convert samplerate
    if self.samplerate is not None:
        tfm = sox.Transformer()
        tfm.rate(samplerate=self.samplerate)
        resampled_path = str(tmp_path) + "tmp_name.wav"
        tfm.build(str(tmp_path), resampled_path)
        os.rename(resampled_path, tmp_path)

    # remove the extension .wav
    os.rename(tmp_path, self.file_path)
    Utils.print_info(
        "Text to Speech Engine: audio file generated by Pico2wave")
def get_player(settings):
    """
    Instantiate the Player selected as default in the settings.

    :param settings: setting object
    :type settings: Settings
    :return: the Player instance, or None when no player matches
    :rtype: Player
    """
    for player in settings.players:
        if player.name != settings.default_player_name:
            continue
        logger.debug("PlayerLauncher: Start player %s with parameters: %s"
                     % (player.name, player.parameters))
        # dynamically load the matching player class from the players package
        return Utils.get_dynamic_class_instantiation(package_name="players",
                                                     module_name=player.name,
                                                     parameters=player.parameters)
    return None
def _check_dna_file(dna_file):
    """
    Check the content of a DNA file.

    A valid DNA file must declare a "name", a "type" belonging to
    VALID_DNA_MODULE_TYPE and a non-empty "brain_supported_version"
    list of "major.minor" version strings.

    :param dna_file: the dna to check
    :return: True if ok, False otherwise
    """
    success_loading = True
    if "name" not in dna_file:
        Utils.print_danger("The DNA file does not contain a \"name\" tag")
        success_loading = False

    if "type" not in dna_file:
        Utils.print_danger("The DNA file does not contain a \"type\" tag")
        success_loading = False
    else:
        # we have a type, check that it is a valid one
        if dna_file["type"] not in VALID_DNA_MODULE_TYPE:
            Utils.print_danger("The DNA type %s is not valid" % dna_file["type"])
            Utils.print_danger("The DNA type must be one of the following: %s" % VALID_DNA_MODULE_TYPE)
            success_loading = False

    if "brain_supported_version" not in dna_file:
        Utils.print_danger("The DNA file does not contain a \"brain_supported_version\" tag")
        success_loading = False
    else:
        # brain_supported_version must be a non empty list
        if not isinstance(dna_file["brain_supported_version"], list):
            Utils.print_danger("brain_supported_version is not a list")
            success_loading = False
        elif not dna_file["brain_supported_version"]:
            Utils.print_danger("brain_supported_version cannot be empty")
            success_loading = False
        else:
            for supported_version in dna_file["brain_supported_version"]:
                # a version must be "major.minor": both parts are required.
                # The previous pattern '^[\d]*[.][\d]*$' used '*' and so
                # wrongly accepted ".", "1." or ".5" as valid versions.
                if not re.search(r'^\d+\.\d+$', str(supported_version)):
                    Utils.print_danger("brain_supported_version cannot handle this format of version %s. "
                                       "Only major version should be provided" % supported_version)
                    success_loading = False

    return success_loading
def main():
    """Entry point of Brain.ai program."""
    # startup banner
    Utils.print_info("")
    Utils.print_info("Brain.ai (Version 0.1.4)")
    Utils.print_info("Copyright (C) 2018 by Alexander Paul P. Quinit")
    Utils.print_info("This program comes with ABSOLUTELY NO WARRANTY")
    Utils.print_info(
        "This is free software, and you are welcome to redistribute it under certain conditions"
    )
    Utils.print_info("")

    # parse argument. the script name is removed
    try:
        parser = parse_args(sys.argv[1:])
    except SystemExit:
        # argparse already printed its usage/error message
        sys.exit(1)

    # check if we want debug
    configure_logging(debug=parser.debug)

    logger.debug("brain args: %s" % parser)

    # by default, no brain file is set.
    # Use the default one: brain.yml in the root path
    brain_file = None
    # check if user set a brain.yml file
    if parser.brain_file:
        brain_file = parser.brain_file

    # check the user provide a valid action
    if parser.action not in ACTION_LIST:
        Utils.print_warning("%s is not a recognised action\n" % parser.action)
        sys.exit(1)

    # install modules: a git url is mandatory for this action
    if parser.action == "install":
        if not parser.git_url:
            Utils.print_danger("please specify the git url")
            sys.exit(1)
        else:
            parameters = {"git_url": parser.git_url}
            res_manager = ResourcesManager(**parameters)
            res_manager.install()
        # install is terminal: nothing else to run
        return

    # uninstall modules: at least one module-name option is required
    if parser.action == "uninstall":
        if not parser.neuron_name \
                and not parser.stt_name \
                and not parser.tts_name \
                and not parser.trigger_name \
                and not parser.signal_name:
            Utils.print_danger("please specify a module name with "
                               "--neuron-name "
                               "or --stt-name "
                               "or --tts-name "
                               "or --trigger-name "
                               "or --signal-name")
            sys.exit(1)
        else:
            res_manager = ResourcesManager()
            res_manager.uninstall(neuron_name=parser.neuron_name,
                                  stt_name=parser.stt_name,
                                  tts_name=parser.tts_name,
                                  trigger_name=parser.trigger_name,
                                  signal_name=parser.signal_name)
        # uninstall is terminal: nothing else to run
        return

    # load the brain once
    brain_loader = BrainLoader(file_path=brain_file)
    brain = brain_loader.brain

    # load settings
    # get global configuration once
    settings_loader = SettingLoader()
    settings = settings_loader.settings

    if parser.action == "start":
        # user set a synapse to start
        if parser.run_synapse is not None:
            SynapseLauncher.start_synapse_by_list_name([parser.run_synapse],
                                                       brain=brain)

        # user passed an order to match against the brain
        if parser.run_order is not None:
            SynapseLauncher.run_matching_synapse_from_order(parser.run_order,
                                                            brain=brain,
                                                            settings=settings,
                                                            is_api_call=False)

        # no one-shot synapse/order: start the full service
        if (parser.run_synapse is None) and (parser.run_order is None):
            # if --deaf
            if parser.deaf:
                settings.options.deaf = True

            # start rest api
            start_rest_api(settings, brain)
            start_brain(settings, brain)