# Requires the NLTK WordNet corpora (plus the Open Multilingual WordNet for
# non-English lookups).
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import WordNetError


def wordnet_translation(word, pos=None, original_lang='eng',
                        translation_lang=None, dynamic=(False, None)):
    if not translation_lang:
        translation_lang = language_handler.get_language(
        ).get_language_alpha_3()
    if pos:
        # Map the plain-text part-of-speech tag to the WordNet constant;
        # unknown tags fall back to an unrestricted search.
        mapping = {
            "noun": wn.NOUN,
            "verb": wn.VERB,
            "adj": wn.ADJ,
            "adv": wn.ADV
        }
        pos = mapping.get(pos.lower())
    final_lemmas = []
    try:
        for synset in wn.synsets(word, pos=pos, lang=original_lang):
            for lemma in synset.lemmas(translation_lang):
                lemma_name = lemma.name().replace("_", " ")
                # Skip lemmas that were already collected.
                if any(lemma_name in saved.values() for saved in final_lemmas):
                    continue
                final_lemmas.append({
                    "value": lemma_name,
                    "original_value": word,
                    "language": language_handler.get_language().get_code()
                })
    except WordNetError:
        logger.error(
            "Language {} not supported by WordNet".format(translation_lang))
    if not final_lemmas:
        if original_lang == translation_lang:
            return word
        # Fall back to Google Translate, which expects ISO 639-1 codes.
        original_lang = language_handler.get_language_alpha_2_from_alpha_3(
            original_lang)
        translation_lang = language_handler.get_language_alpha_2_from_alpha_3(
            translation_lang)
        return google_translator.google_translation(word,
                                                    src=original_lang,
                                                    dest=translation_lang,
                                                    all_info=True)
    return final_lemmas
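# Usage sketch, not part of the source: the multilingual WordNet lookup that
# wordnet_translation() builds on, written directly against NLTK.  The corpus
# downloads, the example word and the language codes are illustrative
# assumptions; the real helper additionally falls back to Google Translate.
def _example_wordnet_lookup():
    import nltk
    from nltk.corpus import wordnet as wn
    nltk.download("wordnet", quiet=True)
    nltk.download("omw-1.4", quiet=True)
    # Spanish lemmas for the English noun "dog", with multi-word lemmas
    # joined by spaces instead of underscores.
    return [lemma.name().replace("_", " ")
            for synset in wn.synsets("dog", pos=wn.NOUN, lang="eng")
            for lemma in synset.lemmas("spa")]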
def google_translation(sentence, src='en', dest=None, all_info=False):
    global CACHE, TIMER
    if not dest:
        dest = language_handler.get_language().get_language()
    dict_info = {"sentence": sentence, "src": src, "dest": dest}
    hash_id = str(hash(frozenset(dict_info.items())))
    if hash_id in CACHE:
        translation = CACHE[hash_id]
        if all_info:
            return [{
                "value": translation,
                "original_value": sentence,
                "language": dest
            }]
        return translation
    translator = Translator()
    translation = translator.translate(sentence, src=src, dest=dest)
    if not translation:
        return ""
    if translation.text == sentence and "_" in sentence:
        # Retry with spaces instead of underscores: multi-word WordNet lemmas
        # use underscores, which Google Translate tends to leave untouched.
        sentence_ = sentence.replace("_", " ")
        translation = translator.translate(sentence_, src=src, dest=dest)
    CACHE[hash_id] = translation.text
    if not TIMER:
        # Timer.start() returns None, so keep a reference to the timer itself
        # and defer persisting the cache so bursts of translations save once.
        TIMER = threading.Timer(10, save_cache)
        TIMER.start()
    if all_info:
        return [{
            "value": translation.text,
            "original_value": sentence,
            "language": dest
        }]
    return translation.text
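# Sketch of the module-level state google_translation() relies on; this is an
# assumption, not the original module.  Only CACHE, TIMER, save_cache() and the
# googletrans Translator are implied by the function above; the cache file name
# and the JSON persistence format are guesses.
import json
import threading

from googletrans import Translator

CACHE = {}    # hash of (sentence, src, dest) -> translated text
TIMER = None  # pending timer so repeated translations are saved only once


def save_cache(path="translation_cache.json"):
    global TIMER
    TIMER = None
    with open(path, "w") as cache_file:
        json.dump(CACHE, cache_file)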
def wrapper(self):
    entities_info = func(self)
    for category, info in entities_info.iteritems():
        if "question" in info:
            question = info["question"]
        else:
            logger.warn(
                "'{}' entity does not have a question (default)".format(
                    category))
            question = "{} missing information".format(category)
        entities = []
        if "values" in info:
            entities = info["values"]
        else:
            logger.warn(
                "'{}' entity does not have any value".format(category))
        if "type" in info and info["type"] == "regex":
            logger.debug("Adding '{}' regex entity to {} category".format(
                entities, category))
            engine_handler.get_engine().register_regex_entity(entities)
            continue
        for entity in entities:
            if type(entity) in (str, unicode):
                logger.debug("Adding {} entity to {} category".format(
                    entity, category))
                metadata = json.dumps({
                    "entity_value": entity,
                    "entity_type": category,
                    "missing_phrase": question,
                    "language": language_handler.get_language().get_code(),
                    "entity_original_value": entity
                })
                engine_handler.get_engine().register_entity(
                    entity, category, metadata=metadata)
            elif type(entity) == list:
                # Translated entities arrive as a list of dicts produced by
                # the translation helpers.
                for e in entity:
                    logger.debug("Adding {} entity to {} category".format(
                        e["value"], category))
                    metadata = json.dumps({
                        "entity_value": e["value"],
                        "entity_type": category,
                        "missing_phrase": question,
                        "language": e["language"],
                        "entity_original_value": e["original_value"]
                    })
                    engine_handler.get_engine().register_entity(
                        e["value"], category, metadata=metadata)
    # Reuse the result computed above instead of calling func() a second time.
    return entities_info
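# Context sketch (assumption, not from the source): wrapper() reads like the
# inner function of a decorator that registers a plugin's entities with the
# intent engine, e.g.:
#
#   def register_entities(func):
#       def wrapper(self):
#           ...  # body above
#       return wrapper
#
# A decorated method would then return a mapping such as
# {"city": {"question": "...", "values": [...]}} and have every value
# registered automatically before the result is handed back.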
def multilingual(self):
    lang_trans = language_handler.get_language()
    path = sys.modules[self.__class__.__module__].__file__
    localedir = os.path.join(os.path.abspath(os.path.dirname(path)), 'locale')
    try:
        translate = gettext.translation(self.__class__.__name__,
                                        localedir,
                                        languages=[lang_trans.get_code2()])

        def _modified(word, pos=None, original_lang='eng',
                      translation_lang=None, dynamic=(False, None)):
            if not translation_lang:
                translation_lang = language_handler.get_language(
                ).get_language_alpha_3()
            translation = translate.ugettext(word)
            if not translation:
                if dynamic[0]:
                    # Record the missing word so the plugin's .po file can be
                    # completed later.
                    plugin_path = sys.modules[
                        dynamic[1].__class__.__module__].__file__.replace(
                            ".pyc", ".py")
                    plugin_name = plugin_path.split("/")[-1]
                    TRANSLATION_FILES[plugin_name].add_new_entry(word)
                return wordnet_translation(word,
                                           pos,
                                           original_lang=original_lang,
                                           translation_lang=translation_lang)
            if word == translation:
                logger.warn(
                    "Strange translation for '{}' in '{}', trying with wordnet"
                    .format(word, translation_lang))
                return wordnet_translation(word,
                                           pos,
                                           original_lang=original_lang,
                                           translation_lang=translation_lang)
            return [{
                "value": translation,
                "original_value": word,
                "language": language_handler.get_language().get_code()
            }]

        _ = _modified
        _sentence = google_translator.google_translation
    except IOError:
        # No gettext catalogue for this plugin/language: fall back to WordNet
        # for single words and Google Translate for full sentences.
        _ = wordnet_translation
        _sentence = google_translator.google_translation
    return _, _sentence
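# Usage sketch, not from the source: how a plugin might consume the two
# callables returned by multilingual().  The plugin class, category name and
# phrases are hypothetical; only the (_, _sentence) return pair and the
# word/sentence split come from the code above.
class WeatherPlugin(object):

    def entities(self):
        _, _sentence = multilingual(self)
        return {
            "weather": {
                "question": _sentence("Which city are you asking about?"),
                # _ returns a list of {"value", "original_value", "language"}
                # dicts, which is what the entity-registering wrapper expects.
                "values": [_("rain", pos="noun"), _("sun", pos="noun")]
            }
        }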
def load_modules():
    start_time = time.time()
    logger.info("Starting main program")
    config.load_configuration()
    engine_handler.initialize_engine()
    mqtt_handler.initialize(config.get_section("mqtt")["host"],
                            config.get_section("mqtt")["port"],
                            config.get_section("mqtt")["username"],
                            config.get_section("mqtt")["password"])
    language_handler.initialize(
        config.get_section("language")["language_code"])
    nlp_providers_handler.initialize(language_handler.get_language().language)
    utterance_handler.initialize()
    tts_handler.initialize()
    devices_handler.initialize()
    plugins_handler.initialize()
    mqtt_handler.start()
    logger.info("Main program started in {} seconds".format(time.time() -
                                                            start_time))
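# Configuration sketch (assumption, not from the source): the sections and keys
# that load_modules() reads.  The INI layout and the concrete values are
# illustrative; only the section and key names come from the calls above.
#
#   [mqtt]
#   host = localhost
#   port = 1883
#   username = mayordomo
#   password = secret
#
#   [language]
#   language_code = en-US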
def __init__(self, module):
    global TRANSLATION_FILES
    self.plugin_path = sys.modules[
        module.__class__.__module__].__file__.replace(".pyc", ".py")
    self.plugin_folder = os.path.dirname(self.plugin_path)
    self.plugin_name = self.plugin_path.split("/")[-1]
    self.po_folder = "/".join([
        self.plugin_folder, "locale",
        language_handler.get_language().get_code2(), "LC_MESSAGES"
    ])
    self.po_name = self.plugin_name.replace(".py", ".po")
    self.po_path = "/".join([self.po_folder, self.po_name])
    self.mo_path = self.po_path.replace(".po", ".mo")
    self.po = None
    self.create_translation_file()
    TRANSLATION_FILES[self.plugin_name] = self
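# Sketch (assumption, not from the source): the add_new_entry() method that
# _modified() calls on entries of TRANSLATION_FILES, written with polib.
# Whether the original class uses polib is a guess; only the attribute names
# (self.po, self.po_path, self.mo_path) come from __init__ above.
def add_new_entry(self, word):
    import polib
    if self.po is None:
        self.po = polib.pofile(self.po_path)
    if self.po.find(word) is None:
        # Append an empty msgstr so a translator can fill it in later, then
        # refresh the compiled catalogue used by gettext.
        self.po.append(polib.POEntry(msgid=word, msgstr=""))
        self.po.save(self.po_path)
        self.po.save_as_mofile(self.mo_path)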
threading.Thread(target=stream_recognition).start()


if __name__ == "__main__":
    config.load_configuration()
    logger.info("Starting '{}' mayordomo client".format(
        config.get_section("main")["name"]))
    mqtt_handler.initialize(config.get_section("mqtt")["host"],
                            config.get_section("mqtt")["port"],
                            config.get_section("mqtt")["username"],
                            config.get_section("mqtt")["password"])
    import language_handler
    language_handler.initialize(
        config.get_section("language")["language_code"])
    mqtt_handler.start()
    google_cloud_speech_handler.initialize(
        language_handler.get_language().code)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(current_dir, "voice_models/%s" % MODEL_NAME)
    vch = VoiceCommandHandler(config.get_section("main")["prefix"],
                              config.get_section("main")["name"],
                              config.get_section("main")["location"],
                              config.get_section("main")["owner"],
                              config.get_section("main")["description"])
    detector = snowboydecoder.HotwordDetector(model_path, sensitivity=0.4)
    snowboydecoder.play_on_sound()
    detector.start(detected_callback=speech_recognition, sleep_time=0.03)
    detector.terminate()