def test_agressive_pruning_pt(self):
    """Portuguese pruning: articles/clitics dropped, number words to digits."""
    cases = [
        ("uma palavra", "1 palavra"),
        ("esta palavra um", "palavra 1"),
        ("o homem batia-lhe", "homem batia"),
        ("quem disse asneira nesse dia", "quem disse asneira dia"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, lang="pt"), expected)
def test_spaces(self):
    """Leading/trailing whitespace is trimmed; articles and numbers handled."""
    cases = [
        (" this is a test", "this is test"),
        (" this is a test ", "this is test"),
        (" this is one test", "this is 1 test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence), expected)
def test_articles_es(self):
    """Spanish article removal via remove_articles=True."""
    cases = [
        ("esta es la prueba", "esta es prueba"),
        ("y otra prueba", "y otra prueba"),
    ]
    for sentence, expected in cases:
        self.assertEqual(
            normalize(sentence, lang="es", remove_articles=True), expected)
def test_articles_es(self):
    """Spanish article removal via remove_articles=True."""
    for sentence, expected in (
            ("esta es la prueba", "esta es prueba"),
            ("y otra prueba", "y otra prueba")):
        self.assertEqual(
            normalize(sentence, lang="es", remove_articles=True), expected)
def test_spaces(self):
    """Whitespace is trimmed; "a" dropped and "one" converted to a digit."""
    for sentence, expected in (
            (" this is a test", "this is test"),
            (" this is a test ", "this is test"),
            (" this is one test", "this is 1 test")):
        self.assertEqual(normalize(sentence), expected)
def test_spaces_fr(self):
    """French normalize trims whitespace and drops the article "le"."""
    cases = [
        (" c'est le test", "c'est test"),
        (" c'est le test ", "c'est test"),
        (" c'est un test", "c'est 1 test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, lang="fr-fr"), expected)
def test_spaces_fr(self):
    """French whitespace trimming; "un" becomes the digit 1."""
    for sentence, expected in (
            (" c'est le test", "c'est test"),
            (" c'est le test ", "c'est test"),
            (" c'est un test", "c'est 1 test")):
        self.assertEqual(normalize(sentence, lang="fr-fr"), expected)
def test_spaces_pt(self):
    """Portuguese normalize trims whitespace; article handling per flag."""
    for sentence, expected in (
            (" isto e o teste", "isto teste"),
            (" isto sao os testes ", "isto sao testes")):
        self.assertEqual(normalize(sentence, lang="pt"), expected)
    # Keep articles here so "um" is converted to the digit 1, not dropped.
    self.assertEqual(
        normalize(" isto e um teste", lang="pt", remove_articles=False),
        "isto e 1 teste")
def test_spaces_pt(self):
    """Whitespace trimming for Portuguese with and without article removal."""
    self.assertEqual(normalize(" isto e o teste", lang="pt"),
                     "isto teste")
    self.assertEqual(normalize(" isto sao os testes ", lang="pt"),
                     "isto sao testes")
    # remove_articles=False keeps "e"/"o" but still digitizes "um".
    self.assertEqual(
        normalize(" isto e um teste", lang="pt", remove_articles=False),
        "isto e 1 teste")
def test_agressive_pruning_pt(self):
    """Aggressive Portuguese pruning of articles, clitics and demonstratives."""
    for sentence, expected in (
            ("uma palavra", "1 palavra"),
            ("esta palavra um", "palavra 1"),
            ("o homem batia-lhe", "homem batia"),
            ("quem disse asneira nesse dia", "quem disse asneira dia")):
        self.assertEqual(normalize(sentence, lang="pt"), expected)
def test_articles_pt(self):
    """Portuguese article removal.

    Note: the original literals contained mojibake ("�") where "é"
    belongs; the accented text is restored to match the duplicate
    accented version of this test elsewhere in the suite.
    """
    self.assertEqual(
        normalize(u"isto é o teste", lang="pt", remove_articles=True),
        u"isto teste")
    self.assertEqual(
        normalize(u"isto é a frase", lang="pt", remove_articles=True),
        u"isto frase")
    self.assertEqual(
        normalize("e outro teste", lang="pt", remove_articles=True),
        "outro teste")
def test_numbers(self):
    """Danish number words 1-12 are replaced by digits."""
    cases = [
        ("dette er en to tre test", "dette er 1 2 3 test"),
        ("dette er fire fem seks test", "dette er 4 5 6 test"),
        ("dette er syv otte ni test", "dette er 7 8 9 test"),
        ("dette er ti elve tolv test", "dette er 10 11 12 test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, lang="da-dk"), expected)
def test_articles(self):
    """German article handling controlled by the remove_articles flag."""
    for sentence, expected in (
            ("dies ist der test", "dies ist test"),
            ("und noch ein Test", "und noch 1 Test")):
        self.assertEqual(
            normalize(sentence, lang="de-de", remove_articles=True),
            expected)
    # With remove_articles=False the article is preserved verbatim.
    kept = "dies ist der Extra-Test"
    self.assertEqual(
        normalize(kept, lang="de-de", remove_articles=False), kept)
def test_articles(self):
    """Danish "en" is digitized to 1 regardless of the article flag."""
    for sentence, expected in (
            ("dette er en test", "dette er 1 test"),
            ("og endnu en test", "og endnu 1 test")):
        self.assertEqual(
            normalize(sentence, lang="da-dk", remove_articles=True),
            expected)
    self.assertEqual(
        normalize("dette er en extra-test", lang="da-dk",
                  remove_articles=False),
        "dette er 1 extra-test")
def test_articles(self):
    """English articles "a"/"the" removed when remove_articles=True."""
    for sentence, expected in (
            ("this is a test", "this is test"),
            ("this is the test", "this is test"),
            ("and another test", "and another test")):
        self.assertEqual(normalize(sentence, remove_articles=True), expected)
    # Articles survive when removal is disabled.
    kept = "this is an extra test"
    self.assertEqual(normalize(kept, remove_articles=False), kept)
def test_articles(self):
    """English article removal; "another" is not an article and survives."""
    cases = [
        ("this is a test", "this is test"),
        ("this is the test", "this is test"),
        ("and another test", "and another test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, remove_articles=True), expected)
    sentence = "this is an extra test"
    self.assertEqual(normalize(sentence, remove_articles=False), sentence)
def test_articles(self):
    """Dutch article removal using the module-level LANG constant."""
    for sentence, expected in (
            ("dit is de test", "dit is test"),
            ("en nog een Test", "en nog 1 Test")):
        self.assertEqual(
            normalize(sentence, LANG, remove_articles=True), expected)
    # With remove_articles=False the sentence passes through unchanged.
    kept = "dit is de Extra-Test"
    self.assertEqual(normalize(kept, LANG, remove_articles=False), kept)
def test_articles_pt(self):
    """Portuguese article removal.

    Note: the original literals contained mojibake ("�") where "é"
    belongs; restored from the accented duplicate of this test.
    """
    self.assertEqual(
        normalize(u"isto é o teste", lang="pt", remove_articles=True),
        u"isto teste")
    self.assertEqual(
        normalize(u"isto é a frase", lang="pt", remove_articles=True),
        u"isto frase")
    self.assertEqual(
        normalize("e outro teste", lang="pt", remove_articles=True),
        "outro teste")
def test_articles(self):
    """Danish articles: "en" always becomes the digit 1."""
    cases = [
        ("dette er en test", "dette er 1 test"),
        ("og endnu en test", "og endnu 1 test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(
            normalize(sentence, lang="da-dk", remove_articles=True),
            expected)
    self.assertEqual(
        normalize("dette er en extra-test", lang="da-dk",
                  remove_articles=False),
        "dette er 1 extra-test")
def get_lights(self, search_name):
    """Find Wink lights matching a spoken name.

    Fuzzy-matches ``search_name`` first against group names, then
    against individual bulb names; the higher-scoring match wins (ties
    go to the group).

    Args:
        search_name (str): spoken name of a light or light group

    Returns:
        list of device dicts for the matched group or single bulb,
        or None when nothing scores above the 0.6 threshold (or when
        no device data is available).
    """
    if not search_name:
        return None
    name = normalize(search_name)
    self.debug("Searching for: " + name, 2, char="=")

    # First fuzzy search the groups
    best = None
    best_score = 0
    if self.wink_groups:
        for group in self.wink_groups["data"]:
            groupname = normalize(group["name"])
            score = fuzzy_match(groupname, name)
            self.debug(groupname + " : " + str(score), 5)
            if score > 0.6 and score > best_score:
                best_score = score
                best = group

    if not self.wink_devices:
        # can't even return group matches without device info
        return None

    # Remember the best group score before rescanning for single bulbs.
    best_group_score = best_score
    group_lights = []
    group_IDs = []
    if best:
        # Collect the light IDs from the group that was found
        for member in best["members"]:
            if member["object_type"] == "light_bulb":
                group_IDs.append(member["object_id"])

    # Now fuzzy search the individual bulbs, reusing best/best_score.
    best = None
    for dev in self.wink_devices["data"]:
        if "light_bulb_id" in dev:  # check if light_bulb
            # Gather group lights (just in case the group wins)
            if dev["light_bulb_id"] in group_IDs:
                group_lights.append(dev)
            # score the bulb name match
            lightname = normalize(dev["name"])
            score = fuzzy_match(lightname, name)
            self.debug(lightname + " : " + str(score), 5)
            if score > best_score:
                best_score = score
                best = dev

    # A group match wins ties against a single-bulb match.
    if group_lights and best_group_score >= best_score:
        self.debug("Group wins", 3, char="*")
        return group_lights
    elif best and best_score > 0.6:
        return [best]
    return None
def test_articles(self):
    """German articles removed only when remove_articles=True."""
    cases = [
        ("dies ist der test", "dies ist test"),
        ("und noch ein Test", "und noch 1 Test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(
            normalize(sentence, lang="de-de", remove_articles=True),
            expected)
    sentence = "dies ist der Extra-Test"
    self.assertEqual(
        normalize(sentence, lang="de-de", remove_articles=False), sentence)
def test_spaces_it(self):
    """Italian normalize trims whitespace; articles handled per flag."""
    default_cases = [
        (u"questo e' il test", u"questo e' test"),
        (u"questo è un test ", u"questo è 1 test"),
        (u"un altro test ", u"1 altro test"),
    ]
    for sentence, expected in default_cases:
        self.assertEqual(normalize(sentence, lang="it"), expected)
    keep_cases = [
        (u"questa è un' altra amica ", u"questa è 1 altra amica"),
        (u"questo è un test ", u"questo è 1 test"),
    ]
    for sentence, expected in keep_cases:
        self.assertEqual(
            normalize(sentence, lang="it", remove_articles=False), expected)
def test_spaces_it(self):
    """Whitespace trimming for Italian; "un" becomes the digit 1."""
    self.assertEqual(normalize(u"questo e' il test", lang="it"),
                     u"questo e' test")
    self.assertEqual(normalize(u"questo è un test ", lang="it"),
                     u"questo è 1 test")
    self.assertEqual(normalize(u"un altro test ", lang="it"),
                     u"1 altro test")
    # Article removal disabled: "un" still digitized, elision kept.
    self.assertEqual(
        normalize(u"questa è un' altra amica ", lang="it",
                  remove_articles=False),
        u"questa è 1 altra amica")
    self.assertEqual(
        normalize(u"questo è un test ", lang="it", remove_articles=False),
        u"questo è 1 test")
def test_articles_fr(self):
    """French articles and elisions removed when remove_articles=True."""
    for sentence, expected in (
            ("c'est le test", "c'est test"),
            ("et l'autre test", "et autre test"),
            ("et la tentative", "et tentative")):
        self.assertEqual(
            normalize(sentence, remove_articles=True, lang="fr-fr"),
            expected)
    # Disabled removal leaves the sentence intact.
    kept = "la dernière tentative"
    self.assertEqual(
        normalize(kept, remove_articles=False, lang="fr-fr"), kept)
def test_articles_it(self):
    """Italian articles il/la/lo removed when remove_articles=True."""
    for sentence, expected in (
            (u"questo è il test", u"questo è test"),
            (u"questa è la frase", u"questa è frase"),
            (u"questo è lo scopo", u"questo è scopo")):
        self.assertEqual(
            normalize(sentence, lang="it", remove_articles=True), expected)
    kept = u"questo è il test extra"
    self.assertEqual(
        normalize(kept, lang="it", remove_articles=False), kept)
def test_articles_it(self):
    """Italian definite articles are stripped with remove_articles=True."""
    cases = [
        (u"questo è il test", u"questo è test"),
        (u"questa è la frase", u"questa è frase"),
        (u"questo è lo scopo", u"questo è scopo"),
    ]
    for sentence, expected in cases:
        self.assertEqual(
            normalize(sentence, lang="it", remove_articles=True), expected)
    sentence = u"questo è il test extra"
    self.assertEqual(
        normalize(sentence, lang="it", remove_articles=False), sentence)
def test_spaces_it(self):
    """ Test cases for Italian remove spaces """
    for sentence, expected in (
            ('questo è un test ', 'questo è 1 test'),
            ('un altro test ', '1 altro test')):
        self.assertEqual(normalize(sentence, lang='it'), expected)
    for sentence, expected in (
            ('questa è un\' altra amica ', 'questa è 1 altra amica'),
            ('questo è un test ', 'questo è 1 test')):
        self.assertEqual(
            normalize(sentence, lang='it', remove_articles=False), expected)
def test_articles_fr(self):
    """French article removal, including the elided l'."""
    cases = [
        ("c'est le test", "c'est test"),
        ("et l'autre test", "et autre test"),
        ("et la tentative", "et tentative"),
    ]
    for sentence, expected in cases:
        self.assertEqual(
            normalize(sentence, remove_articles=True, lang="fr-fr"),
            expected)
    sentence = "la dernière tentative"
    self.assertEqual(
        normalize(sentence, remove_articles=False, lang="fr-fr"), sentence)
def test_numbers(self):
    """Danish cardinal words converted to digits."""
    for sentence, expected in (
            ("dette er en to tre test", "dette er 1 2 3 test"),
            ("dette er fire fem seks test", "dette er 4 5 6 test"),
            ("dette er syv otte ni test", "dette er 7 8 9 test"),
            ("dette er ti elve tolv test", "dette er 10 11 12 test")):
        self.assertEqual(normalize(sentence, lang="da-dk"), expected)
def test_numbers_pt(self):
    """Portuguese number words converted to digits.

    Note: the original literals contained mojibake ("tr�s", "�");
    restored to "três" / "é".
    """
    self.assertEqual(normalize(u"isto e o um dois três teste", lang="pt"),
                     u"isto 1 2 3 teste")
    self.assertEqual(normalize(u"é a sete oito nove test", lang="pt"),
                     u"7 8 9 test")
    self.assertEqual(
        normalize("teste zero dez onze doze treze", lang="pt"),
        "teste 0 10 11 12 13")
    self.assertEqual(
        normalize("teste mil seiscentos e sessenta e seis", lang="pt",
                  remove_articles=False),
        "teste 1000 600 e 66")
    self.assertEqual(
        normalize("teste sete e meio", lang="pt", remove_articles=False),
        "teste 7 e meio")
    self.assertEqual(
        normalize("teste dois ponto nove", lang="pt"),
        "teste 2 ponto 9")
    self.assertEqual(
        normalize("teste cento e nove", lang="pt", remove_articles=False),
        "teste 100 e 9")
    self.assertEqual(
        normalize("teste vinte e 1", lang="pt"),
        "teste 20 1")
def test_numbers(self):
    """German number words 1-20 are replaced by digits.

    The original repeated the "sieben acht neun" assertion twice;
    the duplicate is removed.
    """
    cases = [
        ("dies ist eins zwei drei test", "dies ist 1 2 3 test"),
        (u"es ist vier fünf sechs test", "es ist 4 5 6 test"),
        ("es ist sieben acht neun test", "es ist 7 8 9 test"),
        (u"dies ist zehn elf zwölf test", "dies ist 10 11 12 test"),
        ("dies ist dreizehn vierzehn test", "dies ist 13 14 test"),
        (u"dies ist fünfzehn sechzehn siebzehn", "dies ist 15 16 17"),
        ("dies ist achtzehn neunzehn zwanzig", "dies ist 18 19 20"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, lang="de-de"), expected)
def test_numbers(self):
    """German cardinal words converted to digits (duplicate case removed)."""
    for sentence, expected in (
            ("dies ist eins zwei drei test", "dies ist 1 2 3 test"),
            (u"es ist vier fünf sechs test", "es ist 4 5 6 test"),
            ("es ist sieben acht neun test", "es ist 7 8 9 test"),
            (u"dies ist zehn elf zwölf test", "dies ist 10 11 12 test"),
            ("dies ist dreizehn vierzehn test", "dies ist 13 14 test"),
            (u"dies ist fünfzehn sechzehn siebzehn", "dies ist 15 16 17"),
            ("dies ist achtzehn neunzehn zwanzig", "dies ist 18 19 20")):
        self.assertEqual(normalize(sentence, lang="de-de"), expected)
def test_articles_pt(self):
    """Portuguese article removal (accented literals)."""
    for sentence, expected in (
            (u"isto é o teste", u"isto teste"),
            (u"isto é a frase", u"isto frase")):
        self.assertEqual(
            normalize(sentence, lang="pt", remove_articles=True), expected)
    self.assertEqual(
        normalize("e outro teste", lang="pt", remove_articles=True),
        "outro teste")
    self.assertEqual(
        normalize(u"isto é o teste extra", lang="pt", remove_articles=False),
        u"isto e o teste extra")
def test_articles_pt(self):
    """Portuguese articles removed only when remove_articles=True."""
    self.assertEqual(
        normalize(u"isto é o teste", lang="pt", remove_articles=True),
        u"isto teste")
    self.assertEqual(
        normalize(u"isto é a frase", lang="pt", remove_articles=True),
        u"isto frase")
    self.assertEqual(
        normalize("e outro teste", lang="pt", remove_articles=True),
        "outro teste")
    # With removal disabled, normalization still de-accents "é" here.
    self.assertEqual(
        normalize(u"isto é o teste extra", lang="pt",
                  remove_articles=False),
        u"isto e o teste extra")
def test_numbers_pt(self):
    """Portuguese number-word conversion (mojibake literals restored).

    The original contained "tr�s" and "�" where "três" / "é" belong.
    """
    cases = [
        (u"isto e o um dois três teste", u"isto 1 2 3 teste", None),
        (u"é a sete oito nove test", u"7 8 9 test", None),
        ("teste zero dez onze doze treze", "teste 0 10 11 12 13", None),
        ("teste mil seiscentos e sessenta e seis",
         "teste 1000 600 e 66", False),
        ("teste sete e meio", "teste 7 e meio", False),
        ("teste dois ponto nove", "teste 2 ponto 9", None),
        ("teste cento e nove", "teste 100 e 9", False),
        ("teste vinte e 1", "teste 20 1", None),
    ]
    for sentence, expected, articles in cases:
        if articles is None:
            self.assertEqual(normalize(sentence, lang="pt"), expected)
        else:
            self.assertEqual(
                normalize(sentence, lang="pt", remove_articles=articles),
                expected)
def test_numbers(self):
    """Swedish number words converted to digits."""
    cases = [
        ("det här är ett ett två tre test", "det här är 1 1 2 3 test"),
        (" det är fyra fem sex test", "det är 4 5 6 test"),
        ("det är sju åtta nio test", "det är 7 8 9 test"),
        ("det är tio elva tolv test", "det är 10 11 12 test"),
        ("det är arton nitton tjugo test", "det är 18 19 20 test"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, lang='sv-se'), expected)
def utterance_remainder(self):
    """ For intents get the portion not consumed by Adapt.

    For example: if they say 'Turn on the family room light' and there
    are entity matches for "turn on" and "light", then it will leave
    behind " the family room " which is then normalized to
    "family room".

    Returns:
        str: Leftover words or None if not an utterance.
    """
    utt = normalize(self.data.get("utterance", ""))
    if utt and "__tags__" in self.data:
        import re
        for token in self.data["__tags__"]:
            # Remove only whole-word matches; a plain str.replace could
            # clip the inside of longer words (e.g. "on" in "monitor").
            # re.escape keeps regex metacharacters in the key literal.
            utt = re.sub(r'\b' + re.escape(token.get("key", "")) + r'\b',
                         "", utt)
    return normalize(utt)
def _adapt_intent_match(self, utterances, lang):
    """ Run the Adapt engine to search for a matching intent.

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            LOG.exception(e)
            continue
    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        # Record matched context and mark the owning skill as active
        self.update_context(best_intent)
        # update active skills
        skill_id = best_intent['intent_type'].split(":")[0]
        self.add_active_skill(skill_id)
    return best_intent
def handle_query_future_time(self, message):
    """Speak and briefly display the time at a future moment in the query.

    Extracts a datetime (and optionally a location) from the utterance,
    speaks the resulting time and shows it on the enclosure's display
    for five seconds.
    """
    utt = normalize(message.data.get('utterance', "").lower())
    # extract_datetime returns (datetime, remaining_text) or falsy
    extract = extract_datetime(utt)
    dt = None
    if extract:
        dt = extract[0]
        utt = extract[1]
    location = self._extract_location(utt)
    future_time = self.get_spoken_current_time(location, dt, True)
    if not future_time:
        return
    # speak it
    self.speak_dialog("time.future", {"time": future_time})
    # and briefly show the time
    self.answering_query = True
    self.enclosure.deactivate_mouth_events()
    self.display(self.get_display_current_time(location, dt))
    time.sleep(5)
    mycroft.audio.wait_while_speaking()
    self.enclosure.mouth_reset()
    self.enclosure.activate_mouth_events()
    self.answering_query = False
    self.displayed_time = None
def handle_utterance(self, message):
    """Handle an utterance: offer it to conversing skills, then Adapt.

    Args:
        message: bus message carrying 'utterances' (list) and 'lang'
    """
    # Get language of the utterance
    lang = message.data.get('lang', None)
    if not lang:
        lang = "en-us"

    utterances = message.data.get('utterances', '')

    # check for conversation time-out
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <=
                          self.converse_timeout * 60]

    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return

    # no skill wants to handle utterance
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        # Fixed the Python-2-only "except StopIteration, e:" syntax.
        except StopIteration as e:
            logger.exception(e)
            continue
def handle_utterance(self, message):
    """Handle an utterance: offer it to conversing skills, then Adapt.

    Args:
        message: bus message carrying 'utterances' (list) and 'lang'
    """
    # Get language of the utterance
    lang = message.data.get('lang', None)
    if not lang:
        lang = "en-us"

    utterances = message.data.get('utterances', '')

    # check for conversation time-out
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <=
                          self.converse_timeout * 60]

    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return

    # no skill wants to handle utterance
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        # Fixed the Python-2-only "except StopIteration, e:" syntax.
        except StopIteration as e:
            LOG.exception(e)
            continue
def parse_utterances(self, utterances, lang):
    """ Parse the utterance using adapt to find a matching intent.

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            LOG.exception(e)
            continue
    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        # update active skills
        # NOTE(review): skill id is cast to int here, unlike sibling
        # implementations that keep it as a string — confirm intended.
        skill_id = int(best_intent['intent_type'].split(":")[0])
        self.add_active_skill(skill_id)
    return best_intent
def handle_get_adapt(self, message):
    """Reply on the message bus with the Adapt intent for an utterance."""
    utterance = message.data["utterance"]
    lang = message.data.get("lang", "en-us")
    norm = normalize(utterance, lang, remove_articles=False)
    intent = self._adapt_intent_match([utterance], [norm], lang)
    reply = message.reply("intent.service.adapt.reply", {"intent": intent})
    self.bus.emit(reply)
def test_articles_it(self):
    """ Test cases for Italian remove_articles """
    for sentence, expected in (
            ('questo è il test', 'questo è test'),
            ('questa è la frase', 'questa è frase'),
            ('questo è lo scopo', 'questo è scopo')):
        self.assertEqual(
            normalize(sentence, lang='it', remove_articles=True), expected)
    kept = 'questo è il test extra'
    self.assertEqual(
        normalize(kept, lang='it', remove_articles=False), kept)
def _normalize_all_utterances(utterances):
    """Pair each utterance with its normalized form when they differ.

    Args:
        utterances (list): list of utterances to normalize

    Returns:
        list of tuples: (original,) when normalization is a no-op,
        otherwise (original utterance, normalized).
    """
    # normalize() changes "it's a boy" to "it is a boy", etc.
    combined = []
    for utt in utterances:
        norm = normalize(utt.lower(), remove_articles=False)
        combined.append((utt,) if utt == norm else (utt, norm))
    LOG.debug("Utterances: {}".format(combined))
    return combined
def parse_brightness(self, brightness):
    """ parse text for brightness percentage

    Args:
        brightness (str): string containing brightness level

    Returns:
        (int): brightness as percentage (0-100), or None when the
        text cannot be interpreted.
    """
    try:
        # Handle "full", etc. via the lookup table
        name = normalize(brightness)
        if name in self.brightness_dict:
            return self.brightness_dict[name]

        if '%' in brightness:
            return int(brightness.replace("%", "").strip())
        if 'percent' in brightness:
            return int(brightness.replace("percent", "").strip())

        i = int(brightness)
        if i < 0 or i > 100:
            return None
        if i < 30:
            # Assume plain 0-30 is "level"
            return int((i * 100.0) / 30.0)
        # Assume plain 31-100 is "percentage"
        return i
    except (TypeError, ValueError):
        # Narrowed from a bare "except:"; only conversion failures are
        # expected here — anything else should surface, not be hidden.
        return None
def handle_fallback(self, message):
    """Fallback handler: answer questions via DuckDuckGo's zero-click API.

    Args:
        message: bus message carrying 'utterance' and optional 'lang'
    """
    utt = message.data.get('utterance')
    LOG.debug("DuckDuckGo fallback attempt: " + utt)
    lang = message.data.get('lang')
    if not lang:
        lang = "en-us"

    utterance = normalize(utt, lang)
    parsed_question = self.question_parser.parse(utterance)

    query = utterance
    if parsed_question:
        # Try to store pieces of utterance (None if not parsed_question)
        utt_word = parsed_question.get('QuestionWord')
        utt_verb = parsed_question.get('QuestionVerb')
        utt_query = parsed_question.get('Query')
        if utt_verb == "'s":
            utt_verb = 'is'
            parsed_question['QuestionVerb'] = 'is'
        query = "%s %s %s" % (utt_word, utt_verb, utt_query)
        LOG.debug("Falling back to DuckDuckGo: " + query)
    else:
        # This utterance doesn't look like a question, don't waste
        # time with DuckDuckgo.
        # TODO: Log missed intent
        LOG.debug("Unknown intent: " + utterance)
        return

    resp = duckduckgo.get_zci(utt_query, web_fallback=False)
    # Was a Python-2-only print statement; the function form is
    # equivalent for a single argument and works on both versions.
    print(resp.split("(")[0])
    self.speak(resp.split("(")[0])
def utterance_remainder(self):
    """ For intents get the portion not consumed by Adapt.

    For example: if they say 'Turn on the family room light' and there
    are entity matches for "turn on" and "light", then it will leave
    behind " the family room " which is then normalized to
    "family room".

    Returns:
        str: Leftover words or None if not an utterance.
    """
    utt = normalize(self.data.get("utterance", ""))
    if utt and "__tags__" in self.data:
        for token in self.data["__tags__"]:
            # Substitute only whole words matching the token.
            # re.escape keeps regex metacharacters in the token key
            # literal — without it keys like "what's?" would corrupt
            # the pattern or match the wrong text.
            utt = re.sub(r'\b' + re.escape(token.get("key", "")) + r'\b',
                         "", utt)
    return normalize(utt)
def CQS_match_query_phrase(self, utt): self.log.debug("WolframAlpha query: " + utt) # TODO: Localization. Wolfram only allows queries in English, # so perhaps autotranslation or other languages? That # would also involve auto-translation of the result, # which is a lot of room for introducting translation # issues. # Automatic translation to English orig_utt = utt if self.autotranslate and self.lang[:2] != 'en': utt = translate(utt, from_language=self.lang[:2], to_language='en') self.log.debug("translation: {}".format(utt)) utterance = normalize(utt, self.lang, remove_articles=False) parsed_question = self.question_parser.parse(utterance) query = utterance if parsed_question: # Try to store pieces of utterance (None if not parsed_question) utt_word = parsed_question.get('QuestionWord') utt_verb = parsed_question.get('QuestionVerb') utt_query = parsed_question.get('Query') query = "%s %s %s" % (utt_word, utt_verb, utt_query) phrase = "know %s %s %s" % (utt_word, utt_query, utt_verb) self.log.debug("Querying WolframAlpha: " + query) else: # This utterance doesn't look like a question, don't waste # time with WolframAlpha. self.log.debug("Non-question, ignoring: " + utterance) return False try: response = self.client.spoken(utt, (self.location['coordinate']['latitude'], self.location['coordinate']['longitude']), self.config_core['system_unit']) if response: response = self.process_wolfram_string(response) # Automatic re-translation to 'self.lang' if self.autotranslate and self.lang[:2] != 'en': response = translate(response, from_language='en', to_language=self.lang[:2]) utt = orig_utt self.log.debug("utt: {} res: {}".format(utt, response)) return (utt, CQSMatchLevel.GENERAL, response, {'query': utt, 'answer': response}) else: return None except HTTPError as e: if e.response.status_code == 401: self.emitter.emit(Message("mycroft.not.paired")) return True except Exception as e: self.log.exception(e) return False
def handle_utterance(self, message):
    """Handle an utterance: converse first, then match and emit an intent.

    Emits the matched intent on the bus, or an "intent_failure"
    message when nothing matched.

    Args:
        message: bus message carrying 'utterances' (list) and 'lang'
    """
    # Get language of the utterance
    lang = message.data.get('lang', None)
    if not lang:
        lang = "en-us"

    utterances = message.data.get('utterances', '')

    # check for conversation time-out
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <=
                          self.converse_timeout * 60]

    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return

    # no skill wants to handle utterance
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        # Was "except e:", which raises NameError instead of catching.
        except Exception as e:
            LOG.exception(e)
            continue

    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        reply = message.reply(
            best_intent.get('intent_type'), best_intent)
        self.emitter.emit(reply)
        # update active skills
        skill_id = int(best_intent['intent_type'].split(":")[0])
        self.add_active_skill(skill_id)
    else:
        self.emitter.emit(Message("intent_failure", {
            "utterance": utterances[0],
            "lang": lang
        }))
def test_numbers(self):
    """English number words 1-20 converted to digits.

    The original asserted the "seven eight nine" case twice; the
    duplicate is removed.
    """
    cases = [
        ("this is a one two three test", "this is 1 2 3 test"),
        (" it's a four five six test", "it is 4 5 6 test"),
        ("it's a seven eight nine test", "it is 7 8 9 test"),
        ("that's a ten eleven twelve test", "that is 10 11 12 test"),
        ("that's a thirteen fourteen test", "that is 13 14 test"),
        ("that's fifteen sixteen seventeen", "that is 15 16 17"),
        ("that's eighteen nineteen twenty", "that is 18 19 20"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence), expected)
def utterance_remainder(self):
    """ For intents get the portion not consumed by Adapt.

    For example: if they say 'Turn on the family room light' and there
    are entity matches for "turn on" and "light", then it will leave
    behind " the family room " which is then normalized to
    "family room".

    Returns:
        str: Leftover words or None if not an utterance.
    """
    utt = self.data.get("utterance", None)
    if utt and "__tags__" in self.data:
        for token in self.data["__tags__"]:
            # Use .get so a tag missing its "key" entry cannot raise
            # KeyError (consistent with sibling implementations);
            # replacing "" with "" is a no-op.
            utt = utt.replace(token.get("key", ""), "")
    return normalize(utt)
def test_numbers_it(self):
    """Italian number words converted to digits."""
    self.assertEqual(normalize(u"questo è il test uno due tre", lang="it"),
                     u"questo è test 1 2 3")
    self.assertEqual(normalize(u"è un test sette otto nove", lang="it"),
                     u"è 1 test 7 8 9")
    self.assertEqual(
        normalize("test zero dieci undici dodici tredici", lang="it"),
        "test 0 10 11 12 13")
    # Large/compound numbers, article removal disabled.
    self.assertEqual(
        normalize("test mille seicento sessanta e sei", lang="it",
                  remove_articles=False),
        "test 1000 600 60 e 6")
    self.assertEqual(
        normalize("test sette e mezzo", lang="it", remove_articles=False),
        "test 7 e mezzo")
    self.assertEqual(normalize("test due punto nove", lang="it"),
                     "test 2 punto 9")
    self.assertEqual(
        normalize("test cento e nove", lang="it", remove_articles=False),
        "test 100 e 9")
    self.assertEqual(normalize("test venti e 1", lang="it"),
                     "test 20 e 1")
    self.assertEqual(normalize("test ventuno e ventisette", lang="it"),
                     "test 21 e 27")
def handle_utterance(self, message):
    """Find the best Adapt intent for the incoming utterances.

    Args:
        message: bus message carrying 'utterances' (list) and 'lang'
    """
    # Get language of the utterance
    lang = message.data.get('lang', None)
    if not lang:
        lang = "en-us"

    utterances = message.data.get('utterances', '')

    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        # Fixed the Python-2-only "except StopIteration, e:" syntax.
        except StopIteration as e:
            logger.exception(e)
            continue
def test_numbers_it(self):
    """ Test cases for Italian normalize lang='it' """
    self.assertEqual(normalize('è un test sette otto nove', lang='it'),
                     'è 1 test 7 8 9')
    self.assertEqual(
        normalize('test zero dieci undici dodici tredici', lang='it'),
        'test 0 10 11 12 13')
    self.assertEqual(
        normalize('test mille seicento sessanta e sei', lang='it',
                  remove_articles=False),
        'test 1000 600 60 e 6')
    # Note: "mezzo" is rendered as the decimal 0.5 in this version.
    self.assertEqual(
        normalize('test sette e mezzo', lang='it', remove_articles=False),
        'test 7 e 0.5')
    self.assertEqual(normalize('test due punto nove', lang='it'),
                     'test 2 punto 9')
    self.assertEqual(
        normalize('test cento e nove', lang='it', remove_articles=False),
        'test 100 e 9')
    self.assertEqual(normalize('test venti e 1', lang='it'),
                     'test 20 e 1')
    self.assertEqual(normalize('test ventuno e ventisette', lang='it'),
                     'test 21 e 27')
def _adapt_intent_match(self, utterances, lang):
    """ Run the Adapt engine to search for a matching intent.

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            LOG.exception(e)
            continue
    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        # update active skills
        skill_id = best_intent['intent_type'].split(":")[0]
        self.add_active_skill(skill_id)
        # adapt doesn't handle context injection for one_of keywords
        # correctly. Workaround this issue if possible.
        try:
            best_intent = workaround_one_of_context(best_intent)
        except LookupError:
            LOG.error('Error during workaround_one_of_context')
    return best_intent
def test_numbers_es(self):
    """Spanish number words converted to digits.

    Note: the original literals contained mojibake ("m�s",
    "diecis�is", "hab�a", "veintitr�s"); the accented text is
    restored ("más", "dieciséis", "había", "veintitrés").
    """
    cases = [
        ("esto es un uno una", "esto es 1 1 1"),
        ("esto es dos tres prueba", "esto es 2 3 prueba"),
        ("esto es cuatro cinco seis prueba", "esto es 4 5 6 prueba"),
        (u"siete más ocho más nueve", u"7 más 8 más 9"),
        ("diez once doce trece catorce quince", "10 11 12 13 14 15"),
        (u"dieciséis diecisiete", "16 17"),
        (u"dieciocho diecinueve", "18 19"),
        (u"veinte treinta cuarenta", "20 30 40"),
        (u"treinta y dos caballos", "32 caballos"),
        (u"cien caballos", "100 caballos"),
        (u"ciento once caballos", "111 caballos"),
        (u"había cuatrocientas una vacas", u"había 401 vacas"),
        (u"dos mil", "2000"),
        (u"dos mil trescientas cuarenta y cinco", "2345"),
        (u"ciento veintitrés mil cuatrocientas cincuenta y seis",
         "123456"),
        (u"quinientas veinticinco mil", "525000"),
        (u"novecientos noventa y nueve mil novecientos noventa y nueve",
         "999999"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence, lang="es"), expected)
def test_combinations(self):
    """Contractions are expanded and number words digitized together."""
    cases = [
        ("I couldn't have guessed there'd be two",
         "I could not have guessed there would be 2"),
        ("I wouldn't have", "I would not have"),
        ("I hadn't been there", "I had not been there"),
        ("I would've", "I would have"),
        ("it hadn't", "it had not"),
        ("it hadn't have", "it had not have"),
        ("it would've", "it would have"),
        ("she wouldn't have", "she would not have"),
        ("she would've", "she would have"),
        ("someone wouldn't have", "someone would not have"),
        ("someone would've", "someone would have"),
        ("what's the weather like", "what is weather like"),
        ("that's what I told you", "that is what I told you"),
        ("whats 8 + 4", "what is 8 + 4"),
    ]
    for sentence, expected in cases:
        self.assertEqual(normalize(sentence), expected)