def match_sentence(self, client_context, pattern_sentence, topic_pattern, that_pattern):
    """Match a question sentence against the pattern graph.

    The question is combined with the current topic and that patterns into a
    single composite sentence (QUESTION __TOPIC__ topic __THAT__ that) and
    walked through the root pattern node. Returns the populated MatchContext
    when a template matches, otherwise None.
    """
    tokenizer = client_context.brain.tokenizer
    topic = Sentence(tokenizer, topic_pattern)
    that = Sentence(tokenizer, that_pattern)

    YLogger.debug(client_context, "AIML Parser matching sentence [%s], topic=[%s], that=[%s] ",
                  pattern_sentence.text(), topic_pattern, that_pattern)

    # Build the composite search sentence
    full_sentence = Sentence(tokenizer)
    full_sentence.append_sentence(pattern_sentence)
    full_sentence.append_word('__TOPIC__')
    full_sentence.append_sentence(topic)
    full_sentence.append_word('__THAT__')
    full_sentence.append_sentence(that)

    YLogger.debug(client_context, "Matching [%s]", full_sentence.words_from_current_pos(0))

    bot_config = client_context.bot.configuration
    match_context = MatchContext(max_search_depth=bot_config.max_search_depth,
                                 max_search_timeout=bot_config.max_search_timeout,
                                 tokenizer=tokenizer)

    template = self._pattern_parser._root_node.match(client_context, match_context, full_sentence)
    if template is None:
        return None

    match_context._template_node = template
    match_context.list_matches(client_context)
    # Save the matched context for the associated sentence
    pattern_sentence.matched_context = match_context
    return match_context
def test_node_with_star(self):
    """A template <star/> resolves to the wildcard text captured in the match context."""
    root = TemplateNode()
    root.append(TemplateStarNode())

    conversation = Conversation(self._client_context)
    splitter = self._client_context.bot.sentence_splitter

    first = Question.create_from_text(self._client_context, "Hello world", splitter)
    first.current_sentence()._response = "Hello matey"
    conversation.record_dialog(first)

    second = Question.create_from_text(self._client_context, "How are you", splitter)
    second.current_sentence()._response = "Very well thanks"
    conversation.record_dialog(second)

    # Attach a matched context so the star node has a WORD match to resolve
    wildcard = PatternOneOrMoreWildCardNode("*")
    matched = MatchContext(max_search_depth=100, max_search_timeout=-1,
                           tokenizer=self._client_context.brain.tokenizer)
    matched.add_match(Match(Match.WORD, wildcard, "Matched"))
    second.current_sentence()._matched_context = matched
    conversation.record_dialog(second)

    self._client_context.bot._conversation_mgr._conversations["testid"] = conversation

    self.assertEqual("Matched", root.resolve(self._client_context))
def test_resolve_no_defaults_inside_topic(self):
    """A topicstar node with index=1 resolves to the first TOPIC wildcard match."""
    root = TemplateNode()
    self.assertIsNotNone(root)
    self.assertIsNotNone(root.children)
    self.assertEqual(len(root.children), 0)

    node = TemplateTopicStarNode(index=1)
    self.assertIsNotNone(node)
    self.assertEqual(1, node.index)

    root.append(node)
    self.assertEqual(len(root.children), 1)

    tokenizer = self._client_context.brain.tokenizer
    conversation = Conversation(self._client_context)

    first = Question.create_from_text(tokenizer, "Hello world")
    first.current_sentence()._response = "Hello matey"
    conversation.record_dialog(first)

    second = Question.create_from_text(tokenizer, "How are you")
    second.current_sentence()._response = "Very well thanks"
    conversation.record_dialog(second)

    # Give the latest question a TOPIC match for the topicstar to pick up
    wildcard = PatternOneOrMoreWildCardNode("*")
    matched = MatchContext(max_search_depth=100, max_search_timeout=-1, tokenizer=tokenizer)
    matched.add_match(Match(Match.TOPIC, wildcard, "Matched"))
    second.current_sentence()._matched_context = matched
    conversation.record_dialog(second)

    self._client_context.bot._conversations["testid"] = conversation

    self.assertEqual("Matched", node.resolve(self._client_context))
def match_sentence(self, bot, clientid, pattern_sentence, topic_pattern, that_pattern):
    """Match a question sentence plus topic/that patterns against the pattern graph.

    Returns the populated MatchContext when a template matches, otherwise None.
    """
    tokenizer = bot.brain.tokenizer
    topic = Sentence(tokenizer, topic_pattern)
    that = Sentence(tokenizer, that_pattern)

    # Guarded so the format arguments are only evaluated when DEBUG is on
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug("AIML Parser matching sentence [%s], topic=[%s], that=[%s] ",
                      pattern_sentence.text(), topic_pattern, that_pattern)

    # Compose QUESTION __TOPIC__ topic __THAT__ that into one search sentence
    combined = Sentence(tokenizer)
    combined.append_sentence(pattern_sentence)
    combined.append_word('__TOPIC__')
    combined.append_sentence(topic)
    combined.append_word('__THAT__')
    combined.append_sentence(that)

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug("Matching [%s]", combined.words_from_current_pos(0))

    match_context = MatchContext(max_search_depth=bot.configuration.max_search_depth,
                                 max_search_timeout=bot.configuration.max_search_timeout,
                                 tokenizer=tokenizer)

    template = self._pattern_parser._root_node.match(bot, clientid, match_context, combined)
    if template is None:
        return None

    match_context._template_node = template
    match_context.list_matches()
    # Save the matched context for the associated sentence
    pattern_sentence.matched_context = match_context
    return match_context
def match_sentence(self, bot, clientid, pattern_sentence, topic_pattern, that_pattern):
    """Match a question sentence plus topic/that patterns against the pattern graph.

    Returns the populated MatchContext when a template matches, otherwise None.
    """
    topic_sentence = Sentence(topic_pattern)
    that_sentence = Sentence(that_pattern)

    logging.debug(
        "AIML Parser matching sentence [%s], topic=[%s], that=[%s] ",
        pattern_sentence.text(), topic_pattern, that_pattern)

    # Compose QUESTION __TOPIC__ topic __THAT__ that into one search sentence
    sentence = Sentence()
    sentence.append_sentence(pattern_sentence)
    sentence.append_word('__TOPIC__')
    sentence.append_sentence(topic_sentence)
    sentence.append_word('__THAT__')
    sentence.append_sentence(that_sentence)

    # Fix: pass format args lazily instead of eager %-formatting, so the
    # message is only built when DEBUG is enabled and a stray '%' in the
    # sentence words cannot break the format operation
    logging.debug("Matching [%s]", sentence.words_from_current_pos(0))

    context = MatchContext()

    template = self.pattern_parser._root_node.match(
        bot, clientid, context, sentence)

    if template is not None:
        context._template_node = template
        context.list_matches()
        # Save the matched context for the associated sentence
        pattern_sentence.matched_context = context
        return context
    return None
def test_check_child_is_wildcard_hash(self):
    """A zero-or-more '#' child yields a wildcard match for both multi- and single-word input."""
    wildcard = MockPatternWildCardNode("*")
    self.assertIsNotNone(wildcard)
    wildcard._0ormore_hash = PatternZeroOrMoreWildCardNode('#')
    wildcard._0ormore_hash._template = PatternTemplateNode(TemplateNode())

    tokenizer = self._client_context.brain.tokenizer
    # Both inputs are expected to produce a match
    for text in ("TEST SENTENCE", "TEST"):
        context = MatchContext(max_search_depth=100, max_search_timeout=-1,
                               tokenizer=tokenizer)
        sentence = Sentence(tokenizer, text)
        result = wildcard.check_child_is_wildcard("", self._client_context, context,
                                                  sentence, 0, Match.WORD, 0)
        self.assertIsNotNone(result)
def match_sentence(self, bot, clientid, pattern_sentence, topic_pattern, that_pattern):
    """Match a question sentence plus topic/that patterns against the pattern graph.

    Returns the populated MatchContext when a template matches, otherwise None.
    """
    topic_sentence = Sentence(topic_pattern)
    that_sentence = Sentence(that_pattern)

    logging.debug("AIML Parser matching sentence [%s], topic=[%s], that=[%s] ",
                  pattern_sentence.text(), topic_pattern, that_pattern)

    # Compose QUESTION __TOPIC__ topic __THAT__ that into one search sentence
    sentence = Sentence()
    sentence.append_sentence(pattern_sentence)
    sentence.append_word('__TOPIC__')
    sentence.append_sentence(topic_sentence)
    sentence.append_word('__THAT__')
    sentence.append_sentence(that_sentence)

    # Fix: pass format args lazily instead of eager %-formatting, so the
    # message is only built when DEBUG is enabled and a stray '%' in the
    # sentence words cannot break the format operation
    logging.debug("Matching [%s]", sentence.words_from_current_pos(0))

    context = MatchContext()

    template = self.pattern_parser._root_node.match(bot, clientid, context, sentence)

    if template is not None:
        context._template_node = template
        context.list_matches()
        # Save the matched context for the associated sentence
        pattern_sentence.matched_context = context
        return context
    return None
def test_match_context_depth(self):
    """MatchContext defaults to MAX_SEARCH_DEPTH and honours an explicit depth."""
    # Default construction picks up the class-level maximum search depth
    context1 = MatchContext()
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(MatchContext.MAX_SEARCH_DEPTH, context1.max_search_depth)

    # An explicit depth overrides the default
    context2 = MatchContext(999)
    self.assertEqual(999, context2.max_search_depth)
def test_resolve_no_defaults_inside_topic(self):
    """A topicstar node with index=1 resolves to the first TOPIC wildcard match."""
    root = TemplateNode()
    self.assertIsNotNone(root)
    self.assertIsNotNone(root.children)
    self.assertEqual(len(root.children), 0)

    node = TemplateTopicStarNode(index=1)
    self.assertIsNotNone(node)
    self.assertEqual(1, node.index)

    root.append(node)
    self.assertEqual(len(root.children), 1)

    conversation = Conversation("testid", self.bot)

    first = Question.create_from_text("Hello world")
    first.current_sentence()._response = "Hello matey"
    conversation.record_dialog(first)

    second = Question.create_from_text("How are you")
    second.current_sentence()._response = "Very well thanks"
    conversation.record_dialog(second)

    # Give the latest question a TOPIC match for the topicstar to pick up
    wildcard = PatternOneOrMoreWildCardNode("*")
    matched = MatchContext(max_search_depth=100, max_search_timeout=-1)
    matched.add_match(Match(Match.TOPIC, wildcard, "Matched"))
    second.current_sentence()._matched_context = matched
    conversation.record_dialog(second)

    self.bot._conversations["testid"] = conversation

    self.assertEqual("Matched", node.resolve(self.bot, "testid"))
def match_sentence(self, client_context, pattern_sentence, topic_pattern, that_pattern):
    """Match a question sentence against the pattern graph.

    Empty topic/that patterns are widened to '*' so they match anything.
    Returns the populated MatchContext when a template matches, otherwise None.
    """
    tokenizer = client_context.brain.tokenizer

    topic_sentence = Sentence(tokenizer, topic_pattern)
    if not topic_sentence.words:
        topic_sentence.words.append('*')

    that_sentence = Sentence(tokenizer, that_pattern)
    if not that_sentence.words:
        that_sentence.words.append('*')

    if client_context.match_nlu is True:
        YLogger.debug(client_context,
                      "AIML Parser NLU matching topic=[%s], that=[%s] ",
                      topic_pattern, that_pattern)
    else:
        YLogger.debug(client_context,
                      "AIML Parser matching sentence [%s], topic=[%s], that=[%s] ",
                      pattern_sentence.text(), topic_pattern, that_pattern)

    # Compose QUESTION __TOPIC__ topic __THAT__ that into one search sentence
    full_sentence = Sentence(tokenizer)
    full_sentence.append_sentence(pattern_sentence)
    full_sentence.append_word('__TOPIC__')
    full_sentence.append_sentence(topic_sentence)
    full_sentence.append_word('__THAT__')
    full_sentence.append_sentence(that_sentence)

    YLogger.debug(client_context, "Matching [%s]", full_sentence.words_from_current_pos(0))

    configuration = client_context.bot.configuration
    match_context = MatchContext(max_search_depth=configuration.max_search_depth,
                                 max_search_timeout=configuration.max_search_timeout,
                                 tokenizer=tokenizer,
                                 start_time=client_context.question_start_time)

    template = self._pattern_parser._root_node.match(client_context, match_context, full_sentence)
    if template is None:
        return None

    match_context._template_node = template
    match_context.list_matches(client_context)
    # Save the matched context for the associated sentence
    pattern_sentence.matched_context = match_context
    return match_context
def test_time_functions(self):
    """Exercise search_time_exceeded() for the -1 / 0 / positive timeout cases."""
    # A negative timeout means "no timeout": the search never times out
    context = MatchContext(max_search_depth=100, max_search_timeout=-1)
    self.assertEqual(-1, context.max_search_timeout)
    self.assertFalse(context.search_time_exceeded())

    # A zero timeout is exceeded immediately
    context = MatchContext(max_search_depth=100, max_search_timeout=0)
    self.assertEqual(0, context.max_search_timeout)
    self.assertTrue(context.search_time_exceeded())

    # With a 60 second timeout, shift the recorded search start so the
    # timeout registers as exceeded.
    # NOTE(review): subtracting timedelta(seconds=-70) places the start
    # time 70s in the FUTURE, not 70s in the past. The assertion passes
    # as written, but the intent reads like seconds=70 (a start 70s ago)
    # - confirm against the search_time_exceeded() implementation.
    context = MatchContext(max_search_depth=100, max_search_timeout=60)
    time_now = datetime.datetime.now()
    prev_time = time_now - datetime.timedelta(seconds=-70)
    context._total_search_start = prev_time
    self.assertTrue(context.search_time_exceeded())
def setUp(self):
    """Build a client context plus a test sentence carrying a pre-populated
    matched context (six WORD matches and catch-all TOPIC/THAT matches)."""
    self._client = TemplateGraphClient()
    self._client_context = self._client.create_client_context("testid")
    self._graph = self._client_context.bot.brain.aiml_parser.template_parser

    tokenizer = self._client_context.brain.tokenizer
    self.test_sentence = Sentence(tokenizer, "test sentence")

    wildcard = PatternOneOrMoreWildCardNode("*")
    matched_context = MatchContext(max_search_depth=100, max_search_timeout=-1,
                                   tokenizer=tokenizer)
    matches = [Match(Match.WORD, wildcard, word)
               for word in ('one', 'two', 'three', 'four', 'five', 'six')]
    matches.append(Match(Match.TOPIC, wildcard, '*'))
    matches.append(Match(Match.THAT, wildcard, '*'))
    matched_context._matched_nodes = matches
    self.test_sentence._matched_context = matched_context

    # Record the sentence as the current question in the conversation
    conversation = self._client_context.bot.get_conversation(self._client_context)
    question = Question.create_from_sentence(self.test_sentence)
    conversation._questions.append(question)
def test_check_child_is_wildcard_star(self):
    """A one-or-more '*' child matches the two-word sentence but not the single word."""
    wildcard = MockPatternWildCardNode("*")
    self.assertIsNotNone(wildcard)
    wildcard._1ormore_star = PatternOneOrMoreWildCardNode('*')
    wildcard._1ormore_star._template = PatternTemplateNode(TemplateNode())

    # Two-word input: a wildcard match is found
    multi_context = MatchContext(max_search_depth=100, max_search_timeout=-1)
    multi_sentence = Sentence("TEST SENTENCE")
    self.assertIsNotNone(
        wildcard.check_child_is_wildcard("", self.bot, self.clientid, multi_context,
                                         multi_sentence, 0, Match.WORD, 0))

    # Single-word input: no wildcard match
    single_context = MatchContext(max_search_depth=100, max_search_timeout=-1)
    single_sentence = Sentence("TEST")
    self.assertIsNone(
        wildcard.check_child_is_wildcard("", self.bot, self.clientid, single_context,
                                         single_sentence, 0, Match.WORD, 0))
def setUp(self):
    """Build a parser, bot and test sentence carrying a pre-populated matched
    context (six WORD matches and catch-all TOPIC/THAT matches)."""
    self.parser = TemplateGraph(AIMLParser())
    self.assertIsNotNone(self.parser)
    self.test_brain = None

    self.test_sentence = Sentence("test sentence")

    wildcard = PatternOneOrMoreWildCardNode("*")
    matched_context = MatchContext(max_search_depth=100, max_search_timeout=-1)
    matches = [Match(Match.WORD, wildcard, word)
               for word in ('one', 'two', 'three', 'four', 'five', 'six')]
    matches.append(Match(Match.TOPIC, wildcard, '*'))
    matches.append(Match(Match.THAT, wildcard, '*'))
    matched_context._matched_nodes = matches
    self.test_sentence._matched_context = matched_context

    test_config = ProgramyConfiguration(self.get_client_config(),
                                        brain_config=self.get_brain_config(),
                                        bot_config=self.get_bot_config())
    self.test_bot = Bot(Brain(self.get_brain_config()), config=test_config.bot_configuration)
    self.test_clientid = "testid"

    # Record the sentence as the current question in the conversation
    conversation = self.test_bot.get_conversation(self.test_clientid)
    question = Question.create_from_sentence(self.test_sentence)
    conversation._questions.append(question)
def test_match_context_depth(self):
    """matched() is False until a template node is attached via set_template()."""
    context = MatchContext()
    self.assertFalse(context.matched())

    template = PatternTemplateNode(template=TemplateNode)
    context.set_template(template)

    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(template, context.template_node())
    self.assertTrue(context.matched())
def test_check_child_is_wildcard_no_wildcard_children(self):
    """With no wildcard children configured, no wildcard match is returned."""
    node = MockPatternWildCardNode("*")
    self.assertIsNotNone(node)

    context = MatchContext(max_search_depth=100, max_search_timeout=-1)
    sentence = Sentence("TEST SENTENCE")
    result = node.check_child_is_wildcard("", self.bot, self.clientid, context,
                                          sentence, 0, Match.WORD, 0)
    self.assertIsNone(result)
def test_match_context_depth(self):
    """Constructor stores depth/timeout; matched() flips once a template is set."""
    match_context = MatchContext(max_search_depth=100, max_search_timeout=60,
                                 tokenizer=self._client_context.brain.tokenizer)

    # Constructor arguments are exposed as properties
    self.assertEqual(100, match_context.max_search_depth)
    self.assertEqual(60, match_context.max_search_timeout)

    # No template yet, so nothing has matched
    self.assertFalse(match_context.matched())

    template_node = PatternTemplateNode(template=TemplateNode)
    match_context.set_template(template_node)

    self.assertEqual(template_node, match_context.template_node())
    self.assertTrue(match_context.matched())
def test_match_context_depth(self):
    """Constructor stores depth/timeout; matched() flips once a template is set."""
    context = MatchContext(max_search_depth=100, max_search_timeout=60)
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(100, context.max_search_depth)
    self.assertEqual(60, context.max_search_timeout)
    self.assertFalse(context.matched())

    template = PatternTemplateNode(template=TemplateNode)
    context.set_template(template)

    self.assertEqual(template, context.template_node())
    self.assertTrue(context.matched())
def test_check_child_is_wildcard_arrow(self):
    """A zero-or-more '^' child yields a wildcard match for both multi- and single-word input."""
    wildcard = MockPatternWildCardNode("*")
    self.assertIsNotNone(wildcard)
    wildcard._0ormore_arrow = PatternZeroOrMoreWildCardNode('^')
    wildcard._0ormore_arrow._template = PatternTemplateNode(TemplateNode())

    # Both inputs are expected to produce a match
    for text in ("TEST SENTENCE", "TEST"):
        context = MatchContext()
        sentence = Sentence(text)
        result = wildcard.check_child_is_wildcard("", self.bot, self.clientid, context,
                                                  sentence, 0, Match.WORD, 0)
        self.assertIsNotNone(result)
def test_node_with_star(self):
    """A template <star/> resolves to the wildcard text captured in the match context."""
    root = TemplateNode()
    root.append(TemplateStarNode())

    tokenizer = self._client_context.brain.tokenizer
    conversation = Conversation(self._client_context)

    first = Question.create_from_text(tokenizer, "Hello world")
    first.current_sentence()._response = "Hello matey"
    conversation.record_dialog(first)

    second = Question.create_from_text(tokenizer, "How are you")
    second.current_sentence()._response = "Very well thanks"
    conversation.record_dialog(second)

    # Attach a matched context so the star node has a WORD match to resolve
    wildcard = PatternOneOrMoreWildCardNode("*")
    matched = MatchContext(max_search_depth=100, max_search_timeout=-1, tokenizer=tokenizer)
    matched.add_match(Match(Match.WORD, wildcard, "Matched"))
    second.current_sentence()._matched_context = matched
    conversation.record_dialog(second)

    self._client_context.bot._conversations["testid"] = conversation

    self.assertEqual("Matched", root.resolve(self._client_context))
def test_node_with_star(self):
    """A template <star/> resolves to the wildcard text captured in the match context."""
    root = TemplateNode()
    root.append(TemplateStarNode())

    conversation = Conversation("testid", self.bot)

    first = Question.create_from_text("Hello world")
    first.current_sentence()._response = "Hello matey"
    conversation.record_dialog(first)

    second = Question.create_from_text("How are you")
    second.current_sentence()._response = "Very well thanks"
    conversation.record_dialog(second)

    # Attach a matched context so the star node has a WORD match to resolve
    wildcard = PatternOneOrMoreWildCardNode("*")
    matched = MatchContext()
    matched.add_match(Match(Match.WORD, wildcard, "Matched"))
    second.current_sentence()._matched_context = matched
    conversation.record_dialog(second)

    self.bot._conversations["testid"] = conversation

    self.assertEqual("Matched", root.resolve(self.bot, self.clientid))
def test_invalid_topic_or_that(self):
    """TOPIC and THAT markers are rejected by invalid_topic_or_that; ordinary words are not."""
    wildcard = MockPatternWildCardNode("*")
    self.assertIsNotNone(wildcard)

    context = MatchContext(max_search_depth=100, max_search_timeout=-1)
    matches_added = 1

    # Both structural markers are invalid inside a wildcard walk
    for marker in (PatternTopicNode.TOPIC, PatternTopicNode.THAT):
        self.assertTrue(wildcard.invalid_topic_or_that("", marker, context, matches_added))

    # An ordinary word is fine
    self.assertFalse(wildcard.invalid_topic_or_that("", "TEST", context, matches_added))
def test_match_context_depth(self):
    """Constructor stores depth/timeout; matched() flips once a template is set."""
    context = MatchContext(max_search_depth=100, max_search_timeout=60,
                           tokenizer=self._bot.brain.tokenizer)
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(100, context.max_search_depth)
    self.assertEqual(60, context.max_search_timeout)
    self.assertFalse(context.matched())

    template = PatternTemplateNode(template=TemplateNode)
    context.set_template(template)

    self.assertEqual(template, context.template_node())
    self.assertTrue(context.matched())
def test_time_functions(self):
    """Exercise search_time_exceeded() for the -1 / 0 / positive timeout cases."""
    # A negative timeout means "no timeout": the search never times out
    context = MatchContext(max_search_depth=100, max_search_timeout=-1, tokenizer=self._bot.brain.tokenizer)
    self.assertEqual(-1, context.max_search_timeout)
    self.assertFalse(context.search_time_exceeded())

    # A zero timeout is exceeded immediately
    context = MatchContext(max_search_depth=100, max_search_timeout=0, tokenizer=self._bot.brain.tokenizer)
    self.assertEqual(0, context.max_search_timeout)
    self.assertTrue(context.search_time_exceeded())

    # With a 60 second timeout, shift the recorded search start so the
    # timeout registers as exceeded.
    # NOTE(review): subtracting timedelta(seconds=-70) places the start
    # time 70s in the FUTURE, not 70s in the past. The assertion passes
    # as written, but the intent reads like seconds=70 (a start 70s ago)
    # - confirm against the search_time_exceeded() implementation.
    context = MatchContext(max_search_depth=100, max_search_timeout=60, tokenizer=self._bot.brain.tokenizer)
    time_now = datetime.datetime.now()
    prev_time = time_now - datetime.timedelta(seconds=-70)
    context._total_search_start = prev_time
    self.assertTrue(context.search_time_exceeded())
def test_match_context_star(self):
    """star/topicstar/thatstar return the Nth wildcard text of their kind, None past the end."""
    word = PatternOneOrMoreWildCardNode("*")
    topic = PatternOneOrMoreWildCardNode("*")
    that = PatternOneOrMoreWildCardNode("*")

    context = MatchContext()
    context.add_match(Match(Match.WORD, word, "Hello"))
    context.add_match(Match(Match.TOPIC, topic, "Hello Topic"))
    context.add_match(Match(Match.THAT, that, "Hello That"))

    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(3, len(context.matched_nodes))

    self.assertEqual("Hello", context.star(1))
    self.assertIsNone(context.star(2))
    self.assertEqual("Hello Topic", context.topicstar(1))
    self.assertIsNone(context.topicstar(2))
    self.assertEqual("Hello That", context.thatstar(1))
    self.assertIsNone(context.thatstar(2))
def test_match_context_star(self):
    """star/topicstar/thatstar return the Nth wildcard text of their kind, None past the end."""
    word = PatternOneOrMoreWildCardNode("*")
    topic = PatternOneOrMoreWildCardNode("*")
    that = PatternOneOrMoreWildCardNode("*")

    context = MatchContext(max_search_depth=100, max_search_timeout=60,
                           tokenizer=self._bot.brain.tokenizer)
    context.add_match(Match(Match.WORD, word, "Hello"))
    context.add_match(Match(Match.TOPIC, topic, "Hello Topic"))
    context.add_match(Match(Match.THAT, that, "Hello That"))

    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(3, len(context.matched_nodes))

    self.assertEqual("Hello", context.star(1))
    self.assertIsNone(context.star(2))
    self.assertEqual("Hello Topic", context.topicstar(1))
    self.assertIsNone(context.topicstar(2))
    self.assertEqual("Hello That", context.thatstar(1))
    self.assertIsNone(context.thatstar(2))
def test_match_context_pop_push(self):
    """Matches push onto the context stack and pop off; popping empty is a no-op."""
    topic = PatternOneOrMoreWildCardNode("*")
    context = MatchContext(max_search_depth=100, max_search_timeout=60,
                           tokenizer=self._bot.brain.tokenizer)

    # Each add_match grows the matched-node stack by one
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    for expected in (1, 2, 3):
        context.add_match(Match(Match.TOPIC, topic, None))
        self.assertEqual(expected, len(context.matched_nodes))

    # Each pop_match shrinks it by one...
    for expected in (2, 1, 0):
        context.pop_match()
        self.assertEqual(expected, len(context.matched_nodes))

    # ...and popping when already empty leaves it empty rather than raising
    context.pop_match()
    self.assertEqual(0, len(context.matched_nodes))
def test_match_context_depth(self):
    """Constructor stores the supplied search depth and timeout."""
    context1 = MatchContext(max_search_depth=100, max_search_timeout=60,
                            tokenizer=self._bot.brain.tokenizer)
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(100, context1.max_search_depth)
    # Fix: the attribute is max_search_timeout, as asserted by the sibling
    # depth/timeout tests after the identical constructor call;
    # max_search_time looks like a typo
    self.assertEqual(60, context1.max_search_timeout)
def test_attrib_with_html(self):
    """XML attribute values containing template nodes (here a <star/>) are parsed
    into child template trees and resolved into the final attribute string."""
    template = ET.fromstring("""
        <template>
            <a target="_new" href="http://www.google.com/search?q=<star />">
                Google Search
            </a>
        </template>
        """)

    conversation = Conversation(self._client_context)

    question = Question.create_from_text(
        self._client_context, "GOOGLE AIML",
        self._client_context.bot.sentence_splitter)
    question.current_sentence()._response = "OK"
    conversation.record_dialog(question)

    # Give the question a matched context so <star/> resolves to "AIML"
    match = PatternOneOrMoreWildCardNode("*")
    context = MatchContext(max_search_depth=100, max_search_timeout=-1,
                           tokenizer=self._client_context.brain.tokenizer)
    context.add_match(Match(Match.WORD, match, "AIML"))
    question.current_sentence()._matched_context = context
    self._client_context.bot._conversation_mgr._conversations["testid"] = conversation

    ast = self._graph.parse_template_expression(template)
    self.assertIsNotNone(ast)
    self.assertIsInstance(ast, TemplateNode)
    self.assertIsNotNone(ast.children)
    self.assertEqual(len(ast.children), 1)

    xml_node = ast.children[0]
    self.assertIsNotNone(xml_node)
    self.assertIsInstance(xml_node, TemplateXMLNode)

    attribs = xml_node.attribs
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(2, len(attribs))

    # Plain attribute parses to a single word node
    self.assertIsInstance(attribs['target'], TemplateWordNode)
    target = attribs['target']
    self.assertEqual(len(target.children), 0)
    self.assertEqual("_new", target.word)

    # Attribute containing <star/> parses to word + star + (empty) word
    self.assertIsInstance(attribs['href'], TemplateNode)
    href = attribs['href']
    self.assertEqual(len(href.children), 3)
    self.assertIsInstance(href.children[0], TemplateWordNode)
    self.assertEqual('http://www.google.com/search?q=', href.children[0].word)
    self.assertIsInstance(href.children[1], TemplateNode)
    self.assertEqual(1, len(href.children[1].children))
    star = href.children[1].children[0]
    self.assertIsInstance(star, TemplateStarNode)
    self.assertIsInstance(href.children[2], TemplateWordNode)
    self.assertEqual('', href.children[2].word)

    result = xml_node.resolve(self._client_context)
    self.assertIsNotNone(result)
    # Fix: the expected string literal had been split mid-literal by a
    # formatting error; reassembled into a single literal
    self.assertEqual(
        result,
        '<a target="_new" href="http://www.google.com/search?q=AIML">Google Search</a>')
def test_match_context_pop_push(self):
    """Matches push onto the context stack and pop off; popping empty is a no-op."""
    topic = PatternOneOrMoreWildCardNode("*")
    context = MatchContext()

    # Each add_match grows the matched-node stack by one
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    for expected in (1, 2, 3):
        context.add_match(Match(Match.TOPIC, topic, None))
        self.assertEqual(expected, len(context.matched_nodes))

    # Each pop_match shrinks it by one...
    for expected in (2, 1, 0):
        context.pop_match()
        self.assertEqual(expected, len(context.matched_nodes))

    # ...and popping when already empty leaves it empty rather than raising
    context.pop_match()
    self.assertEqual(0, len(context.matched_nodes))
def test_match_context_depth(self):
    """Constructor stores the supplied search depth and timeout."""
    context1 = MatchContext(max_search_depth=100, max_search_timeout=60)
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12
    self.assertEqual(100, context1.max_search_depth)
    # Fix: the attribute is max_search_timeout, as asserted by the sibling
    # depth/timeout tests after the identical constructor call;
    # max_search_time looks like a typo
    self.assertEqual(60, context1.max_search_timeout)