def _extract_entities(self, message: Message) -> List[Dict[Text, Any]]:
    """Extract entities matching the configured regex patterns from the message.

    Args:
        message: incoming user message whose text is scanned.

    Returns:
        One entity dict (type, start, end, value) per regex match, in order.
    """
    entities = []

    # Honor the component's case-sensitivity setting.
    flags = 0 if self.case_sensitive else re.IGNORECASE

    # Hoisted out of the loops: the text is invariant, and the original
    # re-fetched it for every single match.
    text = message.get(TEXT)

    for pattern in self.patterns:
        # No need to materialize the finditer iterator into a list first.
        for match in re.finditer(pattern["pattern"], text, flags=flags):
            start_index = match.start()
            end_index = match.end()
            entities.append({
                ENTITY_ATTRIBUTE_TYPE: pattern["name"],
                ENTITY_ATTRIBUTE_START: start_index,
                ENTITY_ATTRIBUTE_END: end_index,
                ENTITY_ATTRIBUTE_VALUE: text[start_index:end_index],
            })

    return entities
# --- Example 2 ---
def test_entity_synonyms_substitute_two_entity():
    """Substituting synonyms must shift the offsets of both entities."""
    message = Message(
        text="Looking for a chines restaurant in New York tomorrow",
        data={
            "entities": [
                {"entity": "type", "value": "chinese", "start": 14, "end": 20},
                {"entity": "city", "value": "New York", "start": 35, "end": 43},
            ]
        },
    )
    EntitySynonymBegin(
        synonyms={"chines": "chinese", "new york": "NYC"}
    ).process(message)

    assert message.text == "Looking for a chinese restaurant in NYC tomorrow"

    def first_of(kind):
        # First entity of the given type.
        return next(e for e in message.get("entities") if e["entity"] == kind)

    e_type = first_of('type')
    e_city = first_of('city')

    assert e_type["start"] == 14
    assert e_type["end"] == 21
    assert e_city["start"] == 36
    assert e_city["end"] == 39
    def process(self, message: Message, **kwargs: Any) -> None:
        """Append regex-extracted entities to the message's entity list."""
        if not self.patterns:
            # Nothing configured; leave the message untouched.
            return

        new_entities = self.add_extractor_name(self._extract_entities(message))
        existing = message.get(ENTITIES, [])
        message.set(ENTITIES, existing + new_entities, add_to_output=True)
# --- Example 4 ---
def parse_training_example(example: Text, intent: Optional[Text] = None) -> "Message":
    """Extract entities and synonyms, and convert to plain text."""
    extracted = find_entities_in_training_example(example)
    cleaned_text = replace_entities(example)
    return Message.build(cleaned_text, intent, extracted)
# --- Example 5 ---
def _collect_messages(evts: List[Dict[Text, Any]]) -> List[Message]:
    """Collect the message text and parsed data from the UserMessage events
    into a list.

    Entities produced by the Duckling/spaCy/MITIE extractors are dropped,
    since they should not be marked when writing NLU data to file.
    """
    from rasa.nlu.extractors.duckling_http_extractor import \
        DucklingHTTPExtractor
    from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
    from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor

    # Hoisted out of the loops: the original rebuilt this list and emitted
    # the debug line once per entity.
    excluded_extractors = [
        DucklingHTTPExtractor.__name__,
        SpacyEntityExtractor.__name__,
        MitieEntityExtractor.__name__
    ]
    logger.debug("Exclude entity marking of following extractors"
                 " {} when writing nlu data "
                 "to file.".format(excluded_extractors))

    msgs = []

    for evt in evts:
        if evt.get("event") == UserUttered.type_name:
            data = evt.get("parse_data")

            # Filter into a new list: the original called list.remove()
            # while iterating the same list, which skips the element that
            # follows each removal.
            data["entities"] = [
                entity for entity in data.get("entities", [])
                if entity.get("extractor") not in excluded_extractors
            ]

            msg = Message.build(data["text"], data["intent"]["name"],
                                data["entities"])
            msgs.append(msg)

    return msgs
# --- Example 6 ---
def test_entity_synonyms_substitute_and_replace():
    """Round-trip: apply synonym substitution, then undo it."""
    initial_text = "Looking for a chines restaurant in New York tomorrow for three people"
    initial_entities = [
        {"entity": "type", "value": "chines", "start": 14, "end": 20},
        {"entity": "city", "value": "New York", "start": 35, "end": 43},
        {"entity": "count", "value": "three", "start": 57, "end": 62},
    ]
    example = Message(text=initial_text, data={"entities": initial_entities})

    synonyms = {"chines": "chinese", "new york": "NYC", "three": "3"}
    EntitySynonymBegin(synonyms=synonyms).process(example)
    EntitySynonymEnd().process(example)
# --- Example 7 ---
def _get_example(config=None, gazette=None, primary=None):
    """Build and process a sample message; `primary` overrides the first entity."""
    if primary is None:
        primary = {
            "entity": "type",
            "value": "chines",
            "start": 14,
            "end": 20,
            "extractor": "ner_crf",
        }

    other_entities = [
        {
            "entity": "type",
            "value": "restaurant",
            "start": 21,
            "end": 31,
            "extractor": "ner_crf",
        },
        {
            "entity": "city",
            "value": "New York",
            "start": 35,
            "end": 43,
            "extractor": "ner_crf",
        },
    ]
    sample = Message(
        text="Looking for a chines restaurant in New York",
        data={"entities": [primary] + other_entities},
    )
    return _process_example(sample, config=config, gazette=gazette)
# --- Example 8 ---
def test_entity_sweeper():
    """Sweeper should drop exactly the entities whose names are configured."""
    entities = [
        {"entity": "cuisine", "value": "chinese", "start": 0, "end": 6},
        {"entity": "time", "value": "whatever", "start": 0, "end": 6},
    ]
    message = Message("xxx", {'entities': entities})

    Sweeper(component_config={'entity_names': ['time']}).process(message)

    remaining = message.get('entities')
    assert len(remaining) == 1
    assert remaining[0]["entity"] == "cuisine"
def test_entity_synonyms_substitute_three_entity():
    """Substitution must shift the offsets of all three entities."""
    message = Message(
        text="Looking for a chines restaurant in New York tomorrow for three people",
        data={
            "entities": [
                {"entity": "type", "value": "chines", "start": 14, "end": 20},
                {"entity": "city", "value": "New York", "start": 35, "end": 43},
                {"entity": "count", "value": "three", "start": 57, "end": 62},
            ]
        },
    )
    EntitySynonymBegin(
        synonyms={"chines": "chinese", "new york": "NYC", "three": "3"}
    ).process(message)

    assert (message.text ==
            "Looking for a chinese restaurant in NYC tomorrow for 3 people")

    def first_of(kind):
        # First entity of the given type.
        return next(e for e in message.get("entities") if e["entity"] == kind)

    e_type = first_of("type")
    e_city = first_of("city")
    e_count = first_of("count")

    assert (e_type["start"], e_type["end"]) == (14, 21)
    assert (e_city["start"], e_city["end"]) == (36, 39)
    assert (e_count["start"], e_count["end"]) == (53, 54)
# --- Example 10 ---
def test_entity_synonyms_substitute_one_entity():
    """A single substitution must update the text and the entity's end offset."""
    message = Message(
        text="Looking for a chines restaurant",
        data={
            "entities": [
                {"entity": "type", "value": "chinese", "start": 14, "end": 20}
            ]
        },
    )
    EntitySynonymBegin(synonyms={"chines": "chinese"}).process(message)

    assert message.text == "Looking for a chinese restaurant"

    e_type = next(e for e in message.get("entities") if e["entity"] == 'type')
    assert e_type["start"] == 14
    assert e_type["end"] == 21
# --- Example 11 ---
def test_entity_synonyms_substitute():
    """Both synonym replacements must land in the message text."""
    message = Message(
        text="Looking for a chines restaurant in New York",
        data={
            "entities": [
                {"entity": "type", "value": "chinese", "start": 14, "end": 20},
                {"entity": "city", "value": "New York", "start": 35, "end": 43},
            ]
        },
    )
    EntitySynonymBegin(
        synonyms={"chines": "chinese", "new york": "NYC"}
    ).process(message)

    assert message.text == "Looking for a chinese restaurant in NYC"
# --- Example 12 ---
def _setup_example(config=None):
    """Return a spellcheck instance, a message with one typo, and the
    matching flagged-token payload for that typo."""
    instance = _get_instance(config=config)
    message = Message(text='This is a tst message')
    flagged_tokens = [{
        "offset": 10,
        "token": "tst",
        "type": "UnknownToken",
        "suggestions": [
            {"suggestion": "test", "score": 0.95155325585711},
            {"suggestion": "text", "score": 0.805342621979041},
        ],
    }]
    return instance, message, flagged_tokens
# --- Example 13 ---
 def test_classification(self, trained_classifier, message, intent):
     """Classifier should label the message with the expected intent."""
     parsed = Message(message)
     trained_classifier.process(parsed)
     assert parsed.get("intent").get("name", "NOT_CLASSIFIED") == intent
# --- Example 14 ---
def test_entity_synonyms_substitute_and_replace_w_insertions():
    """Synonym substitution must be fully reversible, restoring the original
    offsets of the initial entities even when new entities were inserted
    between the Begin and End passes."""
    text_initial = "Looking for a chines restaurant in New York tomorrow for three people"
    initial_entities = [{
        "entity": "type",
        "value": "chines",
        "start": 14,
        "end": 20
    }, {
        "entity": "city",
        "value": "New York",
        "start": 35,
        "end": 43
    }, {
        "entity": "count",
        "value": "three",
        "start": 57,
        "end": 62
    }]

    example = Message(text=text_initial, data={
        "entities": initial_entities,
    })
    ent_synonyms = {"chines": "chinese", "new york": "NYC", "three": "3"}
    EntitySynonymBegin(synonyms=ent_synonyms).process(example)

    # Simulate a downstream component inserting entities after substitution.
    example.data["entities"].extend([
        {
            "entity": "action",
            "value": "Looking",
            "start": 0,
            "end": 7,
        },
        {
            "entity": "place",
            "value": "restaurant",
            "start": 22,
            "end": 32,
        },
        {
            "entity": "species",
            "value": "people",
            "start": 55,
            "end": 61,
        },
    ])

    EntitySynonymEnd().process(example)

    def has_changed(entity):
        # An entity "changed" when its value no longer matches its span.
        return entity["value"] != example.text[entity["start"]:entity["end"]]

    assert example.text == text_initial

    # Materialize into a list: the original used a `filter` iterator that
    # len(list(...)) exhausted, so the verification loop below never ran.
    changed_entities = [e for e in example.data["entities"] if has_changed(e)]
    assert len(changed_entities) == 3
    # Check the changed entities are reverted properly
    for initial, entity in zip(initial_entities, changed_entities):
        # Temporary bookkeeping key must have been cleaned up by End.
        assert raises(KeyError, lambda x: print(x["literal"]), entity)
        assert entity["start"] == initial["start"]
        assert entity["end"] == initial["end"]
# --- Example 15 ---
def test_multiple_errors():
    """Every flagged token should be replaced by its top-scored suggestion."""
    instance = _get_instance()
    message = Message(text='Ths i a tst mesae')

    def flagged(offset, token, *suggestions):
        # One Bing-spellcheck-style flagged-token entry.
        return {
            "offset": offset,
            "token": token,
            "type": "UnknownToken",
            "suggestions": [
                {"suggestion": word, "score": score}
                for word, score in suggestions
            ],
        }

    flagged_tokens = [
        flagged(0, "Ths", ("This", 0.825389307284585)),
        flagged(4, "i", ("is", 0.825389307284585)),
        flagged(8, "tst",
                ("test", 0.825389307284585),
                ("text", 0.646529276890009)),
        flagged(12, "mesae",
                ("message", 0.825389307284585),
                ("mesa", 0.761621385590906)),
    ]

    tokens = instance._get_replacements(flagged_tokens)
    assert len(tokens) == len(flagged_tokens)

    text = instance._replace(message.text, tokens)
    assert text == 'This is a test message'