def read_from_json(self, js: Dict[Text, Any], **_: Any) -> "TrainingData":
    """Loads training data stored in the rasa NLU data format.

    Args:
        js: Parsed JSON payload containing a top-level ``rasa_nlu_data`` key.
        **_: Ignored keyword arguments (kept for reader-interface compatibility).

    Returns:
        A ``TrainingData`` object built from the common examples, entity
        synonyms, regex features and lookup tables found in ``js``.
    """
    import rasa.shared.nlu.training_data.schemas.data_schema as schema
    import rasa.shared.utils.validation as validation_utils

    validation_utils.validate_training_data(js, schema.rasa_nlu_data_schema())

    data = js["rasa_nlu_data"]
    common_examples = data.get("common_examples", [])
    entity_synonyms = data.get("entity_synonyms", [])
    regex_features = data.get("regex_features", [])
    lookup_tables = data.get("lookup_tables", [])

    entity_synonyms = transform_entity_synonyms(entity_synonyms)

    training_examples = []
    for ex in common_examples:
        # Pop from a shallow copy so the caller's `js` payload is not
        # mutated (the original popped TEXT/INTENT/ENTITIES out of `ex`
        # in place, destroying the input dicts).
        extra = dict(ex)
        msg = Message.build(
            text=extra.pop(TEXT, ""),
            intent=extra.pop(INTENT, None),
            entities=extra.pop(ENTITIES, None),
            # any remaining keys are forwarded as custom entries
            **extra,
        )
        training_examples.append(msg)

    return TrainingData(
        training_examples, entity_synonyms, regex_features, lookup_tables
    )
def _read_entities(entity_js, examples_js) -> "TrainingData":
    """Build a ``TrainingData`` object from a Dialogflow entity export.

    Synonym mappings are derived from the entity's example entries, and a
    lookup table is extracted under the entity's name. No training
    examples or regex features are produced here.
    """
    synonyms = transform_entity_synonyms(examples_js)
    entity_name = entity_js.get("name")
    tables = DialogflowReader._extract_lookup_tables(entity_name, examples_js)
    return TrainingData([], synonyms, [], tables)
def _read_entities(
    entity: Dict[Text, Any], examples: List[Dict[Text, Any]]
) -> "TrainingData":
    """Build a ``TrainingData`` object from a Dialogflow entity definition.

    Regexp entities contribute regex features; all other entities
    contribute lookup tables. Entity synonyms are extracted either way.

    Args:
        entity: The Dialogflow entity definition; its ``isRegexp`` flag
            selects regex-feature vs. lookup-table extraction.
        examples: The entity's example entries.

    Returns:
        A ``TrainingData`` object containing the extracted synonyms plus
        either regex features or lookup tables (never both).
    """
    entity_synonyms = transform_entity_synonyms(examples)

    # Use .get() so entity exports that omit the "isRegexp" key fall back
    # to lookup-table extraction instead of raising KeyError.
    if entity.get("isRegexp", False):
        regex_features = DialogflowReader._extract_regex_features(entity, examples)
        return TrainingData([], entity_synonyms, regex_features, [])

    lookup_tables = DialogflowReader._extract_lookup_tables(entity, examples)
    return TrainingData([], entity_synonyms, [], lookup_tables)