Example 1
import os
from typing import Dict, Text

# `get_model` (unpacks a packaged model archive) and `minimal_kwargs`
# (filters kwargs down to those `run_evaluation` accepts) are helpers
# defined elsewhere in the source module.
def test_nlu(model: Text, nlu_data: Text, **kwargs: Dict):
    from rasa_nlu.test import run_evaluation

    unpacked_model = get_model(model)
    nlu_model = os.path.join(unpacked_model, "nlu")
    kwargs = minimal_kwargs(kwargs, run_evaluation)
    run_evaluation(nlu_data, nlu_model, **kwargs)
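
A hypothetical call to the helper above; the archive path is a placeholder, and errors_filename is one of run_evaluation's keyword arguments (see Example 4):

# Hypothetical invocation; the model archive path is a placeholder.
test_nlu("models/nlu-model.tar.gz", "data/nlu.md",
         errors_filename="errors.json")
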
Example 2
    def train_nlu(self, data_path, configs, model_path):
        # Train an NLU model, persist it, then evaluate it on the same data.
        training_data = load_data(data_path)
        trainer = Trainer(config.load(configs))
        trainer.train(training_data)
        model_directory = trainer.persist(model_path,
                                          project_name='rasaproject',
                                          fixed_model_name='SmallTalk')
        run_evaluation(data_path, model_directory)
Example 3
import logging
from rasa_nlu import config
from rasa_nlu.model import Trainer
from rasa_nlu.test import run_evaluation
from rasa_nlu.training_data import load_data

def train_nlu(data_path, configs, model_path):
    # `logfile` is assumed to be defined elsewhere in the source module.
    logging.basicConfig(filename=logfile, level=logging.DEBUG)
    training_data = load_data(data_path)
    trainer = Trainer(config.load(configs))
    trainer.train(training_data)
    model_directory = trainer.persist(model_path, project_name='current', fixed_model_name='nlu')
    run_evaluation(data_path, model_directory)
Example 4
import os

from rasa_nlu.test import run_evaluation


def evaluateModel(pathToData, model_dir):
    # Create the evaluation directory if it does not exist.
    evaluation_dir = os.path.join(model_dir, "evaluation")
    if not os.path.exists(evaluation_dir):
        os.mkdir(evaluation_dir)
        print("Directory 'evaluation' created")

    # Save the errors file, confusion-matrix image, and intent histogram.
    errors_path = os.path.join(evaluation_dir, "errors.json")
    confmat_path = os.path.join(evaluation_dir, "confmat")
    intent_hist_path = os.path.join(evaluation_dir, "hist")
    run_evaluation(pathToData, model_dir, errors_filename=errors_path,
                   confmat_filename=confmat_path,
                   intent_hist_filename=intent_hist_path)
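
A call matching the test path hard-coded in the original snippet; the model directory is a placeholder:

# Hypothetical invocation; the model directory is a placeholder.
evaluateModel("data/test_data.json", "models/current")
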
Example 5
    def evaluate(self,
                 data: Text,
                 project: Optional[Text] = None,
                 model: Optional[Text] = None) -> Dict[Text, Any]:
        """Perform a model evaluation."""

        project = project or RasaNLUModelConfig.DEFAULT_PROJECT_NAME
        model = model or None
        file_name = utils.create_temporary_file(data, "_training_data")

        if project not in self.project_store:
            raise InvalidProjectError("Project {} could not "
                                      "be found".format(project))

        model_name = self.project_store[project]._dynamic_load_model(model)

        self.project_store[project]._loader_lock.acquire()
        try:
            if not self.project_store[project]._models.get(model_name):
                interpreter = self.project_store[project]. \
                    _interpreter_for_model(model_name)
                self.project_store[project]._models[model_name] = interpreter
        finally:
            self.project_store[project]._loader_lock.release()

        return run_evaluation(
            data_path=file_name,
            model=self.project_store[project]._models[model_name],
            errors_filename=None)
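
Note that Examples 1-4 pass a model directory path as run_evaluation's second argument, while this server-side snippet passes an already-loaded interpreter object (self.project_store[project]._models[model_name]); taken together, the examples suggest the function accepts either form.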
Example 6
    async def evaluate_intents(request: Request):
        """Evaluate intents against a Rasa NLU model."""

        # create `tmpdir` and cast as str for py3.5 compatibility
        tmpdir = str(tempfile.mkdtemp())

        zipped_model_path = os.path.join(tmpdir, 'model.tar.gz')
        write_request_body_to_file(request, zipped_model_path)

        model_path, nlu_files = \
            await nlu_model_and_evaluation_files_from_archive(
                zipped_model_path, tmpdir)

        if len(nlu_files) == 1:
            data_path = os.path.abspath(nlu_files[0])
            try:
                evaluation = run_evaluation(data_path, model_path)
                return response.json(evaluation)
            except ValueError as e:
                return ErrorResponse(
                    400, "FailedIntentEvaluation",
                    "Evaluation could not be created. "
                    "Error: {}".format(e))
        else:
            return ErrorResponse(
                400, "FailedIntentEvaluation",
                "NLU evaluation file could not be found. "
                "This endpoint requires a single file ending "
                "on `.md` or `.json`.")
Example 7
import logging

from rasa_nlu import config
from rasa_nlu.model import Trainer
from rasa_nlu.test import run_evaluation
from rasa_nlu.training_data import load_data


def train_nlu(train_path, test_path, configs, model_path):
    # `logfile` is assumed to be defined elsewhere in the source module.
    logging.basicConfig(filename=logfile, level=logging.DEBUG)
    training_data = load_data(train_path)
    trainer = Trainer(config.load(configs))
    trainer.train(training_data)
    model_directory = trainer.persist(model_path,
                                      project_name='current',
                                      fixed_model_name='nlu')
    result = run_evaluation(test_path, model_directory)
    predictions = result['intent_evaluation']['predictions']
    for predict in predictions:
        print('{}:{}-{}'.format(predict['text'], predict['intent'],
                                predict['confidence']))

    print('Acc: {}'.format(result['intent_evaluation']['accuracy']))
    print('F1 : {}'.format(result['intent_evaluation']['f1_score']))
    print('Pre: {}'.format(result['intent_evaluation']['precision']))
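
Example 7 above and the test in Example 8 below indicate the shape of the dictionary run_evaluation returns: an "intent_evaluation" entry with per-message predictions plus aggregate accuracy, F1, and precision, and an "entity_evaluation" entry. A minimal sketch that inspects it, reusing test_path and model_directory from Example 7; the 0.5 threshold is purely illustrative:

result = run_evaluation(test_path, model_directory)
print(sorted(result.keys()))  # per Example 8: entity_evaluation, intent_evaluation
low_confidence = [p for p in result['intent_evaluation']['predictions']
                  if p['confidence'] < 0.5]
print('{} predictions below 0.5 confidence'.format(len(low_confidence)))
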
Example 8
async def test_stack_model_intent_evaluation(tmpdir, trained_stack_model,
                                             default_nlu_data):
    with open(default_nlu_data, 'r') as f:
        nlu_data = f.read()

    # add evaluation data to model archive
    new_model_path = add_evaluation_file_to_model(trained_stack_model,
                                                  nlu_data,
                                                  data_format='md')

    nlu_model_path, nlu_files = \
        await nlu_model_and_evaluation_files_from_archive(
            new_model_path, tmpdir)

    assert len(nlu_files) == 1
    evaluation = run_evaluation(nlu_files[0], nlu_model_path)

    assert set(evaluation.keys()) == {"intent_evaluation", "entity_evaluation"}
Example 9
from rasa_nlu import config
from rasa_nlu.model import Trainer
from rasa_nlu.test import run_evaluation
from rasa_nlu.training_data import load_data

def train_nlu(data_path, configs, model_path):
    training_data = load_data(data_path)
    trainer = Trainer(config.load(configs))
    trainer.train(training_data)
    model_directory = trainer.persist(model_path, project_name='current',
                                      fixed_model_name='nlu')
    run_evaluation(data_path, model_directory)
Example 10
from rasa_nlu.test import run_evaluation

def evaluateNLU():
    run_evaluation('./data/data.json', './models/nlu/default/restaurantnlu')
Example 11
# def run_nlu():
#     interpreter = Interpreter.load('./models/nlu/default/mentalnlu',
#                                    config.load('config.yml'))
#     print(interpreter.parse(u"I am depressed today."))
#
# if __name__ == "__main__":
#     train_nlu('./data/nlu.md', 'config.yml', './models/nlu')
#     run_nlu()

from rasa_nlu.training_data import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer
from rasa_nlu import config

# loading the nlu training samples
training_data = load_data("./data/nlu.md")

# trainer to educate our pipeline
trainer = Trainer(config.load("config.yml"))

# train the model!
interpreter = trainer.train(training_data)

# store it for future use
model_directory = trainer.persist("./models/nlu", fixed_model_name="mentalnlu")

# interpreter = Interpreter.load('./models/nlu/default/mentalnlu',
#                                config.load('config.yml'))
# print(interpreter.parse(u"I am depressed today."))

from rasa_nlu.test import run_evaluation
run_evaluation("./data/nlu.md", model_directory)
Example 12
from rasa_nlu import config
from rasa_nlu.model import Trainer
from rasa_nlu.training_data import load_data
from rasa_nlu.test import run_evaluation
import json


def train(data, config_file, model_dir):
    training_data = load_data(data)
    trainer = Trainer(config.load(config_file))
    trainer.train(training_data)
    model_directory = trainer.persist(model_dir, fixed_model_name='nlu_model')
    return model_directory


model_directory = train("data/nlu.md", "config.yml", 'models/nlu')
#
#
# def ask_question(text):
#     print(text)
#     interpreter = Interpreter.load(model_directory)
#     t = interpreter.parse(text)
#     print(json.dumps(t, indent=2))
#
#
# ask_question("How do I get to Douglass")

result = run_evaluation("./data/nlu.md",
                        model_directory,
                        confmat_filename="trainresult.png")

# print(result["intent_evaluation"]['report'])