Example #1
import os
from typing import Dict, Text

from rasa.model import get_model
from rasa.utils.common import minimal_kwargs  # helper locations follow Rasa 1.x


def test_nlu(model: Text, nlu_data: Text, **kwargs: Dict):
    from rasa.nlu.test import run_evaluation

    unpacked_model = get_model(model)
    nlu_model = os.path.join(unpacked_model, "nlu")
    kwargs = minimal_kwargs(kwargs, run_evaluation)
    run_evaluation(nlu_data, nlu_model, **kwargs)
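A direct call of this wrapper might look like the sketch below; the paths are placeholders, and any extra keyword arguments are filtered by minimal_kwargs down to those run_evaluation accepts:

# hypothetical usage; paths are placeholders
test_nlu(
    model="models/nlu-20200101-000000.tar.gz",  # packed model archive
    nlu_data="data/nlu.md",                     # evaluation data
    errors=False,                               # forwarded to run_evaluation
)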
Example #2
import logging

from rasa.nlu import config
from rasa.nlu.model import Trainer
from rasa.nlu.test import run_evaluation
from rasa.nlu.training_data import load_data

logfile = 'nlu_model.log'  # left undefined in the original snippet; name assumed


def train_nlu(data_path, configs, model_path):
    logging.basicConfig(filename=logfile, level=logging.DEBUG)
    training_data = load_data(data_path)
    trainer = Trainer(config.load(configs))
    trainer.train(training_data)
    model_directory = trainer.persist(model_path, fixed_model_name='nlu')
    run_evaluation(data_path, model_directory)
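Assuming the placeholder paths below, training and evaluation run in a single call:

# hypothetical usage; paths are placeholders
train_nlu("data/nlu.md", "config.yml", "models/")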
Example #3
import os
from typing import Dict, Optional, Text

from rasa.cli.utils import print_error
from rasa.constants import DEFAULT_RESULTS_PATH
from rasa.model import ModelNotFound  # import locations follow Rasa 1.x; some moved in later versions
from rasa.utils import common as utils, io as io_utils


def test_nlu(
    model: Optional[Text],
    nlu_data: Optional[Text],
    output_directory: Text = DEFAULT_RESULTS_PATH,
    kwargs: Optional[Dict] = None,
):
    from rasa.nlu.test import run_evaluation
    from rasa.model import get_model

    try:
        unpacked_model = get_model(model)
    except ModelNotFound:
        print_error(
            "Could not find any model. Use 'rasa train nlu' to train a "
            "Rasa model and provide it via the '--model' argument.")
        return

    io_utils.create_directory(output_directory)

    nlu_model = os.path.join(unpacked_model, "nlu")

    if os.path.exists(nlu_model):
        kwargs = utils.minimal_kwargs(kwargs, run_evaluation,
                                      ["data_path", "model"])
        run_evaluation(nlu_data,
                       nlu_model,
                       output_directory=output_directory,
                       **kwargs)
    else:
        print_error(
            "Could not find any model. Use 'rasa train nlu' to train a "
            "Rasa model and provide it via the '--model' argument.")
Example #4
def test_nlu(model: Optional[Text], nlu_data: Optional[Text], kwargs: Optional[Dict]):
    from rasa.nlu.test import run_evaluation

    unpacked_model = get_model(model)
    nlu_model = os.path.join(unpacked_model, "nlu")

    if os.path.exists(nlu_model):
        kwargs = minimal_kwargs(kwargs, run_evaluation, ["data_path", "model"])
        run_evaluation(nlu_data, nlu_model, **kwargs)
Example #5
def train_eval_rasa_nlu_model(lang='en', cross=False, save=''):
    """Train and evaluate a Rasa NLU model on Snips-format data built from brat annotation objects.

    :param lang: abbreviated language name
    :param cross: if True, run the cross-validation variant
    :param save: version tag appended to the result paths
    :rtype: None
    """
    from rasa.nlu.training_data import load_data
    from rasa.nlu.model import Trainer
    from rasa.nlu.components import ComponentBuilder
    from rasa.nlu import config
    from rasa.nlu.test import run_evaluation

    config_file = source_config / "config_rasa_converrt.yml"

    if cross:
        filename_results = source_result / "rasa_cross_semeval_2020_model_task1_{}".format(save)

        train_data_obj = BuildSnipsDataTask1(lang, cross=cross, vers=save)
        train_data = train_data_obj.build_rasa_data_task1()

        training_data = load_data(str(train_data[0]))
        builder = ComponentBuilder(use_cache=True)  
        trainer = Trainer(config.load(str(config_file)), builder)
        
        print("--> Training patent data with Rasa...")
        trainer.train(training_data, num_threads=8, n_jobs=-1, verbose=True)
        
        print("--> Saving model trained with Rasa (Rasa)...")
        model_directory = trainer.persist(filename_results)
        
        print("--> Evaluating training data with Rasa metrics (Cross-validation)...")
        import os
        from datetime import datetime
        filename_test = str(train_data[1])
        print(filename_test)
        dmtime = "test_{}_{}".format(save, datetime.now().strftime("%Y%m%d-%H%M%S"))
        out_test = source_result / "rasa_cross_evaluation_task1" / dmtime
        model_directory = sorted(filename_results.glob("nlu_*"), key=os.path.getmtime)[-1] 
        run_evaluation(filename_test, str(model_directory), output_directory=str(out_test))

    else:
        filename_results = source_result / "rasa_semeval_2020_model_task1_{}".format(save)
        train_data_obj = BuildSnipsDataTask1(lang, cross=cross, vers=save)
        train_file = train_data_obj.build_rasa_data_task1()

        training_data = load_data(str(train_file))  # load_data expects a str path
        builder = ComponentBuilder(use_cache=True)  
        trainer = Trainer(config.load(str(config_file)), builder)
        
        print("--> Training patent data with Rasa...")
        trainer.train(training_data, num_threads=8, verbose=True, n_jobs=-1)

        print("--> Saving model trained with Rasa (Rasa)...")
        # fixed_model_name is a persist() argument, not a train() argument
        model_directory = trainer.persist(filename_results, fixed_model_name="nlu")
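A call for the cross-validation branch of this workflow might look like this; the language code and version tag are placeholders:

# hypothetical usage; 'v1' is a placeholder version tag
train_eval_rasa_nlu_model(lang='en', cross=True, save='v1')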
Example #6
    async def evaluate_intents(request: Request):
        """Evaluate intents against a Rasa model."""
        validate_request_body(
            request,
            "You must provide some nlu data in the request body in order to "
            "evaluate your model.",
        )

        nlu_data = rasa.utils.io.create_temporary_file(request.body,
                                                       mode="w+b")
        data_path = os.path.abspath(nlu_data)

        if not os.path.exists(app.agent.model_directory):
            raise ErrorResponse(409, "Conflict",
                                "Loaded model file not found.")

        model_directory = app.agent.model_directory
        _, nlu_model = get_model_subdirectories(model_directory)

        try:
            evaluation = run_evaluation(data_path, nlu_model)
            return response.json(evaluation)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                500,
                "TestingError",
                "An unexpected error occurred during evaluation. Error: {}".
                format(e),
            )
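In Rasa's HTTP server this handler backs the intent-evaluation endpoint. A minimal client call might look like the sketch below; the route name and port are assumptions (POST /model/test/intents on localhost:5005), since the snippet only shows the handler body:

# hypothetical client call against a running Rasa server
import requests

with open("data/nlu.md", "rb") as f:
    resp = requests.post("http://localhost:5005/model/test/intents", data=f.read())
resp.raise_for_status()
print(resp.json().get("intent_evaluation", {}).get("f1_score"))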
Example #7
    def evaluate(
        self, data: Text, project: Optional[Text] = None, model: Optional[Text] = None
    ) -> Dict[Text, Any]:
        """Perform a model evaluation."""

        project = project or RasaNLUModelConfig.DEFAULT_PROJECT_NAME
        model = model or None
        file_name = utils.create_temporary_file(data, "_training_data")

        if project not in self.project_store:
            raise InvalidProjectError("Project {} could not be found".format(project))

        model_name = self.project_store[project]._dynamic_load_model(model)

        self.project_store[project]._loader_lock.acquire()
        try:
            if not self.project_store[project]._models.get(model_name):
                interpreter = self.project_store[project]._interpreter_for_model(
                    model_name
                )
                self.project_store[project]._models[model_name] = interpreter
        finally:
            self.project_store[project]._loader_lock.release()

        return run_evaluation(
            data_path=file_name,
            model=self.project_store[project]._models[model_name],
            errors_filename=None,
        )
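This evaluate method belongs to the legacy rasa_nlu server's DataRouter and takes the raw training-data payload as a string. A direct call could look like this sketch, where `router` is an already-configured DataRouter instance (assumed) and the data file is a placeholder:

# hypothetical direct call; `router` is a configured DataRouter instance
with open("data/nlu.md") as f:
    report = router.evaluate(f.read(), project="default")
print(report["intent_evaluation"]["f1_score"])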
Example #8
    async def evaluate_intents(request: Request):
        """Evaluate intents against a Rasa NLU model."""

        # create `tmpdir` and cast as str for py3.5 compatibility
        tmpdir = str(tempfile.mkdtemp())

        zipped_model_path = os.path.join(tmpdir, "model.tar.gz")
        write_request_body_to_file(request, zipped_model_path)

        model_path, nlu_files = await nlu_model_and_evaluation_files_from_archive(
            zipped_model_path, tmpdir)

        if len(nlu_files) == 1:
            data_path = os.path.abspath(nlu_files[0])
            try:
                evaluation = run_evaluation(data_path, model_path)
                return response.json(evaluation)
            except ValueError as e:
                return ErrorResponse(
                    400,
                    "FailedIntentEvaluation",
                    "Evaluation could not be created. Error: {}".format(e),
                )
        else:
            return ErrorResponse(
                400,
                "FailedIntentEvaluation",
                "NLU evaluation file could not be found. "
                "This endpoint requires a single file ending "
                "on `.md` or `.json`.",
            )
Example #9
def _get_nlu_evaluation_loss(model_path, metric, data_path):
    logger.info("Calculating '{}' loss.".format(metric))
    evaluation_result = run_evaluation(data_path, model_path)
    metric_result = evaluation_result['intent_evaluation'][metric]
    logger.info("{}: {}".format(metric, metric_result))

    return 1 - metric_result
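This helper turns an evaluation metric into a loss, which makes it usable as an objective in hyperparameter search. A sketch of a direct call, with placeholder paths and assuming 'f1_score' is a key of the intent_evaluation report:

# hypothetical usage as an optimization objective (1 - F1)
loss = _get_nlu_evaluation_loss(
    "models/nlu",        # trained NLU model directory
    "f1_score",          # metric key inside intent_evaluation
    "data/test_nlu.md",  # evaluation data
)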
Example #10
def test_run_evaluation(trained_moodbot_path):
    data = DEFAULT_DATA_PATH
    model = trained_moodbot_path

    result = run_evaluation(data, model, errors=None)
    assert result.get("intent_evaluation")
    assert result.get("entity_evaluation").get("CRFEntityExtractor")
Example #11
    async def evaluate_intents(request: Request):
        """Evaluate intents against a Rasa model."""
        validate_request_body(
            request,
            "You must provide some nlu data in the request body in order to "
            "evaluate your model.",
        )

        eval_agent = app.agent

        model_path = request.args.get("model", None)
        if model_path:
            model_server = app.agent.model_server
            if model_server is not None:
                model_server.url = model_path
            eval_agent = await _load_agent(model_path, model_server,
                                           app.agent.remote_storage)

        nlu_data = rasa.utils.io.create_temporary_file(request.body,
                                                       mode="w+b")
        data_path = os.path.abspath(nlu_data)

        if not eval_agent.model_directory or not os.path.exists(
                eval_agent.model_directory):
            raise ErrorResponse(409, "Conflict",
                                "Loaded model file not found.")

        model_directory = eval_agent.model_directory
        _, nlu_model = model.get_model_subdirectories(model_directory)

        try:
            # bf >
            language = request.args.get("language", None)
            evaluation = run_evaluation(
                data_path,
                nlu_model.get(language),
                errors=True,
                output_directory=model_directory,
            )

            for classifier in evaluation.get("entity_evaluation", {}):
                entity_errors_file = os.path.join(model_directory,
                                                  f"{classifier}_errors.json")
                if os.path.isfile(entity_errors_file):
                    import json

                    entity_errors = json.loads(
                        rasa.utils.io.read_file(entity_errors_file))
                    evaluation["entity_evaluation"][classifier][
                        "predictions"] = entity_errors
            # </ bf
            return response.json(evaluation)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                500,
                "TestingError",
                f"An unexpected error occurred during evaluation. Error: {e}",
            )
Example #12
def test_run_evaluation(unpacked_trained_moodbot_path):
    result = run_evaluation(
        DEFAULT_DATA_PATH,
        os.path.join(unpacked_trained_moodbot_path, "nlu"),
        errors=False,
    )

    assert result.get("intent_evaluation")
Example #13
def test_run_evaluation(unpacked_trained_moodbot_path):
    data = DEFAULT_DATA_PATH

    result = run_evaluation(data,
                            os.path.join(unpacked_trained_moodbot_path, "nlu"),
                            errors=None)
    assert result.get("intent_evaluation")
    assert result.get("entity_evaluation").get("CRFEntityExtractor")
Example #14
def test_run_evaluation(unpacked_trained_moodbot_path):
    result = run_evaluation(
        DEFAULT_DATA_PATH,
        os.path.join(unpacked_trained_moodbot_path, "nlu"),
        errors=False,
    )

    assert result["intent_evaluation"]
    assert result["entity_evaluation"]["DIETClassifier"]
Example #15
def test_run_evaluation(unpacked_trained_moodbot_path: Text):
    result = run_evaluation(
        DEFAULT_DATA_PATH,
        os.path.join(unpacked_trained_moodbot_path, "nlu"),
        errors=False,
        successes=False,
        disable_plotting=True,
    )

    assert result.get("intent_evaluation")
Example #16
def test_nlu(model: Optional[Text], nlu_data: Optional[Text], kwargs: Optional[Dict]):
    from rasa.nlu.test import run_evaluation

    unpacked_model = get_model(model)

    if unpacked_model is None:
        print_error(
            "Could not find any model. Use 'rasa train nlu' to train an NLU model."
        )
        return

    nlu_model = os.path.join(unpacked_model, "nlu")

    if os.path.exists(nlu_model):
        kwargs = minimal_kwargs(kwargs, run_evaluation, ["data_path", "model"])
        run_evaluation(nlu_data, nlu_model, **kwargs)
    else:
        print_error(
            "Could not find any model. Use 'rasa train nlu' to train an NLU model."
        )
Example #17
    async def evaluate_intents(request: Request):
        """Evaluate intents against a Rasa model."""
        validate_request_body(
            request,
            "You must provide some nlu data in the request body in order to "
            "evaluate your model.",
        )

        eval_agent = app.agent

        model_path = request.args.get("model", None)
        if model_path:
            model_server = app.agent.model_server
            if model_server is not None:
                model_server.url = model_path
            eval_agent = await _load_agent(model_path, model_server,
                                           app.agent.remote_storage)

        nlu_data = rasa.utils.io.create_temporary_file(request.body,
                                                       mode="w+b")
        data_path = os.path.abspath(nlu_data)

        if not os.path.exists(eval_agent.model_directory):
            raise ErrorResponse(409, "Conflict",
                                "Loaded model file not found.")

        model_directory = eval_agent.model_directory
        # bf mod
        model_directory = os.path.abspath(
            os.path.join(model_directory, os.pardir))
        # /bf mod
        _, nlu_models = get_model_subdirectories(model_directory)

        try:
            # bf mod
            language = request.args.get("language", None)
            evaluation = run_evaluation(data_path, nlu_models.get(language))
            # /bf mod
            return response.json(evaluation)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                500,
                "TestingError",
                "An unexpected error occurred during evaluation. Error: {}".
                format(e),
            )
Example #18
async def test_stack_model_intent_evaluation(tmpdir, trained_stack_model,
                                             default_nlu_data):
    with open(default_nlu_data, "r") as f:
        nlu_data = f.read()

    # add evaluation data to model archive
    new_model_path = add_evaluation_file_to_model(trained_stack_model,
                                                  nlu_data,
                                                  data_format="md")

    nlu_model_path, nlu_files = await nlu_model_and_evaluation_files_from_archive(
        new_model_path, tmpdir)

    assert len(nlu_files) == 1
    evaluation = run_evaluation(nlu_files[0], nlu_model_path)

    assert set(evaluation.keys()) == {"intent_evaluation", "entity_evaluation"}
Example #19
    async def evaluate_intents(request: Request) -> HTTPResponse:
        """Evaluate intents against a Rasa model."""
        validate_request_body(
            request,
            "You must provide some nlu data in the request body in order to "
            "evaluate your model.",
        )

        test_data = _test_data_file_from_payload(request)

        eval_agent = app.agent

        model_path = request.args.get("model", None)
        if model_path:
            model_server = app.agent.model_server
            if model_server is not None:
                model_server.url = model_path
            eval_agent = await _load_agent(
                model_path, model_server, app.agent.remote_storage
            )

        data_path = os.path.abspath(test_data)

        if not eval_agent.model_directory or not os.path.exists(
            eval_agent.model_directory
        ):
            raise ErrorResponse(
                HTTPStatus.CONFLICT, "Conflict", "Loaded model file not found."
            )

        model_directory = eval_agent.model_directory
        _, nlu_model = model.get_model_subdirectories(model_directory)

        try:
            evaluation = run_evaluation(data_path, nlu_model, disable_plotting=True)
            return response.json(evaluation)
        except Exception as e:
            logger.error(traceback.format_exc())
            raise ErrorResponse(
                HTTPStatus.INTERNAL_SERVER_ERROR,
                "TestingError",
                f"An unexpected error occurred during evaluation. Error: {e}",
            )
Example #20
def train_eval_rasa_nlu_model(lang='en', cross=False, save=''):
    """Train and evaluate a Rasa NLU model on data built from brat annotation objects.

    :param lang: abbreviated language name
    :param cross: if True, run the cross-validation variant
    :param save: version tag appended to the result paths
    :return: None
    :rtype: None
    """
    from rasa.nlu.training_data import load_data
    from rasa.nlu.model import Trainer
    from rasa.nlu.components import ComponentBuilder
    from rasa.nlu import config
    from rasa.nlu.test import run_evaluation
    import codecs  # used by codecs.open below but not imported in the original snippet
    import pickle
    from pathlib import Path  # used by the existence check below

    config_file = source_config / "config_rasa_bert.yml"

    if cross:
        train_data_obj = BuildSnipsDataTask2(lang, cross=cross, vers=save)
        train_data = train_data_obj.build_rasa_data_task2()
        filename_results = source_result / "rasa_cross_semeval_2020_model_task2_{}".format(
            save)
        if Path(filename_results).exists():
            training_data = load_data(str(train_data[0]))
            builder = ComponentBuilder(use_cache=True)
            with codecs.open(
                    source_result / "builder_task2_{}.pkl".format(save),
                    "wb") as ant:
                pickle.dump(builder, ant)
            trainer = Trainer(config.load(str(config_file)), builder)
            print("\n--> Training patent data with Rasa (Cross-validation)...")
            trainer.train(training_data, num_threads=8, verbose=True)
            print("--> Saving model trained with Rasa (Cross-validation)...")
            model_directory = trainer.persist(filename_results)

        print(
            "--> Evaluating training data with Rasa metrics (Cross-validation)..."
        )
        import os
        from datetime import datetime
        filename_test = str(train_data[1])
        dmtime = "test_{}_{}".format(save,
                                     datetime.now().strftime("%Y%m%d-%H%M%S"))
        out_test = source_result / "rasa_cross_evaluation_task2" / dmtime
        model_directory = sorted(filename_results.glob("nlu_*"),
                                 key=os.path.getmtime)[-1]
        print(out_test)
        run_evaluation(filename_test,
                       str(model_directory),
                       output_directory=str(out_test))

    else:
        filename_results = source_result / "rasa_semeval_2020_results_task2_{}".format(
            save)
        train_data_obj = BuildSnipsDataTask2(lang, cross=cross, vers=save)
        train_file = train_data_obj.build_rasa_data_task2()

        print("\n--> Training will use the file: {}...".format(
            str(train_file)))
        training_data = load_data(str(train_file))
        builder = ComponentBuilder(use_cache=True)
        with codecs.open(source_result / "builder_task2_{}.pkl".format(save),
                         "wb") as ant:
            pickle.dump(builder, ant)
        trainer = Trainer(config.load(str(config_file)), builder)
        print("\n--> Training patent data with Rasa...")
        trainer.train(training_data,
                      num_threads=12,
                      n_jobs=8,
                      verbose=True)
        print("--> Saving model trained with Rasa...")
        # fixed_model_name is a persist() argument, not a train() argument
        model_directory = trainer.persist(filename_results,
                                          fixed_model_name="nlu")
        """
Example #21
def evaluate_model(td_file, model_loc):
    """Evaluate the model on the training data.

    Thin wrapper around rasa_nlu.evaluate.run_evaluation.
    """
    run_evaluation(td_file, model_loc)
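Usage is a single call with the evaluation data and the persisted model location (placeholder paths):

# hypothetical usage; paths are placeholders
evaluate_model("data/nlu.md", "models/default/nlu")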