Example #1
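The `run_in_simple_project*` fixtures used throughout these examples are assumed to come from the suite's conftest: they invoke the `rasa` CLI inside a freshly scaffolded project (with a pre-trained model in the `_with_model` variant). This test checks that `rasa test --fail-on-prediction-errors` passes on a test story that contains no user utterances and that no failed-stories report is written.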
import os
from typing import Callable

from _pytest.pytester import RunResult

from rasa.shared.utils.io import write_yaml


def test_test_with_no_user_utterance(
    run_in_simple_project_with_model: Callable[..., RunResult],
):
    write_yaml(
        {"pipeline": "KeywordIntentClassifier", "policies": [{"name": "TEDPolicy"}]},
        "config.yml",
    )

    simple_test_story_yaml = """
stories:
- story: happy path 1
  steps:
  - intent: greet
  - action: utter_greet
  - intent: mood_great
  - action: utter_happy
"""
    with open("tests/test_story_no_utterance.yaml", "w") as f:
        f.write(simple_test_story_yaml)

    run_in_simple_project_with_model("test", "--fail-on-prediction-errors")
    assert os.path.exists("results")
    assert not os.path.exists("results/failed_test_stories.yml")
Example #2
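This test only verifies that `rasa test nlu --cross-validation` starts up without crashing when `pipeline` and `policies` are left empty, so Rasa's auto-configuration has to fill them in. The subprocess is killed after a short timeout rather than waiting for the cross-validation to finish. `testdir` is pytest's built-in fixture; `moodbot_nlu_data_path` is assumed to point at example NLU training data.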
import os
import shutil
from pathlib import Path

from _pytest.pytester import Pytester, Testdir
from pytest import ExitCode

from rasa.shared.utils.io import write_yaml


def test_test_nlu_cross_validation_with_autoconfig(
    testdir: Testdir, moodbot_nlu_data_path: Path
):
    os.environ["LOG_LEVEL"] = "ERROR"
    config_path = str(testdir.tmpdir / "config.yml")
    nlu_path = str(testdir.tmpdir / "nlu.yml")
    shutil.copy(str(moodbot_nlu_data_path), nlu_path)
    write_yaml(
        {"language": "en", "pipeline": None, "policies": None},
        config_path,
    )
    args = [
        shutil.which("rasa"),
        "test",
        "nlu",
        "--cross-validation",
        "-c",
        "config.yml",
        "--nlu",
        "nlu.yml",
    ]

    # We don't want to run the cross-validation for real, just see that it
    # does not crash on startup.
    try:
        run_result = testdir.run(*args, timeout=8.0)
        # We only get here if the run ended before the timeout, i.e. it
        # aborted early; it must not have exited with a failure code.
        assert run_result.ret != ExitCode.TESTS_FAILED
    except Pytester.TimeoutExpired:
        pass
Example #3
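Not a test but a helper: it scaffolds an initial Rasa project and replaces its config with components that are cheap to train, keeping the CLI tests fast.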
from pathlib import Path

from rasa.cli import scaffold
from rasa.shared.utils.io import write_yaml


def create_simple_project(path: Path):
    scaffold.create_initial_project(str(path))

    # Create a config file. For the CLI tests the quality of the resulting
    # model is not important, so use components that are fast to train.
    write_yaml(
        {
            "language": "en",
            "pipeline": [{"name": "KeywordIntentClassifier"}],
            "policies": [
                {"name": "RulePolicy"},
                {"name": "MemoizationPolicy", "max_history": 3},
            ],
        },
        path / "config.yml",
    )
    return path
Example #4
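A deliberately unlikely conversation path is written as a test story so that `rasa test core` produces story warnings (flagged by `UnexpecTEDIntentPolicy`); the warnings report must be absent with `--no-warnings` and present without it.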
import os
from typing import Callable

from _pytest.pytester import RunResult

from rasa.shared.utils.io import write_yaml

# Constant from the Rasa code base; the value is assumed here because the
# import path varies between Rasa versions.
STORIES_WITH_WARNINGS_FILE = "stories_with_warnings.yml"


def test_test_core_warnings(
    run_in_simple_project_with_model: Callable[..., RunResult],
):
    write_yaml(
        {
            "language": "en",
            "pipeline": [],
            "policies": [
                {"name": "MemoizationPolicy", "max_history": 3},
                {"name": "UnexpecTEDIntentPolicy", "max_history": 5, "epochs": 1},
                {
                    "name": "TEDPolicy",
                    "max_history": 5,
                    "epochs": 1,
                    "constrain_similarities": True,
                },
                {"name": "RulePolicy"},
            ],
        },
        "config.yml",
    )

    simple_test_story_yaml = """
version: "3.0"
stories:
- story: unlikely path
  steps:
  - user: |
      very terrible
    intent: mood_unhappy
  - action: utter_cheer_up
  - action: utter_did_that_help
  - intent: affirm
  - action: utter_happy
"""
    with open("tests/test_stories.yaml", "w") as f:
        f.write(simple_test_story_yaml)

    # With --no-warnings, the warnings report must not be written.
    run_in_simple_project_with_model("test", "core", "--no-warnings")
    assert not os.path.exists(f"results/{STORIES_WITH_WARNINGS_FILE}")

    # Without it, the report is expected.
    run_in_simple_project_with_model("test", "core")
    assert os.path.exists(f"results/{STORIES_WITH_WARNINGS_FILE}")
Example #5
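This test trains two Core configurations in comparison mode and then evaluates the resulting model directory, expecting the comparison results file and graph to be written.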
import os
from typing import Callable

from _pytest.pytester import RunResult

from rasa.shared.utils.io import write_yaml

# Constants from the Rasa code base; values are assumed here because the
# import paths vary between Rasa versions.
DEFAULT_RESULTS_PATH = "results"
RESULTS_FILE = "results.json"


def test_test_core_comparison_after_train(
    run_in_simple_project: Callable[..., RunResult],
):
    write_yaml(
        {"language": "en", "policies": [{"name": "MemoizationPolicy"}]},
        "config_1.yml",
    )
    write_yaml(
        {"language": "en", "policies": [{"name": "MemoizationPolicy"}]},
        "config_2.yml",
    )

    run_in_simple_project(
        "train",
        "core",
        "-c",
        "config_1.yml",
        "config_2.yml",
        "--stories",
        "data/stories.yml",
        "--runs",
        "2",
        "--percentages",
        "25",
        "75",
        "--out",
        "comparison_models",
    )

    assert os.path.exists("comparison_models")
    assert os.path.exists("comparison_models/run_1")
    assert os.path.exists("comparison_models/run_2")

    run_in_simple_project(
        "test",
        "core",
        "-m",
        "comparison_models",
        "--stories",
        "data/stories",
        "--evaluate-model-directory",
    )

    assert os.path.exists(os.path.join(DEFAULT_RESULTS_PATH, RESULTS_FILE))
    assert os.path.exists(
        os.path.join(DEFAULT_RESULTS_PATH, "core_model_comparison_graph.pdf")
    )
Example #6
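A smoke test for plain `rasa test`: with a trained model available it should create a results directory containing the intent evaluation plots. The extra `config2.yml` is written but never passed on the command line, presumably to check that a stray second config file does not interfere.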
import os
from typing import Callable

from _pytest.pytester import RunResult

from rasa.shared.utils.io import write_yaml


def test_test(run_in_simple_project_with_model: Callable[..., RunResult]):
    write_yaml(
        {
            "pipeline": "KeywordIntentClassifier",
            "policies": [{"name": "MemoizationPolicy"}],
        },
        "config2.yml",
    )

    run_in_simple_project_with_model("test")

    assert os.path.exists("results")
    assert os.path.exists("results/intent_histogram.png")
    assert os.path.exists("results/intent_confusion_matrix.png")
Example #7
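The NLU counterpart of the comparison tests: two pipelines are evaluated over two runs at different training-data percentages, and one results directory per run is expected.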
import os
from typing import Callable

from _pytest.pytester import RunResult

from rasa.shared.utils.io import write_yaml


def test_test_nlu_comparison(run_in_simple_project: Callable[..., RunResult]):
    write_yaml({"pipeline": "KeywordIntentClassifier"}, "config.yml")
    write_yaml({"pipeline": "KeywordIntentClassifier"}, "config2.yml")

    run_in_simple_project(
        "test",
        "nlu",
        "--config",
        "config.yml",
        "config2.yml",
        "--run",
        "2",
        "--percentages",
        "75",
        "25",
    )

    assert os.path.exists("results/run_1")
    assert os.path.exists("results/run_2")
Example #8
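A more thorough variant of Example #5: in addition to the run directories, it asserts that each run contains four packaged models, consistent with two configs each trained at two data percentages.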
import os
from typing import Callable

from _pytest.pytester import RunResult

import rasa.shared.utils.io
from rasa.shared.utils.io import write_yaml

# Constants from the Rasa code base; values are assumed here because the
# import paths vary between Rasa versions.
DEFAULT_RESULTS_PATH = "results"
RESULTS_FILE = "results.json"


def test_test_core_comparison_after_train(
    run_in_simple_project: Callable[..., RunResult],
):
    temp_dir = os.getcwd()

    write_yaml(
        {"language": "en", "policies": [{"name": "MemoizationPolicy"}]},
        "config_1.yml",
    )
    write_yaml(
        {"language": "en", "policies": [{"name": "MemoizationPolicy"}]},
        "config_2.yml",
    )

    run_in_simple_project(
        "train",
        "core",
        "-c",
        "config_1.yml",
        "config_2.yml",
        "--stories",
        "data/stories.yml",
        "--runs",
        "2",
        "--percentages",
        "25",
        "75",
        "--out",
        "comparison_models",
    )

    assert os.path.exists(os.path.join(temp_dir, "comparison_models"))
    assert os.path.exists(os.path.join(temp_dir, "comparison_models", "run_1"))
    assert os.path.exists(os.path.join(temp_dir, "comparison_models", "run_2"))

    # Two configs trained at two percentages -> four models per run.
    run_directories = rasa.shared.utils.io.list_subdirectories(
        os.path.join(temp_dir, "comparison_models")
    )
    assert len(run_directories) == 2
    model_files = rasa.shared.utils.io.list_files(
        os.path.join(temp_dir, "comparison_models", run_directories[0])
    )
    assert len(model_files) == 4
    assert model_files[0].endswith("tar.gz")

    run_in_simple_project(
        "test",
        "core",
        "-m",
        "comparison_models",
        "--stories",
        "data/stories",
        "--evaluate-model-directory",
    )

    assert os.path.exists(os.path.join(DEFAULT_RESULTS_PATH, RESULTS_FILE))
    assert os.path.exists(
        os.path.join(DEFAULT_RESULTS_PATH, "core_model_comparison_graph.pdf")
    )