Exemplo n.º 1
0
def test_text_classifier_tutorial(tmp_path):
    """Run the 'Training a text classifier' tutorial notebook on CI.

    The notebook is patched before execution so it finishes quickly:
    ``pip install`` and ``explore`` cells are commented out, training is
    limited to one epoch and performed on the (smaller) validation set.

    Args:
        tmp_path: pytest-provided temporary directory for the patched copy.
    """
    notebook_path = Path(TUTORIALS_PATH) / "Training_a_text_classifier.ipynb"

    # adapt notebook to CI (make its execution quicker + comment lines)
    notebook = load_notebook(str(notebook_path))
    for cell in notebook["cells"]:
        if cell["source"].startswith("!pip install"):
            # literal text -> plain str.replace, no regex needed
            cell["source"] = cell["source"].replace("!pip install",
                                                    "#!pip install")
        if cell["source"].startswith("trainer_config ="):
            # a regex is needed here to match the 1-2 digit epoch count
            cell["source"] = re.sub(r"num_epochs=[0-9][0-9]?", "num_epochs=1",
                                    cell["source"])
        if cell["source"].startswith("pl.train("):
            # train on the smaller validation split to save time
            cell["source"] = cell["source"].replace("training=train_ds",
                                                    "training=valid_ds")
        if cell["source"].startswith("pl_trained.explore"):
            # literal replace also fixes the formerly unescaped '.' in the
            # regex version, which matched any character
            cell["source"] = cell["source"].replace("pl_trained.explore",
                                                    "#pl_trained.explore")

    # dump adapted notebook
    mod_notebook_path = tmp_path / notebook_path.name
    with mod_notebook_path.open("w") as file:
        file.write(str(dump_notebook(notebook)))

    # test adapted notebook
    fixture = NBRegressionFixture(exec_timeout=100)
    fixture.check(str(mod_notebook_path))
Exemplo n.º 2
0
def test_jupyter_example():
    """ Test for climate_observations jupyter notebook """
    # ignore paths whose content legitimately varies between test runs
    volatile_paths = (
        "/metadata/language_info",  # Python version depends on testing
        "/cells/*/outputs/",
    )
    fixture = NBRegressionFixture(diff_ignore=volatile_paths,
                                  force_regen=True)
    fixture.check(EXAMPLE_DIR / "climate_observations.ipynb")
def test_regression_coverage():
    """Test a regression that will fail."""
    notebook = os.path.join(PATH, "raw_files", "coverage_test",
                            "call_package.ipynb")
    fixture = NBRegressionFixture()
    fixture.diff_ignore = ("/metadata/language_info/version", )
    fixture.coverage = True
    result = fixture.check(notebook)

    # the coverage resource must be present and reference the executed code
    assert COVERAGE_KEY in result.process_resources
    coverage_data = result.process_resources[COVERAGE_KEY]
    for snippet in ("!coverage.py:", "package.py", "[1,2,3]"):
        assert snippet in coverage_data
Exemplo n.º 4
0
def test_regression_diff_ignore_pass():
    """Test a regression that will succeed by ignoring certain notebook paths."""
    # every path whose value differs between executions of the notebook
    ignored = [
        "/metadata/language_info/version",
        "/cells/*/execution_count",
        "/cells/*/outputs/*/traceback",
        "/cells/*/outputs/*/execution_count",
        "/cells/12/outputs/0/data/text/latex",
        "/cells/9/outputs/0/metadata/application/json",
    ]
    fixture = NBRegressionFixture()
    fixture.diff_ignore = tuple(ignored)
    fixture.check(os.path.join(PATH, "raw_files", "different_outputs.ipynb"))
Exemplo n.º 5
0
def test_slot_filling_tutorial(tmp_path):
    """Run the slot-filling sequence-tagger tutorial notebook on CI.

    The notebook is patched before execution so it finishes quickly:
    ``pip install`` cells are commented out, a truncated word-vector file
    is downloaded instead of the full one, and training is limited to a
    single epoch on the (smaller) validation set.

    Args:
        tmp_path: pytest-provided temporary directory for the patched copy.
    """
    notebook_path = (Path(TUTORIALS_PATH) /
                     "Training_a_sequence_tagger_for_Slot_Filling.ipynb")

    # adapt notebook to CI (make its execution quicker + comment lines)
    notebook = load_notebook(str(notebook_path))
    for cell in notebook["cells"]:
        # All replacements below are literal, so `str.replace` is used
        # instead of `re.sub`; this also fixes the formerly unescaped
        # regex metacharacters ('.', '+') in the URL and method patterns.
        if cell["source"].startswith("!pip install"):
            cell["source"] = cell["source"].replace("!pip install",
                                                    "#!pip install")
        if cell["source"].startswith(
                "from biome.text.configuration import FeaturesConfiguration"):
            # download a truncated word-vector file to keep CI fast
            cell["source"] = cell["source"].replace(
                "https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip",
                "https://biome-tutorials-data.s3-eu-west-1.amazonaws.com/token_classifier/wiki-news-300d-1M.head.vec",
            )
        if cell["source"].startswith("trainer_config ="):
            # train for one epoch only
            cell["source"] = cell["source"].replace(
                "TrainerConfiguration()",
                "TrainerConfiguration(num_epochs=1)",
            )
        if cell["source"].startswith("pl.train("):
            # make TrainerConfiguration importable inside the cell
            cell["source"] = cell["source"].replace(
                "pl.train",
                "from biome.text.configuration import TrainerConfiguration\npl.train",
            )
            # train on the smaller validation split
            cell["source"] = cell["source"].replace(
                "training=train_ds",
                "training=valid_ds",
            )
            # pass a one-epoch trainer config to pl.train
            # NOTE(review): the replacement drops the trailing comma after
            # `test=test_ds` — presumably it is the last keyword argument
            # in the cell; confirm against the notebook source.
            cell["source"] = cell["source"].replace(
                "test=test_ds,",
                "test=test_ds, trainer=TrainerConfiguration(num_epochs=1)",
            )

    # dump adapted notebook
    mod_notebook_path = tmp_path / notebook_path.name
    with mod_notebook_path.open("w") as file:
        file.write(str(dump_notebook(notebook)))

    # test adapted notebook
    fixture = NBRegressionFixture(exec_timeout=200)
    fixture.check(str(mod_notebook_path))
def test_nb_regression_cmndline_setting_init(testdir):
    """Test the nb_regression fixture is initialised with the commandline settings."""
    # reference fixture built programmatically with the same settings that
    # will be supplied on the command line below
    expected_config = attr.asdict(
        NBRegressionFixture(
            exec_allow_errors=True,
            exec_timeout=90,
            force_regen=True,
            # the following are the defaults for pytest-cov
            cov_source=(),
            cov_config=".coveragerc",
        ))

    testdir.makepyfile("""
        import attr

        def test_nb(nb_regression):
            assert attr.asdict(nb_regression) == {config}
    """.format(config=expected_config))

    result = testdir.runpytest("-vv", "--nb-exec-timeout", "90",
                               "--nb-exec-errors", "--nb-force-regen")

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(["*::test_nb PASSED*"])

    # a '0' exit code means the whole generated testsuite passed
    assert result.ret == 0
Exemplo n.º 7
0
def nb_tester():
    """Test notebooks using pytest-notebook"""
    # outputs and execution counts vary between runs, so ignore them
    volatile_paths = (
        "/metadata/language_info",
        "/cells/*/execution_count",
        "/cells/*/outputs/*",
    )
    return NBRegressionFixture(
        diff_ignore=volatile_paths,
        exec_timeout=1800,
    )
Exemplo n.º 8
0
def test_regression_regex_replace_pass():
    """Test a regression that will succeed by regex replacing certain paths."""
    traceback_path = "/cells/*/outputs/*/traceback"
    fixture = NBRegressionFixture()
    fixture.diff_ignore = (
        "/metadata/language_info/version",
        "/cells/*/execution_count",
        "/cells/*/outputs/*/execution_count",
        "/cells/12/outputs/0/data/text/latex",
        "/cells/9/outputs/0/metadata/application/json",
    )
    # normalise tracebacks, which differ between executions
    fixture.diff_replace = (
        (traceback_path, r"\<module\>.*\n", "<module>\n"),
        (traceback_path, r"[\-]+", "-"),
        (traceback_path, r"\s*Traceback \(most recent call last\)",
         " Traceback (most recent call last)"),
        (traceback_path, r"\<ipython\-input\-[\-0-9a-zA-Z]*\>",
         "<ipython-input-XXX>"),
    )
    fixture.check(os.path.join(PATH, "raw_files", "different_outputs.ipynb"))
Exemplo n.º 9
0
def test_notebook1(nb_regression: nb.NBRegressionFixture, notebook):
    """Regenerate the notebook, then check it against the fresh baseline."""
    # first pass: (re)generate the expected notebook from the un-run one
    nb_regression.force_regen = True
    nb_regression.check(notebook, False)

    # second pass: run the notebook against what was just generated,
    # ignoring text output for now
    nb_regression.diff_ignore = ("/cells/*/outputs/*/text",)
    nb_regression.force_regen = False
    nb_regression.check(notebook)
Exemplo n.º 10
0
def test_jupyter_notebook():
    """Test that the `RmqThreadCommunicator` can be used in a Jupyter notebook."""
    from pytest_notebook.nb_regression import NBRegressionFixture

    fixture = NBRegressionFixture(exec_timeout=50)
    fixture.diff_color_words = False
    # the Python version differs between test environments
    fixture.diff_ignore = ('/metadata/language_info/version', )

    notebook_file = pathlib.Path(__file__).parent / 'notebooks/communicator.ipynb'
    with open(notebook_file) as handle:
        fixture.check(handle)
Exemplo n.º 11
0
def test_notebook1(nb_regression: nb.NBRegressionFixture, notebook):
    """Regenerate the notebook, then check it against the fresh baseline.

    A timeout during the regeneration pass is reported as a test failure
    with a pointer to the relevant documentation.
    """
    ## Generate from the un-run notebook
    nb_regression.force_regen = True
    try:
        nb_regression.check(notebook, False)
    except TimeoutError as te:
        # raise instead of `assert False`: the failure survives `python -O`
        # (asserts are stripped) and the original exception is chained
        raise AssertionError(
            f"pynotebook `{NOTEBOOK_NAME}` timed out after "
            f"{nb_regression.exec_timeout}s during test: {te}.\n"
            "For more details see: "
            "https://jupyterbook.org/content/execute.html#setting-execution-timeout"
        ) from te
    ## Run notebook against generated
    ## ignore output for now
    nb_regression.diff_ignore = ("/cells/*/outputs/*/text",)
    nb_regression.force_regen = False
    nb_regression.check(notebook)
Exemplo n.º 12
0
from pytest_notebook.nb_regression import NBRegressionFixture

# Execute notebooks from the repository root (one level above this test dir).
EXEC_CWD = str(pathlib.Path(__file__).resolve().parent.parent)

# Shared regression fixture: ignore all output/metadata paths whose content
# varies between notebook executions.
fixture = NBRegressionFixture(
    exec_timeout=120,
    exec_cwd=EXEC_CWD,
    diff_color_words=True,
    diff_ignore=(
        "/cells/*/outputs/*/data/text/plain",
        "/cells/*/outputs/*/data/image/svg+xml",
        "/cells/*/outputs/*/text",
        "/cells/*/outputs/",
        "/cells/*/outputs/*/data/image/png",
        "/cells/*/outputs/*/data/text/html",
        # FIX: a missing comma here previously fused the next two entries
        # into one meaningless path via implicit string concatenation
        "/cells/*/outputs/*/output/data/",
        "/cells/*/outputs/*/data/application/vnd.plotly.v1+json",
        "/cells/*/outputs/*/execution_count",
        "/cells/*/execution_count",
        "/cells/1/outputs/",
        "/cells/*/execution_count/",
        "/cells/*/metadata/",
        "/cells/*/outputs/metadata/",
        "/metadata/",
    ),
)


def test_cluster_analysis_notebook():
    """Regression-test the Cluster_Analysis example notebook."""
    nb_path = os.path.join(EXEC_CWD, "examples", "Cluster_Analysis.ipynb")
    fixture.check(nb_path, raise_errors=True)
Exemplo n.º 13
0
def test_init_fixture():
    """Test initialisation of NBRegressionFixture."""
    timeout = 10
    fixture = NBRegressionFixture(exec_timeout=timeout)
    # the constructor must store the value unchanged
    assert fixture.exec_timeout == timeout
Exemplo n.º 14
0
 def runtest(self):
     """Execute the notebook regression check for this collected item."""
     # build fixture kwargs from the pytest configuration
     options, _unused = gather_config_options(self.config)
     nb_fixture = NBRegressionFixture(**options)
     nb_fixture.check(self.fspath)
Exemplo n.º 15
0
def nb_regression(pytestconfig):
    """Fixture to execute a Jupyter Notebook, and test its output is as expected."""
    fixture_kwargs, _unused = gather_config_options(pytestconfig)
    return NBRegressionFixture(**fixture_kwargs)
from pathlib import Path

import pytest
from pytest_notebook.nb_regression import NBRegressionFixture

# Directory containing the example notebooks (three levels above this file).
EXAMPLE_DIR = Path(__file__).parent.parent.parent / "example"

# Shared regression fixture: always regenerate the stored notebook and
# ignore outputs, which vary between executions.
FIXTURE = NBRegressionFixture(
    diff_ignore=(
        "/metadata/language_info",  # Python version depends on testing
        "/cells/*/outputs/",
    ),
    force_regen=True,
)


@pytest.mark.slow
def test_jupyter_example():
    """Regression-test the climate_observations example notebook."""
    notebook = EXAMPLE_DIR / "climate_observations.ipynb"
    FIXTURE.check(notebook)
Exemplo n.º 17
0
def test_regression_fail():
    """Test a regression that will fail."""
    notebook = os.path.join(PATH, "raw_files", "different_outputs.ipynb")
    fixture = NBRegressionFixture()
    # with no diff_ignore configured, the differing outputs must raise
    with pytest.raises(NBRegressionError):
        fixture.check(notebook)
Exemplo n.º 18
0
def test_notebook(pytestconfig, db_test_app, filename):
    """Execute Jupyter Notebook, using a clean AiiDA database/profile, and test its output is as expected.

    Can be executed by: ``pytest --cry17-nb-tests --log-cli-level=info``
    """
    from pytest_notebook.nb_regression import NBRegressionFixture
    from pytest_notebook.plugin import gather_config_options

    # build the fixture kwargs from the pytest command-line/ini configuration
    kwargs, other_args = gather_config_options(pytestconfig)

    nb_regression = NBRegressionFixture(**kwargs)
    # Normalise run-specific output before diffing: UUIDs, directory paths,
    # timestamps, PIDs and graphviz versions all change between executions.
    nb_regression.diff_replace = (
        # UUIDs in stream and data outputs
        (
            "/cells/*/outputs/*/text",
            "\\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\\b",
            "<UUID>",
        ),
        (
            "/cells/*/outputs/*/data/text",
            "\\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\\b",
            "<UUID>",
        ),
        # NOTE(review): '\b' here is a literal backspace character, not a
        # word boundary — presumably stripping progress-spinner output;
        # confirm against the notebook's stream output.
        ("/cells/*/outputs/*/text", "[-/|\\\\]\b", ""),
        # `verdi status` style lines: connection address and repository path
        (
            "/cells/*/outputs/*/text",
            "postgres:\\s+Connected as.*\n",
            "postgres:    Connected as <ADDRESS>",
        ),
        ("/cells/*/outputs/*/text", "repository:\\s+/.+\n",
         "repository:  <DIRPATH>"),
        # daemon PID and relative timestamps
        (
            "/cells/*/outputs/*/text",
            "Daemon is running as PID.+\n",
            "Daemon is running as <PID>\n",
        ),
        ("/cells/*/outputs/*/text", "\\d+s\\sago", "XXs ago"),
        # absolute timestamps in various formats
        (
            "/cells/*/outputs/*/text",
            "(ctime|mtime)\\s+\\d{2,4}-\\d{1,2}-\\d{1,2}.+\n",
            "(c/m)time <DATETIME>\n",
        ),
        (
            "/cells/*/outputs/*/text",
            "time an entry changed state\\:.+\n",
            "time an entry changed state: <TIME>\n",
        ),
        (
            "/cells/*/outputs/*/text",
            "\\d{2,4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2}",
            "<DATETIME>",
        ),
        # node attributes containing machine-specific values
        (
            "/cells/*/outputs/*/data/text",
            "\\'remote_workdir\\'\\:\\s*\\'[\\_\\/a-zA-Z0-9]+\\'",
            "'remote_workdir': '<DIRPATH>'",
        ),
        (
            "/cells/*/outputs/*/data/text",
            "\\'\\_aiida_hash\\'\\:\\s*\\'[a-z0-9]+\\'",
            "'_aiida_hash': '<HASH>'",
        ),
        # graphviz object reprs and generator version banners
        (
            "/cells/*/outputs/*/data/text",
            "\\<graphviz.dot.Digraph at .+\\>",
            "<graphviz.dot.Digraph>",
        ),
        (
            "/cells/*/outputs/*/data/image/svg+xml",
            "\\<\\!\\-\\-\\sGenerated\\sby\\sgraphviz\\sversion.*\\-\\-\\>",
            "<!-- Generated by graphviz version XXX -->",
        ),
    )

    from aiida.cmdline.utils.common import get_env_with_venv_bin

    # This environmental variable propagates in the jupyter kernel,
    # so that the test aiida database/profile is used.
    os.environ["AIIDA_PATH"] = db_test_app.environment.config_dir

    # We don't actually need to start a daemon, because ``aiida.engine.run`` creates its own.
    # However, for `verdi status` and `verdi process list`, we then get warning messages.
    curr_env = get_env_with_venv_bin()
    output = subprocess.check_output(["verdi", "daemon", "start"],
                                     env=curr_env,
                                     stderr=subprocess.STDOUT)
    logger.info(output)

    try:
        source_dir = os.path.abspath(os.path.dirname(__file__))
        # NOTE(review): opened 'r+' — presumably so the fixture can
        # regenerate the notebook in place; confirm.
        with io.open(os.path.join(source_dir, filename), "r+") as handle:
            nb_regression.check(handle)
    finally:
        # always stop the daemon, even if the regression check fails
        output = subprocess.check_output(["verdi", "daemon", "stop"],
                                         env=curr_env,
                                         stderr=subprocess.STDOUT)
        logger.info(output)
def test_nb_regression_ini_setting_init(testdir):
    """Test the nb_regression fixture is initialised with the config file settings."""
    # write an ini file that sets every supported nb_* option
    testdir.makeini(r"""
        [pytest]
        nb_exec_cwd = {path}
        nb_exec_allow_errors = True
        nb_exec_timeout = 100
        nb_diff_use_color = True
        nb_diff_color_words = True
        nb_diff_ignore =
            /metadata/language_info/version
            /cells/*/execution_count
            /cells/*/outputs/*/traceback
            /cells/*/outputs/*/execution_count
            /cells/12/outputs/0/data/text/latex
            /cells/9/outputs/0/metadata/application/json
        nb_post_processors =
        nb_diff_replace =
            /cells/*/outputs/*/traceback \<ipython\-input\-[\-0-9a-zA-Z]*\> "< >"
        """.format(path=os.path.join(PATH, "raw_files")))

    # the generated test asserts the ini-configured fixture equals a
    # reference fixture built with the same settings programmatically
    testdir.makepyfile("""
        import attr

        def test_nb(nb_regression):
            assert attr.asdict(nb_regression) == {config}
    """.format(config=attr.asdict(
        NBRegressionFixture(
            **{
                "exec_cwd":
                os.path.join(PATH, "raw_files"),
                "exec_allow_errors":
                True,
                "exec_timeout":
                100,
                "post_processors": (),
                "diff_ignore": (
                    "/metadata/language_info/version",
                    "/cells/*/execution_count",
                    "/cells/*/outputs/*/traceback",
                    "/cells/*/outputs/*/execution_count",
                    "/cells/12/outputs/0/data/text/latex",
                    "/cells/9/outputs/0/metadata/application/json",
                ),
                "diff_replace": ((
                    "/cells/*/outputs/*/traceback",
                    "\\<ipython\\-input\\-[\\-0-9a-zA-Z]*\\>",
                    "< >",
                ), ),
                "diff_use_color":
                True,
                "diff_color_words":
                True,
                # the following are the defaults for pytest-cov
                "cov_source": (),
                "cov_config":
                ".coveragerc",
            }))))

    result = testdir.runpytest("-vv")

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(["*::test_nb PASSED*"])

    # make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
Exemplo n.º 20
0
import importlib_resources
from pytest_notebook.nb_regression import NBRegressionFixture

import notebooks

# Output cells whose content is not reproducible between runs, plus cells
# where pd.DataFrame.value_counts' return value is inconsistent.
_IGNORED_PATHS = (
    "/cells/*/execution_count",
    "/metadata/language_info/version",
    "/cells/68/outputs/0/text",
    "/cells/69/outputs/0/text",
    "/cells/78/outputs/0/text",
    "/cells/134/outputs/0/text",
    "/cells/135/outputs/0/text",
    "/cells/136/outputs/0/text",
)

fixture = NBRegressionFixture(exec_timeout=300)
fixture.diff_color_words = False
# strip carriage returns from all outputs before diffing
fixture.diff_replace = (("/cells/*/outputs", "\\r", ""),)
fixture.diff_ignore = _IGNORED_PATHS


def cli_notebook_output():
    """Regression-check the CLI notebook shipped in the notebooks package."""
    # resolve the packaged notebook to a real filesystem path, then check it
    with importlib_resources.path(notebooks, "CLI.ipynb") as nb_path:
        fixture.check(str(nb_path))