def test_baseline_deep_dive_integration(notebooks, output_notebook,
                                        kernel_name, size, expected_values):
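    # Execute the baseline deep-dive notebook with papermill and compare the
    # scrapbook-recorded metrics against expected_values within tolerance.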
    notebook_path = notebooks["baseline_deep_dive"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(TOP_K=10, MOVIELENS_DATA_SIZE=size),
    )
    results = sb.read_notebook(output_notebook).scraps.dataframe.set_index(
        "name")["data"]

    for key, value in expected_values.items():
        assert results[key] == pytest.approx(value, rel=TOL, abs=ABS_TOL)
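# The snippets in this listing share the same pattern: papermill executes a
# parametrized notebook and scrapbook reads back the metrics the notebook
# recorded. A minimal sketch of the shared setup they assume is shown below;
# the tolerance values, output path, and kernel name are placeholders, not
# the originals.
import os

import papermill as pm
import pytest
import scrapbook as sb

TOL = 0.05                         # assumed relative tolerance for metric checks
ABS_TOL = 0.05                     # assumed absolute tolerance for metric checks
OUTPUT_NOTEBOOK = "output.ipynb"   # assumed path for the executed notebook copy
KERNEL_NAME = "python3"            # assumed Jupyter kernel name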
def test_01_notebook_run(detection_notebooks):
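    # Run the detection "01" notebook for a few epochs and check that the
    # recorded training losses and average precision reach reasonable values.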
    epochs = 3
    notebook_path = detection_notebooks["01"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__, EPOCHS=epochs),
        kernel_name=KERNEL_NAME,
    )

    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert len(nb_output.scraps["training_losses"].data) == epochs
    assert nb_output.scraps["training_losses"].data[-1] < 0.5
    assert nb_output.scraps["training_average_precision"].data[-1] > 0.5
def test_01_notebook_run(classification_notebooks):
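    # Only runs on a Linux machine with a GPU; checks the training and
    # validation accuracies recorded by the 01_training_introduction notebook.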
    if linux_with_gpu():
        notebook_path = classification_notebooks["01_training_introduction"]
        pm.execute_notebook(
            notebook_path,
            OUTPUT_NOTEBOOK,
            parameters=dict(PM_VERSION=pm.__version__),
            kernel_name=KERNEL_NAME,
        )

        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
        assert len(nb_output.scraps["training_accuracies"].data) == 10
        assert nb_output.scraps["training_accuracies"].data[-1] > 0.70
        assert nb_output.scraps["validation_accuracy"].data > 0.70
def test_10_notebook_run(classification_notebooks, tiny_ic_data_path):
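    # Run the image annotation notebook on the tiny "can" image set and verify
    # the number of images it reports.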
    notebook_path = classification_notebooks["10_image_annotation"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(
            PM_VERSION=pm.__version__,
            IM_DIR=os.path.join(tiny_ic_data_path, "can"),
        ),
        kernel_name=KERNEL_NAME,
    )

    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert nb_output.scraps["num_images"].data == 6
async def generate_report(athlete, activity_id):
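    # Execute the report notebook template for one Strava activity, read the
    # activity details back with scrapbook, store a Report record, and export
    # a cleaned-up HTML version of the executed notebook.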

    input_path = Path(NOTEBOOK_TEMPLATES_PATH,
                      f'{NOTEBOOK_TEMPLATE_NAME}.ipynb')
    output_dir = Path(REPORT_OUTPUT_DIR, str(athlete.id))
    output_dir.mkdir(parents=True, exist_ok=True)
    notebook_filename = f'{activity_id}.ipynb'
    html_filename = f'{activity_id}.html'
    notebook_path = Path(output_dir, notebook_filename)
    html_path = Path(output_dir, html_filename)

    try:
        papermill.execute_notebook(input_path=input_path.as_posix(),
                                   output_path=notebook_path.as_posix(),
                                   parameters=dict(
                                       access_token=athlete.access_token,
                                       activity_id=activity_id))
    except papermill.exceptions.PapermillExecutionError:
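        # Ignore notebook execution errors; any scraps recorded before the
        # failure are still read below.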
        pass

    nb = sb.read_notebook(notebook_path.as_posix())
    start_date_local = nb.scraps['activity_detail'].data['start_date_local']
    dt = datetime.fromisoformat(start_date_local)
    title = nb.scraps['activity_detail'].data['name']

    await Report.objects.create(activity_id=activity_id,
                                strava_athlete=athlete,
                                title=title,
                                datetime=dt,
                                notebook_filename=notebook_filename,
                                html_filename=html_filename)

    with notebook_path.open('r') as f:
        notebook = nbformat.reads(f.read(), as_version=4)

    c = Config()
    c.TagRemovePreprocessor.enabled = True
    c.TagRemovePreprocessor.remove_cell_tags = ("remove_cell", "parameters",
                                                "injected-parameters")
    c.TagRemovePreprocessor.remove_all_outputs_tags = ('remove_output', )
    c.TagRemovePreprocessor.remove_input_tags = ('remove_input', )
    # Scope the preprocessor list to the exporter so the config is picked up.
    c.HTMLExporter.preprocessors = [
        "nbconvert.preprocessors.TagRemovePreprocessor"
    ]

    html_exporter = HTMLExporter(config=c)
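    # 'full' is the nbconvert 5.x template; on nbconvert >= 6 set
    # html_exporter.template_name = 'classic' instead.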
    html_exporter.template_file = 'full'
    body, _ = html_exporter.from_notebook_node(notebook)

    with html_path.open('w') as f:
        f.write(body)
def test_ncf_smoke(notebooks, output_notebook, kernel_name):
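    # Quick NCF smoke test on MovieLens 100k with a single training epoch.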
    notebook_path = notebooks["ncf"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(TOP_K=10, MOVIELENS_DATA_SIZE="100k", EPOCHS=1, BATCH_SIZE=256),
    )
    results = sb.read_notebook(output_notebook).scraps.dataframe.set_index("name")[
        "data"
    ]

    assert results["map"] == pytest.approx(0.0409234, rel=TOL, abs=ABS_TOL)
    assert results["ndcg"] == pytest.approx(0.1773, rel=TOL, abs=ABS_TOL)
    assert results["precision"] == pytest.approx(0.160127, rel=TOL, abs=ABS_TOL)
    assert results["recall"] == pytest.approx(0.0879193, rel=TOL, abs=ABS_TOL)
def test_cornac_bivae_smoke(notebooks, output_notebook, kernel_name):
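    # Smoke test for the Cornac BiVAE deep-dive notebook on MovieLens 100k.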
    notebook_path = notebooks["cornac_bivae_deep_dive"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(MOVIELENS_DATA_SIZE="100k"),
    )
    results = sb.read_notebook(output_notebook).scraps.dataframe.set_index("name")[
        "data"
    ]

    assert results["map"] == pytest.approx(0.146552, rel=TOL, abs=ABS_TOL)
    assert results["ndcg"] == pytest.approx(0.474124, rel=TOL, abs=ABS_TOL)
    assert results["precision"] == pytest.approx(0.412527, rel=TOL, abs=ABS_TOL)
    assert results["recall"] == pytest.approx(0.225064, rel=TOL, abs=ABS_TOL)
def test_lstur_smoke(notebooks, output_notebook, kernel_name):
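    # Smoke test for the LSTUR quick-start notebook on the MIND demo dataset.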
    notebook_path = notebooks["lstur_quickstart"]
    pm.execute_notebook(
        notebook_path,
        output_notebook,
        kernel_name=kernel_name,
        parameters=dict(epochs=1, seed=40, MIND_type="demo"),
    )
    results = sb.read_notebook(output_notebook).scraps.dataframe.set_index("name")[
        "data"
    ]

    assert results["res_syn"]["group_auc"] == pytest.approx(
        0.5977, rel=TOL, abs=ABS_TOL
    )
    assert results["res_syn"]["mean_mrr"] == pytest.approx(0.2618, rel=TOL, abs=ABS_TOL)