Example #1
    def test_generate_evaluator_target(self, requests_mock):

        data_set = create_data_set("data-set-1")
        mock_get_data_set(requests_mock, data_set)

        target = create_evaluator_target("evaluator-target-1", [data_set.id])

        with temporary_cd():

            OptimizationInputFactory._generate_evaluator_target(
                target, 8000, None)

            assert os.path.isfile("training-set.json")
            off_data_set = PhysicalPropertyDataSet.from_json(
                "training-set.json")
            assert off_data_set.json() == data_set.to_evaluator().json()

            assert os.path.isfile("options.json")
Example #2
def test_study_with_children(requests_mock, monkeypatch):

    # Overwrite the child factories so we don't need to provide
    # sensible children and wait for them to be built.
    def mock_generate(model, **_):
        os.makedirs(model.id, exist_ok=True)

    monkeypatch.setattr(OptimizationInputFactory, "generate", mock_generate)
    monkeypatch.setattr(BenchmarkInputFactory, "generate", mock_generate)

    mock_get_data_set(requests_mock, create_data_set("data-set-1"))

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        targets=[create_evaluator_target("evaluator-target", ["data-set-1"])],
    )
    benchmark = create_benchmark(
        "project-1",
        "study-1",
        "benchmark-1",
        ["data-set-1"],
        None,
        create_force_field(),
    )

    study = create_study("project-1", "study-1")
    study.optimizations = [optimization]
    study.benchmarks = [benchmark]

    mock_get_study(requests_mock, study)

    with temporary_cd():

        InputFactory.generate(study, "test-env", "12:34", "lilac-dask", 8000, 1, False)

        assert os.path.isdir(study.id)
        assert os.path.isdir(os.path.join(study.id, "optimizations"))
        assert os.path.isdir(os.path.join(study.id, "benchmarks"))

        assert os.path.isdir(os.path.join(study.id, "optimizations", optimization.id))
        assert os.path.isdir(os.path.join(study.id, "benchmarks", benchmark.id))
Example #3
    def test_delete_with_dependent(self, db: Session, create_results: bool,
                                   expected_error):
        """Test that a benchmark cannot be deleted until its results have
        also been deleted.
        """

        create_dependencies(db, self.dependencies())
        model = self.create_model()

        db.add(self.crud_class().create(db, model))
        db.commit()

        # Create the benchmark results
        if create_results:

            data_set = create_data_set("data-set-1")
            data_set.entries[0].id = 1

            db_result = BenchmarkResultCRUD.create(
                db,
                create_benchmark_result(model.project_id, model.study_id,
                                        model.id, data_set),
            )
            db.add(db_result)
            db.commit()

        # Delete the model.
        with expected_error:
            self.crud_class().delete(db, model.project_id, model.study_id,
                                     model.id)

        if not create_results:
            return

        BenchmarkResultCRUD.delete(db, model.project_id, model.study_id,
                                   model.id)
        db.commit()

        self.crud_class().delete(db, model.project_id, model.study_id,
                                 model.id)
        db.commit()

        self.check_has_deleted(db)
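
The create_results and expected_error arguments are assumed to come from a parametrize decorator along the lines of the sketch below (compare the does_not_raise / pytest.raises pattern in Example #9); the exact exception raised when dependent results still exist depends on the CRUD implementation.

    # Assumes ``import pytest`` and
    # ``from contextlib import nullcontext as does_not_raise`` at module level.
    @pytest.mark.parametrize(
        "create_results, expected_error",
        [
            # Without results, deleting the benchmark should succeed outright.
            (False, does_not_raise()),
            # With results attached, the delete should fail; the concrete
            # exception type here is an assumption about the CRUD layer.
            (True, pytest.raises(Exception)),
        ],
    )
    def test_delete_with_dependent(self, db: Session, create_results: bool,
                                   expected_error):
        ...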
Example #4
def test_plot_scatter_results(
    benchmarks_and_results,
    file_type: Literal["png", "pdf"],
    tmpdir,
):

    benchmarks, results, _ = benchmarks_and_results

    plot_scatter_results(
        benchmarks,
        results,
        [create_data_set("data-set-1", 1)],
        tmpdir,
        file_type,
    )

    assert os.path.isfile(
        os.path.join(tmpdir, "scatter-plots", f"density-2.{file_type}")
    )
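
file_type is annotated as Literal["png", "pdf"], so it is presumably driven by a parametrize decorator such as:

@pytest.mark.parametrize("file_type", ["png", "pdf"])
def test_plot_scatter_results(benchmarks_and_results, file_type, tmpdir):
    ...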
Example #5
def test_plot(force_field, monkeypatch):

    from nonbonded.library.plotting.seaborn import optimization as optimization_module

    # Mock the required file inputs
    data_set = create_data_set("data-set-1", 1)
    data_set_collection = DataSetCollection(data_sets=[data_set])

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [create_evaluator_target("target-1", ["data-set-1"])],
    )
    optimization_result = create_optimization_result("project-1", "study-1",
                                                     "optimization-1",
                                                     ["target-1"], [])

    # Mock the already tested plotting methods.
    monkeypatch.setattr(optimization_module, "plot_parameter_changes",
                        lambda *args: None)
    monkeypatch.setattr(optimization_module, "plot_objective_per_iteration",
                        lambda *args: None)
    monkeypatch.setattr(optimization_module, "plot_rmse_change",
                        lambda *args: None)

    if "nonbonded.library.factories.plots.optimization" in sys.modules:
        sys.modules.pop("nonbonded.library.factories.plots.optimization")

    from nonbonded.library.factories.plots.optimization import OptimizationPlotFactory

    with temporary_cd():

        # Save the inputs in their expected locations.
        data_set_collection.to_file("test-set-collection.json")
        optimization.to_file("optimization.json")
        os.makedirs("analysis")
        optimization_result.to_file(
            os.path.join("analysis", "optimization-results.json"))

        OptimizationPlotFactory.plot([""], "png")

        assert os.path.isdir("plots")
Example #6
    def test_generate_request_options_default(self, allow_reweighting):

        training_set = create_data_set("data-set-1", 1)

        target = create_evaluator_target("evaluator-target-1", ["data-set-1"])
        target.allow_direct_simulation = True
        target.allow_reweighting = allow_reweighting

        request_options = OptimizationInputFactory._generate_request_options(
            target, training_set.to_evaluator())

        if allow_reweighting:
            assert request_options.calculation_layers == [
                "ReweightingLayer",
                "SimulationLayer",
            ]
        else:
            assert request_options.calculation_layers == ["SimulationLayer"]

        assert request_options.calculation_schemas == UNDEFINED
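
allow_reweighting is likewise assumed to be parametrized over both values so that each calculation-layer branch is exercised:

    @pytest.mark.parametrize("allow_reweighting", [True, False])
    def test_generate_request_options_default(self, allow_reweighting):
        ...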
Example #7
    def _create_model(cls, db, create_dependencies=True):

        project_id = "project-1"
        study_id = "study-1"
        benchmark_id = "benchmark-1"

        data_sets = create_data_set("data-set-1")

        for index, entry in enumerate(data_sets.entries):
            entry.id = index + 1

        if create_dependencies:

            project, study, benchmark, data_set, _, _ = commit_benchmark(
                db, False)

            project_id = project.id
            study_id = study.id
            benchmark_id = benchmark.id

            data_sets = data_set

        benchmark_result = create_benchmark_result(
            project_id,
            study_id,
            benchmark_id,
            data_sets,
        )

        return (
            benchmark_result,
            {
                "project_id": project_id,
                "study_id": study_id,
                "model_id": benchmark_id
            },
        )
Example #8
    def _create_model(cls, db, create_dependencies=True):
        data_set = create_data_set("data-set-1")
        data_set.entries[0].id = 1
        return data_set, {"data_set_id": data_set.id}
Example #9
                "mock-env",
                "01:00",
                "lilac-dask",
                8000,
                1,
                False,
                None,
                optimization_result,
            )


@pytest.mark.parametrize(
    "reference_data_sets, expected_raises",
    [
        (
            [create_data_set("data-set-1"), create_qc_data_set("data-set-1")],
            does_not_raise(),
        ),
        (
            [
                create_data_set("data-set-1"),
                create_data_set("data-set-1"),
                create_qc_data_set("data-set-1"),
            ],
            pytest.raises(AssertionError, match="multiple reference data sets of"),
        ),
        (
            [
                create_data_set("data-set-1"),
                create_qc_data_set("data-set-1"),
                create_qc_data_set("data-set-1"),
Example #10
def unsuccessful_result():

    result = RequestResult()
    result.unsuccessful_properties = create_data_set("data-set-1",
                                                     1).to_evaluator()
    return result
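
Assuming unsuccessful_result is registered as a pytest fixture, a consuming test can unpack the evaluator data set it wraps, for example:

def test_handles_unsuccessful_result(unsuccessful_result):
    # ``unsuccessful_properties`` is the evaluator data set built from
    # ``data-set-1`` above, so it should contain at least one property.
    assert len(unsuccessful_result.unsuccessful_properties.properties) > 0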
Example #11
def create_dependencies(db: Session, dependencies: List[str]):
    """Create any dependencies such as parent studies, projects, or data sets and
    commit them to the database.

    Parameters
    ----------
    db
        The current database session.
    dependencies
        The required dependencies.
    """

    project = None
    data_set_ids = []
    qc_data_set_ids = []

    if "data-set" in dependencies:
        data_set_ids.append("data-set-1")
    if "qc-data-set" in dependencies:
        qc_data_set_ids.append("qc-data-set-1")

    for data_set_id in data_set_ids:
        data_set = create_data_set(data_set_id)
        db_data_set = DataSetCRUD.create(db, data_set)
        db.add(db_data_set)

    for qc_data_set_id in qc_data_set_ids:
        qc_data_set = create_qc_data_set(qc_data_set_id)
        db_qc_data_set = QCDataSetCRUD.create(db, qc_data_set)
        db.add(db_qc_data_set)

    db.commit()

    if ("project" in dependencies or "study" in dependencies
            or "evaluator-target" in dependencies
            or "recharge-target" in dependencies
            or "benchmark" in dependencies):
        project = create_project("project-1")

    if ("study" in dependencies or "evaluator-target" in dependencies
            or "recharge-target" in dependencies
            or "benchmark" in dependencies):
        project.studies = [create_study(project.id, "study-1")]

    if "evaluator-target" in dependencies or "recharge-target" in dependencies:

        targets = []

        if "evaluator-target" in dependencies:
            targets.append(
                create_evaluator_target("evaluator-target-1", ["data-set-1"]))
        if "recharge-target" in dependencies:
            targets.append(
                create_recharge_target("recharge-target-1", ["qc-data-set-1"]))

        optimization = create_optimization(project.id, project.studies[0].id,
                                           "optimization-1", targets)

        project.studies[0].optimizations = [optimization]

    if "benchmark" in dependencies:
        benchmark = create_benchmark(
            project.id,
            project.studies[0].id,
            "benchmark-1",
            ["data-set-1"],
            None,
            create_force_field(),
        )

        project.studies[0].benchmarks = [benchmark]

    if project is not None:
        db_project = ProjectCRUD.create(db, project)
        db.add(db_project)
        db.commit()
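
As in Example #3, a caller lists the names of the parent records it needs and lets this helper create and commit them, for example:

# Hedged usage sketch: commit the project / study / benchmark chain plus
# the data set the benchmark tests against.
create_dependencies(db, ["data-set", "benchmark"])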
Example #12
    def create_model(cls, include_children=False, index=1):
        data_set = create_data_set(f"data-set-{index}")
        data_set.entries[0].id = index
        return data_set
Example #13
def benchmarks_and_results(
    force_field: ForceField,
) -> Tuple[List[Benchmark], List[BenchmarkResult], List[DataSet]]:

    benchmarks = []
    benchmark_results = []
    data_sets = [create_data_set("data-set-1", 1)]

    for index in range(2):

        benchmark = create_benchmark(
            "project-1",
            "study-1",
            f"benchmark-{index + 1}",
            ["data-set-1"],
            None,
            force_field,
        )
        benchmark.name = f"Benchmark {index + 1}"
        benchmarks.append(benchmark)

        benchmark_result = create_benchmark_result(
            "project-1", "study-1", "benchmark-1", data_sets
        )

        for statistic_entry in benchmark_result.data_set_result.statistic_entries:
            statistic_entry.value /= index + 1
            statistic_entry.lower_95_ci /= index + 1
            statistic_entry.upper_95_ci /= index + 1

        benchmark_results.append(benchmark_result)

    # benchmarks = [
    #     Benchmark.from_rest(
    #         project_id="binary-mixture",
    #         study_id="expanded",
    #         sub_study_id="openff-1-0-0",
    #     ),
    #     Benchmark.from_rest(
    #         project_id="binary-mixture",
    #         study_id="expanded",
    #         sub_study_id="h-mix-rho-x-rho",
    #     ),
    #     Benchmark.from_rest(
    #         project_id="binary-mixture", study_id="expanded", sub_study_id="h-mix-rho-x"
    #     ),
    # ]
    # benchmark_results = [
    #     BenchmarkResult.from_rest(
    #         project_id="binary-mixture", study_id="expanded", model_id="openff-1-0-0"
    #     ),
    #     BenchmarkResult.from_rest(
    #         project_id="binary-mixture", study_id="expanded", model_id="h-mix-rho-x-rho"
    #     ),
    #     BenchmarkResult.from_rest(
    #         project_id="binary-mixture", study_id="expanded", model_id="h-mix-rho-x"
    #     ),
    # ]
    #
    # data_set_ids = {
    #     test_set_id
    #     for benchmark in benchmarks
    #     for test_set_id in benchmark.test_set_ids
    # }
    # data_sets = [
    #     DataSet.from_rest(data_set_id=data_set_id) for data_set_id in data_set_ids
    # ]

    return benchmarks, benchmark_results, data_sets