Example #1
def optimizations_and_results(
    smirnoff_force_field,
) -> Tuple[List[Optimization], List[OptimizationResult]]:

    optimization_1 = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization_1.name = "Optimization 1"
    optimization_1.force_field = ForceField.from_openff(smirnoff_force_field)
    optimization_2 = create_optimization(
        "project-1",
        "study-1",
        "optimization-2",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization_2.force_field = ForceField.from_openff(smirnoff_force_field)
    optimization_2.name = "Optimization 2"

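    # Perturb the original parameters so that the first mock refit force field
    # differs from the force field being optimized.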
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].epsilon *= 2
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].sigma *= 3

    optimization_result_1 = create_optimization_result(
        "project-1",
        "study-1",
        "optimization-1",
        ["evaluator-target-1"],
        ["recharge-target-1"],
    )
    optimization_result_1.refit_force_field = ForceField.from_openff(
        smirnoff_force_field
    )

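    # Perturb the parameters a second time so that the two refit force fields
    # differ from one another.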
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].epsilon /= 4
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].sigma /= 6

    optimization_result_2 = create_optimization_result(
        "project-1",
        "study-1",
        "optimization-2",
        ["evaluator-target-1"],
        ["recharge-target-1"],
    )
    optimization_result_2.refit_force_field = ForceField.from_openff(
        smirnoff_force_field
    )

    return (
        [optimization_1, optimization_2],
        [optimization_result_1, optimization_result_2],
    )
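
The decorators are not shown in this listing, but the function above reads like a pytest fixture. Purely as an illustration (the test below is hypothetical, not part of the source), a consuming test might look like:

def test_optimizations_and_results(optimizations_and_results):

    optimizations, results = optimizations_and_results

    # Two optimizations were created, each paired with a result which stores a
    # perturbed copy of the original force field.
    assert len(optimizations) == 2
    assert len(results) == 2
    assert all(result.refit_force_field is not None for result in results)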
Example #2
    def _create_model(cls, db, create_dependencies=True):

        project_id = "project-1"
        study_id = "study-1"

        data_set_ids = ["data-set-1"]

        if create_dependencies:

            project, study = commit_study(db)

            project_id = project.id
            study_id = study.id

            data_set = commit_data_set(db)
            data_set_ids = [data_set.id]

        optimization = create_optimization(
            project_id,
            study_id,
            "optimization-1",
            [create_evaluator_target("name", data_set_ids)],
        )

        return (
            optimization,
            {
                "project_id": project_id,
                "study_id": study_id,
                "sub_study_id": optimization.id,
            },
        )
Example #3
def mock_target(tmpdir) -> Tuple[Optimization, EvaluatorTarget, str]:
    """Create a mock evaluator target directory which is populated with a dummy
    set of results.

    Returns
    -------
        A tuple of the parent optimization, the mock target and the path to the
        directory in which the files were created.
    """

    with temporary_cd(str(tmpdir)):

        # Mock the target to analyze.
        target = create_evaluator_target("evaluator-target-1", ["data-set-1"])

        optimization = create_optimization("project-1", "study-1",
                                           "optimization-1", [target])
        optimization.analysis_environments = []

        # Create a dummy data set and estimated result.
        reference_data_set = create_data_set("data-set-1", 1)
        DataSetCollection(data_sets=[reference_data_set]).to_evaluator().json(
            "training-set.json")

        results = RequestResult()
        results.estimated_properties = reference_data_set.to_evaluator()
        results.json("results.json")

        lp_dump({"X": 1.0}, "objective.p")

    return optimization, target, str(tmpdir)
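
No fixture decorator is shown here either; mock_target presumably receives pytest's built-in tmpdir fixture. A hypothetical sketch (the test name and assertions are illustrative only) of a test unpacking the returned tuple and checking the dummy files:

import os


def test_mock_evaluator_target(mock_target):

    optimization, target, directory = mock_target

    assert target.id == "evaluator-target-1"

    # The fixture wrote the dummy training set, estimated results and objective
    # into the temporary directory.
    assert os.path.isfile(os.path.join(directory, "training-set.json"))
    assert os.path.isfile(os.path.join(directory, "results.json"))
    assert os.path.isfile(os.path.join(directory, "objective.p"))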
Example #4
    def create_model(cls, include_children=False, index=1):

        study = create_study("project-1", f"study-{index}")

        if include_children:

            study.optimizations = [
                create_optimization(
                    "project-1",
                    study.id,
                    "optimization-1",
                    [
                        create_evaluator_target("evaluator-target-1",
                                                ["data-set-1"])
                    ],
                )
            ]
            study.benchmarks = [
                create_benchmark(
                    "project-1",
                    study.id,
                    "benchmark-1",
                    ["data-set-1"],
                    None,
                    create_force_field(),
                )
            ]

        return study
Example #5
def mock_target(tmpdir) -> Tuple[Optimization, RechargeTarget, str]:
    """Create a mock recharge target directory which is populated with a dummy
    set of results.

    Returns
    -------
        A tuple of the parent optimization, the mock target and the path to the
        directory in which the files were created.
    """

    with temporary_cd(str(tmpdir)):

        # Mock the target to analyze.
        target = create_recharge_target("recharge-target-1", ["qc-data-set-1"])

        optimization = create_optimization("project-1", "study-1",
                                           "optimization-1", [target])
        optimization.analysis_environments = [
            ChemicalEnvironment.Alkane,
            ChemicalEnvironment.Alcohol,
        ]

        # Create a dummy set of residuals.
        with open("residuals.json", "w") as file:
            json.dump({"C": 9.0, "CO": 4.0}, file)

        lp_dump({"X": 1.0}, "objective.p")

    return optimization, target, str(tmpdir)
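
As above, a hypothetical sketch of a test consuming this fixture, here checking the dummy residuals, whose "C" and "CO" keys appear to correspond to the alkane and alcohol analysis environments:

import json
import os


def test_mock_recharge_target(mock_target):

    optimization, target, directory = mock_target

    with open(os.path.join(directory, "residuals.json")) as file:
        residuals = json.load(file)

    assert residuals == {"C": 9.0, "CO": 4.0}
    assert os.path.isfile(os.path.join(directory, "objective.p"))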
Example #6
def test_analysis_n_iteration(monkeypatch, force_field):
    """Test that the correct exception is raised when a refit force field is
    found but no target outputs are."""

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization.force_field = force_field

    with temporary_cd():

        # Save the mock optimization file.
        with open("optimization.json", "w") as file:
            file.write(optimization.json())

        # Mock successfully reading a refit force field.
        monkeypatch.setattr(OptimizationAnalysisFactory,
                            "_load_refit_force_field", lambda: force_field)

        with pytest.raises(RuntimeError) as error_info:
            OptimizationAnalysisFactory.analyze(True)

        assert "No iteration results could be found" in str(error_info.value)
Example #7
def test_analysis_missing_result(monkeypatch, force_field):
    """Test that the correct exception is raised when the expected results of
    a target are missing."""

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization.force_field = force_field

    with temporary_cd():

        # Save the expected results files.
        os.makedirs(os.path.join("result", "optimize"))

        for target in optimization.targets:
            os.makedirs(os.path.join("targets", target.id))
            os.makedirs(os.path.join("optimize.tmp", target.id, "iter_0000"))

            lp_dump(
                {"X": 1.0},
                os.path.join("optimize.tmp", target.id, "iter_0000",
                             "objective.p"),
            )

        with open("optimization.json", "w") as file:
            file.write(optimization.json())

        monkeypatch.setattr(OptimizationAnalysisFactory,
                            "_load_refit_force_field", lambda: force_field)

        # Mock a missing target result.
        monkeypatch.setattr(
            EvaluatorAnalysisFactory,
            "analyze",
            lambda *args, **kwargs: EvaluatorTargetResult(
                objective_function=1.0, statistic_entries=[]),
        )
        monkeypatch.setattr(
            RechargeAnalysisFactory,
            "analyze",
            lambda *args, **kwargs: None,
        )

        with pytest.raises(RuntimeError) as error_info:
            OptimizationAnalysisFactory.analyze(True)

        assert "The results of the recharge-target-1 target could not be found" in str(
            error_info.value)
Example #8
    def create_model(cls, include_children=False, index=1):

        optimization = create_optimization(
            "project-1",
            "study-1",
            f"optimization-{index}",
            targets=[
                create_evaluator_target("evaluator-target-1", ["data-set-1"]),
                create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
            ],
        )

        return optimization
Example #9
def optimization(force_field) -> Optimization:
    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization.force_field = force_field

    return optimization
Example #10
def test_prepare_restart_finished(caplog):

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
            create_recharge_target("recharge-target-2", ["qc-data-set-1"]),
        ],
    )

    with temporary_cd():

        directories = [
            os.path.join("optimize.tmp", "recharge-target-1", "iter_0000"),
            os.path.join("optimize.tmp", "recharge-target-1", "iter_0001"),
            os.path.join("optimize.tmp", "recharge-target-2", "iter_0000"),
            os.path.join("optimize.tmp", "recharge-target-2", "iter_0001"),
        ]

        for directory in directories:

            os.makedirs(directory)

            for file_name in [
                    "mvals.txt", "force-field.offxml", "objective.p"
            ]:

                with open(os.path.join(directory, file_name), "w") as file:
                    file.write("")

        assert len(glob(os.path.join("optimize.tmp", "recharge-target-1",
                                     "*"))) == 2
        assert len(glob(os.path.join("optimize.tmp", "recharge-target-2",
                                     "*"))) == 2

        with caplog.at_level(logging.INFO):
            _prepare_restart(optimization)

        assert len(glob(os.path.join("optimize.tmp", "recharge-target-1",
                                     "*"))) == 2
        assert len(glob(os.path.join("optimize.tmp", "recharge-target-2",
                                     "*"))) == 2

        assert (
            "2 iterations had previously been completed. The optimization will be "
            "restarted from iteration 0002") in caplog.text
Example #11
def test_run_command(restart: bool, create_save: bool, runner, monkeypatch):

    from nonbonded.cli.projects.optimization import run

    monkeypatch.setattr(run, "_remove_previous_files", lambda: print("REMOVE"))
    monkeypatch.setattr(run, "_prepare_restart",
                        lambda *args: print("PREPARE"))
    monkeypatch.setattr(subprocess, "check_call", lambda *args, **kwargs: None)

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [create_recharge_target("recharge-target-1", ["qc-data-set-1"])],
    )

    # Save a copy of the optimization model.
    with temporary_cd():

        with open("optimization.json", "w") as file:
            file.write(optimization.json())

        if create_save:

            with open("optimize.sav", "w") as file:
                file.write("")

        arguments = [] if not restart else ["--restart", True]

        result = runner.invoke(run_command(), arguments)

        if restart and create_save:
            assert "REMOVE" not in result.output
            assert "PREPARE" in result.output

        elif restart and not create_save:
            assert "REMOVE" in result.output
            assert "PREPARE" not in result.output

        if not restart:
            assert "REMOVE" in result.output
            assert "PREPARE" not in result.output

    if result.exit_code != 0:
        raise result.exception
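
test_run_command takes restart and create_save arguments, yet no parametrize decorator appears in the listing; it has presumably been stripped. A hypothetical reconstruction (the exact parameter combinations are an assumption) covering the branches asserted above:

import pytest


@pytest.mark.parametrize(
    "restart, create_save",
    [(False, False), (False, True), (True, False), (True, True)],
)
def test_run_command(restart: bool, create_save: bool, runner, monkeypatch):
    ...  # body as shown above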
Example #12
def test_plot(force_field, monkeypatch):

    from nonbonded.library.plotting.seaborn import optimization as optimization_module

    # Mock the required file inputs
    data_set = create_data_set("data-set-1", 1)
    data_set_collection = DataSetCollection(data_sets=[data_set])

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [create_evaluator_target("target-1", ["data-set-1"])],
    )
    optimization_result = create_optimization_result("project-1", "study-1",
                                                     "optimization-1",
                                                     ["target-1"], [])

    # Mock the already tested plotting methods.
    monkeypatch.setattr(optimization_module, "plot_parameter_changes",
                        lambda *args: None)
    monkeypatch.setattr(optimization_module, "plot_objective_per_iteration",
                        lambda *args: None)
    monkeypatch.setattr(optimization_module, "plot_rmse_change",
                        lambda *args: None)

    if "nonbonded.library.factories.plots.optimization" in sys.modules:
        sys.modules.pop("nonbonded.library.factories.plots.optimization")

    from nonbonded.library.factories.plots.optimization import OptimizationPlotFactory

    with temporary_cd():

        # Save the inputs in their expected locations.
        data_set_collection.to_file("test-set-collection.json")
        optimization.to_file("optimization.json")
        os.makedirs("analysis")
        optimization_result.to_file(
            os.path.join("analysis", "optimization-results.json"))

        OptimizationPlotFactory.plot([""], "png")

        assert os.path.isdir("plots")
Example #13
def test_study_with_children(requests_mock, monkeypatch):

    # Overwrite the child factories so we don't need to provide
    # sensible children and wait for them to be built.
    def mock_generate(model, **_):
        os.makedirs(model.id, exist_ok=True)

    monkeypatch.setattr(OptimizationInputFactory, "generate", mock_generate)
    monkeypatch.setattr(BenchmarkInputFactory, "generate", mock_generate)

    mock_get_data_set(requests_mock, create_data_set("data-set-1"))

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        targets=[create_evaluator_target("evaluator-target", ["data-set-1"])],
    )
    benchmark = create_benchmark(
        "project-1",
        "study-1",
        "benchmark-1",
        ["data-set-1"],
        None,
        create_force_field(),
    )

    study = create_study("project-1", "study-1")
    study.optimizations = [optimization]
    study.benchmarks = [benchmark]

    mock_get_study(requests_mock, study)

    with temporary_cd():

        InputFactory.generate(study, "test-env", "12:34", "lilac-dask", 8000, 1, False)

        assert os.path.isdir(study.id)
        assert os.path.isdir(os.path.join(study.id, "optimizations"))
        assert os.path.isdir(os.path.join(study.id, "benchmarks"))

        assert os.path.isdir(os.path.join(study.id, "optimizations", optimization.id))
        assert os.path.isdir(os.path.join(study.id, "benchmarks", benchmark.id))
Example #14
def commit_optimization(
    db: Session,
) -> Tuple[Project, Study, Optimization, DataSetCollection,
           QCDataSetCollection]:
    """Commits a new project and study to the current session and appends an
    optimization onto the study. Additionally, this function commits a data set
    collection and a QC data set collection to the session for the optimization
    targets to train against.

    Parameters
    ----------
    db
        The current database session.
    """

    training_set = commit_data_set_collection(db)
    training_set_ids = [x.id for x in training_set.data_sets]

    qc_data_set = commit_qc_data_set_collection(db)
    qc_data_set_ids = [x.id for x in qc_data_set.data_sets]

    study = create_study("project-1", "study-1")
    study.optimizations = [
        create_optimization(
            "project-1",
            "study-1",
            "optimization-1",
            [
                create_evaluator_target("evaluator-target-1",
                                        training_set_ids),
                create_recharge_target("recharge-target-1", qc_data_set_ids),
            ],
        )
    ]

    project = create_project(study.project_id)
    project.studies = [study]

    db_project = ProjectCRUD.create(db, project)
    db.add(db_project)
    db.commit()

    project = ProjectCRUD.db_to_model(db_project)
    return project, study, study.optimizations[0], training_set, qc_data_set
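
A hypothetical illustration of how this helper might be used in a database test (the db session fixture name is assumed, matching the other examples in this listing):

def test_commit_optimization(db):

    project, study, optimization, training_set, qc_data_set = commit_optimization(db)

    # The committed optimization carries one evaluator and one recharge target
    # which train against the committed data set collections.
    assert optimization.id == "optimization-1"
    assert len(optimization.targets) == 2
    assert len(training_set.data_sets) > 0
    assert len(qc_data_set.data_sets) > 0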
Example #15
def test_generate_validate_data_sets(reference_data_sets, expected_raises):

    model = create_optimization(
        "mock-project-1",
        "mock-study-1",
        "mock-optimization-1",
        targets=[create_evaluator_target("phys-prop-1", ["data-set-1"])],
    )

    with temporary_cd():

        with expected_raises:

            InputFactory._generate(
                model,
                "mock-env",
                "01:00",
                "lilac-dask",
                8000,
                1,
                False,
                reference_data_sets,
                None,
            )
Example #16
def test_prepare_restart_unfinished(partial_restart, caplog):

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
            create_recharge_target("recharge-target-2", ["qc-data-set-1"]),
        ],
    )

    with temporary_cd():

        directories = [
            os.path.join("optimize.tmp", "recharge-target-1", "iter_0000"),
            os.path.join("optimize.tmp", "recharge-target-2", "iter_0000"),
            os.path.join("optimize.tmp", "recharge-target-1", "iter_0001"),
            os.path.join("optimize.tmp", "recharge-target-2", "iter_0001"),
        ]

        for index, directory in enumerate(directories):

            os.makedirs(directory)

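            # Leave the final iteration incomplete: the last directory never
            # receives an objective.p file and, unless a partial restart is
            # being mimicked, is also missing its force-field.offxml file.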
            expected_files = ["mvals.txt"]

            if index < 3:
                expected_files.append("objective.p")
            if index < (3 if not partial_restart else 4):
                expected_files.append("force-field.offxml")

            for file_name in expected_files:

                with open(os.path.join(directory, file_name), "w") as file:
                    file.write("")

        assert len(glob(os.path.join("optimize.tmp", "recharge-target-1",
                                     "*"))) == 2
        assert len(glob(os.path.join("optimize.tmp", "recharge-target-2",
                                     "*"))) == 2

        with caplog.at_level(logging.INFO):
            _prepare_restart(optimization)

        expected_directories = 2 if partial_restart else 1

        assert (len(
            glob(os.path.join("optimize.tmp", "recharge-target-1",
                              "*"))) == expected_directories)
        assert (len(
            glob(os.path.join("optimize.tmp", "recharge-target-2",
                              "*"))) == expected_directories)

        if not partial_restart:
            assert (
                f"Removing the {directories[2]} directory which was not expected to be "
                f"present") in caplog.text
            assert (
                f"Removing the {directories[3]} directory which was not expected to be "
                f"present") in caplog.text
        else:
            assert "Removing the" not in caplog.text

        assert (
            "1 iterations had previously been completed. The optimization will be "
            f"restarted from iteration {'0000' if not partial_restart else '0001'}"
        ) in caplog.text
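
The partial_restart argument likewise suggests a stripped parametrize decorator; a hypothetical reconstruction:

import pytest


@pytest.mark.parametrize("partial_restart", [False, True])
def test_prepare_restart_unfinished(partial_restart, caplog):
    ...  # body as shown above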
Example #17
def test_analysis(monkeypatch, force_field, dummy_conda_env):

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization.force_field = force_field

    with temporary_cd(os.path.dirname(dummy_conda_env)):

        # Save the expected results files.
        os.makedirs(os.path.join("result", "optimize"))

        for target in optimization.targets:
            os.makedirs(os.path.join("targets", target.id))

            os.makedirs(os.path.join("optimize.tmp", target.id, "iter_0000"))
            os.makedirs(os.path.join("optimize.tmp", target.id, "iter_0001"))

            # Add enough output files to make it look like only one full iteration has
            # finished.
            lp_dump(
                {"X": 1.0},
                os.path.join("optimize.tmp", target.id, "iter_0000",
                             "objective.p"),
            )

        lp_dump(
            {"X": 1.0},
            os.path.join("optimize.tmp", optimization.targets[0].id,
                         "iter_0001", "objective.p"),
        )

        with open("optimization.json", "w") as file:
            file.write(optimization.json())

        optimization.force_field.to_openff().to_file(
            os.path.join("result", "optimize", "force-field.offxml"))

        # Mock the already tested functions.
        monkeypatch.setattr(OptimizationAnalysisFactory,
                            "_load_refit_force_field", lambda: force_field)
        monkeypatch.setattr(
            EvaluatorAnalysisFactory,
            "analyze",
            lambda *args, **kwargs: EvaluatorTargetResult(
                objective_function=1.0, statistic_entries=[]),
        )
        monkeypatch.setattr(
            RechargeAnalysisFactory,
            "analyze",
            lambda *args, **kwargs: RechargeTargetResult(
                objective_function=1.0, statistic_entries=[]),
        )

        OptimizationAnalysisFactory.analyze(True)

        for target in optimization.targets:

            assert os.path.isfile(
                os.path.join("analysis", target.id, "iteration-0.json"))
            assert not os.path.isfile(
                os.path.join("analysis", target.id, "iteration-1.json"))

        result = OptimizationResult.parse_file(
            os.path.join("analysis", "optimization-results.json"))

        assert len(result.target_results) == 1
        assert all(target.id in result.target_results[0]
                   for target in optimization.targets)
        assert result.refit_force_field.inner_content == force_field.inner_content
Example #18
def create_dependencies(db: Session, dependencies: List[str]):
    """Create any dependencies such as parent studies, projects, or data sets and
    commit them to the database.

    Parameters
    ----------
    db
        The current database session.
    dependencies
        The required dependencies.
    """

    project = None
    data_set_ids = []
    qc_data_set_ids = []

    if "data-set" in dependencies:
        data_set_ids.append("data-set-1")
    if "qc-data-set" in dependencies:
        qc_data_set_ids.append("qc-data-set-1")

    for data_set_id in data_set_ids:
        data_set = create_data_set(data_set_id)
        db_data_set = DataSetCRUD.create(db, data_set)
        db.add(db_data_set)

    for qc_data_set_id in qc_data_set_ids:
        qc_data_set = create_qc_data_set(qc_data_set_id)
        db_qc_data_set = QCDataSetCRUD.create(db, qc_data_set)
        db.add(db_qc_data_set)

    db.commit()

    if ("project" in dependencies or "study" in dependencies
            or "evaluator-target" in dependencies
            or "recharge-target" in dependencies
            or "benchmark" in dependencies):
        project = create_project("project-1")

    if ("study" in dependencies or "evaluator-target" in dependencies
            or "recharge-target" in dependencies
            or "benchmark" in dependencies):
        project.studies = [create_study(project.id, "study-1")]

    if "evaluator-target" in dependencies or "recharge-target" in dependencies:

        targets = []

        if "evaluator-target" in dependencies:
            targets.append(
                create_evaluator_target("evaluator-target-1", ["data-set-1"]))
        if "recharge-target" in dependencies:
            targets.append(
                create_recharge_target("recharge-target-1", ["qc-data-set-1"]))

        optimization = create_optimization(project.id, project.studies[0].id,
                                           "optimization-1", targets)

        project.studies[0].optimizations = [optimization]

    if "benchmark" in dependencies:
        benchmark = create_benchmark(
            project.id,
            project.studies[0].id,
            "benchmark-1",
            ["data-set-1"],
            None,
            create_force_field(),
        )

        project.studies[0].benchmarks = [benchmark]

    if project is not None:
        db_project = ProjectCRUD.create(db, project)
        db.add(db_project)
        db.commit()
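
A hypothetical call illustrating the convention this helper expects; requesting an evaluator target implicitly creates the parent project, study, and optimization, while the data set that the target references must be requested explicitly:

def test_example_dependencies(db):

    # Commits "data-set-1" followed by "project-1" / "study-1" / "optimization-1"
    # (containing a single evaluator target) to the current session.
    create_dependencies(db, ["data-set", "evaluator-target"])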
Example #19
def study_model_perturbations():

    updated_optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [create_evaluator_target("evaluator-target-1", ["data-set-1"])],
    )
    updated_optimization.max_iterations += 1

    updated_benchmark = create_benchmark(
        "project-1",
        "study-1",
        "benchmark-1",
        ["data-set-1"],
        None,
        create_force_field(),
    )
    updated_benchmark.name = "updated"

    return [
        ({
            "name": "updated"
        }, lambda db: [], does_not_raise()),
        ({
            "description": "updated"
        }, lambda db: [], does_not_raise()),
        # Delete an optimization.
        (
            {
                "optimizations": []
            },
            lambda db: [db.query(models.Optimization.id).count() == 0],
            does_not_raise(),
        ),
        # Update an optimization.
        (
            {
                "optimizations": [updated_optimization]
            },
            lambda db: [db.query(models.Optimization.id).count() == 1],
            does_not_raise(),
        ),
        # Add an optimization.
        (
            {
                "optimizations": [
                    create_optimization(
                        "project-1",
                        "study-1",
                        f"optimization-{index + 1}",
                        [
                            create_evaluator_target("evaluator-target-1",
                                                    ["data-set-1"])
                        ],
                    ) for index in range(2)
                ]
            },
            lambda db: [db.query(models.Optimization.id).count() == 2],
            does_not_raise(),
        ),
        # Delete a benchmark.
        (
            {
                "benchmarks": []
            },
            lambda db: [db.query(models.Benchmark.id).count() == 0],
            does_not_raise(),
        ),
        # Update a benchmark.
        (
            {
                "benchmarks": [updated_benchmark]
            },
            lambda db: [db.query(models.Benchmark.id).count() == 1],
            does_not_raise(),
        ),
        # Add a benchmark.
        (
            {
                "benchmarks": [
                    create_benchmark(
                        "project-1",
                        "study-1",
                        f"benchmark-{index + 1}",
                        ["data-set-1"],
                        None,
                        create_force_field(),
                    ) for index in range(2)
                ]
            },
            lambda db: [db.query(models.Benchmark.id).count() == 2],
            does_not_raise(),
        ),
    ]
Example #20
@pytest.mark.parametrize(
    "present, expected_class",
    [("lilac-local", DaskLocalClusterConfig), ("lilac-dask", DaskHPCClusterConfig)],
)
def test_generate_evaluator_config(present, expected_class):

    config = InputFactory._generate_evaluator_config(present, "env", 1, 8000)
    assert isinstance(config.backend_config, expected_class)


@pytest.mark.parametrize(
    "model, optimization_result, expected_raises",
    [
        (
            create_optimization(
                "mock-project-1",
                "mock-study-1",
                "mock-optimization-2",
                targets=[create_evaluator_target("phys-prop-1", ["data-set-1"])],
                optimization_result_id="mock-optimization-1",
            ),
            create_optimization_result(
                "mock-project-1",
                "mock-study-1",
                "mock-optimization-1",
                ["phys-prop-1"],
                [],
            ),
            does_not_raise(),
        ),
        (
            create_optimization(
                "mock-project-1",
Example #21
    def test_update_delete_with_dependant(self, db: Session,
                                          with_children: bool):
        """Test that an optimization which has dependants can only be
        updated / deleted once the dependants have been deleted.
        """

        # Create the model.
        create_dependencies(db, self.dependencies())
        model = self.create_model(True)

        db_model = self.crud_class().create(db, model)
        db.add(db_model)
        db.commit()

        # Create the results
        db_result = OptimizationResultCRUD.create(
            db,
            create_optimization_result(
                model.project_id,
                model.study_id,
                model.id,
                [
                    target.id for target in model.targets
                    if isinstance(target, EvaluatorTarget)
                ],
                [
                    target.id for target in model.targets
                    if isinstance(target, RechargeTarget)
                ],
            ),
        )
        db.add(db_result)
        db.commit()

        if with_children:

            db_benchmark = BenchmarkCRUD.create(
                db,
                create_benchmark(
                    model.project_id,
                    model.study_id,
                    "benchmark-1",
                    ["data-set-1"],
                    model.id,
                    None,
                ),
            )
            db.add(db_benchmark)

            db_optimization = OptimizationCRUD.create(
                db,
                Optimization(
                    **create_optimization(
                        model.project_id,
                        model.study_id,
                        "optimization-2",
                        [
                            create_evaluator_target("evaluator-target-1",
                                                    ["data-set-1"])
                        ],
                    ).dict(exclude={"force_field", "optimization_id"}),
                    force_field=None,
                    optimization_id="optimization-1",
                ),
            )
            db.add(db_optimization)
            db.commit()

        error_matches = (["results"] if not with_children else
                         ["benchmark-1", "optimization-2"])

        with pytest.raises(UnableToDeleteError) as error_info:
            OptimizationCRUD.delete(db, model.project_id, model.study_id,
                                    model.id)

        assert all(error_match in str(error_info.value)
                   for error_match in error_matches)

        with pytest.raises(UnableToUpdateError) as error_info:
            OptimizationCRUD.update(db, model)

        assert all(error_match in str(error_info.value)
                   for error_match in error_matches)

        # Delete the dependants and try again.
        if with_children:

            BenchmarkCRUD.delete(db, model.project_id, model.study_id,
                                 "benchmark-1")
            OptimizationCRUD.delete(db, model.project_id, model.study_id,
                                    "optimization-2")

            db.commit()

        OptimizationResultCRUD.delete(db, model.project_id, model.study_id,
                                      model.id)
        db.commit()

        OptimizationCRUD.update(db, model)
        db.commit()
        OptimizationCRUD.delete(db, model.project_id, model.study_id, model.id)
        db.commit()
Example #22
import pytest

from nonbonded.library.utilities import temporary_cd
from nonbonded.tests.utilities.comparison import does_not_raise
from nonbonded.tests.utilities.factory import (
    create_evaluator_target,
    create_optimization,
    create_recharge_target,
)


@pytest.mark.parametrize(
    "optimization, server_config, expected_raises",
    [
        (
            create_optimization(
                "project-1",
                "study-1",
                "optimization-1",
                [create_recharge_target("recharge-target", ["qc-data-set-1"])],
            ),
            None,
            does_not_raise(),
        ),
        (
            create_optimization(
                "project-1",
                "study-1",
                "optimization-1",
                [create_evaluator_target("evaluator-target", ["data-set-1"])],
            ),
            None,
            pytest.raises(RuntimeError),
        ),