Example #1
def optimizations_and_results(
    smirnoff_force_field,
) -> Tuple[List[Optimization], List[OptimizationResult]]:

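    # Create two optimizations that share the same targets and the same
    # initial SMIRNOFF force field.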
    optimization_1 = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization_1.name = "Optimization 1"
    optimization_1.force_field = ForceField.from_openff(smirnoff_force_field)
    optimization_2 = create_optimization(
        "project-1",
        "study-1",
        "optimization-2",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization_2.force_field = ForceField.from_openff(smirnoff_force_field)
    optimization_2.name = "Optimization 2"

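    # Perturb the vdW parameters so that the first refit force field differs
    # from the initial one.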
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].epsilon *= 2
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].sigma *= 3

    optimization_result_1 = create_optimization_result(
        "project-1",
        "study-1",
        "optimization-1",
        ["evaluator-target-1"],
        ["recharge-target-1"],
    )
    optimization_result_1.refit_force_field = ForceField.from_openff(
        smirnoff_force_field
    )

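    # Perturb the parameters again so that the second refit force field
    # differs from the first.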
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].epsilon /= 4
    smirnoff_force_field.get_parameter_handler("vdW").parameters["[#6:1]"].sigma /= 6

    optimization_result_2 = create_optimization_result(
        "project-1",
        "study-1",
        "optimization-2",
        ["evaluator-target-1"],
        ["recharge-target-1"],
    )
    optimization_result_2.refit_force_field = ForceField.from_openff(
        smirnoff_force_field
    )

    return (
        [optimization_1, optimization_2],
        [optimization_result_1, optimization_result_2],
    )
Example #2
    def _create_model(cls, db, create_dependencies=True):

        project_id = "project-1"
        study_id = "study-1"
        optimization_id = "optimization-1"

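        # Optionally commit the parent project, study and optimization so
        # that the result has existing records to attach to.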
        if create_dependencies:

            project, study, optimization, _, _ = commit_optimization(db)

            project_id = project.id
            study_id = study.id
            optimization_id = optimization.id

        optimization_result = create_optimization_result(
            project_id,
            study_id,
            optimization_id,
            ["evaluator-target-1"],
            ["recharge-target-1"],
        )

        return (
            optimization_result,
            {
                "project_id": project_id,
                "study_id": study_id,
                "model_id": optimization_id,
            },
        )
Example #3
    def test_retrieve_force_field(self, requests_mock):

        # Mock a refit force field to retrieve.
        result = create_optimization_result(
            "project-1",
            "study-1",
            "optimization-1",
            ["evaluator-target-1"],
            [],
        )
        result.refit_force_field = ForceField(
            inner_content=('<SMIRNOFF version="0.3" '
                           'aromaticity_model="OEAroModel_MDL"></SMIRNOFF>'))
        mock_get_optimization_result(requests_mock, result)

        # Mock a benchmark which targets the refit force field.
        benchmark = create_benchmark(
            "project-1",
            "study-1",
            "benchmark-1",
            data_set_ids=["data-set-1"],
            optimization_id="optimization-1",
            force_field=None,
        )

        with temporary_cd():

            BenchmarkInputFactory._retrieve_force_field(benchmark, None)
            assert os.path.isfile("force-field.offxml")
Example #4
    def test_create_read_with_optimization(self, db: Session,
                                           optimization_id: str,
                                           create_results: bool,
                                           expected_error):
        """Test that a benchmark can be successfully created and then
        retrieved out again while targeting an optimization, or raises
        the correct error when no results have been created..
        """

        create_dependencies(db, self.dependencies())

        model = self.create_model(True)
        model.force_field = None
        model.optimization_id = optimization_id

        # Create the optimization results
        if create_results:

            db_result = OptimizationResultCRUD.create(
                db,
                create_optimization_result(
                    model.project_id,
                    model.study_id,
                    "optimization-1",
                    ["evaluator-target-1"],
                    [],
                ),
            )
            db.add(db_result)
            db.commit()

        with expected_error:
            self.test_create_read(db, False, model)
Example #5
    def test_retrieve_results(self, optimization, requests_mock):

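        # Mock an optimization result for the factory to retrieve.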
        result = create_optimization_result(
            optimization.project_id,
            optimization.study_id,
            optimization.id,
            ["evaluator-target-1"],
            [],
        )
        mock_get_optimization_result(requests_mock, result)

        with temporary_cd():

            OptimizationInputFactory._retrieve_results(optimization)

            stored_result = OptimizationResult.parse_file(
                os.path.join("analysis", "optimization-results.json"))
            assert stored_result.json() == result.json()
Example #6
def test_plot(force_field, monkeypatch):

    from nonbonded.library.plotting.seaborn import optimization as optimization_module

    # Mock the required file inputs
    data_set = create_data_set("data-set-1", 1)
    data_set_collection = DataSetCollection(data_sets=[data_set])

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [create_evaluator_target("target-1", ["data-set-1"])],
    )
    optimization_result = create_optimization_result("project-1", "study-1",
                                                     "optimization-1",
                                                     ["target-1"], [])

    # Mock the already tested plotting methods.
    monkeypatch.setattr(optimization_module, "plot_parameter_changes",
                        lambda *args: None)
    monkeypatch.setattr(optimization_module, "plot_objective_per_iteration",
                        lambda *args: None)
    monkeypatch.setattr(optimization_module, "plot_rmse_change",
                        lambda *args: None)

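    # Force the factory module to be re-imported so that it binds to the
    # patched plotting functions.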
    if "nonbonded.library.factories.plots.optimization" in sys.modules:
        sys.modules.pop("nonbonded.library.factories.plots.optimization")

    from nonbonded.library.factories.plots.optimization import OptimizationPlotFactory

    with temporary_cd():

        # Save the inputs in their expected locations.
        data_set_collection.to_file("test-set-collection.json")
        optimization.to_file("optimization.json")
        os.makedirs("analysis")
        optimization_result.to_file(
            os.path.join("analysis", "optimization-results.json"))

        OptimizationPlotFactory.plot([""], "png")

        assert os.path.isdir("plots")
Example #7
def commit_optimization_result(
    db: Session,
) -> Tuple[Project, Study, Optimization, DataSetCollection,
           QCDataSetCollection, OptimizationResult, ]:
    """Creates a new optimization result and commits it the current session.

    Parameters
    ----------
    db
        The current data base session.
    """

    # Create the parent optimization
    project, study, optimization, data_set, qc_data_set = commit_optimization(
        db)

    result = create_optimization_result(
        project.id,
        study.id,
        optimization.id,
        [
            target.id for target in optimization.targets
            if isinstance(target, EvaluatorTarget)
        ],
        [
            target.id for target in optimization.targets
            if isinstance(target, RechargeTarget)
        ],
    )

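    # Commit the result to the database and map it back to its model
    # representation.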
    db_result = OptimizationResultCRUD.create(db, result)
    db.add(db_result)
    db.commit()

    result = OptimizationResultCRUD.db_to_model(db_result)
    return project, study, optimization, data_set, qc_data_set, result
Example #8
@pytest.mark.parametrize(
    "model, optimization_result, expected_raises",
    [
        (
            create_optimization(
                "mock-project-1",
                "mock-study-1",
                "mock-optimization-2",
                targets=[create_evaluator_target("phys-prop-1", ["data-set-1"])],
                optimization_result_id="mock-optimization-1",
            ),
            create_optimization_result(
                "mock-project-1",
                "mock-study-1",
                "mock-optimization-1",
                ["phys-prop-1"],
                [],
            ),
            does_not_raise(),
        ),
        (
            create_optimization(
                "mock-project-1",
                "mock-study-1",
                "mock-optimization-2",
                targets=[create_evaluator_target("phys-prop-1", ["data-set-1"])],
                optimization_result_id=None,
            ),
            None,
            does_not_raise(),
Example #9
    def test_update_with_dependant(self, db: Session, optimization_id: str,
                                   create_results: bool, expected_error):
        """Test that a benchmark can be updated to target an optimization
        and then back to a force field.
        """

        create_dependencies(db, self.dependencies())
        model = self.create_model()

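        # Commit the initial model.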
        db.add(self.crud_class().create(db, model))
        db.commit()

        # Create the optimization results
        if create_results:
            db_result = OptimizationResultCRUD.create(
                db,
                create_optimization_result(
                    model.project_id,
                    model.study_id,
                    "optimization-1",
                    ["evaluator-target-1"],
                    [],
                ),
            )
            db.add(db_result)
            db.commit()

        # Update the model.
        model.force_field = None
        model.optimization_id = optimization_id

        with expected_error:

            update_and_compare_model(
                db,
                model,
                self.crud_class().update,
                functools.partial(self.crud_class().read,
                                  **self.model_to_read_kwargs(model)),
                self.crud_class().db_to_model,
            )

        # Neither the refit force field nor the initial force field
        # should be deleted.
        assert db.query(models.ForceField.id).count() == (
            2 if create_results else 1)

        # Update the model back.
        model.force_field = create_force_field()
        model.optimization_id = None

        update_and_compare_model(
            db,
            model,
            self.crud_class().update,
            functools.partial(self.crud_class().read,
                              **self.model_to_read_kwargs(model)),
            self.crud_class().db_to_model,
        )

        assert db.query(models.ForceField.id).count() == (
            2 if create_results else 1)
Example #10
    def test_update_delete_with_dependant(self, db: Session,
                                          with_children: bool):
        """Test that an optimization which has dependants can only be
        updated / deleted once the dependants have been deleted.
        """

        # Create the model.
        create_dependencies(db, self.dependencies())
        model = self.create_model(True)

        db_model = self.crud_class().create(db, model)
        db.add(db_model)
        db.commit()

        # Create the results
        db_result = OptimizationResultCRUD.create(
            db,
            create_optimization_result(
                model.project_id,
                model.study_id,
                model.id,
                [
                    target.id for target in model.targets
                    if isinstance(target, EvaluatorTarget)
                ],
                [
                    target.id for target in model.targets
                    if isinstance(target, RechargeTarget)
                ],
            ),
        )
        db.add(db_result)
        db.commit()

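        # Optionally create a benchmark and a second optimization which both
        # depend on this optimization.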
        if with_children:

            db_benchmark = BenchmarkCRUD.create(
                db,
                create_benchmark(
                    model.project_id,
                    model.study_id,
                    "benchmark-1",
                    ["data-set-1"],
                    model.id,
                    None,
                ),
            )
            db.add(db_benchmark)

            db_optimization = OptimizationCRUD.create(
                db,
                Optimization(
                    **create_optimization(
                        model.project_id,
                        model.study_id,
                        "optimization-2",
                        [
                            create_evaluator_target("evaluator-target-1",
                                                    ["data-set-1"])
                        ],
                    ).dict(exclude={"force_field", "optimization_id"}),
                    force_field=None,
                    optimization_id="optimization-1",
                ),
            )
            db.add(db_optimization)
            db.commit()

        error_matches = (["results"] if not with_children else
                         ["benchmark-1", "optimization-2"])

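        # Deleting or updating the optimization should be blocked while its
        # results or other dependants remain.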
        with pytest.raises(UnableToDeleteError) as error_info:
            OptimizationCRUD.delete(db, model.project_id, model.study_id,
                                    model.id)

        assert all(error_match in str(error_info.value)
                   for error_match in error_matches)

        with pytest.raises(UnableToUpdateError) as error_info:
            OptimizationCRUD.update(db, model)

        assert all(error_match in str(error_info.value)
                   for error_match in error_matches)

        # Delete the dependants and try again.
        if with_children:

            BenchmarkCRUD.delete(db, model.project_id, model.study_id,
                                 "benchmark-1")
            OptimizationCRUD.delete(db, model.project_id, model.study_id,
                                    "optimization-2")

            db.commit()

        OptimizationResultCRUD.delete(db, model.project_id, model.study_id,
                                      model.id)
        db.commit()

        OptimizationCRUD.update(db, model)
        db.commit()
        OptimizationCRUD.delete(db, model.project_id, model.study_id, model.id)
        db.commit()