Example 1
def test_run_calculations(request_error, results_error, expected_raises,
                          monkeypatch):

    monkeypatch.setattr(EvaluatorServerConfig, "to_backend",
                        lambda *_: does_not_raise())
    monkeypatch.setattr(EvaluatorServerConfig, "to_server",
                        lambda *_: does_not_raise())

    empty_request = Request()
    empty_result = RequestResult()

    monkeypatch.setattr(
        EvaluatorClient,
        "request_estimate",
        lambda *args, **kwargs: (empty_request, request_error),
    )
    monkeypatch.setattr(
        Request,
        "results",
        lambda *args, **kwargs: (empty_result, results_error),
    )

    server_config = EvaluatorServerConfig(
        backend_config=DaskLocalClusterConfig(
            resources_per_worker=ComputeResources()))

    with expected_raises as error_info:
        # noinspection PyTypeChecker
        _run_calculations(None, None, 1, None, server_config)

    error_value = None if error_info is None else error_info.value

    assert error_value == (
        request_error if request_error is not None else
        results_error if results_error is not None else None)
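
Every example in this collection passes does_not_raise() wherever no exception is expected, mirroring the interface of pytest.raises. The helper's definition is not part of any excerpt; a minimal sketch of the conventional pattern from the pytest documentation on parametrized expected exceptions is shown below (on Python 3.7+ contextlib.nullcontext can be used in its place).

from contextlib import contextmanager


@contextmanager
def does_not_raise():
    # No-op context manager: the "nothing should be raised" counterpart to
    # pytest.raises() in parametrized expectation columns. This follows the
    # pytest docs pattern, not necessarily this project's exact definition.
    yield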
Example 2
def benchmark_model_perturbations():

    return [
        ({
            "name": "updated"
        }, lambda db: [], does_not_raise()),
        ({
            "description": "updated"
        }, lambda db: [], does_not_raise()),
        (
            {
                "analysis_environments": [ChemicalEnvironment.Hydroxy]
            },
            lambda db: [],
            does_not_raise(),
        ),
        # Test updating the test_sets.
        ({
            "test_set_ids": ["data-set-1"]
        }, lambda db: [], does_not_raise()),
        (
            {
                "test_set_ids": ["data-set-2"]
            },
            lambda db: [],
            pytest.raises(DataSetNotFoundError),
        ),
    ]
Example 3
def test_launch_required_services(optimization, server_config, expected_raises,
                                  monkeypatch):

    monkeypatch.setattr(EvaluatorServerConfig, "to_backend",
                        lambda *_: does_not_raise())
    monkeypatch.setattr(EvaluatorServerConfig, "to_server",
                        lambda *_: does_not_raise())

    with temporary_cd():

        if server_config is not None:

            with open("server-config.json", "w") as file:
                file.write(server_config.json())

            server_config = "server-config.json"

        with expected_raises:

            with _launch_required_services(optimization, server_config):
                pass
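
The parametrization that drives test_launch_required_services is not part of this excerpt. The sketch below is a hypothetical illustration of what it could look like, built only from factories that appear elsewhere in these examples (create_optimization, create_evaluator_target, EvaluatorServerConfig); the concrete cases are assumptions rather than the project's real ones.

# Hypothetical parametrization; the project's actual cases are not shown above.
@pytest.mark.parametrize(
    "optimization, server_config, expected_raises",
    [
        # No explicit server config file.
        (
            create_optimization(
                "project-1",
                "study-1",
                "optimization-1",
                [create_evaluator_target("evaluator-target-1", ["data-set-1"])],
            ),
            None,
            does_not_raise(),
        ),
        # An explicit config, serialized to disk by the test body above.
        (
            create_optimization(
                "project-1",
                "study-1",
                "optimization-1",
                [create_evaluator_target("evaluator-target-1", ["data-set-1"])],
            ),
            EvaluatorServerConfig(
                backend_config=DaskLocalClusterConfig(
                    resources_per_worker=ComputeResources())),
            does_not_raise(),
        ),
    ],
)
def test_launch_required_services(optimization, server_config, expected_raises,
                                  monkeypatch):
    ...  # body as in the excerpt above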
Example 4
def project_model_perturbations():

    updated_author = create_author()
    updated_author.email = "*****@*****.**"

    updated_study = create_study("project-1", "study-1")
    updated_study.name = "updated"

    return [
        ({
            "name": "updated"
        }, lambda db: [], does_not_raise()),
        ({
            "description": "updated"
        }, lambda db: [], does_not_raise()),
        (
            {
                "authors": [create_author(), updated_author]
            },
            lambda db: [
                db.query(models.Author.email).count() == 2,
                db.query(author_projects_table).count() == 2,
            ],
            does_not_raise(),
        ),
        # Delete a study.
        (
            {
                "studies": [create_study("project-1", "study-1")]
            },
            lambda db: [db.query(models.Study.id).count() == 1],
            does_not_raise(),
        ),
        # Update a study.
        (
            {
                "studies":
                [updated_study,
                 create_study("project-1", "study-2")]
            },
            lambda db: [db.query(models.Study.id).count() == 2],
            does_not_raise(),
        ),
        # Add a study.
        (
            {
                "studies": [
                    create_study("project-1", f"study-{index + 1}")
                    for index in range(3)
                ]
            },
            lambda db: [db.query(models.Study.id).count() == 3],
            does_not_raise(),
        ),
    ]
Example 5
     dict(
         project_id="project-1",
         study_id="study-1",
         id="optimization-1",
         target_results={
             0: {
                 "target-1":
                 RechargeTargetResult(
                     objective_function=0.0,
                     statistic_entries=[create_statistic()],
                 )
             }
         },
         refit_force_field=create_force_field(),
     ),
     does_not_raise(),
 ),
 (
     dict(
         project_id="project-1",
         study_id="study-1",
         id="optimization-1",
         target_results={},
         refit_force_field=create_force_field(),
     ),
     pytest.raises(ValidationError),
 ),
 (
     dict(
         project_id="project-1",
         study_id="study-1",
Example 6
    create_qc_data_set,
    create_study,
)
from nonbonded.tests.utilities.mock import (
    mock_get_data_set,
    mock_get_project,
    mock_get_qc_data_set,
    mock_get_study,
)


@pytest.mark.usefixtures("change_api_url")
@pytest.mark.parametrize(
    "data_set_ids, data_set_type, expected_data_set_ids, expected_raises",
    [
        (["data-set-2"], DataSet, [2], does_not_raise()),
        (["data-set-3"], DataSet, [4], does_not_raise()),
        (["data-set-2", "data-set-3"], DataSet, [2, 4], does_not_raise()),
        (
            ["data-set-4"],
            DataSet,
            [],
            pytest.raises(NoMockAddress, match="phys-prop/data-set-4"),
        ),
        (["data-set-1"], QCDataSet, [], does_not_raise()),
        (["data-set-2"], QCDataSet, [], does_not_raise()),
        (["data-set-1", "data-set-2"], QCDataSet, [], does_not_raise()),
        (
            ["data-set-4"],
            QCDataSet,
            [],
Example 7
        assert expected_message in caplog.text
    else:
        assert caplog.text == ""

    expected_n_data_points = (
        1 if existing_result is None
        else len(existing_result.unsuccessful_properties))

    assert len(data_set) == expected_n_data_points


@pytest.mark.parametrize(
    "force_fields, expected_raises, expected_error_message",
    [
        (
            [("force-field.offxml", ForceField("openff-1.0.0.offxml"))],
            does_not_raise(),
            None,
        ),
        ([("force-field.json", TLeapForceFieldSource())
          ], does_not_raise(), None),
        (
            [
                ("force-field.offxml", ForceField("openff-1.0.0.offxml")),
                ("force-field.json", TLeapForceFieldSource()),
            ],
            pytest.raises(RuntimeError),
            "Two valid force fields were found",
        ),
        (
            [],
            pytest.raises(RuntimeError),
            "No valid force field could be found.",
        ),
    ],
Example 8
class TestBenchmarkCRUD(BaseCRUDTest):
    @classmethod
    def crud_class(cls):
        return BenchmarkCRUD

    @classmethod
    def dependencies(cls):
        return [
            "project",
            "study",
            "evaluator-target",
            "data-set",
        ]

    @classmethod
    def create_model(cls, include_children=False, index=1):

        benchmark = create_benchmark(
            "project-1",
            "study-1",
            f"benchmark-{index}",
            ["data-set-1"],
            None,
            create_force_field(),
        )

        return benchmark

    @classmethod
    def model_to_read_kwargs(cls, model):
        return {
            "project_id": model.project_id,
            "study_id": model.study_id,
            "sub_study_id": model.id,
        }

    @classmethod
    def model_to_read_all_kwargs(cls, model):
        return {"project_id": model.project_id, "study_id": model.study_id}

    @classmethod
    def not_found_error(cls):
        return BenchmarkNotFoundError

    @classmethod
    def already_exists_error(cls):
        return BenchmarkExistsError

    @classmethod
    def check_has_deleted(cls, db: Session):

        assert db.query(models.Benchmark.id).count() == 0
        # assert db.query(models.ForceField.id).count() == 0

    @pytest.mark.parametrize("perturbation, database_checks, expected_raise",
                             benchmark_model_perturbations())
    def test_update(self, db: Session, perturbation, database_checks,
                    expected_raise):
        super().test_update(db, perturbation, database_checks,
                            expected_raise)

    @pytest.mark.parametrize(
        "dependencies, expected_error",
        [(["study", "evaluator-target"], StudyNotFoundError)],
    )
    def test_missing_dependencies(self, db: Session, dependencies: List[str],
                                  expected_error):
        super().test_missing_dependencies(db, dependencies, expected_error)

    @pytest.mark.parametrize(
        "optimization_id, create_results, expected_error",
        [
            ("optimization-2", True, pytest.raises(OptimizationNotFoundError)),
            ("optimization-1", True, does_not_raise()),
            ("optimization-1", False, pytest.raises(UnableToCreateError)),
        ],
    )
    def test_create_read_with_optimization(self, db: Session,
                                           optimization_id: str,
                                           create_results: bool,
                                           expected_error):
        """Test that a benchmark can be successfully created and then
        retrieved out again while targeting an optimization, or raises
        the correct error when no results have been created..
        """

        create_dependencies(db, self.dependencies())

        model = self.create_model(True)
        model.force_field = None
        model.optimization_id = optimization_id

        # Create the optimization results
        if create_results:

            db_result = OptimizationResultCRUD.create(
                db,
                create_optimization_result(
                    model.project_id,
                    model.study_id,
                    "optimization-1",
                    ["evaluator-target-1"],
                    [],
                ),
            )
            db.add(db_result)
            db.commit()

        with expected_error:
            self.test_create_read(db, False, model)

    @pytest.mark.parametrize(
        "optimization_id, create_results, expected_error",
        [
            ("optimization-2", True, pytest.raises(OptimizationNotFoundError)),
            ("optimization-1", True, does_not_raise()),
            ("optimization-1", False, pytest.raises(UnableToUpdateError)),
        ],
    )
    def test_update_with_dependant(self, db: Session, optimization_id: str,
                                   create_results: bool, expected_error):
        """Test that a benchmark can be updated to target an optimization
        and then back to a force field.
        """

        create_dependencies(db, self.dependencies())
        model = self.create_model()

        db.add(self.crud_class().create(db, model))
        db.commit()

        # Create the optimization results
        if create_results:
            db_result = OptimizationResultCRUD.create(
                db,
                create_optimization_result(
                    model.project_id,
                    model.study_id,
                    "optimization-1",
                    ["evaluator-target-1"],
                    [],
                ),
            )
            db.add(db_result)
            db.commit()

        # Update the model.
        model.force_field = None
        model.optimization_id = optimization_id

        with expected_error:

            update_and_compare_model(
                db,
                model,
                self.crud_class().update,
                functools.partial(self.crud_class().read,
                                  **self.model_to_read_kwargs(model)),
                self.crud_class().db_to_model,
            )

        # The initial force field (and, when results were created, the refit
        # force field) should not have been deleted.
        assert db.query(models.ForceField.id).count() == (
            2 if create_results else 1)

        # Update the model back.
        model.force_field = create_force_field()
        model.optimization_id = None

        update_and_compare_model(
            db,
            model,
            self.crud_class().update,
            functools.partial(self.crud_class().read,
                              **self.model_to_read_kwargs(model)),
            self.crud_class().db_to_model,
        )

        assert db.query(models.ForceField.id).count() == (
            2 if create_results else 1)

    @pytest.mark.parametrize(
        "create_results, expected_error",
        [(False, does_not_raise()),
         (True, pytest.raises(UnableToDeleteError))],
    )
    def test_delete_with_dependent(self, db: Session, create_results: bool,
                                   expected_error):
        """Test that a benchmark cannot be deleted until its results have
        also been deleted.
        """

        create_dependencies(db, self.dependencies())
        model = self.create_model()

        db.add(self.crud_class().create(db, model))
        db.commit()

        # Create the benchmark results
        if create_results:

            data_set = create_data_set("data-set-1")
            data_set.entries[0].id = 1

            db_result = BenchmarkResultCRUD.create(
                db,
                create_benchmark_result(model.project_id, model.study_id,
                                        model.id, data_set),
            )
            db.add(db_result)
            db.commit()

        # Delete the model.
        with expected_error:
            self.crud_class().delete(db, model.project_id, model.study_id,
                                     model.id)

        if not create_results:
            return

        BenchmarkResultCRUD.delete(db, model.project_id, model.study_id,
                                   model.id)
        db.commit()

        self.crud_class().delete(db, model.project_id, model.study_id,
                                 model.id)
        db.commit()

        self.check_has_deleted(db)
Example 9
def optimization_model_perturbations():

    updated_engine_delete_prior = ForceBalance(
        priors={"vdW/Atom/epsilon": 0.1})
    updated_engine_update_prior = ForceBalance(priors={
        "vdW/Atom/epsilon": 0.2,
        "vdW/Atom/sigma": 2.0
    })
    updated_engine_add_prior = ForceBalance(priors={
        "vdW/Atom/epsilon": 0.1,
        "vdW/Atom/sigma": 2.0,
        "vdW/Atom/r_min": 2.0
    })

    invalid_evaluator_target = create_evaluator_target("evaluator-target-1",
                                                       ["data-set-999"])
    invalid_recharge_target = create_recharge_target("recharge-target-1",
                                                     ["qc-data-set-999"])

    return [
        ({
            "name": "updated"
        }, lambda db: [], does_not_raise()),
        ({
            "description": "updated"
        }, lambda db: [], does_not_raise()),
        ({
            "max_iterations": 999
        }, lambda db: [], does_not_raise()),
        (
            {
                "analysis_environments": [ChemicalEnvironment.Hydroxy]
            },
            lambda db: [],
            does_not_raise(),
        ),
        # Test updating the force field.
        (
            {
                "force_field": create_force_field("updated")
            },
            lambda db: [
                "updated" in db.query(models.ForceField.inner_content).first()[
                    0],
                db.query(models.ForceField.id).count() == 1,
            ],
            does_not_raise(),
        ),
        # Test updating the parameters to train.
        (
            {
                "parameters_to_train": [
                    Parameter(handler_type="vdW",
                              smirks="[#6:1]",
                              attribute_name="epsilon"),
                ]
            },
            lambda db: [db.query(models.Parameter.id).count() == 1],
            does_not_raise(),
        ),
        (
            {
                "parameters_to_train": [
                    Parameter(handler_type="vdW",
                              smirks="[#6:1]",
                              attribute_name="epsilon"),
                    Parameter(handler_type="vdW",
                              smirks="[#6:1]",
                              attribute_name="sigma"),
                    Parameter(handler_type="vdW",
                              smirks="[#1:1]",
                              attribute_name="sigma"),
                ]
            },
            lambda db: [db.query(models.Parameter.id).count() == 3],
            does_not_raise(),
        ),
        # Test updating an engine's priors
        (
            {
                "engine": updated_engine_delete_prior
            },
            lambda db: [db.query(models.ForceBalancePrior.id).count() == 1],
            does_not_raise(),
        ),
        (
            {
                "engine": updated_engine_update_prior
            },
            lambda db: [db.query(models.ForceBalancePrior.id).count() == 2],
            does_not_raise(),
        ),
        (
            {
                "engine": updated_engine_add_prior
            },
            lambda db: [db.query(models.ForceBalancePrior.id).count() == 3],
            does_not_raise(),
        ),
        # Test deleting a target
        (
            {
                "targets": [
                    create_evaluator_target("evaluator-target-1",
                                            ["data-set-1"])
                ]
            },
            lambda db: [
                db.query(models.EvaluatorTarget.id).count() == 1,
                db.query(models.RechargeTarget.id).count() == 0,
            ],
            does_not_raise(),
        ),
        (
            {
                "targets": [
                    create_recharge_target("recharge-target-1",
                                           ["qc-data-set-1"])
                ]
            },
            lambda db: [
                db.query(models.EvaluatorTarget.id).count() == 0,
                db.query(models.RechargeTarget.id).count() == 1,
            ],
            does_not_raise(),
        ),
        # Test adding a target
        (
            {
                "targets": [
                    create_evaluator_target("evaluator-target-1",
                                            ["data-set-1"]),
                    create_evaluator_target("evaluator-target-2",
                                            ["data-set-1"]),
                    create_recharge_target("recharge-target-1",
                                           ["qc-data-set-1"]),
                ]
            },
            lambda db: [
                db.query(models.EvaluatorTarget.id).count() == 2,
                db.query(models.RechargeTarget.id).count() == 1,
            ],
            does_not_raise(),
        ),
        (
            {
                "targets": [
                    create_evaluator_target("evaluator-target-1",
                                            ["data-set-1"]),
                    create_recharge_target("recharge-target-1",
                                           ["qc-data-set-1"]),
                    create_recharge_target("recharge-target-2",
                                           ["qc-data-set-1"]),
                ]
            },
            lambda db: [
                db.query(models.EvaluatorTarget.id).count() == 1,
                db.query(models.RechargeTarget.id).count() == 2,
            ],
            does_not_raise(),
        ),
        # Test invalidly updating a target's training set
        (
            {
                "targets": [invalid_evaluator_target]
            },
            lambda db: [],
            pytest.raises(DataSetNotFoundError),
        ),
        (
            {
                "targets": [invalid_recharge_target]
            },
            lambda db: [],
            pytest.raises(QCDataSetNotFoundError),
        ),
    ]
Example 10
def study_model_perturbations():

    updated_optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [create_evaluator_target("evaluator-target-1", ["data-set-1"])],
    )
    updated_optimization.max_iterations += 1

    updated_benchmark = create_benchmark(
        "project-1",
        "study-1",
        "benchmark-1",
        ["data-set-1"],
        None,
        create_force_field(),
    )
    updated_benchmark.name = "updated"

    return [
        ({
            "name": "updated"
        }, lambda db: [], does_not_raise()),
        ({
            "description": "updated"
        }, lambda db: [], does_not_raise()),
        # Delete an optimization.
        (
            {
                "optimizations": []
            },
            lambda db: [db.query(models.Optimization.id).count() == 0],
            does_not_raise(),
        ),
        # Update an optimization.
        (
            {
                "optimizations": [updated_optimization]
            },
            lambda db: [db.query(models.Optimization.id).count() == 1],
            does_not_raise(),
        ),
        # Add an optimization.
        (
            {
                "optimizations": [
                    create_optimization(
                        "project-1",
                        "study-1",
                        f"optimization-{index + 1}",
                        [
                            create_evaluator_target("evaluator-target-1",
                                                    ["data-set-1"])
                        ],
                    ) for index in range(2)
                ]
            },
            lambda db: [db.query(models.Optimization.id).count() == 2],
            does_not_raise(),
        ),
        # Delete a benchmark.
        (
            {
                "benchmarks": []
            },
            lambda db: [db.query(models.Benchmark.id).count() == 0],
            does_not_raise(),
        ),
        # Update a benchmark.
        (
            {
                "benchmarks": [updated_benchmark]
            },
            lambda db: [db.query(models.Benchmark.id).count() == 1],
            does_not_raise(),
        ),
        # Add a benchmark.
        (
            {
                "benchmarks": [
                    create_benchmark(
                        "project-1",
                        "study-1",
                        f"benchmark-{index + 1}",
                        ["data-set-1"],
                        None,
                        create_force_field(),
                    ) for index in range(2)
                ]
            },
            lambda db: [db.query(models.Benchmark.id).count() == 2],
            does_not_raise(),
        ),
    ]
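
Each *_model_perturbations fixture above yields (perturbation, database_checks, expected_raise) triples that the CRUD test classes plug into a shared test_update, as in Example 8. The base implementation is not included in these excerpts; the sketch below is only an assumption about its shape, reusing the helpers that do appear above (create_dependencies, update_and_compare_model, the crud_class/create_model/model_to_read_kwargs hooks) and pydantic's copy(update=...) to apply the perturbation.

import functools


class BaseCRUDTest:
    # Illustrative sketch only; the real base class lives in the project's
    # test utilities and may differ in detail.
    def test_update(self, db, perturbation, database_checks, expected_raise):

        create_dependencies(db, self.dependencies())
        model = self.create_model(include_children=True)

        db.add(self.crud_class().create(db, model))
        db.commit()

        # Apply the perturbation to the REST model (pydantic copy-with-update).
        perturbed_model = model.copy(update=perturbation)

        with expected_raise:

            update_and_compare_model(
                db,
                perturbed_model,
                self.crud_class().update,
                functools.partial(self.crud_class().read,
                                  **self.model_to_read_kwargs(perturbed_model)),
                self.crud_class().db_to_model,
            )

            # Every database check is a freshly queried boolean condition.
            assert all(database_checks(db))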