Example #1
def mock_get_optimization_result(requests_mock,
                                 optimization_result: OptimizationResult):
    """Mock the get optimization result endpoint."""

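    # Register a GET response for the result's endpoint that returns the
    # serialized result.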
    requests_mock.get(
        OptimizationResult._get_endpoint(
            project_id=optimization_result.project_id,
            study_id=optimization_result.study_id,
            model_id=optimization_result.id,
        ),
        text=optimization_result.json(),
    )
Example #2
    def _retrieve_force_field(
            cls, benchmark: Benchmark,
            optimization_result: Optional[OptimizationResult]):
        """Retrieve the force field to benchmark and store it in the current
        directory.
        """

        from openff.evaluator.forcefield import ForceFieldSource
        from openff.toolkit.typing.engines.smirnoff.forcefield import (
            ForceField as OFFForceField,
        )

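        # Prefer a force field attached directly to the benchmark; otherwise
        # take the refit force field from the optimization result.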
        if benchmark.force_field is not None:
            force_field = benchmark.force_field.to_openff()
        else:
            if optimization_result is None:
                optimization_result = OptimizationResult.from_rest(
                    project_id=benchmark.project_id,
                    study_id=benchmark.study_id,
                    model_id=benchmark.optimization_id,
                )

            force_field = optimization_result.refit_force_field.to_openff()

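        # Write the force field to the current directory in a format matching
        # its type.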
        if isinstance(force_field, OFFForceField):
            force_field.to_file("force-field.offxml", io_format="offxml")
        elif isinstance(force_field, ForceFieldSource):
            force_field.json("force-field.json")
Example #3
    def _load_sub_study(cls, directory):

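        # Deserialize the optimization definition and its analysed results
        # from the sub-study directory.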
        optimization = Optimization.parse_file(
            os.path.join(directory, "optimization.json"))

        optimization_result = OptimizationResult.parse_file(
            os.path.join(directory, "analysis", "optimization-results.json"))

        return optimization, optimization_result
Example #4
    def _retrieve_force_field(
        cls,
        optimization: Optimization,
        optimization_result: Optional[OptimizationResult],
    ) -> ForceField:

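        # Retrieve the result from the RESTful API if one was not provided.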
        if optimization_result is None:
            optimization_result = OptimizationResult.from_rest(
                project_id=optimization.project_id,
                study_id=optimization.study_id,
                model_id=optimization.optimization_id,
            )

        return optimization_result.refit_force_field
Example #5
    def test_retrieve_results(self, optimization, requests_mock):

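        # Create a result for the optimization and mock the REST endpoint
        # which returns it.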
        result = create_optimization_result(
            optimization.project_id,
            optimization.study_id,
            optimization.id,
            ["evaluator-target-1"],
            [],
        )
        mock_get_optimization_result(requests_mock, result)

        with temporary_cd():

            OptimizationInputFactory._retrieve_results(optimization)

            stored_result = OptimizationResult.parse_file(
                os.path.join("analysis", "optimization-results.json"))
            assert stored_result.json() == result.json()
Example #6
    def _retrieve_results(cls, optimization: Optimization):
        """Retrieves the full results for a optimization.

        Parameters
        ----------
        optimization
            The optimization to retrieve the results for.
        """

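        # Retrieve the full results from the RESTful API.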
        results = OptimizationResult.from_rest(
            project_id=optimization.project_id,
            study_id=optimization.study_id,
            model_id=optimization.id,
        )

        output_directory = "analysis"
        os.makedirs(output_directory, exist_ok=True)

        with open(os.path.join(output_directory, "optimization-results.json"),
                  "w") as file:
            file.write(results.json())
Example #7
    def analyze(cls, reindex):

        # Load in the definition of the optimization to optimize.
        optimization = Optimization.parse_file("optimization.json")

        # Create directories to store the results in
        output_directory = "analysis"
        os.makedirs(output_directory, exist_ok=True)

        for target in optimization.targets:
            os.makedirs(os.path.join(output_directory, target.id), exist_ok=True)

        # Load in the refit force field (if it exists)
        refit_force_field = cls._load_refit_force_field()

        # Determine the number of optimization iterations.
        target_n_iterations = [
            len(glob(os.path.join("optimize.tmp", target.id, "iter_*", "objective.p")))
            for target in optimization.targets
        ]

        n_iterations = min(target_n_iterations)

        if n_iterations == 0:
            raise RuntimeError(
                "No iteration results could be found, even though a refit force field "
                "was found. Make sure not to delete the `optimize.tmp` directory "
                "after the optimization has completed."
            )

        # Analyse the results of each iteration.
        target_results = defaultdict(dict)

        for iteration in range(n_iterations):

            logger.info(f"Analysing the results of iteration {iteration}")

            for target in optimization.targets:

                logger.info(f"Analysing the {target.id} target.")

                iteration_directory = os.path.join(
                    "optimize.tmp", target.id, "iter_" + str(iteration).zfill(4)
                )

                # Analyse the target
                target_analyzer = _TARGET_FACTORIES.get(target.__class__, None)

                if target_analyzer is None:
                    raise NotImplementedError

                target_analyzer_kwargs = {}

                if issubclass(target_analyzer, EvaluatorAnalysisFactory):
                    target_analyzer_kwargs["reindex"] = reindex

                target_result = target_analyzer.analyze(
                    optimization,
                    target,
                    os.path.join("targets", target.id),
                    iteration_directory,
                    **target_analyzer_kwargs,
                )

                if target_result is None:

                    raise RuntimeError(
                        f"The results of the {target.id} target could not be "
                        f"found for iteration {iteration}."
                    )

                target_results[iteration][target.id] = target_result

                # Save the result
                with open(
                    os.path.join(
                        output_directory, target.id, f"iteration-{iteration}.json"
                    ),
                    "w",
                ) as file:
                    file.write(target_result.json())

        # Save the full results
        optimization_results = OptimizationResult(
            project_id=optimization.project_id,
            study_id=optimization.study_id,
            id=optimization.id,
            calculation_environment=cls._parse_calculation_environment(),
            analysis_environment=summarise_current_versions(),
            target_results=target_results,
            refit_force_field=refit_force_field,
        )

        with open(
            os.path.join(output_directory, "optimization-results.json"), "w"
        ) as file:
            file.write(optimization_results.json())
Example #8
def test_validate_optimization_result(result_kwargs, expected_raises):

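    # Validation should succeed or fail according to the expected context
    # manager.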
    with expected_raises:
        OptimizationResult(**result_kwargs)
Example #9
def test_analysis(monkeypatch, force_field, dummy_conda_env):

    optimization = create_optimization(
        "project-1",
        "study-1",
        "optimization-1",
        [
            create_evaluator_target("evaluator-target-1", ["data-set-1"]),
            create_recharge_target("recharge-target-1", ["qc-data-set-1"]),
        ],
    )
    optimization.force_field = force_field

    with temporary_cd(os.path.dirname(dummy_conda_env)):

        # Save the expected results files.
        os.makedirs(os.path.join("result", "optimize"))

        for target in optimization.targets:
            os.makedirs(os.path.join("targets", target.id))

            os.makedirs(os.path.join("optimize.tmp", target.id, "iter_0000"))
            os.makedirs(os.path.join("optimize.tmp", target.id, "iter_0001"))

            # Add enough output files to make it look like only one full iteration has
            # finished.
            lp_dump(
                {"X": 1.0},
                os.path.join("optimize.tmp", target.id, "iter_0000",
                             "objective.p"),
            )

        lp_dump(
            {"X": 1.0},
            os.path.join("optimize.tmp", optimization.targets[0].id,
                         "iter_0001", "objective.p"),
        )

        with open("optimization.json", "w") as file:
            file.write(optimization.json())

        optimization.force_field.to_openff().to_file(
            os.path.join("result", "optimize", "force-field.offxml"))

        # Mock the already tested functions.
        monkeypatch.setattr(OptimizationAnalysisFactory,
                            "_load_refit_force_field", lambda: force_field)
        monkeypatch.setattr(
            EvaluatorAnalysisFactory,
            "analyze",
            lambda *args, **kwargs: EvaluatorTargetResult(
                objective_function=1.0, statistic_entries=[]),
        )
        monkeypatch.setattr(
            RechargeAnalysisFactory,
            "analyze",
            lambda *args, **kwargs: RechargeTargetResult(
                objective_function=1.0, statistic_entries=[]),
        )

        OptimizationAnalysisFactory.analyze(True)

        for target in optimization.targets:

            assert os.path.isfile(
                os.path.join("analysis", target.id, "iteration-0.json"))
            assert not os.path.isfile(
                os.path.join("analysis", target.id, "iteration-1.json"))

        result = OptimizationResult.parse_file(
            os.path.join("analysis", "optimization-results.json"))

        assert len(result.target_results) == 1
        assert all(target.id in result.target_results[0]
                   for target in optimization.targets)
        assert result.refit_force_field.inner_content == force_field.inner_content
Example #10
def create_optimization_result(
    project_id: str,
    study_id: str,
    optimization_id: str,
    evaluator_target_ids: List[str],
    recharge_target_ids: List[str],
) -> OptimizationResult:
    """Creates an optimization result.

    Parameters
    ----------
    project_id
        The id of the parent project.
    study_id
        The id of the parent study.
    optimization_id
        The id of the optimization which the results to belong to.
    evaluator_target_ids
        The ids of any evaluator targets which yielded a number of the results.
    recharge_target_ids
        The ids of any recharge targets which yielded a number of the results.
    """
    return OptimizationResult(
        id=optimization_id,
        study_id=study_id,
        project_id=project_id,
        calculation_environment={"forcebalance": "1.0.0"},
        analysis_environment={"nonbonded": "0.0.01a5"},
        target_results={
            0: {
                **{
                    evaluator_target_id: EvaluatorTargetResult(
                        objective_function=1.0,
                        statistic_entries=[create_data_set_statistic()],
                    )
                    for evaluator_target_id in evaluator_target_ids
                },
                **{
                    recharge_target_id: RechargeTargetResult(
                        objective_function=0.5,
                        statistic_entries=[create_statistic()],
                    )
                    for recharge_target_id in recharge_target_ids
                },
            },
            1: {
                **{
                    evaluator_target_id: EvaluatorTargetResult(
                        objective_function=1.0,
                        statistic_entries=[create_data_set_statistic()],
                    )
                    for evaluator_target_id in evaluator_target_ids
                },
                **{
                    recharge_target_id: RechargeTargetResult(
                        objective_function=0.5,
                        statistic_entries=[create_statistic()],
                    )
                    for recharge_target_id in recharge_target_ids
                },
            },
        },
        refit_force_field=create_force_field("refit"),
    )