Example #1
def _run_calculations(
    data_set: "PhysicalPropertyDataSet",
    force_field: "ForceField",
    polling_interval: int,
    request_options: "RequestOptions",
    server_config: EvaluatorServerConfig,
) -> "RequestResult":
    """Attempt to estimate the data set using the specified force field.

    Parameters
    ----------
    data_set
        The data set to estimate.
    force_field
        The force field to estimate the data set with.
    polling_interval
        The interval with which to attempt to retrieve the results.
    request_options
        The options to use when estimating the data set.
    server_config
        The configuration to use for the evaluator server.
    """
    from openff.evaluator.client import ConnectionOptions, EvaluatorClient

    calculation_backend = server_config.to_backend()

    with calculation_backend:

        evaluator_server = server_config.to_server(calculation_backend)

        with evaluator_server:

            # Request the estimates.
            client = EvaluatorClient(
                ConnectionOptions(server_port=server_config.port))

            request, error = client.request_estimate(
                property_set=data_set,
                force_field_source=force_field,
                options=request_options,
            )

            if error is not None:
                raise error

            # Wait for the results.
            results, error = request.results(
                synchronous=True, polling_interval=polling_interval
            )

            if error is not None:
                raise error

    return results
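A minimal sketch of how this helper might be driven. The file names and the polling interval are illustrative assumptions only; _load_force_field and EvaluatorServerConfig are the same helpers/classes used in the later examples.

from openff.evaluator.client import RequestOptions
from openff.evaluator.datasets import PhysicalPropertyDataSet

# Illustrative inputs; the file names are assumptions, not fixed by the code above.
data_set = PhysicalPropertyDataSet.from_json("test-set.json")
force_field = _load_force_field()
request_options = RequestOptions.from_json("estimation-options.json")
server_config = EvaluatorServerConfig.parse_file("server-config.json")

results = _run_calculations(
    data_set,
    force_field,
    polling_interval=600,
    request_options=request_options,
    server_config=server_config,
)
results.json("results.json")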
Example #2
def test_server_config_to_evaluator():

    server_config = EvaluatorServerConfig(
        backend_config=DaskLocalClusterConfig(
            resources_per_worker=ComputeResources(n_gpus=0)))

    backend = server_config.to_backend()
    backend._started = True

    with temporary_cd():
        assert isinstance(backend, dask.DaskLocalCluster)
        assert isinstance(server_config.to_server(backend),
                          server.EvaluatorServer)
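The test above sets backend._started = True only so that to_server does not try to spin up real workers. Outside of a test, the backend and server are entered as context managers, as in Example #1; a minimal sketch:

backend = server_config.to_backend()

with backend:
    evaluator_server = server_config.to_server(backend)

    with evaluator_server:
        ...  # submit estimation requests against the running server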
Example #3
    def _generate_evaluator_config(
        cls, preset_name: str, conda_environment: str, n_workers: int, port: int
    ) -> EvaluatorServerConfig:
        """Generates an Evaluator server configuration."""

        if preset_name == "lilac-local":

            backend_config = DaskLocalClusterConfig(
                resources_per_worker=ComputeResources()
            )

        elif preset_name == "lilac-dask":

            # noinspection PyTypeChecker
            backend_config = DaskHPCClusterConfig(
                maximum_workers=n_workers,
                resources_per_worker=QueueWorkerResources(),
                queue_name="gpuqueue",
                setup_script_commands=[
                    f"conda activate {conda_environment}",
                    "module load cuda/10.1",
                ],
            )

        else:
            raise NotImplementedError()

        server_config = EvaluatorServerConfig(backend_config=backend_config, port=port)

        return server_config
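The generated configuration is typically written to the server-config.json file that the run commands in the later examples read back in. A sketch, assuming the "lilac-dask" preset with an illustrative worker count, environment name and port (the classes are the same ones used in the examples above):

backend_config = DaskHPCClusterConfig(
    maximum_workers=8,  # assumed worker count
    resources_per_worker=QueueWorkerResources(),
    queue_name="gpuqueue",
    setup_script_commands=[
        "conda activate evaluator-env",  # assumed environment name
        "module load cuda/10.1",
    ],
)
server_config = EvaluatorServerConfig(backend_config=backend_config, port=8000)

with open("server-config.json", "w") as file:
    file.write(server_config.json())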
Example #4
def test_run_command(runner, monkeypatch):

    from nonbonded.cli.projects.benchmark import run

    monkeypatch.setattr(run, "_prepare_restart", lambda *args:
                        (None, successful_result()))
    monkeypatch.setattr(run, "_load_force_field", lambda *args: None)
    monkeypatch.setattr(run, "_run_calculations",
                        lambda *args: RequestResult())

    # Save a copy of the result model.
    with temporary_cd():

        # Create mock inputs
        with open("server-config.json", "w") as file:

            file.write(
                EvaluatorServerConfig(backend_config=DaskLocalClusterConfig(
                    resources_per_worker=ComputeResources())).json())

        RequestOptions().json("estimation-options.json")

        result = runner.invoke(run_command())

        with open("results.json") as file:
            assert successful_result().json() == file.read()

    if result.exit_code != 0:
        raise result.exception
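The runner fixture is not shown in this excerpt; it is presumably a standard click test runner along these lines:

import pytest
from click.testing import CliRunner


@pytest.fixture
def runner() -> CliRunner:
    """A click runner used to invoke CLI commands in-process."""
    return CliRunner()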
Example #5
def test_run_calculations(request_error, results_error, expected_raises,
                          monkeypatch):

    monkeypatch.setattr(EvaluatorServerConfig, "to_backend",
                        lambda *_: does_not_raise())
    monkeypatch.setattr(EvaluatorServerConfig, "to_server",
                        lambda *_: does_not_raise())

    empty_request = Request()
    empty_result = RequestResult()

    monkeypatch.setattr(
        EvaluatorClient,
        "request_estimate",
        lambda *args, **kwargs: (empty_request, request_error),
    )
    monkeypatch.setattr(Request, "results", lambda *args, **kwargs:
                        (empty_result, results_error))

    server_config = EvaluatorServerConfig(
        backend_config=DaskLocalClusterConfig(
            resources_per_worker=ComputeResources()))

    with expected_raises as error_info:
        # noinspection PyTypeChecker
        _run_calculations(None, None, 1, None, server_config)

    error_value = None if error_info is None else error_info.value

    assert error_value == (
        request_error if request_error is not None else
        results_error if results_error is not None else None)
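The parametrize decorator for this test is not included in the excerpt. A sketch of what it might look like, with plain RuntimeError instances standing in for whatever Evaluator exception the real test injects:

@pytest.mark.parametrize(
    "request_error, results_error, expected_raises",
    [
        (None, None, does_not_raise()),
        (RuntimeError("request failed"), None, pytest.raises(RuntimeError)),
        (None, RuntimeError("polling failed"), pytest.raises(RuntimeError)),
    ],
)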
Example #6
    def base_function(**kwargs):

        from openff.evaluator.client import RequestOptions

        # Load in the force field.
        force_field = _load_force_field()

        # Load any previous results and the data set to estimate.
        data_set, existing_results = _prepare_restart(kwargs.pop("restart"))

        # Load the server configuration.
        server_config = EvaluatorServerConfig.parse_file(
            kwargs.pop("server_config"))

        if server_config.enable_data_caching is None:
            server_config.enable_data_caching = False

        # Load in the request options
        request_options = RequestOptions.from_json(
            kwargs.pop("request_options"))

        results = _run_calculations(
            data_set,
            force_field,
            kwargs.pop("polling_interval"),
            request_options,
            server_config,
        )

        # Save a copy of the results in case adding the already estimated
        # properties failed for some reason.
        results.json("results.tmp.json")

        if existing_results is not None:

            results.estimated_properties.add_properties(
                *existing_results.estimated_properties.properties,
                validate=False,
            )

        # Save the results to disk.
        results.json("results.json")

        if os.path.isfile("results.tmp.json"):
            # Remove the backup results.
            os.unlink("results.tmp.json")
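Once the command finishes, the merged results can be loaded back from the results.json file it writes; a small sketch:

from openff.evaluator.client import RequestResult

results = RequestResult.from_json("results.json")

print(len(results.estimated_properties.properties), "properties estimated")
print(len(results.unsuccessful_properties.properties), "properties failed")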
Example #7
def _launch_required_services(optimization: Optimization,
                              server_config: Optional[str]):
    """Launches any required services such as an OpenFF Evaluator server."""

    if not any(
            isinstance(target, EvaluatorTarget)
            for target in optimization.targets):
        yield
        return

    if server_config is None:

        raise RuntimeError(
            "The path to an OpenFF Evaluator server configuration must be provided "
            "when running an optimization against a physical property data set."
        )

    server_config = EvaluatorServerConfig.parse_file(server_config)

    # Disable data caching when re-weighting is disabled and the user hasn't
    # explicitly requested it.
    requires_cached_data = any(target.allow_reweighting
                               for target in optimization.targets
                               if isinstance(target, EvaluatorTarget))

    if server_config.enable_data_caching is None:
        server_config.enable_data_caching = requires_cached_data

    calculation_backend = server_config.to_backend()

    with calculation_backend:

        evaluator_server = server_config.to_server(calculation_backend)

        with evaluator_server:

            yield
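Because this function is a generator that yields while the services are running, it is presumably wrapped with contextlib.contextmanager in the full module. A usage sketch under that assumption, with optimization being an Optimization model as in the signature above:

from contextlib import contextmanager

launch_required_services = contextmanager(_launch_required_services)

with launch_required_services(optimization, "server-config.json"):
    ...  # run the optimization while the Evaluator server is available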
                "project-1",
                "study-1",
                "optimization-1",
                [create_evaluator_target("evaluator-target", ["data-set-1"])],
            ),
            None,
            pytest.raises(RuntimeError),
        ),
        (
            create_optimization(
                "project-1",
                "study-1",
                "optimization-1",
                [create_evaluator_target("evaluator-target", ["data-set-1"])],
            ),
            EvaluatorServerConfig(backend_config=DaskLocalClusterConfig(
                resources_per_worker=ComputeResources())),
            does_not_raise(),
        ),
    ],
)
def test_launch_required_services(optimization, server_config, expected_raises,
                                  monkeypatch):

    monkeypatch.setattr(EvaluatorServerConfig, "to_backend",
                        lambda *_: does_not_raise())
    monkeypatch.setattr(EvaluatorServerConfig, "to_server",
                        lambda *_: does_not_raise())

    with temporary_cd():

        if server_config is not None: