Code example #1
def test_lsf_wrapped_function():

    available_resources = QueueWorkerResources()

    protocols_to_import = [
        protocol_class.__module__ + "." + protocol_class.__qualname__
        for protocol_class in registered_workflow_protocols.values()
    ]

    per_worker_logging = True

    gpu_assignments = None

    expected_output = 12345

    result = DaskLSFBackend._wrapped_function(
        dummy_function,
        expected_output,
        available_resources=available_resources,
        registered_workflow_protocols=protocols_to_import,
        per_worker_logging=per_worker_logging,
        gpu_assignments=gpu_assignments,
    )

    assert expected_output == result
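
The helper dummy_function is not shown in this excerpt. Since the test expects _wrapped_function to hand back exactly expected_output, a minimal stand-in (a hedged sketch, not necessarily the original helper) only needs to echo its first positional argument:

def dummy_function(*args, **kwargs):
    # Echo back the single positional argument so the wrapped call
    # returns exactly the value the test passed in.
    assert len(args) == 1
    return args[0]
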
Code example #2
def main():

    setup_timestamp_logging()

    # Load in the force field
    force_field_path = "openff-1.0.0-refit.offxml"
    force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)

    # Load in the test set.
    data_set = PhysicalPropertyDataSet.from_json("full_set.json")

    # The directory in which the calculation server will store its working files.
    working_directory = "working_directory"

    # Set up a backend to run the calculations on. This assumes running
    # on an HPC resource with the LSF queue system installed.
    queue_resources = QueueWorkerResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        per_thread_memory_limit=5 * unit.gigabyte,
        wallclock_time_limit="05:59",
    )

    worker_script_commands = [
        "conda activate forcebalance", "module load cuda/10.1"
    ]

    calculation_backend = DaskLSFBackend(
        minimum_number_of_workers=1,
        maximum_number_of_workers=50,
        resources_per_worker=queue_resources,
        queue_name="gpuqueue",
        setup_script_commands=worker_script_commands,
        adaptive_interval="1000ms",
    )

    with calculation_backend:

        server = EvaluatorServer(
            calculation_backend=calculation_backend,
            working_directory=working_directory,
            port=8002,
        )

        with server:

            # Request the estimates.
            client = EvaluatorClient(ConnectionOptions(server_port=8002))

            request, _ = client.request_estimate(
                property_set=data_set,
                force_field_source=force_field_source,
            )

            # Wait synchronously for the results, polling every 5 seconds.
            results, _ = request.results(True, 5)
            results.json("results.json")
Code example #3
def main():

    setup_timestamp_logging()

    working_directory = "working_directory"

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set up a backend to run the calculations on. This assumes running
    # on an HPC resource with the LSF queue system installed.
    queue_resources = QueueWorkerResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        per_thread_memory_limit=5 * unit.gigabyte,
        wallclock_time_limit="05:59",
    )

    worker_script_commands = [
        "conda activate forcebalance", "module load cuda/10.1"
    ]

    calculation_backend = DaskLSFBackend(
        minimum_number_of_workers=1,
        maximum_number_of_workers=14,
        resources_per_worker=queue_resources,
        queue_name="gpuqueue",
        setup_script_commands=worker_script_commands,
        adaptive_interval="1000ms",
    )

    with calculation_backend:

        server = EvaluatorServer(
            calculation_backend=calculation_backend,
            working_directory=working_directory,
            port=8000,
        )

        # Tell the server to start listening for estimation requests.
        server.start()
Code example #4
def test_dask_jobqueue_backend_creation(cluster_class):
    """Test creating and starting a new dask jobqueue backend."""

    cpu_backend = cluster_class()
    cpu_backend.start()
    cpu_backend.stop()

    gpu_resources = QueueWorkerResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
    )

    gpu_commands = [
        "module load cuda/9.2",
    ]

    gpu_backend = cluster_class(
        resources_per_worker=gpu_resources,
        queue_name="gpuqueue",
        setup_script_commands=gpu_commands,
    )

    gpu_backend.start()
    assert "module load cuda/9.2" in gpu_backend.job_script()
    gpu_backend.stop()
Code example #5
def setup_server(
    backend_type=BackendType.LocalCPU,
    max_number_of_workers=1,
    conda_environment="evaluator",
    worker_memory=4 * unit.gigabyte,
    port=8000,
    cuda_version="10.1",
):
    """A convenience function to sets up an estimation server which will can advantage
    of different compute backends.

    Parameters
    ----------
    backend_type: BackendType
        The type of compute backend to use.
    max_number_of_workers: int
        The maximum number of workers to adaptively insert into
        the queuing system.
    conda_environment: str
        The name of the conda environment in which the evaluator
        package is installed.
    worker_memory: pint.Quantity
        The maximum amount of memory to request per worker.
    port: int
        The port that the server should listen for estimation requests on.
    cuda_version: str
        The version of CUDA to use if running on a backend which supports
        GPUs.

    Returns
    -------
    EvaluatorServer
        The server object.
    """

    calculation_backend = None

    if backend_type == BackendType.LocalCPU:
        calculation_backend = DaskLocalCluster(
            number_of_workers=max_number_of_workers)

    elif backend_type == BackendType.LocalGPU:

        calculation_backend = DaskLocalCluster(
            number_of_workers=max_number_of_workers,
            resources_per_worker=ComputeResources(
                number_of_threads=1,
                number_of_gpus=1,
                preferred_gpu_toolkit=ComputeResources.GPUToolkit.CUDA,
            ),
        )

    elif backend_type == BackendType.GPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            number_of_gpus=1,
            preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="05:59",
        )

        worker_script_commands = [
            f"conda activate {conda_environment}",
            f"module load cuda/{cuda_version}",
        ]

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name="gpuqueue",
            setup_script_commands=worker_script_commands,
            adaptive_interval="1000ms",
        )
    elif backend_type == BackendType.CPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="01:30",
        )

        worker_script_commands = [f"conda activate {conda_environment}"]

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name="cpuqueue",
            setup_script_commands=worker_script_commands,
            adaptive_interval="1000ms",
        )

    # Guard against backend types which are not handled above.
    if calculation_backend is None:
        raise NotImplementedError(f"Unsupported backend type: {backend_type}")

    calculation_backend.start()

    # Spin up the server object.
    return server.EvaluatorServer(calculation_backend=calculation_backend,
                                  port=port)
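
A short, hypothetical usage sketch for setup_server (the argument values are illustrative only; BackendType is the enum referenced in the signature above, and start() mirrors the call shown in code example #3):

if __name__ == "__main__":

    # Spin up a server backed by the LSF GPU queue and start listening
    # for estimation requests.
    evaluator_server = setup_server(
        backend_type=BackendType.GPU,
        max_number_of_workers=20,
        conda_environment="evaluator",
        port=8000,
    )

    evaluator_server.start()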