Code example #1
def setup_server(max_number_of_workers=1,
                 conda_environment='propertyestimator',
                 worker_memory=5 * unit.gigabyte,
                 port=8000):
    """Sets up an estimation server which will take advantage of
    an LSF based HPC cluster with access to nVidia GPUs.

    Parameters
    ----------
    max_number_of_workers: int
        The maximum number of workers to adaptively insert into
        the queuing system.
    conda_environment: str
        The name of the conda environment in which the propertyestimator
        package is installed.
    worker_memory: Quantity
        The maximum amount of memory to request per worker.
    port: int
        The port that the server should listen for estimation requests on.
    """
    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    # Remove any existing data.
    if os.path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Request workers with access to a single CPU and CUDA based GPU.
    queue_resources = QueueWorkerResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        per_thread_memory_limit=worker_memory,
        wallclock_time_limit="05:59")

    # Set up extra commands so that each worker has the correct environment
    # set up.
    worker_script_commands = [
        # Load in the correct conda environment.
        f'conda activate {conda_environment}',
        # Load in CUDA
        'module load cuda/9.2'
    ]

    calculation_backend = DaskLSFBackend(
        minimum_number_of_workers=1,
        maximum_number_of_workers=max_number_of_workers,
        resources_per_worker=queue_resources,
        queue_name='gpuqueue',
        setup_script_commands=worker_script_commands,
        adaptive_interval='1000ms')

    # Set up a backend to cache simulation data in.
    storage_backend = LocalFileStorage(storage_directory)

    # Spin up the server object.
    PropertyEstimatorServer(calculation_backend=calculation_backend,
                            storage_backend=storage_backend,
                            port=port,
                            working_directory=working_directory)
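
The snippet above assumes its dependencies are already in scope. A minimal sketch of the imports and an entry point, assuming the module layout described in the propertyestimator documentation (the exact paths are an assumption and are not part of the original example):

# Assumed imports; the module paths follow the propertyestimator
# documentation and are not shown in the original snippet.
import os
import shutil

from propertyestimator import unit
from propertyestimator.backends import DaskLSFBackend, QueueWorkerResources
from propertyestimator.server import PropertyEstimatorServer
from propertyestimator.storage import LocalFileStorage

if __name__ == '__main__':
    # Spin up a server with up to four adaptively scheduled LSF workers.
    setup_server(max_number_of_workers=4)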
Code example #2
def test_dask_lsf_creation():
    """Test creating and starting a new dask LSF backend."""

    cpu_backend = DaskLSFBackend()
    cpu_backend.start()
    cpu_backend.stop()

    gpu_resources = QueueWorkerResources(
        1, 1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA)

    gpu_commands = [
        'module load cuda/9.2',
    ]

    gpu_backend = DaskLSFBackend(resources_per_worker=gpu_resources,
                                 queue_name='gpuqueue',
                                 setup_script_commands=gpu_commands)

    gpu_backend.start()
    gpu_backend.stop()
Code example #3
def test_dask_jobqueue_backend_creation(cluster_class):
    """Test creating and starting a new dask jobqueue backend."""

    cpu_backend = cluster_class()
    cpu_backend.start()
    cpu_backend.stop()

    gpu_resources = QueueWorkerResources(
        1, 1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA)

    gpu_commands = [
        'module load cuda/9.2',
    ]

    gpu_backend = cluster_class(resources_per_worker=gpu_resources,
                                queue_name='gpuqueue',
                                setup_script_commands=gpu_commands)

    gpu_backend.start()
    assert 'module load cuda/9.2' in gpu_backend.job_script()
    gpu_backend.stop()
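
The cluster_class argument suggests this test is parametrized over the available dask-jobqueue style backend classes. A plausible sketch of such a parametrization, assuming pytest (the decorator and the list of backend classes are hypothetical and are not shown in the original):

import pytest

from propertyestimator.backends import DaskLSFBackend

# Hypothetical parametrization; the original decorator is not shown.
@pytest.mark.parametrize('cluster_class', [DaskLSFBackend])
def test_dask_jobqueue_backend_creation(cluster_class):
    ...  # test body as shown above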
Code example #4
def test_lsf_wrapped_function():
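    """Test that DaskLSFBackend._wrapped_function correctly invokes
    a wrapped function and returns its result."""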

    available_resources = QueueWorkerResources()

    protocols_to_import = [
        protocol_class.__module__ + '.' + protocol_class.__qualname__
        for protocol_class in available_protocols.values()
    ]

    per_worker_logging = True

    gpu_assignments = None

    expected_output = 12345

    result = DaskLSFBackend._wrapped_function(
        dummy_function,
        expected_output,
        available_resources=available_resources,
        available_protocols=protocols_to_import,
        per_worker_logging=per_worker_logging,
        gpu_assignments=gpu_assignments)

    assert expected_output == result
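
The test checks that _wrapped_function returns its input unchanged, so dummy_function presumably just echoes its single positional argument back. A minimal sketch of such a helper (hypothetical; its real definition is not included in the snippet):

def dummy_function(*args, **kwargs):
    # Echo the single positional argument back so the test can
    # compare it against the expected output.
    assert len(args) == 1
    return args[0]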
Code example #5
def setup_server(backend_type=BackendType.LocalCPU,
                 max_number_of_workers=1,
                 conda_environment='propertyestimator',
                 worker_memory=8 * unit.gigabyte,
                 port=8000):

    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    # Remove any existing data.
    if os.path.isdir(working_directory):
        shutil.rmtree(working_directory)

    calculation_backend = None

    if backend_type == BackendType.LocalCPU:
        calculation_backend = DaskLocalCluster(number_of_workers=1)

    elif backend_type == BackendType.LocalGPU:

        calculation_backend = DaskLocalCluster(
            number_of_workers=1,
            resources_per_worker=ComputeResources(
                1, 1, ComputeResources.GPUToolkit.CUDA))

    elif backend_type == BackendType.GPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            number_of_gpus=1,
            preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="05:59")

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            f'conda activate {conda_environment}',
            'module load cuda/9.2'
        ]

        extra_script_options = ['-m "ls-gpu lt-gpu"']

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='gpuqueue',
            setup_script_commands=worker_script_commands,
            extra_script_options=extra_script_options,
            adaptive_interval='1000ms')

    elif backend_type == BackendType.CPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="01:30")

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            f'conda activate {conda_environment}',
        ]

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='cpuqueue',
            setup_script_commands=worker_script_commands,
            adaptive_interval='1000ms')

    storage_backend = LocalFileStorage(storage_directory)

    server.PropertyEstimatorServer(calculation_backend=calculation_backend,
                                   storage_backend=storage_backend,
                                   port=port,
                                   working_directory=working_directory)
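
The BackendType enum this function dispatches on is not included in the snippet. A minimal sketch consistent with the branches above (the member names are taken from the code; the Enum base class and the member values are assumptions):

from enum import Enum

# Hypothetical definition; the original enum is defined elsewhere.
class BackendType(Enum):
    LocalCPU = 'local_cpu'
    LocalGPU = 'local_gpu'
    GPU = 'gpu'
    CPU = 'cpu'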
Code example #6
def main():

    setup_timestamp_logging()

    # Load in the force field
    force_field_path = 'smirnoff99Frosst-1.1.0.offxml'
    force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)

    # Create a data set containing three solvation free energies.
    data_set = _create_data_set()

    # Set up the compute backend which will run the calculations.
    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    queue_resources = QueueWorkerResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        per_thread_memory_limit=5 * unit.gigabyte,
        wallclock_time_limit="05:59")

    worker_script_commands = [
        'conda activate propertyestimator',
        'module load cuda/10.1'
    ]

    calculation_backend = DaskLSFBackend(
        minimum_number_of_workers=1,
        maximum_number_of_workers=3,
        resources_per_worker=queue_resources,
        queue_name='gpuqueue',
        setup_script_commands=worker_script_commands,
        adaptive_interval='1000ms',
        adaptive_class=CustomAdaptive)

    # Set up a backend to cache simulation data in.
    storage_backend = LocalFileStorage(storage_directory)

    # Spin up the server object.
    PropertyEstimatorServer(calculation_backend=calculation_backend,
                            storage_backend=storage_backend,
                            port=8005,
                            working_directory=working_directory)

    # Request the estimates.
    property_estimator = client.PropertyEstimatorClient(
        client.ConnectionOptions(server_port=8005))

    options = PropertyEstimatorOptions()
    options.allowed_calculation_layers = ['SimulationLayer']

    workflow_options = WorkflowOptions(
        WorkflowOptions.ConvergenceMode.NoChecks)

    options.workflow_options = {
        'SolvationFreeEnergy': {
            'SimulationLayer': workflow_options
        }
    }
    options.workflow_schemas = {
        'SolvationFreeEnergy': {
            'SimulationLayer': _get_fixed_lambda_schema(workflow_options)
        }
    }

    request = property_estimator.request_estimate(
        property_set=data_set,
        force_field_source=force_field_source,
        options=options)

    # Wait synchronously for the results, polling the server every 5 seconds.
    results = request.results(True, 5)

    # Save the result to file.
    with open('solvation_free_energy_simulation.json', 'wb') as file:

        json_results = json.dumps(results,
                                  sort_keys=True,
                                  indent=2,
                                  separators=(',', ': '),
                                  cls=TypedJSONEncoder)

        file.write(json_results.encode('utf-8'))
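
A script like this would normally be run directly, so a standard entry-point guard is assumed to accompany it:

if __name__ == '__main__':
    main()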
Code example #7
def setup_server(backend_type=BackendType.LocalCPU, max_number_of_workers=1):
    """Creates a new estimation server object.

    Parameters
    ----------
    backend_type: BackendType
        The type of backend to use.
    max_number_of_workers: int
        The maximum number of compute workers to spin up.

    Returns
    -------
    PropertyEstimatorServer
        The server object.
    """

    # Set the name of the directory in which all temporary files
    # will be generated.
    working_directory = 'working_directory'

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set the name of the directory in which all cached simulation data
    # will be stored.
    storage_directory = 'storage_directory'

    # Set up the backend which will perform any calculations.
    calculation_backend = None

    if backend_type == BackendType.LocalCPU:

        # A backend which will run all calculations on the local machine's CPUs.
        calculation_backend = DaskLocalCluster(number_of_workers=max_number_of_workers)

    elif backend_type == BackendType.LocalGPU:

        # A backend which will run all calculations on the local machine's GPUs.
        compute_resources = ComputeResources(number_of_threads=1, number_of_gpus=1)

        calculation_backend = DaskLocalCluster(number_of_workers=max_number_of_workers,
                                               resources_per_worker=compute_resources)

    elif backend_type == BackendType.LilacGPU:

        # A backend which will run all calculations on the MSKCC `lilac` cluster, taking
        # advantage of the available GPUs.
        queue_resources = QueueWorkerResources(number_of_threads=1,
                                               number_of_gpus=1,
                                               preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
                                               per_thread_memory_limit=5 * unit.gigabyte,
                                               wallclock_time_limit="05:59")

        extra_script_options = [
            '-m "ls-gpu lt-gpu"'
        ]

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            'conda activate forcebalance',
            'module load cuda/9.2'
        ]

        calculation_backend = DaskLSFBackend(minimum_number_of_workers=1,
                                             maximum_number_of_workers=max_number_of_workers,
                                             resources_per_worker=queue_resources,
                                             queue_name='gpuqueue',
                                             setup_script_commands=worker_script_commands,
                                             extra_script_options=extra_script_options,
                                             adaptive_interval='1000ms')

    elif backend_type == BackendType.LilacCPU:

        # A backend which will run all calculations on the MSKCC `lilac` cluster using only
        # CPUs.
        queue_resources = QueueWorkerResources(number_of_threads=1,
                                               per_thread_memory_limit=5 * unit.gigabyte,
                                               wallclock_time_limit="01:30")

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            'conda activate forcebalance',
        ]

        calculation_backend = DaskLSFBackend(minimum_number_of_workers=1,
                                             maximum_number_of_workers=max_number_of_workers,
                                             resources_per_worker=queue_resources,
                                             queue_name='cpuqueue',
                                             setup_script_commands=worker_script_commands,
                                             adaptive_interval='1000ms')

    # Set up the storage backend.
    storage_backend = LocalFileStorage(storage_directory)

    # Set up the server itself.
    server = PropertyEstimatorServer(calculation_backend=calculation_backend,
                                     storage_backend=storage_backend,
                                     working_directory=working_directory)

    return server
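
As in the earlier examples, the BackendType enum is defined elsewhere. A minimal sketch matching the branches above, together with a hypothetical call targeting the lilac GPU queue (both are assumptions, not part of the original):

from enum import Enum

# Hypothetical definition; the original enum is defined elsewhere.
class BackendType(Enum):
    LocalCPU = 'local_cpu'
    LocalGPU = 'local_gpu'
    LilacGPU = 'lilac_gpu'
    LilacCPU = 'lilac_cpu'

# Hypothetical usage: a server backed by up to ten lilac GPU workers.
server = setup_server(backend_type=BackendType.LilacGPU,
                      max_number_of_workers=10)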
Code example #8
def setup_server(backend_type=BackendType.LocalCPU,
                 max_number_of_workers=1,
                 conda_environment='propertyestimator',
                 worker_memory=4 * unit.gigabyte,
                 port=8000,
                 cuda_version='10.1'):
    """A convenience function to sets up an estimation server which will can advantage
    of different compute backends.

    Parameters
    ----------
    backend_type: BackendType
        The type of compute backend to use.
    max_number_of_workers: int
        The maximum number of workers to adaptively insert into
        the queuing system.
    conda_environment: str
        The name of the conda environment in which the propertyestimator
        package is installed.
    worker_memory: Quantity
        The maximum amount of memory to request per worker.
    port: int
        The port that the server should listen for estimation requests on.
    cuda_version: str
        The version of CUDA to use if running on a backend which supports
        GPUs.
    """

    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    # Remove any existing data.
    if os.path.isdir(working_directory):
        shutil.rmtree(working_directory)

    calculation_backend = None

    if backend_type == BackendType.LocalCPU:
        calculation_backend = DaskLocalCluster(
            number_of_workers=max_number_of_workers)

    elif backend_type == BackendType.LocalGPU:

        calculation_backend = DaskLocalCluster(
            number_of_workers=max_number_of_workers,
            resources_per_worker=ComputeResources(
                1, 1, ComputeResources.GPUToolkit.CUDA))

    elif backend_type == BackendType.GPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            number_of_gpus=1,
            preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="05:59")

        worker_script_commands = [
            f'conda activate {conda_environment}',
            f'module load cuda/{cuda_version}'
        ]

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='gpuqueue',
            setup_script_commands=worker_script_commands,
            adaptive_interval='1000ms')

    elif backend_type == BackendType.CPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="01:30")

        worker_script_commands = [f'conda activate {conda_environment}']

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='cpuqueue',
            setup_script_commands=worker_script_commands,
            adaptive_interval='1000ms')

    # Set up a backend to cache simulation data in.
    storage_backend = LocalFileStorage(storage_directory)

    # Spin up the server object.
    server.PropertyEstimatorServer(calculation_backend=calculation_backend,
                                   storage_backend=storage_backend,
                                   port=port,
                                   working_directory=working_directory)
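
This final variant references a server module alias as well as the backend and storage classes. A sketch of the imports it appears to assume (the module paths follow the propertyestimator documentation and are an assumption, not part of the original):

# Assumed imports for the snippet above; not shown in the original.
import os
import shutil

from propertyestimator import server, unit
from propertyestimator.backends import (ComputeResources, DaskLocalCluster,
                                        DaskLSFBackend, QueueWorkerResources)
from propertyestimator.storage import LocalFileStorage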