Example #1
def test_nested_input():

    dummy_schema = WorkflowSchema()

    dict_protocol = DummyInputOutputProtocol('dict_protocol')
    dict_protocol.input_value = {'a': ThermodynamicState(temperature=1 * unit.kelvin)}
    dummy_schema.protocols[dict_protocol.id] = dict_protocol.schema

    quantity_protocol = DummyInputOutputProtocol('quantity_protocol')
    quantity_protocol.input_value = ProtocolPath('output_value[a].temperature', dict_protocol.id)
    dummy_schema.protocols[quantity_protocol.id] = quantity_protocol.schema

    dummy_schema.validate_interfaces()

    dummy_property = create_dummy_property(Density)

    dummy_workflow = Workflow(dummy_property, {})
    dummy_workflow.schema = dummy_schema

    with tempfile.TemporaryDirectory() as temporary_directory:

        workflow_graph = WorkflowGraph(temporary_directory)
        workflow_graph.add_workflow(dummy_workflow)

        dask_local_backend = DaskLocalCluster(1, ComputeResources(1))
        dask_local_backend.start()

        results_futures = workflow_graph.submit(dask_local_backend)

        assert len(results_futures) == 1

        result = results_futures[0].result()
        assert isinstance(result, CalculationLayerResult)
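
The test above exercises nested attribute access through a ProtocolPath: the path 'output_value[a].temperature' first indexes the dictionary output with the key 'a' and then reads its temperature attribute. Below is a plain-Python sketch (an illustration, reusing the same ThermodynamicState and unit objects as the test) of what that path resolves to once dict_protocol has executed:

# The dictionary produced by dict_protocol in the test above.
output_value = {'a': ThermodynamicState(temperature=1 * unit.kelvin)}

# '[a]' indexes into the dictionary; '.temperature' reads the attribute,
# yielding the quantity that is fed into quantity_protocol.
resolved = output_value['a'].temperature
assert resolved == 1 * unit.kelvin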
Example #2
def test_simple_workflow_graph_with_groups():
    dummy_schema = WorkflowSchema()

    dummy_protocol_a = DummyInputOutputProtocol('protocol_a')
    dummy_protocol_a.input_value = EstimatedQuantity(1 * unit.kelvin, 0.1 * unit.kelvin, 'dummy_source')

    dummy_protocol_b = DummyInputOutputProtocol('protocol_b')
    dummy_protocol_b.input_value = ProtocolPath('output_value', dummy_protocol_a.id)

    conditional_group = ConditionalGroup('conditional_group')
    conditional_group.add_protocols(dummy_protocol_a, dummy_protocol_b)

    condition = ConditionalGroup.Condition()
    condition.right_hand_value = 2 * unit.kelvin
    condition.type = ConditionalGroup.ConditionType.LessThan

    condition.left_hand_value = ProtocolPath('output_value.value', conditional_group.id,
                                             dummy_protocol_b.id)

    conditional_group.add_condition(condition)

    dummy_schema.protocols[conditional_group.id] = conditional_group.schema

    dummy_schema.final_value_source = ProtocolPath('output_value', conditional_group.id,
                                                   dummy_protocol_b.id)

    dummy_schema.validate_interfaces()

    dummy_property = create_dummy_property(Density)

    dummy_workflow = Workflow(dummy_property, {})
    dummy_workflow.schema = dummy_schema

    with tempfile.TemporaryDirectory() as temporary_directory:

        workflow_graph = WorkflowGraph(temporary_directory)
        workflow_graph.add_workflow(dummy_workflow)

        dask_local_backend = DaskLocalCluster(1, ComputeResources(1))
        dask_local_backend.start()

        results_futures = workflow_graph.submit(dask_local_backend)

        assert len(results_futures) == 1

        result = results_futures[0].result()
        assert isinstance(result, CalculationLayerResult)
        assert result.calculated_property.value == 1 * unit.kelvin
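
For reference, the condition wired up above compares dummy_protocol_b's output against a fixed 2 K threshold. Since the value passed through the group is 1 K, the condition is met on the first iteration, the group exits without re-running its protocols, and the workflow's final value stays at 1 K, which the last assertion checks. A plain-Python sketch of the comparison:

# The comparison the LessThan condition performs; the group keeps
# re-executing its protocols until this evaluates to True.
left_hand_value = 1 * unit.kelvin    # dummy_protocol_b's output_value.value
right_hand_value = 2 * unit.kelvin
assert left_hand_value < right_hand_value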
Example #3
def test_base_layer():

    properties_to_estimate = [
        create_dummy_property(Density),
        create_dummy_property(Density)
    ]

    dummy_options = PropertyEstimatorOptions()

    request = PropertyEstimatorServer.ServerEstimationRequest(
        estimation_id=str(uuid.uuid4()),
        queued_properties=properties_to_estimate,
        options=dummy_options,
        force_field_id='')

    with tempfile.TemporaryDirectory() as temporary_directory:

        with temporarily_change_directory(temporary_directory):

            # Create a simple calculation backend to test with.
            test_backend = DaskLocalCluster()
            test_backend.start()

            # Create a simple storage backend to test with.
            test_storage = LocalFileStorage()

            layer_directory = 'dummy_layer'
            makedirs(layer_directory)

            def dummy_callback(returned_request):

                assert len(returned_request.estimated_properties) == 1
                assert len(returned_request.exceptions) == 2

            dummy_layer = DummyCalculationLayer()

            # The trailing `True` requests synchronous execution, so the
            # callback's assertions run before this call returns.
            dummy_layer.schedule_calculation(test_backend, test_storage,
                                             layer_directory, request,
                                             dummy_callback, True)
Example #4
def main(n_workers, cpus_per_worker, gpus_per_worker):

    if n_workers <= 0:
        raise ValueError('The number of workers must be greater than 0')
    if cpus_per_worker <= 0:
        raise ValueError('The number of CPUs per worker must be greater than 0')
    if gpus_per_worker < 0:
        raise ValueError('The number of GPUs per worker must be greater than or equal to 0')
    if gpus_per_worker > 0 and gpus_per_worker != cpus_per_worker:
        raise ValueError('The number of GPUs per worker must match the number of '
                         'CPUs per worker.')

    # Set up logging for the propertyestimator.
    setup_timestamp_logging()
    logger = logging.getLogger()

    # Set up the directory structure.
    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set up a backend to run the calculations on with the requested resources.
    if gpus_per_worker <= 0:
        worker_resources = ComputeResources(number_of_threads=cpus_per_worker)
    else:
        worker_resources = ComputeResources(number_of_threads=cpus_per_worker,
                                            number_of_gpus=gpus_per_worker,
                                            preferred_gpu_toolkit=ComputeResources.GPUToolkit.CUDA)

    calculation_backend = DaskLocalCluster(number_of_workers=n_workers,
                                           resources_per_worker=worker_resources)

    # Create a backend to cache simulation files.
    storage_backend = LocalFileStorage(storage_directory)

    # Create an estimation server which will run the calculations.
    logger.info(f'Starting the server with {n_workers} workers, each with '
                f'{cpus_per_worker} CPUs and {gpus_per_worker} GPUs.')

    server = PropertyEstimatorServer(calculation_backend=calculation_backend,
                                     storage_backend=storage_backend,
                                     working_directory=working_directory,
                                     port=8000)

    # Tell the server to start listening for estimation requests.
    server.start_listening_loop()
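
A minimal command-line entry point for main is sketched below; the flag names and defaults are assumptions for illustration, not part of the original script.

import argparse

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Start a property estimation server.')
    parser.add_argument('--workers', type=int, default=1,
                        help='The number of compute workers to spawn.')
    parser.add_argument('--cpus', type=int, default=1,
                        help='The number of CPUs per worker.')
    parser.add_argument('--gpus', type=int, default=0,
                        help='The number of GPUs per worker.')

    args = parser.parse_args()
    main(args.workers, args.cpus, args.gpus)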
Example #5
def setup_server(backend_type=BackendType.LocalCPU,
                 max_number_of_workers=1,
                 conda_environment='propertyestimator',
                 worker_memory=8 * unit.gigabyte,
                 port=8000):

    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    # Remove any existing data.
    if os.path.isdir(working_directory):
        shutil.rmtree(working_directory)

    calculation_backend = None

    if backend_type == BackendType.LocalCPU:
        calculation_backend = DaskLocalCluster(number_of_workers=1)

    elif backend_type == BackendType.LocalGPU:

        calculation_backend = DaskLocalCluster(
            number_of_workers=1,
            resources_per_worker=ComputeResources(
                1, 1, ComputeResources.GPUToolkit.CUDA))

    elif backend_type == BackendType.GPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            number_of_gpus=1,
            preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="05:59")

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            f'conda activate {conda_environment}',
            'module load cuda/9.2'
        ]

        extra_script_options = ['-m "ls-gpu lt-gpu"']

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='gpuqueue',
            setup_script_commands=worker_script_commands,
            extra_script_options=extra_script_options,
            adaptive_interval='1000ms')

    elif backend_type == BackendType.CPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="01:30")

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            f'conda activate {conda_environment}',
        ]

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='cpuqueue',
            setup_script_commands=worker_script_commands,
            adaptive_interval='1000ms')

    storage_backend = LocalFileStorage(storage_directory)

    # Spin up the server object and return it so that the caller can start
    # the listening loop.
    return server.PropertyEstimatorServer(calculation_backend=calculation_backend,
                                          storage_backend=storage_backend,
                                          port=port,
                                          working_directory=working_directory)
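
A hypothetical convenience wrapper (not part of the original module) which picks between the two local backend types based on whether CUDA devices appear to be visible, then delegates to setup_server:

import os

def auto_setup_server(**kwargs):
    # The CUDA_VISIBLE_DEVICES check is only a heuristic assumption for
    # detecting an available GPU.
    has_gpu = bool(os.environ.get('CUDA_VISIBLE_DEVICES'))
    backend = BackendType.LocalGPU if has_gpu else BackendType.LocalCPU
    return setup_server(backend_type=backend, **kwargs)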
Example #6
def setup_server(backend_type=BackendType.LocalCPU, max_number_of_workers=1):
    """Creates a new estimation server object.

    Parameters
    ----------
    backend_type: BackendType
        The type of backend to use.
    max_number_of_workers: int
        The maximum number of compute workers to spin up.

    Returns
    -------
    PropertyEstimatorServer
        The server object.
    """

    # Set the name of the directory in which all temporary files
    # will be generated.
    working_directory = 'working_directory'

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set the name of the directory in which all cached simulation data
    # will be stored.
    storage_directory = 'storage_directory'

    # Set up the backend which will perform any calculations.
    calculation_backend = None

    if backend_type == BackendType.LocalCPU:

        # A backend which will run all calculations on the local machine's CPUs.
        calculation_backend = DaskLocalCluster(number_of_workers=max_number_of_workers)

    elif backend_type == BackendType.LocalGPU:

        # A backend which will run all calculations on the local machine's GPUs.
        compute_resources = ComputeResources(number_of_threads=1, number_of_gpus=1)

        calculation_backend = DaskLocalCluster(number_of_workers=max_number_of_workers,
                                               resources_per_worker=compute_resources)

    elif backend_type == BackendType.LilacGPU:

        # A backend which will run all calculations on the MSKCC `lilac` cluster, taking
        # advantage of the available GPUs.
        queue_resources = QueueWorkerResources(number_of_threads=1,
                                               number_of_gpus=1,
                                               preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
                                               per_thread_memory_limit=5 * unit.gigabyte,
                                               wallclock_time_limit="05:59")

        extra_script_options = [
            '-m "ls-gpu lt-gpu"'
        ]

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            'conda activate forcebalance',
            'module load cuda/9.2'
        ]

        calculation_backend = DaskLSFBackend(minimum_number_of_workers=1,
                                             maximum_number_of_workers=max_number_of_workers,
                                             resources_per_worker=queue_resources,
                                             queue_name='gpuqueue',
                                             setup_script_commands=worker_script_commands,
                                             extra_script_options=extra_script_options,
                                             adaptive_interval='1000ms')

    elif backend_type == BackendType.LilacCPU:

        # A backend which will run all calculations on the MSKCC `lilac` cluster using
        # only CPUs.
        queue_resources = QueueWorkerResources(number_of_threads=1,
                                               per_thread_memory_limit=5 * unit.gigabyte,
                                               wallclock_time_limit="01:30")

        worker_script_commands = [
            'export OE_LICENSE="/home/boothros/oe_license.txt"',
            '. /home/boothros/miniconda3/etc/profile.d/conda.sh',
            'conda activate forcebalance',
        ]

        calculation_backend = DaskLSFBackend(minimum_number_of_workers=1,
                                             maximum_number_of_workers=max_number_of_workers,
                                             resources_per_worker=queue_resources,
                                             queue_name='cpuqueue',
                                             setup_script_commands=worker_script_commands,
                                             adaptive_interval='1000ms')

    # Set up the storage backend.
    storage_backend = LocalFileStorage(storage_directory)

    # Set up the server itself.
    server = PropertyEstimatorServer(calculation_backend=calculation_backend,
                                     storage_backend=storage_backend,
                                     working_directory=working_directory)

    return server
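
Typical usage is a sketch along the following lines, reusing the start_listening_loop entry point shown in Example #4; the worker count is illustrative:

# Create a server backed by the local CPUs and block while it waits
# for incoming estimation requests.
server = setup_server(backend_type=BackendType.LocalCPU,
                      max_number_of_workers=2)
server.start_listening_loop()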
Example #7
def setup_server(backend_type=BackendType.LocalCPU,
                 max_number_of_workers=1,
                 conda_environment='propertyestimator',
                 worker_memory=4 * unit.gigabyte,
                 port=8000,
                 cuda_version='10.1'):
    """A convenience function to sets up an estimation server which will can advantage
    of different compute backends.

    Parameters
    ----------
    backend_type: BackendType
        The type of compute backend to use.
    max_number_of_workers: int
        The maximum number of workers to adaptively insert into
        the queuing system.
    conda_environment: str
        The name of the conda environment in which the propertyestimator
        package is installed.
    worker_memory: Quantity
        The maximum amount of memory to request per worker.
    port: int
        The port that the server should listen for estimation requests on.
    cuda_version: str
        The version of CUDA to use if running on a backend which supports
        GPUs.
    """

    working_directory = 'working_directory'
    storage_directory = 'storage_directory'

    # Remove any existing data.
    if os.path.isdir(working_directory):
        shutil.rmtree(working_directory)

    calculation_backend = None

    if backend_type == BackendType.LocalCPU:
        calculation_backend = DaskLocalCluster(
            number_of_workers=max_number_of_workers)

    elif backend_type == BackendType.LocalGPU:

        calculation_backend = DaskLocalCluster(
            number_of_workers=max_number_of_workers,
            resources_per_worker=ComputeResources(
                1, 1, ComputeResources.GPUToolkit.CUDA))

    elif backend_type == BackendType.GPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            number_of_gpus=1,
            preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="05:59")

        worker_script_commands = [
            f'conda activate {conda_environment}',
            f'module load cuda/{cuda_version}'
        ]

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='gpuqueue',
            setup_script_commands=worker_script_commands,
            adaptive_interval='1000ms')

    elif backend_type == BackendType.CPU:

        queue_resources = QueueWorkerResources(
            number_of_threads=1,
            per_thread_memory_limit=worker_memory,
            wallclock_time_limit="01:30")

        worker_script_commands = [f'conda activate {conda_environment}']

        calculation_backend = DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=max_number_of_workers,
            resources_per_worker=queue_resources,
            queue_name='cpuqueue',
            setup_script_commands=worker_script_commands,
            adaptive_interval='1000ms')

    # Set up a backend to cache simulation data in.
    storage_backend = LocalFileStorage(storage_directory)

    # Spin up the server object.
    # Spin up the server object and return it so that the caller can start
    # the listening loop.
    return server.PropertyEstimatorServer(calculation_backend=calculation_backend,
                                          storage_backend=storage_backend,
                                          port=port,
                                          working_directory=working_directory)
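
An example invocation (a sketch; the worker count and port are illustrative) which targets the LSF GPU queue configured above and then blocks on incoming requests:

gpu_server = setup_server(backend_type=BackendType.GPU,
                          max_number_of_workers=10,
                          conda_environment='propertyestimator',
                          port=8005)
gpu_server.start_listening_loop()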