Example #1
def main():

    # Set up logging for the evaluator.
    setup_timestamp_logging()

    # Set up the directory structure.
    working_directory = "working_directory"

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set up a backend with the requested resources to run the calculations on.
    worker_resources = ComputeResources(number_of_threads=1)

    calculation_backend = DaskLocalCluster(
        number_of_workers=1, resources_per_worker=worker_resources)

    with calculation_backend:

        server = EvaluatorServer(
            calculation_backend=calculation_backend,
            working_directory=working_directory,
            port=8000,
        )

        # Tell the server to start listening for estimation requests.
        server.start()
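
For context, the server started above does nothing on its own until a client connects to the port it listens on and submits an estimation request. A minimal client-side sketch, reusing only the client API that appears in the later examples (EvaluatorClient, ConnectionOptions, RequestOptions, request_estimate) and assuming a data_set and force_field_source have already been loaded, might look as follows:

# Minimal client-side sketch; `data_set` and `force_field_source` are assumed
# to have been loaded beforehand, as in the later examples.
client = EvaluatorClient(ConnectionOptions(server_port=8000))

options = RequestOptions()
options.calculation_layers = ["SimulationLayer"]

# Submit the request and block until the server has finished estimating.
request, _ = client.request_estimate(
    property_set=data_set,
    force_field_source=force_field_source,
    options=options,
)
results, _ = request.results(synchronous=True, polling_interval=30)
results.json("results.json", True)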
Example #2
def main(n_workers, cpus_per_worker, gpus_per_worker):

    if n_workers <= 0:
        raise ValueError("The number of workers must be greater than 0")
    if cpus_per_worker <= 0:
        raise ValueError("The number of CPUs per worker must be greater than 0")
    if gpus_per_worker < 0:
        raise ValueError(
            "The number of GPUs per worker must be greater than or equal to 0"
        )
    if gpus_per_worker > 0 and gpus_per_worker != cpus_per_worker:
        raise ValueError(
            "The number of GPUs per worker must match the number of "
            "CPUs per worker."
        )

    # Set up logging for the evaluator.
    setup_timestamp_logging()
    logger = logging.getLogger()

    # Set up the directory structure.
    working_directory = "working_directory"

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set up a backend with the requested resources to run the calculations on.
    if gpus_per_worker <= 0:
        worker_resources = ComputeResources(number_of_threads=cpus_per_worker)
    else:
        worker_resources = ComputeResources(
            number_of_threads=cpus_per_worker,
            number_of_gpus=gpus_per_worker,
            preferred_gpu_toolkit=ComputeResources.GPUToolkit.CUDA,
        )

    calculation_backend = DaskLocalCluster(
        number_of_workers=n_workers, resources_per_worker=worker_resources
    )

    # Create an estimation server which will run the calculations.
    logger.info(
        f"Starting the server with {n_workers} workers, each with "
        f"{cpus_per_worker} CPUs and {gpus_per_worker} GPUs."
    )

    with calculation_backend:

        server = EvaluatorServer(
            calculation_backend=calculation_backend,
            working_directory=working_directory,
            port=8000,
        )

        # Tell the server to start listening for estimation requests.
        server.start()
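
The main function above expects the worker counts to be passed in, but the example does not show how the script is invoked. A small, purely illustrative entry point using the standard argparse module could look like this (the flag names are assumptions, not taken from the original script):

# Hypothetical command-line wrapper for main(); the flag names are
# illustrative only.
import argparse

if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        description="Start an EvaluatorServer with the requested resources."
    )
    parser.add_argument("--workers", type=int, default=1)
    parser.add_argument("--cpus-per-worker", type=int, default=1)
    parser.add_argument("--gpus-per-worker", type=int, default=0)

    arguments = parser.parse_args()
    main(arguments.workers, arguments.cpus_per_worker, arguments.gpus_per_worker)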
Example #3
def main():

    setup_timestamp_logging()

    # Load in the force field
    force_field_path = "smirnoff99Frosst-1.1.0.offxml"
    force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)

    # Load in the data set containing the pure and binary properties.
    data_set = PhysicalPropertyDataSet.from_json("pure_data_set.json")
    data_set.merge(PhysicalPropertyDataSet.from_json("binary_data_set.json"))

    # Set up a server object that will run the calculations.
    server = setup_server(backend_type=BackendType.LocalGPU,
                          max_number_of_workers=1,
                          port=8001)

    with server:

        # Request the estimates.
        property_estimator = EvaluatorClient(
            ConnectionOptions(server_port=8001))

        for calculation_layer in ["SimulationLayer", "ReweightingLayer"]:

            options = RequestOptions()
            options.calculation_layers = [calculation_layer]

            parameter_gradient_keys = [
                ParameterGradientKey(tag="vdW",
                                     smirks="[#6X4:1]",
                                     attribute="epsilon"),
                ParameterGradientKey(tag="vdW",
                                     smirks="[#6X4:1]",
                                     attribute="rmin_half"),
            ]

            request, _ = property_estimator.request_estimate(
                property_set=data_set,
                force_field_source=force_field_source,
                options=options,
                parameter_gradient_keys=parameter_gradient_keys,
            )

            # Wait for the results.
            results, _ = request.results(True, 5)

            layer_name = re.sub(r"(?<!^)(?=[A-Z])", "_",
                                calculation_layer).lower()
            results.json(f"pure_binary_{layer_name}.json", True)
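
Because each layer's estimates are written out with results.json(...), the output can be inspected afterwards without the evaluator stack. A quick sketch using only the standard library (the file name follows from the CamelCase-to-snake_case conversion above; the exact keys depend on how the results object serializes itself):

# Quick inspection of one of the saved result files using only the
# standard library; the layout of the JSON depends on the results object.
import json

with open("pure_binary_simulation_layer.json") as file:
    results = json.load(file)

print(sorted(results))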
Example #4
def main():

    setup_timestamp_logging()

    # Load in the force field
    force_field_path = "smirnoff99Frosst-1.1.0.offxml"
    force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)

    # Load in the data set containing the three solvation free energies.
    data_set = PhysicalPropertyDataSet.from_json("hydration_data_set.json")
    data_set.json("hydration_data_set.json", format=True)

    # Set up a server object that will run the calculations.
    server = setup_server(backend_type=BackendType.LocalGPU,
                          max_number_of_workers=1,
                          port=8002)

    with server:

        # Request the estimates.
        property_estimator = EvaluatorClient(
            ConnectionOptions(server_port=8002))

        options = RequestOptions()
        options.calculation_layers = ["SimulationLayer"]
        options.add_schema("SimulationLayer", "SolvationFreeEnergy",
                           _get_fixed_lambda_schema())

        request, _ = property_estimator.request_estimate(
            property_set=data_set,
            force_field_source=force_field_source,
            options=options,
        )

        # Wait for the results.
        results, _ = request.results(True, 60)

        # Save the result to file.
        results.json("results.json", True)
Example #5
def main():

    setup_timestamp_logging()

    # Retrieve the current version.
    version = evaluator.__version__.replace(".", "-").replace("v", "")

    if "+" in version:
        version = "latest"

    # Create a new directory to store the current version's results in.
    os.makedirs(os.path.join(version, "results"))

    with temporarily_change_directory(version):

        # Load in the force field
        force_field = ForceField(
            "openff-1.2.0.offxml",
            get_data_filename("forcefield/tip3p.offxml"),
        )

        force_field_source = SmirnoffForceFieldSource.from_object(force_field)
        force_field_source.json("force-field.json")

        # Load in the data set, retaining only a specific host / guest pair.
        binding_affinity = TaproomDataSet(
            host_codes=["acd"],
            guest_codes=["bam"],
            default_ionic_strength=150 * unit.millimolar,
        ).properties[0]

        # Set up the calculation
        schema = HostGuestBindingAffinity.default_paprika_schema(
            n_solvent_molecules=2000).workflow_schema
        schema.replace_protocol_types(
            {
                "BaseBuildSystem": (
                    "BuildSmirnoffSystem"
                    if isinstance(force_field_source, SmirnoffForceFieldSource)
                    else "BuildTLeapSystem"
                    if isinstance(force_field_source, TLeapForceFieldSource)
                    else "BaseBuildSystem"
                )
            }
        )

        metadata = Workflow.generate_default_metadata(binding_affinity,
                                                      "force-field.json",
                                                      UNDEFINED)

        workflow = Workflow.from_schema(schema, metadata, "acd_bam")

        # Run the calculation
        with DaskLSFBackend(
                minimum_number_of_workers=1,
                maximum_number_of_workers=50,
                resources_per_worker=QueueWorkerResources(
                    number_of_gpus=1,
                    preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
                    per_thread_memory_limit=5 * unit.gigabyte,
                    wallclock_time_limit="05:59",
                ),
                setup_script_commands=[
                    "conda activate openff-evaluator-paprika",
                    "module load cuda/10.0",
                ],
                queue_name="gpuqueue",
        ) as calculation_backend:

            results = workflow.execute(
                root_directory="workflow",
                calculation_backend=calculation_backend).result()

        # Save the results
        results.json("results.json", format=True)
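
The temporarily_change_directory helper used here (and again in the next example) is not defined in the snippet. A minimal stand-in with the same apparent behaviour, written as a plain context manager, is sketched below; it is an illustrative equivalent, not necessarily the library's own implementation:

# Illustrative stand-in for temporarily_change_directory; not necessarily
# the library's own implementation.
import os
from contextlib import contextmanager

@contextmanager
def temporarily_change_directory(directory):
    """Change into `directory` for the duration of the block, then change back."""
    original_directory = os.getcwd()
    os.chdir(directory)

    try:
        yield
    finally:
        os.chdir(original_directory)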
Example #6
def main():

    setup_timestamp_logging()

    # Retrieve the current version.
    version = evaluator.__version__.replace(".", "-").replace("v", "")

    if "+" in version:
        version = "latest"

    # Create a new directory to store the current version's results in.
    os.makedirs(os.path.join(version, "results"))

    with temporarily_change_directory(version):

        with DaskLSFBackend(
                minimum_number_of_workers=1,
                maximum_number_of_workers=12,
                resources_per_worker=QueueWorkerResources(
                    number_of_gpus=1,
                    preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
                    per_thread_memory_limit=5 * unit.gigabyte,
                    wallclock_time_limit="05:59",
                ),
                setup_script_commands=[
                    f"conda activate openff-evaluator-{version}",
                    "module load cuda/10.0",
                ],
                queue_name="gpuqueue",
        ) as calculation_backend:

            with EvaluatorServer(
                    calculation_backend,
                    working_directory="outputs",
                    storage_backend=LocalFileStorage("cached-data"),
            ):

                client = EvaluatorClient()

                for allowed_layer in ["SimulationLayer", "ReweightingLayer"]:

                    data_set = define_data_set(
                        allowed_layer == "ReweightingLayer")

                    options = RequestOptions()
                    options.calculation_layers = [allowed_layer]
                    options.calculation_schemas = {
                        property_type: {}
                        for property_type in data_set.property_types
                    }

                    if allowed_layer == "SimulationLayer":

                        options.add_schema(
                            "SimulationLayer",
                            "SolvationFreeEnergy",
                            solvation_free_energy_schema(),
                        )

                    request, _ = client.request_estimate(
                        data_set,
                        ForceField("openff-1.2.0.offxml"),
                        options,
                        parameter_gradient_keys=[
                            ParameterGradientKey("vdW", smirks, attribute)
                            for smirks in [
                                "[#1:1]-[#6X4]",
                                "[#1:1]-[#6X4]-[#7,#8,#9,#16,#17,#35]",
                                "[#1:1]-[#8]",
                                "[#6X4:1]",
                                "[#8X2H1+0:1]",
                                "[#1]-[#8X2H2+0:1]-[#1]",
                            ] for attribute in ["epsilon", "rmin_half"]
                        ],
                    )

                    results, _ = request.results(synchronous=True,
                                                 polling_interval=60)
                    results.json(
                        os.path.join("results", f"{allowed_layer}.json"))