import tempfile

# NOTE: the imports below assume the `openff.evaluator` package layout.
from openff.evaluator.backends.dask import DaskLocalCluster
from openff.evaluator.client import EvaluatorClient, Request, RequestResult
from openff.evaluator.datasets import PhysicalPropertyDataSet
from openff.evaluator.forcefield import SmirnoffForceFieldSource
from openff.evaluator.server import EvaluatorServer
from openff.evaluator.utils.utils import temporarily_change_directory


def test_submission():

    with tempfile.TemporaryDirectory() as directory:

        with temporarily_change_directory(directory):

            with DaskLocalCluster() as calculation_backend:

                # Spin up a server instance.
                server = EvaluatorServer(
                    calculation_backend=calculation_backend,
                    working_directory=directory,
                )

                with server:

                    # Connect a client.
                    client = EvaluatorClient()

                    # Submit an empty data set.
                    force_field_path = "smirnoff99Frosst-1.1.0.offxml"
                    force_field_source = SmirnoffForceFieldSource.from_path(
                        force_field_path
                    )

                    request, error = client.request_estimate(
                        PhysicalPropertyDataSet(), force_field_source
                    )
                    assert error is None
                    assert isinstance(request, Request)

                    # Retrieve the (trivially complete) results for the empty set.
                    result, error = request.results(polling_interval=0.01)
                    assert error is None
                    assert isinstance(result, RequestResult)
def main(): setup_timestamp_logging() # Load in the force field force_field_path = "openff-1.0.0-refit.offxml" force_field_source = SmirnoffForceFieldSource.from_path(force_field_path) # Load in the test set. data_set = PhysicalPropertyDataSet.from_json("full_set.json") # Set up a server object to run the calculations using. working_directory = "working_directory" # Set up a backend to run the calculations on. This assume running # on a HPC resources with the LSF queue system installed. queue_resources = QueueWorkerResources( number_of_threads=1, number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=5 * unit.gigabyte, wallclock_time_limit="05:59", ) worker_script_commands = [ "conda activate forcebalance", "module load cuda/10.1" ] calculation_backend = DaskLSFBackend( minimum_number_of_workers=1, maximum_number_of_workers=50, resources_per_worker=queue_resources, queue_name="gpuqueue", setup_script_commands=worker_script_commands, adaptive_interval="1000ms", ) with calculation_backend: server = EvaluatorServer( calculation_backend=calculation_backend, working_directory=working_directory, port=8002, ) with server: # Request the estimates. client = EvaluatorClient(ConnectionOptions(server_port=8002)) request, _ = client.request_estimate( property_set=data_set, force_field_source=force_field_source, ) # Wait for the results. results, _ = request.results(True, 5) results.json(f"results.json")
def main(): setup_timestamp_logging() working_directory = "working_directory" # Remove any existing data. if path.isdir(working_directory): shutil.rmtree(working_directory) # Set up a backend to run the calculations on. This assume running # on a HPC resources with the LSF queue system installed. queue_resources = QueueWorkerResources( number_of_threads=1, number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=5 * unit.gigabyte, wallclock_time_limit="05:59", ) worker_script_commands = [ "conda activate forcebalance", "module load cuda/10.1" ] calculation_backend = DaskLSFBackend( minimum_number_of_workers=1, maximum_number_of_workers=14, resources_per_worker=queue_resources, queue_name="gpuqueue", setup_script_commands=worker_script_commands, adaptive_interval="1000ms", ) with calculation_backend: server = EvaluatorServer( calculation_backend=calculation_backend, working_directory=working_directory, port=8000, ) # Tell the server to start listening for estimation requests. server.start()