def test_solvate_existing_structure_protocol():
    """Tests solvating a single methanol molecule in water."""
    import mdtraj

    methanol_component = Component("CO")

    methanol_substance = Substance()
    methanol_substance.add_component(methanol_component, ExactAmount(1))

    water_substance = Substance()
    water_substance.add_component(Component("O"), MoleFraction(1.0))

    with tempfile.TemporaryDirectory() as temporary_directory:
        build_methanol_coordinates = BuildCoordinatesPackmol("build_methanol")
        build_methanol_coordinates.max_molecules = 1
        build_methanol_coordinates.substance = methanol_substance
        build_methanol_coordinates.execute(temporary_directory, ComputeResources())

        methanol_residue_name = build_methanol_coordinates.assigned_residue_names[
            methanol_component.identifier
        ]

        solvate_coordinates = SolvateExistingStructure("solvate_methanol")
        solvate_coordinates.max_molecules = 9
        solvate_coordinates.substance = water_substance
        solvate_coordinates.solute_coordinate_file = (
            build_methanol_coordinates.coordinate_file_path
        )
        solvate_coordinates.execute(temporary_directory, ComputeResources())

        solvated_system = mdtraj.load_pdb(solvate_coordinates.coordinate_file_path)

        assert solvated_system.n_residues == 10
        assert solvated_system.top.residue(0).name == methanol_residue_name
def main(n_workers, cpus_per_worker, gpus_per_worker):
    if n_workers <= 0:
        raise ValueError("The number of workers must be greater than 0")
    if cpus_per_worker <= 0:
        raise ValueError("The number of CPUs per worker must be greater than 0")
    if gpus_per_worker < 0:
        raise ValueError(
            "The number of GPUs per worker must be greater than or equal to 0"
        )
    if 0 < gpus_per_worker != cpus_per_worker:
        raise ValueError(
            "The number of GPUs per worker must match the number of "
            "CPUs per worker."
        )

    # Set up logging for the evaluator.
    setup_timestamp_logging()
    logger = logging.getLogger()

    # Set up the directory structure.
    working_directory = "working_directory"

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set up a backend to run the calculations on with the requested resources.
    if gpus_per_worker <= 0:
        worker_resources = ComputeResources(number_of_threads=cpus_per_worker)
    else:
        worker_resources = ComputeResources(
            number_of_threads=cpus_per_worker,
            number_of_gpus=gpus_per_worker,
            preferred_gpu_toolkit=ComputeResources.GPUToolkit.CUDA,
        )

    calculation_backend = DaskLocalCluster(
        number_of_workers=n_workers, resources_per_worker=worker_resources
    )

    # Create an estimation server which will run the calculations.
    logger.info(
        f"Starting the server with {n_workers} workers, each with "
        f"{cpus_per_worker} CPUs and {gpus_per_worker} GPUs."
    )

    with calculation_backend:
        server = EvaluatorServer(
            calculation_backend=calculation_backend,
            working_directory=working_directory,
            port=8000,
        )

        # Tell the server to start listening for estimation requests.
        server.start()
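# A minimal client-side sketch for talking to the server started by `main`
# above. The `data_set` and `force_field_source` arguments are placeholders
# for any `PhysicalPropertyDataSet` and force field source object; the
# connection settings simply match the port the server listens on.
def submit_request(data_set, force_field_source):
    from openff.evaluator.client import ConnectionOptions, EvaluatorClient

    # Connect to the locally running server (it listens on port 8000).
    connection_options = ConnectionOptions(
        server_address="localhost", server_port=8000
    )
    client = EvaluatorClient(connection_options)

    # Submit the estimation request and block until the results are ready.
    request, _ = client.request_estimate(data_set, force_field_source)
    results, _ = request.results(synchronous=True)

    return results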
def test_run_openmm_simulation_checkpoints():
    import mdtraj

    thermodynamic_state = ThermodynamicState(298 * unit.kelvin, 1.0 * unit.atmosphere)

    with tempfile.TemporaryDirectory() as directory:
        coordinate_path, parameterized_system = _setup_dummy_system(directory)

        # Check that executing twice doesn't run the simulation twice.
        npt_equilibration = OpenMMSimulation("npt_equilibration")
        npt_equilibration.total_number_of_iterations = 1
        npt_equilibration.steps_per_iteration = 4
        npt_equilibration.output_frequency = 1
        npt_equilibration.thermodynamic_state = thermodynamic_state
        npt_equilibration.input_coordinate_file = coordinate_path
        npt_equilibration.parameterized_system = parameterized_system

        npt_equilibration.execute(directory, ComputeResources())
        assert os.path.isfile(npt_equilibration._checkpoint_path)
        npt_equilibration.execute(directory, ComputeResources())

        assert len(npt_equilibration.observables) == 4
        assert (
            len(mdtraj.load(npt_equilibration.trajectory_file_path, top=coordinate_path))
            == 4
        )

        # Make sure that the output files are correctly truncated if more
        # frames than expected are written.
        with open(npt_equilibration._checkpoint_path, "r") as file:
            checkpoint = json.load(file, cls=TypedJSONDecoder)

        # Fake having saved more frames than expected.
        npt_equilibration.steps_per_iteration = 8
        checkpoint.steps_per_iteration = 8
        npt_equilibration.output_frequency = 2
        checkpoint.output_frequency = 2

        with open(npt_equilibration._checkpoint_path, "w") as file:
            json.dump(checkpoint, file, cls=TypedJSONEncoder)

        npt_equilibration.execute(directory, ComputeResources())

        assert len(npt_equilibration.observables) == 4
        assert (
            len(mdtraj.load(npt_equilibration.trajectory_file_path, top=coordinate_path))
            == 4
        )
def test_average_free_energies_protocol():
    """Tests adding together two free energies."""
    compute_resources = ComputeResources(number_of_threads=1)

    delta_g_one = (-10.0 * unit.kilocalorie / unit.mole).plus_minus(
        1.0 * unit.kilocalorie / unit.mole
    )
    delta_g_two = (-20.0 * unit.kilocalorie / unit.mole).plus_minus(
        2.0 * unit.kilocalorie / unit.mole
    )

    thermodynamic_state = ThermodynamicState(298 * unit.kelvin, 1 * unit.atmosphere)

    sum_protocol = AverageFreeEnergies("average_free_energies")
    sum_protocol.values = [delta_g_one, delta_g_two]
    sum_protocol.thermodynamic_state = thermodynamic_state
    sum_protocol.execute("", compute_resources)

    result_value = sum_protocol.result.value.to(unit.kilocalorie / unit.mole)
    result_uncertainty = sum_protocol.result.error.to(unit.kilocalorie / unit.mole)

    assert isinstance(sum_protocol.result, unit.Measurement)
    assert result_value.magnitude == pytest.approx(-20.0, abs=0.2)
    assert result_uncertainty.magnitude == pytest.approx(2.0, abs=0.2)
def test_conditional_group_self_reference():
    """Tests that protocols within a conditional group can access the outputs
    of its parent, such as the current iteration of the group."""
    max_iterations = 10
    criteria = random.randint(1, max_iterations - 1)

    group = ConditionalGroup("conditional_group")
    group.max_iterations = max_iterations

    protocol = DummyProtocol("protocol_a")
    protocol.input_value = ProtocolPath("current_iteration", group.id)

    condition_1 = ConditionalGroup.Condition()
    condition_1.left_hand_value = ProtocolPath("output_value", group.id, protocol.id)
    condition_1.right_hand_value = criteria
    condition_1.type = ConditionalGroup.Condition.Type.GreaterThan

    condition_2 = ConditionalGroup.Condition()
    condition_2.left_hand_value = ProtocolPath("current_iteration", group.id)
    condition_2.right_hand_value = criteria
    condition_2.type = ConditionalGroup.Condition.Type.GreaterThan

    group.add_protocols(protocol)
    group.add_condition(condition_1)
    group.add_condition(condition_2)

    with tempfile.TemporaryDirectory() as directory:
        group.execute(directory, ComputeResources())
        assert protocol.output_value == criteria + 1
def test_compute_state_energy_gradients(tmpdir):
    build_tip3p_smirnoff_force_field().json(os.path.join(tmpdir, "ff.json"))

    _, parameterized_system = _setup_dummy_system(
        tmpdir, Substance.from_components("O"), 10, os.path.join(tmpdir, "ff.json")
    )

    protocol = SolvationYankProtocol("")
    protocol.thermodynamic_state = ThermodynamicState(
        298.15 * unit.kelvin, 1.0 * unit.atmosphere
    )
    protocol.gradient_parameters = [
        ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon")
    ]

    gradients = protocol._compute_state_energy_gradients(
        mdtraj.load_dcd(
            get_data_filename("test/trajectories/water.dcd"),
            get_data_filename("test/trajectories/water.pdb"),
        ),
        parameterized_system.topology,
        parameterized_system.force_field.to_force_field(),
        True,
        ComputeResources(),
    )

    assert len(gradients) == 1
    assert not np.isclose(gradients[0].value, 0.0 * unit.dimensionless)
def test_reweight_statistics():
    number_of_frames = 10

    reduced_potentials = (
        np.ones(number_of_frames) * random.random() * unit.dimensionless
    )
    potentials = (
        np.ones(number_of_frames) * random.random() * unit.kilojoule / unit.mole
    )

    with tempfile.TemporaryDirectory() as directory:
        statistics_path = path.join(directory, "stats.csv")

        statistics_array = StatisticsArray()
        statistics_array[ObservableType.ReducedPotential] = reduced_potentials
        statistics_array[ObservableType.PotentialEnergy] = potentials
        statistics_array.to_pandas_csv(statistics_path)

        reweight_protocol = ReweightStatistics("reduced_potentials")
        reweight_protocol.statistics_type = ObservableType.PotentialEnergy
        reweight_protocol.statistics_paths = statistics_path
        reweight_protocol.reference_reduced_potentials = statistics_path
        reweight_protocol.target_reduced_potentials = statistics_path
        reweight_protocol.bootstrap_uncertainties = True
        reweight_protocol.required_effective_samples = 0
        reweight_protocol.execute(directory, ComputeResources())
def test_conditional_protocol_group_fail():
    with tempfile.TemporaryDirectory() as directory:
        initial_value = 2 * unit.kelvin

        value_protocol_a = DummyProtocol("protocol_a")
        value_protocol_a.input_value = initial_value

        add_values = AddValues("add_values")
        add_values.values = [
            ProtocolPath("output_value", value_protocol_a.id),
            ProtocolPath("output_value", value_protocol_a.id),
        ]

        condition = ConditionalGroup.Condition()
        condition.left_hand_value = ProtocolPath("result", add_values.id)
        condition.right_hand_value = ProtocolPath("output_value", value_protocol_a.id)
        condition.type = ConditionalGroup.Condition.Type.LessThan

        protocol_group = ConditionalGroup("protocol_group")
        protocol_group.conditions.append(condition)
        protocol_group.max_iterations = 10
        protocol_group.add_protocols(value_protocol_a, add_values)

        with pytest.raises(RuntimeError):
            protocol_group.execute(directory, ComputeResources())
def execute(self, root_directory="", calculation_backend=None, compute_resources=None):
    """Executes the workflow.

    Parameters
    ----------
    root_directory: str
        The directory to execute the graph in.
    calculation_backend: CalculationBackend, optional.
        The backend to execute the graph on. This parameter
        is mutually exclusive with `compute_resources`.
    compute_resources: ComputeResources, optional.
        The compute resources to run using. If None and no
        `calculation_backend` is specified, the workflow will
        be executed on a single CPU thread. This parameter
        is mutually exclusive with `calculation_backend`.

    Returns
    -------
    WorkflowResult or Future of WorkflowResult:
        The result of executing this workflow. If executed on a
        `calculation_backend`, the result will be wrapped in a
        `Future` object.
    """
    if calculation_backend is None and compute_resources is None:
        compute_resources = ComputeResources(number_of_threads=1)

    workflow_graph = self.to_graph()
    return workflow_graph.execute(
        root_directory, calculation_backend, compute_resources
    )[0]
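# A minimal usage sketch of the two mutually exclusive execution modes
# described in the docstring above. `build_workflow` is a hypothetical helper
# that returns a ready-to-run `Workflow`; everything else follows the API
# shown in this section.
def run_workflow_locally_and_on_backend(build_workflow):
    # Run on a single CPU thread (no backend supplied).
    local_result = build_workflow().execute(root_directory="local_run")

    # Run on a Dask backend instead; in this mode the call returns a future.
    with DaskLocalCluster() as calculation_backend:
        future = build_workflow().execute(
            root_directory="backend_run", calculation_backend=calculation_backend
        )
        backend_result = future.result()

    return local_result, backend_result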
def test_build_docked_coordinates_protocol():
    """Tests docking a methanol molecule into alpha-Cyclodextrin."""
    if not has_openeye():
        pytest.skip("The `BuildDockedCoordinates` protocol requires OpenEye.")

    ligand_substance = Substance()
    ligand_substance.add_component(
        Component("CO", role=Component.Role.Ligand),
        ExactAmount(1),
    )

    # TODO: This test could likely be made substantially faster
    #       by storing the binary prepared receptor. Would this
    #       be in breach of any oe license terms?
    with tempfile.TemporaryDirectory() as temporary_directory:
        build_docked_coordinates = BuildDockedCoordinates("build_methanol")
        build_docked_coordinates.ligand_substance = ligand_substance
        build_docked_coordinates.number_of_ligand_conformers = 5
        build_docked_coordinates.receptor_coordinate_file = get_data_filename(
            "test/molecules/acd.mol2"
        )
        build_docked_coordinates.execute(temporary_directory, ComputeResources())

        docked_pdb = PDBFile(build_docked_coordinates.docked_complex_coordinate_path)
        assert docked_pdb.topology.getNumResidues() == 2
def test_central_difference_gradient():
    with tempfile.TemporaryDirectory() as directory:
        gradient_key = ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon")

        reverse_parameter = -random.random() * unit.kelvin
        reverse_observable = -random.random() * unit.kelvin
        forward_parameter = random.random() * unit.kelvin
        forward_observable = random.random() * unit.kelvin

        central_difference = CentralDifferenceGradient("central_difference")
        central_difference.parameter_key = gradient_key
        central_difference.reverse_observable_value = reverse_observable
        central_difference.reverse_parameter_value = reverse_parameter
        central_difference.forward_observable_value = forward_observable
        central_difference.forward_parameter_value = forward_parameter

        central_difference.execute(directory, ComputeResources())

        assert central_difference.gradient.value == (
            (forward_observable - reverse_observable)
            / (forward_parameter - reverse_parameter)
        )
def main():
    # Set up logging for the evaluator.
    setup_timestamp_logging()

    # Set up the directory structure.
    working_directory = "working_directory"

    # Remove any existing data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # Set up a backend to run the calculations on with the requested resources.
    worker_resources = ComputeResources(number_of_threads=1)

    calculation_backend = DaskLocalCluster(
        number_of_workers=1, resources_per_worker=worker_resources
    )

    with calculation_backend:
        server = EvaluatorServer(
            calculation_backend=calculation_backend,
            working_directory=working_directory,
            port=8000,
        )

        # Tell the server to start listening for estimation requests.
        server.start()
def test_conditional_protocol_group():
    with tempfile.TemporaryDirectory() as directory:
        initial_value = 2 * unit.kelvin

        value_protocol_a = DummyProtocol("protocol_a")
        value_protocol_a.input_value = initial_value

        add_values = AddValues("add_values")
        add_values.values = [
            ProtocolPath("output_value", value_protocol_a.id),
            ProtocolPath("output_value", value_protocol_a.id),
        ]

        condition = ConditionalGroup.Condition()
        condition.left_hand_value = ProtocolPath("result", add_values.id)
        condition.right_hand_value = ProtocolPath("output_value", value_protocol_a.id)
        condition.type = ConditionalGroup.Condition.Type.GreaterThan

        protocol_group = ConditionalGroup("protocol_group")
        protocol_group.conditions.append(condition)
        protocol_group.add_protocols(value_protocol_a, add_values)

        protocol_group.execute(directory, ComputeResources())

        assert (
            protocol_group.get_value(ProtocolPath("result", add_values.id))
            == 4 * unit.kelvin
        )
def test_divide_values_protocol(value, divisor):
    with tempfile.TemporaryDirectory() as temporary_directory:
        divide_quantities = DivideValue("divide")
        divide_quantities.value = value
        divide_quantities.divisor = divisor
        divide_quantities.execute(temporary_directory, ComputeResources())
        assert divide_quantities.result == value / divisor
def __init__(self, number_of_workers=1, resources_per_worker=ComputeResources()):
    """Constructs a new BaseDaskBackend object."""
    super().__init__(number_of_workers, resources_per_worker)

    self._cluster = None
    self._client = None
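# An illustrative construction of a concrete Dask backend using the signature
# above, mirroring the GPU branch of the server script in this section: one
# worker backed by a single CPU thread and a single CUDA GPU.
gpu_resources = ComputeResources(
    number_of_threads=1,
    number_of_gpus=1,
    preferred_gpu_toolkit=ComputeResources.GPUToolkit.CUDA,
)
calculation_backend = DaskLocalCluster(
    number_of_workers=1, resources_per_worker=gpu_resources
)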
def test_multiply_values_protocol(value, multiplier):
    with tempfile.TemporaryDirectory() as temporary_directory:
        multiply_quantities = MultiplyValue("multiply")
        multiply_quantities.value = value
        multiply_quantities.multiplier = multiplier
        multiply_quantities.execute(temporary_directory, ComputeResources())
        assert multiply_quantities.result == value * multiplier
def test_add_values_protocol(values):
    with tempfile.TemporaryDirectory() as temporary_directory:
        add_quantities = AddValues("add")
        add_quantities.values = values
        add_quantities.execute(temporary_directory, ComputeResources())
        assert add_quantities.result == reduce(operator.add, values)
def _get_options_dictionary(self, available_resources):
    """Returns a dictionary of options which will be serialized
    to a yaml file and passed to YANK.

    Parameters
    ----------
    available_resources: ComputeResources
        The resources available to execute on.

    Returns
    -------
    dict of str and Any
        A yaml compatible dictionary of YANK options.
    """
    from openforcefield.utils import quantity_to_string

    platform_name = "CPU"

    if available_resources.number_of_gpus > 0:
        # A platform which runs on GPUs has been requested.
        from openff.evaluator.backends import ComputeResources

        toolkit_enum = ComputeResources.GPUToolkit(
            available_resources.preferred_gpu_toolkit
        )

        # Both branches must yield a plain string so that the option
        # serializes cleanly to YAML.
        platform_name = (
            "CUDA" if toolkit_enum == ComputeResources.GPUToolkit.CUDA else "OpenCL"
        )

    return {
        "verbose": self.verbose,
        "output_dir": ".",
        "temperature": quantity_to_string(
            pint_quantity_to_openmm(self.thermodynamic_state.temperature)
        ),
        "pressure": quantity_to_string(
            pint_quantity_to_openmm(self.thermodynamic_state.pressure)
        ),
        "minimize": True,
        "number_of_equilibration_iterations": self.number_of_equilibration_iterations,
        "default_number_of_iterations": self.number_of_iterations,
        "default_nsteps_per_iteration": self.steps_per_iteration,
        "checkpoint_interval": self.checkpoint_interval,
        "default_timestep": quantity_to_string(
            pint_quantity_to_openmm(self.timestep)
        ),
        "annihilate_electrostatics": True,
        "annihilate_sterics": False,
        "platform": platform_name,
    }
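# A sketch of how the platform option above is resolved from the available
# resources, written in the style of the tests in this section. `protocol`
# stands in for any fully configured `SolvationYankProtocol` instance (its
# thermodynamic state and timestep must be set, since the options dictionary
# reads them); the assertions follow directly from the branch logic above.
def check_platform_selection(protocol):
    gpu_resources = ComputeResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=ComputeResources.GPUToolkit.CUDA,
    )
    assert protocol._get_options_dictionary(gpu_resources)["platform"] == "CUDA"

    cpu_resources = ComputeResources(number_of_threads=1)
    assert protocol._get_options_dictionary(cpu_resources)["platform"] == "CPU"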
def test_subtract_values_protocol(values):
    with tempfile.TemporaryDirectory() as temporary_directory:
        sub_quantities = SubtractValues("sub")
        sub_quantities.value_b = values[1]
        sub_quantities.value_a = values[0]
        sub_quantities.execute(temporary_directory, ComputeResources())
        assert sub_quantities.result == values[1] - values[0]
def test_protocol_group_exceptions():
    exception_protocol = ExceptionProtocol("exception_protocol")

    protocol_group = ProtocolGroup("protocol_group")
    protocol_group.add_protocols(exception_protocol)

    with tempfile.TemporaryDirectory() as directory:
        with pytest.raises(RuntimeError):
            protocol_group.execute(directory, ComputeResources())
def test_extract_average_statistic():
    statistics_path = get_data_filename("test/statistics/stats_pandas.csv")

    with tempfile.TemporaryDirectory() as temporary_directory:
        extract_protocol = ExtractAverageStatistic("extract_protocol")
        extract_protocol.statistics_path = statistics_path
        extract_protocol.statistics_type = ObservableType.PotentialEnergy
        extract_protocol.execute(temporary_directory, ComputeResources())
def test_run_energy_minimisation():
    with tempfile.TemporaryDirectory() as directory:
        coordinate_path, parameterized_system = _setup_dummy_system(directory)

        energy_minimisation = OpenMMEnergyMinimisation("energy_minimisation")
        energy_minimisation.input_coordinate_file = coordinate_path
        energy_minimisation.parameterized_system = parameterized_system
        energy_minimisation.execute(directory, ComputeResources())
        assert path.isfile(energy_minimisation.output_coordinate_file)
def test_reweight_observables():
    with tempfile.TemporaryDirectory() as directory:
        reweight_protocol = ReweightObservable("")
        reweight_protocol.observable = ObservableArray(value=np.zeros(10) * unit.kelvin)
        reweight_protocol.reference_reduced_potentials = [
            ObservableArray(value=np.zeros(10) * unit.dimensionless)
        ]
        reweight_protocol.frame_counts = [10]
        reweight_protocol.target_reduced_potentials = ObservableArray(
            value=np.zeros(10) * unit.dimensionless
        )
        reweight_protocol.bootstrap_uncertainties = True
        reweight_protocol.required_effective_samples = 0
        reweight_protocol.execute(directory, ComputeResources())
def test_concatenate_statistics():
    statistics_path = get_data_filename("test/statistics/stats_pandas.csv")
    original_array = StatisticsArray.from_pandas_csv(statistics_path)

    with tempfile.TemporaryDirectory() as temporary_directory:
        concatenate_protocol = ConcatenateStatistics("concatenate_protocol")
        concatenate_protocol.input_statistics_paths = [statistics_path, statistics_path]
        concatenate_protocol.execute(temporary_directory, ComputeResources())

        final_array = StatisticsArray.from_pandas_csv(
            concatenate_protocol.output_statistics_path
        )
        assert len(final_array) == len(original_array) * 2
def test_protocol_group_resume():
    """A test that protocol groups can recover after being killed
    (e.g. by a worker being killed due to hitting a wallclock limit).
    """
    compute_resources = ComputeResources()

    # Fake a protocol group which executes the first
    # two protocols and then 'gets killed'.
    protocol_a = DummyInputOutputProtocol("protocol_a")
    protocol_a.input_value = 1
    protocol_b = DummyInputOutputProtocol("protocol_b")
    protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)

    protocol_group_a = ProtocolGroup("group_a")
    protocol_group_a.add_protocols(protocol_a, protocol_b)

    protocol_graph = ProtocolGraph()
    protocol_graph.add_protocols(protocol_group_a)
    protocol_graph.execute("graph_a", compute_resources=compute_resources)

    # Remove the output file so it appears that the protocol group had not
    # completed.
    os.unlink(
        os.path.join(
            "graph_a", protocol_group_a.id, f"{protocol_group_a.id}_output.json"
        )
    )

    # Build the 'full' group with the last two protocols which
    # 'had not been executed' after the group was 'killed'.
    protocol_a = DummyInputOutputProtocol("protocol_a")
    protocol_a.input_value = 1
    protocol_b = DummyInputOutputProtocol("protocol_b")
    protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)
    protocol_c = DummyInputOutputProtocol("protocol_c")
    protocol_c.input_value = ProtocolPath("output_value", protocol_b.id)
    protocol_d = DummyInputOutputProtocol("protocol_d")
    protocol_d.input_value = ProtocolPath("output_value", protocol_c.id)

    protocol_group_a = ProtocolGroup("group_a")
    protocol_group_a.add_protocols(protocol_a, protocol_b, protocol_c, protocol_d)

    protocol_graph = ProtocolGraph()
    protocol_graph.add_protocols(protocol_group_a)
    protocol_graph.execute("graph_a", compute_resources=compute_resources)

    assert all(x != UNDEFINED for x in protocol_group_a.outputs.values())
def test_weight_by_mole_fraction_protocol(component_smiles, value):
    full_substance = Substance.from_components("C", "CC", "CCC")
    component = Substance.from_components(component_smiles)

    mole_fraction = next(
        iter(full_substance.get_amounts(component.components[0].identifier))
    ).value

    with tempfile.TemporaryDirectory() as temporary_directory:
        weight_protocol = WeightByMoleFraction("weight")
        weight_protocol.value = value
        weight_protocol.full_substance = full_substance
        weight_protocol.component = component
        weight_protocol.execute(temporary_directory, ComputeResources())
        assert weight_protocol.weighted_value == value * mole_fraction
def test_protocol_group_execution():
    protocol_a = DummyInputOutputProtocol("protocol_a")
    protocol_a.input_value = 1
    protocol_b = DummyInputOutputProtocol("protocol_b")
    protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)

    protocol_group = ProtocolGroup("protocol_group")
    protocol_group.add_protocols(protocol_a, protocol_b)

    with tempfile.TemporaryDirectory() as directory:
        protocol_group.execute(directory, ComputeResources())

    value_path = ProtocolPath("output_value", protocol_group.id, protocol_b.id)
    final_value = protocol_group.get_value(value_path)

    assert final_value == protocol_a.input_value
def test_calculate_reduced_potential_openmm():
    substance = Substance.from_components("O")
    thermodynamic_state = ThermodynamicState(298 * unit.kelvin, 1.0 * unit.atmosphere)

    with tempfile.TemporaryDirectory() as directory:
        force_field_path = path.join(directory, "ff.json")

        with open(force_field_path, "w") as file:
            file.write(build_tip3p_smirnoff_force_field().json())

        build_coordinates = BuildCoordinatesPackmol("build_coordinates")
        build_coordinates.max_molecules = 10
        build_coordinates.mass_density = 0.05 * unit.grams / unit.milliliters
        build_coordinates.substance = substance
        build_coordinates.execute(directory, None)

        assign_parameters = BuildSmirnoffSystem("assign_parameters")
        assign_parameters.force_field_path = force_field_path
        assign_parameters.coordinate_file_path = build_coordinates.coordinate_file_path
        assign_parameters.substance = substance
        assign_parameters.execute(directory, None)

        reduced_potentials = OpenMMReducedPotentials("reduced_potentials")
        reduced_potentials.substance = substance
        reduced_potentials.thermodynamic_state = thermodynamic_state
        reduced_potentials.reference_force_field_paths = [force_field_path]
        reduced_potentials.system_path = assign_parameters.system_path
        reduced_potentials.trajectory_file_path = get_data_filename(
            "test/trajectories/water.dcd"
        )
        reduced_potentials.coordinate_file_path = get_data_filename(
            "test/trajectories/water.pdb"
        )
        reduced_potentials.kinetic_energies_path = get_data_filename(
            "test/statistics/stats_pandas.csv"
        )
        reduced_potentials.high_precision = False
        reduced_potentials.execute(directory, ComputeResources())

        assert path.isfile(reduced_potentials.statistics_file_path)

        final_array = StatisticsArray.from_pandas_csv(
            reduced_potentials.statistics_file_path
        )
        assert ObservableType.ReducedPotential in final_array
def test_extract_uncorrelated_statistics_data():
    statistics_path = get_data_filename("test/statistics/stats_pandas.csv")
    original_array = StatisticsArray.from_pandas_csv(statistics_path)

    with tempfile.TemporaryDirectory() as temporary_directory:
        extract_protocol = ExtractUncorrelatedStatisticsData("extract_protocol")
        extract_protocol.input_statistics_path = statistics_path
        extract_protocol.equilibration_index = 2
        extract_protocol.statistical_inefficiency = 2.0
        extract_protocol.execute(temporary_directory, ComputeResources())

        final_array = StatisticsArray.from_pandas_csv(
            extract_protocol.output_statistics_path
        )
        assert len(final_array) == (len(original_array) - 2) / 2
        assert (
            extract_protocol.number_of_uncorrelated_samples
            == (len(original_array) - 2) / 2
        )
def test_run_openmm_simulation():
    thermodynamic_state = ThermodynamicState(298 * unit.kelvin, 1.0 * unit.atmosphere)

    with tempfile.TemporaryDirectory() as directory:
        coordinate_path, parameterized_system = _setup_dummy_system(directory)

        npt_equilibration = OpenMMSimulation("npt_equilibration")
        npt_equilibration.steps_per_iteration = 2
        npt_equilibration.output_frequency = 1
        npt_equilibration.thermodynamic_state = thermodynamic_state
        npt_equilibration.input_coordinate_file = coordinate_path
        npt_equilibration.parameterized_system = parameterized_system

        npt_equilibration.execute(directory, ComputeResources())

        assert path.isfile(npt_equilibration.output_coordinate_file)
        assert path.isfile(npt_equilibration.trajectory_file_path)
        assert len(npt_equilibration.observables) == 2