def test_average_free_energies_protocol():
    """Tests adding together two free energies."""

    kcal_mol = unit.kilocalorie / unit.mole

    def _free_energy(mean, uncertainty, gradient):
        # Build an observable with a single vdW sigma gradient attached.
        return Observable(
            value=(mean * kcal_mol).plus_minus(uncertainty * kcal_mol),
            gradients=[
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
                    value=gradient * kcal_mol / unit.angstrom,
                )
            ],
        )

    protocol = AverageFreeEnergies("")
    protocol.values = [_free_energy(-10.0, 1.0, 0.1), _free_energy(-20.0, 2.0, 0.2)]
    protocol.thermodynamic_state = ThermodynamicState(
        298 * unit.kelvin, 1 * unit.atmosphere
    )
    protocol.execute()

    assert isinstance(protocol.result, Observable)

    value = protocol.result.value.to(kcal_mol)
    uncertainty = protocol.result.error.to(kcal_mol)

    # The Boltzmann-weighted average is dominated by the lower free energy.
    assert value.magnitude == pytest.approx(-20.0, abs=0.2)
    assert uncertainty.magnitude == pytest.approx(2.0, abs=0.2)
    assert protocol.confidence_intervals[0] > value > protocol.confidence_intervals[1]

    gradient_value = protocol.result.gradients[0].value.to(kcal_mol / unit.angstrom)

    beta = 1.0 / (298.0 * unit.kelvin * unit.molar_gas_constant).to(kcal_mol)
    weight_one = np.exp(-beta.magnitude * -10.0)
    weight_two = np.exp(-beta.magnitude * -20.0)

    # The gradient should be the Boltzmann-weighted average of the inputs.
    assert np.isclose(
        gradient_value.magnitude,
        (0.1 * weight_one + 0.2 * weight_two) / (weight_one + weight_two),
    )
def test_gradient_division():
    """Dividing a gradient by a scalar scales its value; dividing one
    gradient by another is unsupported and raises a ``ValueError``."""
    epsilon_key = ParameterGradientKey("vdW", "[#1:1]", "epsilon")

    halved = ParameterGradient(epsilon_key, 2.0 * unit.kelvin) / 2.0
    assert np.isclose(halved.value.to(unit.kelvin).magnitude, 1.0)

    divisor = ParameterGradient(epsilon_key, 1.0 * unit.kelvin)

    with pytest.raises(ValueError):
        ParameterGradient(epsilon_key, 2.0 * unit.kelvin) / divisor
def main():
    """Estimate the pure and binary property data sets with both the
    simulation and reweighting layers, saving each layer's results to disk."""
    setup_timestamp_logging()

    # Load in the force field to estimate with.
    force_field_source = SmirnoffForceFieldSource.from_path(
        "smirnoff99Frosst-1.1.0.offxml"
    )

    # Combine the pure and binary property data sets into one.
    data_set = PhysicalPropertyDataSet.from_json("pure_data_set.json")
    data_set.merge(PhysicalPropertyDataSet.from_json("binary_data_set.json"))

    # Set up a server object to run the calculations using.
    server = setup_server(
        backend_type=BackendType.LocalGPU, max_number_of_workers=1, port=8001
    )

    with server:
        client = EvaluatorClient(ConnectionOptions(server_port=8001))

        # The vdW parameters to compute property gradients with respect to.
        gradient_keys = [
            ParameterGradientKey(tag="vdW", smirks="[#6X4:1]", attribute="epsilon"),
            ParameterGradientKey(tag="vdW", smirks="[#6X4:1]", attribute="rmin_half"),
        ]

        for layer in ["SimulationLayer", "ReweightingLayer"]:
            options = RequestOptions()
            options.calculation_layers = [layer]

            request, _ = client.request_estimate(
                property_set=data_set,
                force_field_source=force_field_source,
                options=options,
                parameter_gradient_keys=gradient_keys,
            )

            # Wait for the results.
            results, _ = request.results(True, 5)

            # e.g. "SimulationLayer" -> "simulation_layer"
            layer_name = re.sub(r"(?<!^)(?=[A-Z])", "_", layer).lower()
            results.json(f"pure_binary_{layer_name}.json", True)
def test_central_difference_gradient():
    """The gradient should equal the finite-difference ratio
    (forward - reverse observable) / (forward - reverse parameter)."""
    with tempfile.TemporaryDirectory() as scratch_directory:
        key = ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon")

        # Randomized end points for the finite difference.
        parameter_reverse = -random.random() * unit.kelvin
        observable_reverse = -random.random() * unit.kelvin
        parameter_forward = random.random() * unit.kelvin
        observable_forward = random.random() * unit.kelvin

        protocol = CentralDifferenceGradient("central_difference")
        protocol.parameter_key = key
        protocol.reverse_observable_value = observable_reverse
        protocol.reverse_parameter_value = parameter_reverse
        protocol.forward_observable_value = observable_forward
        protocol.forward_parameter_value = parameter_forward
        protocol.execute(scratch_directory, ComputeResources())

        expected = (observable_forward - observable_reverse) / (
            parameter_forward - parameter_reverse
        )
        assert protocol.gradient.value == expected
def test_observable_array_valid_initializer(
    value: unit.Quantity,
    gradient_values: List[unit.Quantity],
    expected_value: unit.Quantity,
    expected_gradient_values: List[unit.Quantity],
):
    """Valid constructor inputs should yield the expected (reshaped) values
    and gradient values."""
    gradients = [
        ParameterGradient(
            key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
            value=gradient_value,
        )
        for gradient_value in gradient_values
    ]

    observable = ObservableArray(value, gradients)

    # noinspection PyUnresolvedReferences
    assert observable.value.shape == expected_value.shape
    assert numpy.allclose(observable.value, expected_value)

    for gradient, expected_gradient in zip(
        observable.gradients, expected_gradient_values
    ):
        assert gradient.value.shape == expected_gradient.shape
        assert numpy.allclose(gradient.value, expected_gradient)
def test_bootstrap(data_values, expected_error, sub_counts):
    """Bootstrapping a mean should reproduce the sample mean (for both the
    values and their gradients) and, when provided, the expected error."""

    def compute_mean(values: ObservableArray) -> Observable:
        # Average the values and each attached gradient in lock-step.
        mean_gradients = [
            ParameterGradient(gradient.key, numpy.mean(gradient.value))
            for gradient in values.gradients
        ]
        return Observable(
            value=values.value.mean().plus_minus(0.0 * values.value.units),
            gradients=mean_gradients,
        )

    observables = ObservableArray(
        value=data_values,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=data_values,
            )
        ],
    )

    average = bootstrap(compute_mean, 1000, 1.0, sub_counts, values=observables)

    expected_mean = observables.value.mean()
    assert numpy.isclose(average.value, expected_mean)
    assert numpy.isclose(average.gradients[0].value, expected_mean)

    if expected_error is not None:
        assert numpy.isclose(average.error, expected_error, rtol=0.1)
def test_observable_array_join():
    """Joining two length-two arrays should concatenate both the values and
    the gradient values in order."""
    gradient_unit = unit.mole / unit.kilojoule

    def _build(offset):
        values = (numpy.arange(2) + offset * 2) * unit.kelvin
        return ObservableArray(
            value=values,
            gradients=[
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=values * gradient_unit,
                )
            ],
        )

    joined = ObservableArray.join(_build(0), _build(1))
    assert len(joined) == 4

    expected = numpy.arange(4).reshape(-1, 1) * unit.kelvin
    assert numpy.allclose(joined.value, expected)
    assert numpy.allclose(joined.gradients[0].value, expected * gradient_unit)
def test_system_subset_vdw():
    """Building a vdW subset with the hydrogen epsilon perturbed should
    leave the expected per-particle parameters."""
    # A single HCl molecule is enough to exercise both particles.
    topology = Molecule.from_smiles("Cl").to_topology()

    # Create the system subset.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey("vdW", "[#1:1]", "epsilon"),
        force_field=hydrogen_chloride_force_field(True, True),
        topology=topology,
        scale_amount=0.5,
    )

    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2

    force = system.getForce(0)
    charge_0, sigma_0, epsilon_0 = force.getParticleParameters(0)
    charge_1, sigma_1, epsilon_1 = force.getParticleParameters(1)

    # Both particle charges are expected to be zero in this subset.
    assert np.isclose(charge_0.value_in_unit(simtk_unit.elementary_charge), 0.0)
    assert np.isclose(charge_1.value_in_unit(simtk_unit.elementary_charge), 0.0)

    assert np.isclose(sigma_0.value_in_unit(simtk_unit.angstrom), 2.0)
    assert np.isclose(sigma_1.value_in_unit(simtk_unit.angstrom), 1.0)
    assert np.isclose(epsilon_0.value_in_unit(simtk_unit.kilojoules_per_mole), 2.0)
    assert np.isclose(epsilon_1.value_in_unit(simtk_unit.kilojoules_per_mole), 0.5)
def test_compute_state_energy_gradients(tmpdir):
    """The computed state energy gradient for the requested vdW parameter
    should be non-zero for a water system."""
    force_field_path = os.path.join(tmpdir, "ff.json")
    build_tip3p_smirnoff_force_field().json(force_field_path)

    _, parameterized_system = _setup_dummy_system(
        tmpdir, Substance.from_components("O"), 10, force_field_path
    )

    protocol = SolvationYankProtocol("")
    protocol.thermodynamic_state = ThermodynamicState(
        298.15 * unit.kelvin, 1.0 * unit.atmosphere
    )
    protocol.gradient_parameters = [
        ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon")
    ]

    trajectory = mdtraj.load_dcd(
        get_data_filename("test/trajectories/water.dcd"),
        get_data_filename("test/trajectories/water.pdb"),
    )

    gradients = protocol._compute_state_energy_gradients(
        trajectory,
        parameterized_system.topology,
        parameterized_system.force_field.to_force_field(),
        True,
        ComputeResources(),
    )

    assert len(gradients) == 1
    assert not np.isclose(gradients[0].value, 0.0 * unit.dimensionless)
def test_observable_array_join_single():
    """Joining a single array should yield an array of the same length."""
    gradient_unit = unit.mole / unit.kilojoule

    values = numpy.arange(2) * unit.kelvin
    observable = ObservableArray(
        value=values,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=values * gradient_unit,
            )
        ],
    )

    assert len(ObservableArray.join(observable)) == 2
def _mock_observable(
    value: ValueType,
    gradient_values: List[Tuple[str, str, str, ValueType]],
    object_type: Union[Type[Observable], Type[ObservableArray]],
):
    """Build a mocked observable with the specified gradients.

    Parameters
    ----------
    value
        The value to assign to the observable.
    gradient_values
        Tuples of ``(tag, smirks, attribute, gradient_value)``, one per
        gradient to attach. Each gradient value is wrapped in kelvin.
    object_type
        The observable class (``Observable`` or ``ObservableArray``) to
        instantiate.
    """
    return object_type(
        value=value,
        gradients=[
            # The loop variable is named `gradient_value` (not `value`) so it
            # does not shadow the `value` parameter of this function.
            ParameterGradient(
                key=ParameterGradientKey(tag, smirks, attribute),
                value=gradient_value * unit.kelvin,
            )
            for tag, smirks, attribute, gradient_value in gradient_values
        ],
    )
def test_gradient_subtraction():
    """Gradients with matching keys subtract element-wise; mismatched keys
    or non-gradient operands raise a ``ValueError``."""
    epsilon_h = ParameterGradientKey("vdW", "[#1:1]", "epsilon")
    epsilon_c = ParameterGradientKey("vdW", "[#6:1]", "epsilon")

    one_kelvin = ParameterGradient(epsilon_h, 1.0 * unit.kelvin)
    two_kelvin = ParameterGradient(epsilon_h, 2.0 * unit.kelvin)

    difference = one_kelvin - two_kelvin
    assert np.isclose(difference.value.to(unit.kelvin).magnitude, -1.0)

    difference = two_kelvin - one_kelvin
    assert np.isclose(difference.value.to(unit.kelvin).magnitude, 1.0)

    mismatched = ParameterGradient(epsilon_c, 1.0 * unit.kelvin)

    with pytest.raises(ValueError):
        one_kelvin - mismatched

    with pytest.raises(ValueError):
        mismatched - one_kelvin

    with pytest.raises(ValueError):
        one_kelvin - 1.0
def test_observable_array_subset():
    """Taking a subset should keep only the requested rows of both the
    values and the gradient values."""
    full = ObservableArray(
        value=numpy.arange(4) * unit.kelvin,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=numpy.arange(4) * unit.kelvin,
            )
        ],
    )

    subset = full.subset([1, 3])
    assert len(subset) == 2

    expected = numpy.array([[1.0], [3.0]]) * unit.kelvin
    assert numpy.allclose(subset.value, expected)
    assert numpy.allclose(subset.gradients[0].value, expected)
def test_observable_array_round_trip(value):
    """An ``ObservableArray`` should survive a JSON round trip unchanged."""
    original = ObservableArray(
        value=value * unit.kelvin,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=value * 2.0 * unit.kelvin,
            )
        ],
    )

    serialized = json.dumps(original, cls=TypedJSONEncoder)
    round_tripped: ObservableArray = json.loads(serialized, cls=TypedJSONDecoder)

    assert isinstance(round_tripped, ObservableArray)
    assert numpy.isclose(original.value, round_tripped.value)

    assert len(original.gradients) == len(round_tripped.gradients)
    assert original.gradients[0] == round_tripped.gradients[0]
def test_system_subset_library_charge():
    """Perturbing the chlorine library charge should yield the expected
    (charge-balanced) per-particle parameters."""
    force_field = hydrogen_chloride_force_field(True, False)

    # Override the hydrogen library charge before building the subset.
    force_field.get_parameter_handler("LibraryCharges").parameters[
        "[#1:1]"
    ].charge1 = 1.5 * simtk_unit.elementary_charge

    # A single HCl molecule.
    topology = Molecule.from_smiles("Cl").to_topology()

    # Create the system subset.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey("LibraryCharges", "[#17:1]", "charge1"),
        force_field=force_field,
        topology=topology,
        scale_amount=0.5,
    )

    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2

    force = system.getForce(0)
    charge_0, sigma_0, epsilon_0 = force.getParticleParameters(0)
    charge_1, sigma_1, epsilon_1 = force.getParticleParameters(1)

    assert np.isclose(charge_0.value_in_unit(simtk_unit.elementary_charge), -1.5)
    assert np.isclose(charge_1.value_in_unit(simtk_unit.elementary_charge), 1.5)

    assert np.isclose(sigma_0.value_in_unit(simtk_unit.angstrom), 10.0)
    assert np.isclose(sigma_1.value_in_unit(simtk_unit.angstrom), 10.0)
    assert np.isclose(epsilon_0.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
    assert np.isclose(epsilon_1.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
def test_compute_gradients(tmpdir, smirks, all_zeros):
    """Computed per-frame gradients should be all-zero exactly when the
    perturbed parameter does not affect the system."""
    # Load a short water trajectory.
    trajectory = mdtraj.load_dcd(
        get_data_filename("test/trajectories/water.dcd"),
        get_data_filename("test/trajectories/water.pdb"),
    )

    observables = ObservableFrame(
        {
            "PotentialEnergy": ObservableArray(
                np.zeros(len(trajectory)) * unit.kilojoule / unit.mole
            )
        }
    )

    _compute_gradients(
        [ParameterGradientKey("vdW", smirks, "epsilon")],
        observables,
        ForceField("openff-1.2.0.offxml"),
        ThermodynamicState(298.15 * unit.kelvin, 1.0 * unit.atmosphere),
        Topology.from_mdtraj(trajectory.topology, [Molecule.from_smiles("O")]),
        trajectory,
        ComputeResources(),
        True,
    )

    gradient_values = observables["PotentialEnergy"].gradients[0].value
    assert len(gradient_values) == len(trajectory)

    # NOTE(review): the kJ/kcal comparison units look odd but are preserved
    # from the original test; the comparison target is zero either way.
    zero = 0.0 * unit.kilojoule / unit.kilocalorie

    if all_zeros:
        assert np.allclose(gradient_values, zero)
    else:
        assert not np.allclose(gradient_values, zero)
def test_observable_round_trip():
    """An ``Observable`` should survive a JSON round trip unchanged."""
    original = Observable(
        value=(0.1 * unit.kelvin).plus_minus(0.2 * unit.kelvin),
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=0.2 * unit.kelvin,
            )
        ],
    )

    serialized = json.dumps(original, cls=TypedJSONEncoder)
    round_tripped: Observable = json.loads(serialized, cls=TypedJSONDecoder)

    assert isinstance(round_tripped, Observable)
    assert numpy.isclose(original.value, round_tripped.value)
    assert numpy.isclose(original.error, round_tripped.error)

    assert len(original.gradients) == len(round_tripped.gradients)
    assert original.gradients[0] == round_tripped.gradients[0]
def test_system_subset_charge_increment():
    """Perturbing a charge increment should yield the expected per-particle
    parameters (currently skipped — see the skip message)."""
    pytest.skip(
        "This test will fail until the SMIRNOFF charge increment handler allows "
        "N - 1 charges to be specified."
    )

    # A single HCl molecule.
    topology = Molecule.from_smiles("Cl").to_topology()

    # Create the system subset.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey(
            "ChargeIncrementModel", "[#1:1]-[#17:2]", "charge_increment1"
        ),
        force_field=hydrogen_chloride_force_field(False, True),
        topology=topology,
        scale_amount=0.5,
    )

    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2

    force = system.getForce(0)
    charge_0, sigma_0, epsilon_0 = force.getParticleParameters(0)
    charge_1, sigma_1, epsilon_1 = force.getParticleParameters(1)

    assert not np.isclose(charge_0.value_in_unit(simtk_unit.elementary_charge), -1.0)
    assert np.isclose(charge_1.value_in_unit(simtk_unit.elementary_charge), 1.0)

    assert np.isclose(sigma_0.value_in_unit(simtk_unit.angstrom), 10.0)
    assert np.isclose(sigma_1.value_in_unit(simtk_unit.angstrom), 10.0)
    assert np.isclose(epsilon_0.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
    assert np.isclose(epsilon_1.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
def test_zero_gradient():
    """``ZeroGradients`` should attach one zero-valued gradient per
    requested parameter key."""
    with tempfile.TemporaryDirectory() as directory:
        force_field_path = os.path.join(directory, "ff.json")

        with open(force_field_path, "w") as file:
            file.write(build_tip3p_smirnoff_force_field().json())

        gradient_key = ParameterGradientKey(
            "vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon"
        )

        protocol = ZeroGradients("")
        protocol.input_observables = ObservableArray(value=0.0 * unit.kelvin)
        protocol.gradient_parameters = [gradient_key]
        protocol.force_field_path = force_field_path
        protocol.execute()

        output_gradients = protocol.output_observables.gradients
        assert len(output_gradients) == 1
        assert output_gradients[0].key == gradient_key
        assert np.allclose(output_gradients[0].value, 0.0)
def test_frame_subset():
    """Sub-setting a frame should subset every contained observable array,
    values and gradients alike."""
    frame = ObservableFrame(
        {
            "Temperature": ObservableArray(
                value=numpy.arange(4) * unit.kelvin,
                gradients=[
                    ParameterGradient(
                        key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                        value=numpy.arange(4) * unit.kelvin,
                    )
                ],
            )
        }
    )

    subset = frame.subset([1, 3])
    assert len(subset) == 2

    expected = numpy.array([[1.0], [3.0]]) * unit.kelvin
    assert numpy.allclose(subset["Temperature"].value, expected)
    assert numpy.allclose(subset["Temperature"].gradients[0].value, expected)
def test_analyze_phase(monkeypatch, tmpdir): from simtk import unit as simtk_unit # Generate the required inputs build_tip3p_smirnoff_force_field().json(os.path.join(tmpdir, "ff.json")) coordinate_path, parameterized_system = _setup_dummy_system( tmpdir, Substance.from_components("O"), 10, os.path.join(tmpdir, "ff.json")) solvent_trajectory = mdtraj.load_dcd( get_data_filename("test/trajectories/water.dcd"), get_data_filename("test/trajectories/water.pdb"), ) # Mock the internally called methods. monkeypatch.setattr( SolvationYankProtocol, "_time_series_statistics", lambda *_: TimeSeriesStatistics(len(solvent_trajectory), len(solvent_trajectory), 1.0, 0), ) monkeypatch.setattr(SolvationYankProtocol, "_extract_trajectory", lambda *_: solvent_trajectory) monkeypatch.setattr( SolvationYankProtocol, "_extract_solvent_trajectory", lambda *_: solvent_trajectory, ) monkeypatch.setattr(SolvationYankProtocol, "_compute_state_energy_gradients", lambda *_: []) # Build up the protocol. protocol = SolvationYankProtocol("") protocol.thermodynamic_state = ThermodynamicState(298.15 * unit.kelvin, 1.0 * unit.atmosphere) protocol.gradient_parameters = [ ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon") ] protocol.solvent_1 = Substance.from_components("O") protocol._analysed_output = { "general": { "solvent1": { "nstates": 1 } }, "free_energy": { "solvent1": { "kT": 1.0 / simtk_unit.kilojoules_per_mole, "free_energy_diff": 0.0, "free_energy_diff_unit": 0.0 * simtk_unit.kilojoules_per_mole, "free_energy_diff_error": 0.0, "free_energy_diff_error_unit": 0.0 * simtk_unit.kilojoules_per_mole, } }, } ( free_energy, solution_trajectory, solvent_trajectory, solution_gradients, solvent_gradients, ) = protocol._analyze_phase("", parameterized_system, "solvent1", ComputeResources())
def main(): setup_timestamp_logging() # Retrieve the current version. version = evaluator.__version__.replace(".", "-").replace("v", "") if "+" in version: version = "latest" # Create a new directory to run the current versions results in. os.makedirs(os.path.join(version, "results")) with temporarily_change_directory(version): with DaskLSFBackend( minimum_number_of_workers=1, maximum_number_of_workers=12, resources_per_worker=QueueWorkerResources( number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=5 * unit.gigabyte, wallclock_time_limit="05:59", ), setup_script_commands=[ f"conda activate openff-evaluator-{version}", "module load cuda/10.0", ], queue_name="gpuqueue", ) as calculation_backend: with EvaluatorServer( calculation_backend, working_directory="outputs", storage_backend=LocalFileStorage("cached-data"), ): client = EvaluatorClient() for allowed_layer in ["SimulationLayer", "ReweightingLayer"]: data_set = define_data_set( allowed_layer == "ReweightingLayer") options = RequestOptions() options.calculation_layers = [allowed_layer] options.calculation_schemas = { property_type: {} for property_type in data_set.property_types } if allowed_layer == "SimulationLayer": options.add_schema( "SimulationLayer", "SolvationFreeEnergy", solvation_free_energy_schema(), ) request, _ = client.request_estimate( data_set, ForceField("openff-1.2.0.offxml"), options, parameter_gradient_keys=[ ParameterGradientKey("vdW", smirks, attribute) for smirks in [ "[#1:1]-[#6X4]", "[#1:1]-[#6X4]-[#7,#8,#9,#16,#17,#35]", "[#1:1]-[#8]", "[#6X4:1]", "[#8X2H1+0:1]", "[#1]-[#8X2H2+0:1]-[#1]", ] for attribute in ["epsilon", "rmin_half"] ], ) results, _ = request.results(synchronous=True, polling_interval=60) results.json( os.path.join("results", f"{allowed_layer}.json"))
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance


@pytest.mark.parametrize(
    "values",
    [
        [random.randint(1, 10) for _ in range(10)],
        [random.random() for _ in range(10)],
        [random.random() * unit.kelvin for _ in range(10)],
        [
            # Use `_` for the unused loop variable, consistent with the
            # other parameter sets in this list.
            (random.random() * unit.kelvin).plus_minus(random.random() * unit.kelvin)
            for _ in range(10)
        ],
        [
            ParameterGradient(
                ParameterGradientKey("a", "b", "c"), random.random() * unit.kelvin
            )
            for _ in range(10)
        ],
    ],
)
def test_add_values_protocol(values):
    """``AddValues`` should sum values of every supported type, matching a
    plain ``reduce(operator.add, ...)`` over the same inputs."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        add_quantities = AddValues("add")
        add_quantities.values = values

        add_quantities.execute(temporary_directory, ComputeResources())
        assert add_quantities.result == reduce(operator.add, values)
def submit_jobs(self, mvals, AGrad=True, AHess=True):
    """
    Submit jobs for evaluating the objective function

    Parameters
    ----------
    mvals: np.ndarray
        mvals array containing the math values of the parameters
    AGrad: bool
        Flag for computing gradients or not
    AHess: bool
        Flag for computing hessian or not

    Notes
    -----
    1. This function is called before wq_complete() and get().
    2. This function should not block.
    """

    # Make the force field based on the current values of the parameters.
    self.FF.make(mvals)

    force_field = smirnoff.ForceField(
        self.FF.offxml, allow_cosmetic_attributes=True
    )

    # strip out cosmetic attributes by round-tripping through a temporary
    # OFFXML file
    with tempfile.NamedTemporaryFile(mode="w", suffix=".offxml") as file:
        force_field.to_file(file.name, discard_cosmetic_attributes=True)
        force_field = smirnoff.ForceField(file.name)

    # Determine which gradients (if any) we should be estimating.
    parameter_gradient_keys = []

    # Maps each gradient key to its index in the flattened gradient vector,
    # and records the unit of each differentiated parameter.
    self._gradient_key_mappings = {}
    self._parameter_units = {}

    if AGrad is True:
        index_counter = 0

        for field_list in self.FF.pfields:
            string_key = field_list[0]
            key_split = string_key.split("/")

            # NOTE(review): assumes ForceBalance pfield keys split as
            # tag / ... / attribute / smirks — confirm against the format
            # of self.FF.pfields.
            parameter_tag = key_split[0].strip()
            parameter_smirks = key_split[3].strip()
            parameter_attribute = key_split[2].strip()

            # Use the full attribute name (e.g. k1) for the gradient key.
            parameter_gradient_key = ParameterGradientKey(
                tag=parameter_tag,
                smirks=parameter_smirks,
                attribute=parameter_attribute,
            )

            # Find the unit of the gradient parameter.
            parameter_value, is_cosmetic = self._parameter_value_from_gradient_key(
                parameter_gradient_key
            )

            if parameter_value is None or is_cosmetic:
                # We don't want gradients w.r.t. cosmetic parameters.
                continue

            parameter_unit = parameter_value.units
            parameter_gradient_keys.append(parameter_gradient_key)

            self._gradient_key_mappings[parameter_gradient_key] = index_counter
            self._parameter_units[parameter_gradient_key] = parameter_unit

            index_counter += 1

    # Submit the estimation request.
    self._pending_estimate_request, _ = self._client.request_estimate(
        property_set=self._reference_data_set,
        force_field_source=force_field,
        options=self._options.estimation_options,
        parameter_gradient_keys=parameter_gradient_keys,
    )

    logger.info(
        "Requesting the estimation of {} properties, and their "
        "gradients with respect to {} parameters.\n".format(
            len(self._reference_data_set), len(parameter_gradient_keys)
        )
    )

    # A `None` result here means the request could not be processed at all,
    # i.e. no server was reachable.
    if (
        self._pending_estimate_request.results(
            True, polling_interval=self._options.polling_interval
        )[0]
        is None
    ):
        raise RuntimeError(
            "No `EvaluatorServer` could be found to submit the calculations to. "
            "Please double check that a server is running, and that the connection "
            "settings specified in the input script are correct."
        )
"str" * unit.kelvin, [], pytest.raises(TypeError), "The value must be a unit-wrapped integer, float or numpy array.", ), ( numpy.ones((2, 2, 2)) * unit.kelvin, [], pytest.raises(ValueError), "The wrapped array must not contain more than two dimensions.", ), ( None, [ ParameterGradient( key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"), value=numpy.ones((2, 2)) * unit.kelvin, ), ], pytest.raises(ValueError), "A valid value must be provided.", ), ( 1.0 * unit.kelvin, [ ParameterGradient( key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"), value=numpy.ones(1), ), ], pytest.raises(TypeError),