Example #1
def test_average_free_energies_protocol():
    """Tests adding together two free energies."""

    delta_g_one = Observable(
        value=(-10.0 * unit.kilocalorie / unit.mole).plus_minus(
            1.0 * unit.kilocalorie / unit.mole),
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
                value=0.1 * unit.kilocalorie / unit.mole / unit.angstrom,
            )
        ],
    )
    delta_g_two = Observable(
        value=(-20.0 * unit.kilocalorie / unit.mole).plus_minus(
            2.0 * unit.kilocalorie / unit.mole),
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
                value=0.2 * unit.kilocalorie / unit.mole / unit.angstrom,
            )
        ],
    )

    thermodynamic_state = ThermodynamicState(298 * unit.kelvin,
                                             1 * unit.atmosphere)

    sum_protocol = AverageFreeEnergies("")

    sum_protocol.values = [delta_g_one, delta_g_two]
    sum_protocol.thermodynamic_state = thermodynamic_state

    sum_protocol.execute()

    result_value = sum_protocol.result.value.to(unit.kilocalorie / unit.mole)
    result_uncertainty = sum_protocol.result.error.to(unit.kilocalorie /
                                                      unit.mole)

    assert isinstance(sum_protocol.result, Observable)
    assert result_value.magnitude == pytest.approx(-20.0, abs=0.2)
    assert result_uncertainty.magnitude == pytest.approx(2.0, abs=0.2)

    assert (sum_protocol.confidence_intervals[0] > result_value >
            sum_protocol.confidence_intervals[1])

    gradient_value = sum_protocol.result.gradients[0].value.to(
        unit.kilocalorie / unit.mole / unit.angstrom)
    beta = 1.0 / (298.0 * unit.kelvin * unit.molar_gas_constant).to(
        unit.kilocalorie / unit.mole)

    assert np.isclose(
        gradient_value.magnitude,
        (0.1 * np.exp(-beta.magnitude * -10.0) +
         0.2 * np.exp(-beta.magnitude * -20.0)) /
        (np.exp(-beta.magnitude * -10.0) + np.exp(-beta.magnitude * -20.0)),
    )
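
The protocol exercised by this test combines free energies as ΔG = -RT ln Σ_n exp(-βΔG_n) and weights each gradient by the corresponding Boltzmann factor, which is what the final assert spells out. As a minimal, numpy-only sketch of that arithmetic (the gas constant value is hard-coded here purely for illustration):

import numpy as np

delta_gs = np.array([-10.0, -20.0])  # kcal / mol, as in the test above
gradients = np.array([0.1, 0.2])     # kcal / mol / angstrom

RT = 298.0 * 1.987204259e-3  # molar gas constant times 298 K, in kcal / mol
beta = 1.0 / RT

weights = np.exp(-beta * delta_gs)

# The combined free energy, -RT ln sum_n exp(-beta dG_n), is ~ -20.0 kcal / mol.
combined_delta_g = -RT * np.log(weights.sum())

# The combined gradient is the Boltzmann-weighted average of the gradients, ~ 0.2.
combined_gradient = (weights * gradients).sum() / weights.sum()

print(combined_delta_g, combined_gradient)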
Example #2
def test_gradient_division():

    gradient_a = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 2.0 * unit.kelvin)

    result = gradient_a / 2.0
    assert np.isclose(result.value.to(unit.kelvin).magnitude, 1.0)

    gradient_c = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 1.0 * unit.kelvin)

    with pytest.raises(ValueError):
        gradient_a / gradient_c
Example #3
    def _execute(self, directory, available_resources):

        force_field_source = ForceFieldSource.from_json(self.force_field_path)

        if not isinstance(force_field_source, SmirnoffForceFieldSource):
            raise ValueError("Only SMIRNOFF force fields are supported.")

        force_field = force_field_source.to_force_field()

        parameter_units = {
            gradient_key: openmm_quantity_to_pint(
                getattr(
                    force_field.get_parameter_handler(
                        gradient_key.tag).parameters[gradient_key.smirks],
                    gradient_key.attribute,
                )).units
            for gradient_key in self.gradient_parameters
        }

        self.input_observables.clear_gradients()

        if isinstance(self.input_observables, Observable):

            self.output_observables = Observable(
                value=self.input_observables.value,
                gradients=[
                    ParameterGradient(
                        key=gradient_key,
                        value=(0.0 * self.input_observables.value.units /
                               parameter_units[gradient_key]),
                    ) for gradient_key in self.gradient_parameters
                ],
            )

        elif isinstance(self.input_observables, ObservableArray):

            self.output_observables = ObservableArray(
                value=self.input_observables.value,
                gradients=[
                    ParameterGradient(
                        key=gradient_key,
                        value=(
                            numpy.zeros(self.input_observables.value.shape) *
                            self.input_observables.value.units /
                            parameter_units[gradient_key]),
                    ) for gradient_key in self.gradient_parameters
                ],
            )

        else:
            raise NotImplementedError()
Example #4
def test_bootstrap(data_values, expected_error, sub_counts):
    def bootstrap_function(values: ObservableArray) -> Observable:

        return Observable(
            value=values.value.mean().plus_minus(0.0 * values.value.units),
            gradients=[
                ParameterGradient(gradient.key, numpy.mean(gradient.value))
                for gradient in values.gradients
            ],
        )

    data = ObservableArray(
        value=data_values,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=data_values,
            )
        ],
    )

    average = bootstrap(bootstrap_function, 1000, 1.0, sub_counts, values=data)

    assert numpy.isclose(average.value, data.value.mean())
    assert numpy.isclose(average.gradients[0].value, data.value.mean())

    if expected_error is not None:
        assert numpy.isclose(average.error, expected_error, rtol=0.1)
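
The bootstrap helper is only exercised through this test, so purely as a rough, numpy-only sketch of the underlying idea (resample with replacement, evaluate the statistic on each resample, and take the spread of the results as the uncertainty; the real helper additionally handles units, gradients and sub-counts):

import numpy as np

def naive_bootstrap(values: np.ndarray, n_cycles: int = 1000, seed: int = 0):
    """Return the mean of ``values`` and a bootstrapped standard error."""

    generator = np.random.default_rng(seed)
    cycle_means = np.empty(n_cycles)

    for cycle in range(n_cycles):
        # Resample the data with replacement and store the statistic of interest.
        sample = generator.choice(values, size=len(values), replace=True)
        cycle_means[cycle] = sample.mean()

    return values.mean(), cycle_means.std()

data = np.random.default_rng(1).normal(0.0, 1.0, 500)
mean, error = naive_bootstrap(data)
print(mean, error)  # the error should be close to 1 / sqrt(500)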
Example #5
def test_observable_array_join():

    gradient_unit = unit.mole / unit.kilojoule

    observables = [
        ObservableArray(
            value=(numpy.arange(2) + i * 2) * unit.kelvin,
            gradients=[
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=(numpy.arange(2) + i * 2) * unit.kelvin *
                    gradient_unit,
                )
            ],
        ) for i in range(2)
    ]

    joined = ObservableArray.join(*observables)
    assert len(joined) == 4

    assert numpy.allclose(joined.value,
                          numpy.arange(4).reshape(-1, 1) * unit.kelvin)
    assert numpy.allclose(
        joined.gradients[0].value,
        numpy.arange(4).reshape(-1, 1) * unit.kelvin * gradient_unit,
    )
Example #6
def test_observable_array_valid_initializer(
    value: unit.Quantity,
    gradient_values: List[unit.Quantity],
    expected_value: unit.Quantity,
    expected_gradient_values: List[unit.Quantity],
):

    observable = ObservableArray(
        value,
        [
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=gradient_value,
            ) for gradient_value in gradient_values
        ],
    )

    # noinspection PyUnresolvedReferences
    assert observable.value.shape == expected_value.shape
    assert numpy.allclose(observable.value, expected_value)

    assert all(observable.gradients[i].value.shape ==
               expected_gradient_values[i].shape
               for i in range(len(expected_gradient_values)))
    assert all(
        numpy.allclose(observable.gradients[i].value,
                       expected_gradient_values[i])
        for i in range(len(expected_gradient_values)))
Example #7
    def _execute(self, directory, available_resources):

        import mdtraj

        charges = self._extract_charges(self.parameterized_system.system)
        charge_derivatives = self._compute_charge_derivatives(len(charges))

        dipole_moments = []
        dipole_gradients = {key: [] for key in self.gradient_parameters}

        for chunk in mdtraj.iterload(
            self.trajectory_path, top=self.parameterized_system.topology_path, chunk=50
        ):

            xyz = chunk.xyz.transpose(0, 2, 1) * unit.nanometers

            dipole_moments.extend(xyz.dot(charges))

            for key in self.gradient_parameters:
                dipole_gradients[key].extend(xyz.dot(charge_derivatives[key]))

        self.dipole_moments = ObservableArray(
            value=np.vstack(dipole_moments),
            gradients=[
                ParameterGradient(key=key, value=np.vstack(dipole_gradients[key]))
                for key in self.gradient_parameters
            ],
        )
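
The numerical core of this protocol is the xyz.dot(charges) contraction, which for every frame sums the charge-weighted coordinates to give the total dipole moment, μ = Σ_i q_i r_i. A small numpy-only sketch of that step (the charges and coordinates below are made up for illustration):

import numpy as np

n_frames, n_atoms = 3, 4

charges = np.array([-0.8, 0.4, 0.4, 0.0])  # per-atom partial charges
coordinates = np.random.default_rng(0).normal(size=(n_frames, n_atoms, 3))

# Transpose to (frames, xyz, atoms) so the dot product sums over atoms,
# mirroring chunk.xyz.transpose(0, 2, 1).dot(charges) in the protocol above.
dipole_moments = coordinates.transpose(0, 2, 1).dot(charges)

assert dipole_moments.shape == (n_frames, 3)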
Example #9
    def _compute_weights(
            mbar: pymbar.MBAR,
            target_reduced_potentials: ObservableArray) -> ObservableArray:
        """Return the values that each sample at the target state should be weighted
        by.

        Parameters
        ----------
        mbar
            A pre-computed MBAR object encoding information from the reference states.
        target_reduced_potentials
            The reduced potentials at the target state.

        Returns
        -------
            The values to weight each sample by.
        """
        from scipy.special import logsumexp

        u_kn = target_reduced_potentials.value.to(
            unit.dimensionless).magnitude.T

        log_denominator_n = logsumexp(mbar.f_k - mbar.u_kn.T,
                                      b=mbar.N_k,
                                      axis=1)

        f_hat = -logsumexp(-u_kn - log_denominator_n, axis=1)

        # Calculate the weights
        weights = np.exp(f_hat - u_kn - log_denominator_n) * unit.dimensionless

        # Compute the gradients of the weights.
        weight_gradients = []

        for gradient in target_reduced_potentials.gradients:

            gradient_value = gradient.value.magnitude.flatten()

            # Compute the numerator of the gradient. We need to specifically ask for the
            # sign of the exp sum as the numerator may be negative.
            d_f_hat_numerator, d_f_hat_numerator_sign = logsumexp(
                -u_kn - log_denominator_n,
                b=gradient_value,
                axis=1,
                return_sign=True)
            d_f_hat_d_theta = d_f_hat_numerator_sign * np.exp(
                d_f_hat_numerator + f_hat)

            d_weights_d_theta = ((d_f_hat_d_theta - gradient_value) * weights *
                                 gradient.value.units)

            weight_gradients.append(
                ParameterGradient(key=gradient.key, value=d_weights_d_theta.T))

        return ObservableArray(value=weights.T, gradients=weight_gradients)
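
A quick way to sanity-check the weight expression used above is to note that each sample's weight is w_n = exp(f̂ - u_n - log Σ_k N_k exp(f_k - u_kn)) and that the weights sum to one over the samples. A toy numpy/scipy sketch with random stand-in potentials (not real simulation data):

import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(0)

n_states, n_samples = 2, 100
f_k = np.array([0.0, 1.5])                     # stand-in for mbar.f_k
N_k = np.array([50, 50])                       # stand-in for mbar.N_k
u_kn = rng.normal(size=(n_states, n_samples))  # stand-in for mbar.u_kn
u_target = rng.normal(size=n_samples)          # target reduced potentials

log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
f_hat = -logsumexp(-u_target - log_denominator_n)

weights = np.exp(f_hat - u_target - log_denominator_n)

# The weights are normalised over the samples by construction.
assert np.isclose(weights.sum(), 1.0)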
Example #10
def test_gradient_subtraction():

    gradient_a = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 1.0 * unit.kelvin)
    gradient_b = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 2.0 * unit.kelvin)

    result = gradient_a - gradient_b
    assert np.isclose(result.value.to(unit.kelvin).magnitude, -1.0)

    result = gradient_b - gradient_a
    assert np.isclose(result.value.to(unit.kelvin).magnitude, 1.0)

    gradient_c = ParameterGradient(
        ParameterGradientKey("vdW", "[#6:1]", "epsilon"), 1.0 * unit.kelvin)

    with pytest.raises(ValueError):
        gradient_a - gradient_c

    with pytest.raises(ValueError):
        gradient_c - gradient_a

    with pytest.raises(ValueError):
        gradient_a - 1.0
Example #11
def test_observable_array_join_single():

    gradient_unit = unit.mole / unit.kilojoule

    joined = ObservableArray.join(
        ObservableArray(
            value=(numpy.arange(2)) * unit.kelvin,
            gradients=[
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=(numpy.arange(2)) * unit.kelvin * gradient_unit,
                )
            ],
        ))
    assert len(joined) == 2
Example #12
def _mock_observable(
    value: ValueType,
    gradient_values: List[Tuple[str, str, str, ValueType]],
    object_type: Union[Type[Observable], Type[ObservableArray]],
):

    return object_type(
        value=value,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey(tag, smirks, attribute),
                value=value * unit.kelvin,
            ) for tag, smirks, attribute, value in gradient_values
        ],
    )
Example #13
def test_observable_array_subset():

    observable = ObservableArray(
        value=numpy.arange(4) * unit.kelvin,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=numpy.arange(4) * unit.kelvin,
            )
        ],
    )

    subset = observable.subset([1, 3])
    assert len(subset) == 2

    assert numpy.allclose(subset.value,
                          numpy.array([[1.0], [3.0]]) * unit.kelvin)
    assert numpy.allclose(subset.gradients[0].value,
                          numpy.array([[1.0], [3.0]]) * unit.kelvin)
Example #14
    def subset(self, indices: Iterable[int]) -> "ObservableArray":
        """Extracts the subset of the values stored for this observable at the
        specified indices.

        Parameters
        ----------
        indices
            The indices of the entries to extract.

        Returns
        -------
            The subset of the observable values.
        """

        return self.__class__(
            value=self._value[indices],
            gradients=[
                ParameterGradient(key=gradient.key, value=gradient.value[indices])
                for gradient in self._gradients
            ],
        )
Example #15
    def _execute(self, directory, available_resources):

        if self.forward_parameter_value < self.reverse_parameter_value:

            raise ValueError(
                f"The forward parameter value ({self.forward_parameter_value}) must "
                f"be larger than the reverse value ({self.reverse_parameter_value})."
            )

        reverse_value = self.reverse_observable_value
        forward_value = self.forward_observable_value

        if isinstance(reverse_value, pint.Measurement):
            reverse_value = reverse_value.value

        if isinstance(forward_value, pint.Measurement):
            forward_value = forward_value.value

        gradient = (forward_value - reverse_value) / (
            self.forward_parameter_value - self.reverse_parameter_value)

        self.gradient = ParameterGradient(self.parameter_key, gradient)
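
The gradient returned here is a plain central finite difference, dA/dθ ≈ (A(θ_forward) - A(θ_reverse)) / (θ_forward - θ_reverse), evaluated at slightly increased and slightly decreased parameter values. A unit-free sketch on a function with a known derivative:

import numpy as np

def observable(theta: float) -> float:
    """A stand-in observable whose exact derivative is 2 * theta."""
    return theta ** 2

parameter_value = 3.0
perturbation = 1.0e-4 * parameter_value

forward_parameter = parameter_value + perturbation
reverse_parameter = parameter_value - perturbation

gradient = (observable(forward_parameter) - observable(reverse_parameter)) / (
    forward_parameter - reverse_parameter
)

assert np.isclose(gradient, 2.0 * parameter_value)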
Example #16
def test_observable_array_round_trip(value):

    observable = ObservableArray(
        value=value * unit.kelvin,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=value * 2.0 * unit.kelvin,
            )
        ],
    )

    round_tripped: ObservableArray = json.loads(json.dumps(
        observable, cls=TypedJSONEncoder),
                                                cls=TypedJSONDecoder)

    assert isinstance(round_tripped, ObservableArray)

    assert numpy.isclose(observable.value, round_tripped.value)

    assert len(observable.gradients) == len(round_tripped.gradients)
    assert observable.gradients[0] == round_tripped.gradients[0]
Example #17
def test_observable_round_trip():

    observable = Observable(
        value=(0.1 * unit.kelvin).plus_minus(0.2 * unit.kelvin),
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=0.2 * unit.kelvin,
            )
        ],
    )

    round_tripped: Observable = json.loads(json.dumps(observable,
                                                      cls=TypedJSONEncoder),
                                           cls=TypedJSONDecoder)

    assert isinstance(round_tripped, Observable)

    assert numpy.isclose(observable.value, round_tripped.value)
    assert numpy.isclose(observable.error, round_tripped.error)

    assert len(observable.gradients) == len(round_tripped.gradients)
    assert observable.gradients[0] == round_tripped.gradients[0]
Example #18
def test_frame_subset():

    observable_frame = ObservableFrame({
        "Temperature":
        ObservableArray(
            value=numpy.arange(4) * unit.kelvin,
            gradients=[
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.arange(4) * unit.kelvin,
                )
            ],
        )
    })

    subset = observable_frame.subset([1, 3])
    assert len(subset) == 2

    assert numpy.allclose(subset["Temperature"].value,
                          numpy.array([[1.0], [3.0]]) * unit.kelvin)
    assert numpy.allclose(
        subset["Temperature"].gradients[0].value,
        numpy.array([[1.0], [3.0]]) * unit.kelvin,
    )
Example #19
    def join(cls, *observables: "ObservableArray") -> "ObservableArray":
        """Concatenates multiple observables together in the order that they appear in
        the args list.

        Parameters
        ----------
        observables
            The observables to join.

        Returns
        -------
            The concatenated observable object.
        """
        if len(observables) < 1:
            raise ValueError("At least one observable must be provided.")
        if len(observables) == 1:
            return observables[0]

        expected_gradients = {gradient.key for gradient in observables[0].gradients}
        expected_gradient_units = {
            gradient.key: gradient.value.units for gradient in observables[0].gradients
        }

        # Ensure the arrays contain the same observables.
        if not all(
            observable.value.dimensionality == observables[0].value.dimensionality
            for observable in observables
        ):
            raise ValueError("The observables must all have compatible units.")

        # Ensure the arrays contain gradients for the same FF parameters.
        if not all(
            {gradient.key for gradient in observable.gradients} == expected_gradients
            for observable in observables
        ):
            raise ValueError(
                "The observables must contain gradient information for the same "
                "parameters."
            )

        # Ensure the gradients are all in the same units.
        if not all(
            {gradient.key: gradient.value.units for gradient in observable.gradients}
            == expected_gradient_units
            for observable in observables
        ):
            raise ValueError(
                "The gradients of each of the observables must have the same units."
            )

        return ObservableArray(
            value=numpy.concatenate(
                [
                    observable.value.to(observables[0].value.units).magnitude
                    for observable in observables
                ]
            )
            * observables[0].value.units,
            gradients=[
                ParameterGradient(
                    key=gradient_key,
                    value=numpy.concatenate(
                        [
                            next(
                                x for x in observable.gradients if x.key == gradient_key
                            )
                            .value.to(expected_gradient_units[gradient_key])
                            .magnitude
                            for observable in observables
                        ]
                    )
                    * expected_gradient_units[gradient_key],
                )
                for gradient_key in expected_gradients
            ],
        )
Example #20
     "str" * unit.kelvin,
     [],
     pytest.raises(TypeError),
     "The value must be a unit-wrapped integer, float or numpy array.",
 ),
 (
     numpy.ones((2, 2, 2)) * unit.kelvin,
     [],
     pytest.raises(ValueError),
     "The wrapped array must not contain more than two dimensions.",
 ),
 (
     None,
     [
         ParameterGradient(
             key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
             value=numpy.ones((2, 2)) * unit.kelvin,
         ),
     ],
     pytest.raises(ValueError),
     "A valid value must be provided.",
 ),
 (
     1.0 * unit.kelvin,
     [
         ParameterGradient(
             key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
             value=numpy.ones(1),
         ),
     ],
     pytest.raises(TypeError),
     "The gradient values must be unit-wrapped integers, floats or numpy arrays.",
Example #21
def _compute_gradients(
    gradient_parameters: List[ParameterGradientKey],
    observables: ObservableFrame,
    force_field: "ForceField",
    thermodynamic_state: ThermodynamicState,
    topology: "Topology",
    trajectory: "Trajectory",
    compute_resources: ComputeResources,
    enable_pbc: bool = True,
    perturbation_amount: float = 0.0001,
):
    """Computes the gradients of the provided observables with respect to
    the set of specified force field parameters using the central finite
    difference method.

    Notes
    -----
    The ``observables`` object will be modified in-place.

    Parameters
    ----------
    gradient_parameters
        The parameters to differentiate with respect to.
    observables
        The observables to differentiate.
    force_field
        The full set of force field parameters which contain the parameters to
        differentiate.
    thermodynamic_state
        The state at which the trajectory was sampled.
    topology
        The topology of the system the observables were collected for.
    trajectory
        The trajectory over which the observables were collected.
    compute_resources
        The compute resources available for the computations.
    enable_pbc
        Whether PBC should be enabled when re-evaluating system energies.
    perturbation_amount
        The amount to perturb the force field parameter by.
    """

    from simtk import openmm

    gradients = defaultdict(list)
    observables.clear_gradients()

    if enable_pbc:
        # Make sure the PBC are set on the topology otherwise the cut-off will be
        # set incorrectly.
        topology.box_vectors = trajectory.openmm_boxes(0)

    for parameter_key in gradient_parameters:

        # Build the slightly perturbed systems.
        reverse_system, reverse_parameter_value = system_subset(
            parameter_key, force_field, topology, -perturbation_amount)
        forward_system, forward_parameter_value = system_subset(
            parameter_key, force_field, topology, perturbation_amount)

        # Perform a cheap check to try to catch most cases where the system's energy
        # does not depend on this parameter.
        reverse_xml = openmm.XmlSerializer.serialize(reverse_system)
        forward_xml = openmm.XmlSerializer.serialize(forward_system)

        if not enable_pbc:
            disable_pbc(reverse_system)
            disable_pbc(forward_system)

        reverse_parameter_value = openmm_quantity_to_pint(
            reverse_parameter_value)
        forward_parameter_value = openmm_quantity_to_pint(
            forward_parameter_value)

        # Evaluate the energies using the reverse and forward sub-systems.
        if reverse_xml != forward_xml:
            reverse_energies = _evaluate_energies(
                thermodynamic_state,
                reverse_system,
                trajectory,
                compute_resources,
                enable_pbc,
            )
            forward_energies = _evaluate_energies(
                thermodynamic_state,
                forward_system,
                trajectory,
                compute_resources,
                enable_pbc,
            )
        else:

            zeros = np.zeros(len(trajectory))

            reverse_energies = forward_energies = ObservableFrame({
                ObservableType.PotentialEnergy:
                ObservableArray(
                    zeros * unit.kilojoule / unit.mole,
                    [
                        ParameterGradient(
                            key=parameter_key,
                            value=(zeros * unit.kilojoule / unit.mole /
                                   reverse_parameter_value.units),
                        )
                    ],
                ),
                ObservableType.ReducedPotential:
                ObservableArray(
                    zeros * unit.dimensionless,
                    [
                        ParameterGradient(
                            key=parameter_key,
                            value=(zeros * unit.dimensionless /
                                   reverse_parameter_value.units),
                        )
                    ],
                ),
            })

        potential_gradient = ParameterGradient(
            key=parameter_key,
            value=(forward_energies[ObservableType.PotentialEnergy].value -
                   reverse_energies[ObservableType.PotentialEnergy].value) /
            (forward_parameter_value - reverse_parameter_value),
        )
        reduced_potential_gradient = ParameterGradient(
            key=parameter_key,
            value=(forward_energies[ObservableType.ReducedPotential].value -
                   reverse_energies[ObservableType.ReducedPotential].value) /
            (forward_parameter_value - reverse_parameter_value),
        )

        gradients[ObservableType.PotentialEnergy].append(potential_gradient)
        gradients[ObservableType.TotalEnergy].append(potential_gradient)
        gradients[ObservableType.Enthalpy].append(potential_gradient)
        gradients[ObservableType.ReducedPotential].append(
            reduced_potential_gradient)

        if ObservableType.KineticEnergy in observables:
            gradients[ObservableType.KineticEnergy].append(
                ParameterGradient(
                    key=parameter_key,
                    value=(
                        np.zeros(potential_gradient.value.shape) *
                        observables[ObservableType.KineticEnergy].value.units /
                        reverse_parameter_value.units),
                ))
        if ObservableType.Density in observables:
            gradients[ObservableType.Density].append(
                ParameterGradient(
                    key=parameter_key,
                    value=(np.zeros(potential_gradient.value.shape) *
                           observables[ObservableType.Density].value.units /
                           reverse_parameter_value.units),
                ))
        if ObservableType.Volume in observables:
            gradients[ObservableType.Volume].append(
                ParameterGradient(
                    key=parameter_key,
                    value=(np.zeros(potential_gradient.value.shape) *
                           observables[ObservableType.Volume].value.units /
                           reverse_parameter_value.units),
                ))

    for observable_type in observables:

        observables[observable_type] = ObservableArray(
            value=observables[observable_type].value,
            gradients=gradients[observable_type],
        )
Example #22
    def _bootstrap_function(self, **kwargs: ObservableArray) -> Observable:
        """The function to perform on the data set being sampled by
        bootstrapping.

        Parameters
        ----------
        observables
            The bootstrap sample values.

        Returns
        -------
            The result of evaluating the data.
        """

        # The simple base function only supports a single observable.
        assert len(kwargs) == 1

        # Compute the mean observable.
        sample_observable = next(iter(kwargs.values()))
        mean_observable = np.mean(sample_observable.value, axis=0)

        if sample_observable.value.shape[1] > 1:
            mean_observable = mean_observable.reshape(1, -1)
        else:
            mean_observable = mean_observable.item()

        # Retrieve the potential gradients for easy access
        potential_gradients = {
            gradient.key: gradient.value
            for gradient in (
                []
                if self.potential_energies == UNDEFINED
                else self.potential_energies.gradients
            )
        }
        observable_gradients = {
            gradient.key: gradient
            for gradient in (
                []
                if self.potential_energies == UNDEFINED
                else sample_observable.gradients
            )
        }

        # Compute the mean gradients.
        gradients = []

        for gradient_key in observable_gradients:

            gradient = observable_gradients[gradient_key]

            value = np.mean(gradient.value, axis=0) - self.thermodynamic_state.beta * (
                np.mean(
                    sample_observable.value * potential_gradients[gradient.key],
                    axis=0,
                )
                - (
                    np.mean(sample_observable.value, axis=0)
                    * np.mean(potential_gradients[gradient.key], axis=0)
                )
            )

            if sample_observable.value.shape[1] > 1:
                value = value.reshape(1, -1)
            else:
                value = value.item()

            gradients.append(ParameterGradient(key=gradient.key, value=value))

        return_type = (
            Observable if sample_observable.value.shape[1] == 1 else ObservableArray
        )
        return return_type(value=mean_observable, gradients=gradients)
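
The gradient computed inside the loop is the standard fluctuation formula for the derivative of an ensemble average, d⟨A⟩/dθ = ⟨dA/dθ⟩ - β(⟨A dU/dθ⟩ - ⟨A⟩⟨dU/dθ⟩). A unit-free numpy sketch of that combination, with random stand-in arrays and β treated as a plain float:

import numpy as np

rng = np.random.default_rng(0)

beta = 0.4                                   # 1 / kT in arbitrary units
observable = rng.normal(size=1000)           # per-frame values of A
observable_gradient = rng.normal(size=1000)  # per-frame dA / dtheta
potential_gradient = rng.normal(size=1000)   # per-frame dU / dtheta

# d<A>/dtheta = <dA/dtheta> - beta * (<A dU/dtheta> - <A><dU/dtheta>)
average_gradient = observable_gradient.mean() - beta * (
    (observable * potential_gradient).mean()
    - observable.mean() * potential_gradient.mean()
)

print(average_gradient)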
Example #23
def compute_dielectric_constant(
    dipole_moments: ObservableArray,
    volumes: ObservableArray,
    temperature: unit.Quantity,
    average_function,
) -> Observable:
    """A function to compute the average dielectric constant from an array of
    dipole moments and an array of volumes, whereby the average values of the
    observables are computed using a custom function.

    Parameters
    ----------
    dipole_moments
        The dipole moments array.
    volumes
        The volume array.
    temperature
        The temperature at which the dipole_moments and volumes were sampled.
    average_function
        The function to use when evaluating the average of an observable.

    Returns
    -------
        The average value of the dielectric constant.
    """

    dipole_moments_sqr = dipole_moments * dipole_moments
    dipole_moments_sqr = ObservableArray(
        value=dipole_moments_sqr.value.sum(axis=1),
        gradients=[
            ParameterGradient(gradient.key, gradient.value.sum(axis=1))
            for gradient in dipole_moments_sqr.gradients
        ],
    )

    avg_sqr_dipole_moments = average_function(observable=dipole_moments_sqr)
    avg_sqr_dipole_moments = ObservableArray(
        avg_sqr_dipole_moments.value, avg_sqr_dipole_moments.gradients
    )

    avg_dipole_moment = average_function(observable=dipole_moments)

    avg_dipole_moment_sqr = avg_dipole_moment * avg_dipole_moment
    avg_dipole_moment_sqr = ObservableArray(
        value=avg_dipole_moment_sqr.value.sum(axis=1),
        gradients=[
            ParameterGradient(gradient.key, gradient.value.sum(axis=1))
            for gradient in avg_dipole_moment_sqr.gradients
        ],
    )

    avg_volume = average_function(observable=volumes)
    avg_volume = ObservableArray(avg_volume.value, avg_volume.gradients)

    dipole_variance = avg_sqr_dipole_moments - avg_dipole_moment_sqr

    prefactor = 1.0 / (3.0 * E0 * unit.boltzmann_constant * temperature)

    dielectric_constant = 1.0 * unit.dimensionless + prefactor * (
        dipole_variance / avg_volume
    )

    return Observable(
        value=dielectric_constant.value.item().to(unit.dimensionless),
        gradients=[
            ParameterGradient(
                gradient.key,
                gradient.value.item().to(
                    unit.dimensionless
                    * gradient.value.units
                    / dielectric_constant.value.units
                ),
            )
            for gradient in dielectric_constant.gradients
        ],
    )
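
The quantity assembled by this function is the usual fluctuation expression for the static dielectric constant, ε = 1 + (⟨M²⟩ - ⟨M⟩²) / (3 ε₀ k_B T ⟨V⟩), with M the total dipole moment and V the box volume. A unit-free numpy sketch of just that arithmetic, using made-up SI-unit numbers rather than a validated property calculation:

import numpy as np

rng = np.random.default_rng(0)

boltzmann_constant = 1.380649e-23       # J / K
vacuum_permittivity = 8.8541878128e-12  # F / m
temperature = 298.15                    # K

# Made-up per-frame dipole moments (C m) and box volumes (m^3).
dipole_moments = rng.normal(0.0, 1.0e-29, size=(500, 3))
volumes = rng.normal(3.0e-26, 1.0e-28, size=500)

dipole_variance = (dipole_moments ** 2).sum(axis=1).mean() - (
    dipole_moments.mean(axis=0) ** 2
).sum()

prefactor = 1.0 / (3.0 * vacuum_permittivity * boltzmann_constant * temperature)

dielectric_constant = 1.0 + prefactor * dipole_variance / volumes.mean()
print(dielectric_constant)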
Example #24
    def _execute(self, directory, available_resources):

        from scipy.special import logsumexp

        default_unit = unit.kilocalorie / unit.mole

        boltzmann_factor = (
            self.thermodynamic_state.temperature * unit.molar_gas_constant
        )
        boltzmann_factor.ito(default_unit)

        beta = 1.0 / boltzmann_factor

        values = [
            (-beta * value.value.to(default_unit)).to(unit.dimensionless).magnitude
            for value in self.values
        ]

        # Compute the mean.
        mean = logsumexp(values)

        # Compute the gradients of the mean.
        value_gradients = [
            {gradient.key: -beta * gradient.value for gradient in value.gradients}
            for value in self.values
        ]
        value_gradients_by_key = {
            gradient_key: [
                gradients_by_key[gradient_key] for gradients_by_key in value_gradients
            ]
            for gradient_key in value_gradients[0]
        }

        mean_gradients = []

        for gradient_key, gradient_values in value_gradients_by_key.items():

            expected_unit = value_gradients[0][gradient_key].units

            d_log_mean_numerator, d_mean_numerator_sign = logsumexp(
                values,
                b=[x.to(expected_unit).magnitude for x in gradient_values],
                return_sign=True,
            )
            d_mean_numerator = d_mean_numerator_sign * np.exp(d_log_mean_numerator)

            d_mean_d_theta = d_mean_numerator / np.exp(mean)

            mean_gradients.append(
                ParameterGradient(
                    key=gradient_key,
                    value=-boltzmann_factor * d_mean_d_theta * expected_unit,
                )
            )

        # Compute the standard error and 95% CI
        cycle_result = np.empty(self.bootstrap_cycles)

        for cycle_index, cycle in enumerate(range(self.bootstrap_cycles)):

            cycle_values = np.empty(len(self.values))

            for value_index, value in enumerate(self.values):

                cycle_mean = value.value.to(default_unit).magnitude
                cycle_sem = value.error.to(default_unit).magnitude

                sampled_value = np.random.normal(cycle_mean, cycle_sem) * default_unit
                cycle_values[value_index] = (
                    (-beta * sampled_value).to(unit.dimensionless).magnitude
                )

            # ΔG° = -RT × Log[ Σ_{n} exp(-βΔG°_{n}) ]
            cycle_result[cycle_index] = logsumexp(cycle_values)

        mean = -boltzmann_factor * mean
        sem = np.std(-boltzmann_factor * cycle_result)

        confidence_intervals = np.empty(2)
        sorted_statistics = np.sort(cycle_result)

        confidence_intervals[0] = sorted_statistics[int(0.025 * self.bootstrap_cycles)]
        confidence_intervals[1] = sorted_statistics[int(0.975 * self.bootstrap_cycles)]

        confidence_intervals = -boltzmann_factor * confidence_intervals

        self.result = Observable(value=mean.plus_minus(sem), gradients=mean_gradients)
        self.confidence_intervals = confidence_intervals
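
The gradient block above evaluates dΔG/dθ for ΔG = -RT ln Σ_n exp(-βΔG_n), which works out to the Boltzmann-weighted average of the individual gradients. A unit-free numpy/scipy sketch checking that analytic form against a central finite difference along a fictitious parameter:

import numpy as np
from scipy.special import logsumexp

RT = 298.0 * 1.987204259e-3  # kcal / mol at 298 K
beta = 1.0 / RT

delta_gs = np.array([-10.0, -20.0])  # kcal / mol
d_delta_gs = np.array([0.1, 0.2])    # d(delta_g) / d(theta)

def combined(free_energies: np.ndarray) -> float:
    return -RT * logsumexp(-beta * free_energies)

# Analytic gradient: the Boltzmann-weighted average of the individual gradients.
weights = np.exp(-beta * delta_gs)
analytic = (weights * d_delta_gs).sum() / weights.sum()

# Central finite difference of the combined free energy along theta.
step = 1.0e-6
numeric = (combined(delta_gs + step * d_delta_gs) -
           combined(delta_gs - step * d_delta_gs)) / (2.0 * step)

assert np.isclose(analytic, numeric)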
Example #25
    def _initialize(self, value: unit.Quantity, gradients: List[ParameterGradient]):

        expected_types = (int, float, numpy.ndarray)

        if value is not None:

            if not isinstance(value, unit.Quantity) or not isinstance(
                value.magnitude, expected_types
            ):
                raise TypeError(
                    "The value must be a unit-wrapped integer, float or numpy array."
                )

            if not issubclass(type(value.magnitude), numpy.ndarray):
                value = numpy.array([value.magnitude]) * value.units

            # Ensure the inner array has a uniform shape.
            if value.magnitude.ndim > 2:

                raise ValueError(
                    "The wrapped array must not contain more than two dimensions."
                )

            if value.magnitude.ndim < 2:
                value = value.reshape(-1, 1)

        reshaped_gradients = []

        if gradients is not None:

            if value is None:
                raise ValueError("A valid value must be provided.")

            # Make sure the value and gradients have the same wrapped type.
            if not all(
                isinstance(gradient.value, unit.Quantity)
                and isinstance(gradient.value.magnitude, expected_types)
                for gradient in gradients
            ):

                raise TypeError(
                    "The gradient values must be unit-wrapped integers, floats "
                    "or numpy arrays."
                )

            # Make sure the gradient values are all numpy arrays and make sure that
            # each has the same shape as the value.
            for gradient in gradients:

                gradient_value = gradient.value.magnitude

                if not isinstance(gradient.value.magnitude, numpy.ndarray):
                    gradient_value = numpy.array([gradient_value])

                if gradient_value.ndim < 2:
                    gradient_value = gradient_value.reshape(-1, 1)

                if gradient_value.ndim > 2:

                    raise ValueError(
                        "Gradient values must not contain more than two dimensions."
                    )

                if value.magnitude.shape[1] != gradient_value.shape[1]:

                    raise ValueError(
                        f"Gradient values should be {value.magnitude.shape[1]}-"
                        f"dimensional to match the dimensionality of the value."
                    )

                if gradient_value.shape[0] != value.magnitude.shape[0]:

                    raise ValueError(
                        f"Gradient values should have a length of "
                        f"{value.magnitude.shape[0]} to match the length of the value."
                    )

                reshaped_gradients.append(
                    ParameterGradient(
                        key=gradient.key,
                        value=unit.Quantity(gradient_value, gradient.value.units),
                    )
                )

        super(ObservableArray, self)._initialize(value, reshaped_gradients)
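
In practice this normalisation promotes scalar quantities to (1, 1) arrays and one-dimensional arrays to (N, 1) column vectors, which is why the subset and join tests above compare against numpy.array([[1.0], [3.0]]) style values. A short sketch of the expected shapes; the import locations are an assumption, since none of the snippets on this page show their imports, and gradients are omitted for brevity:

import numpy
from openff.evaluator import unit  # assumed import path
from openff.evaluator.utils.observables import ObservableArray  # assumed import path

# A scalar quantity is wrapped into a (1, 1) array...
scalar_observable = ObservableArray(value=1.0 * unit.kelvin)
assert scalar_observable.value.shape == (1, 1)

# ...and a 1-D array becomes an (N, 1) column vector.
vector_observable = ObservableArray(value=numpy.arange(4) * unit.kelvin)
assert vector_observable.value.shape == (4, 1)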
Example #26
    def _reweight_observables(
        self,
        weights: ObservableArray,
        mbar: pymbar.MBAR,
        target_reduced_potentials: ObservableArray,
        **observables: ObservableArray,
    ) -> typing.Union[ObservableArray, Observable]:
        """A function which computes the average value of an observable using
        weights computed from MBAR and from a set of component observables.

        Parameters
        ----------
        weights
            The MBAR weights
        observables
            The component observables which may be combined to yield the final
            average observable of interest.
        mbar
            A pre-computed MBAR object encoding information from the reference states.
            This will be used to compute the std error when not bootstrapping.
        target_reduced_potentials
            The reduced potentials at the target state. This will be used to compute
            the std error when not bootstrapping.

        Returns
        -------
            The re-weighted average observable.
        """

        observable = observables.pop("observable")
        assert len(observables) == 0

        return_type = ObservableArray if observable.value.shape[
            1] > 1 else Observable

        weighted_observable = weights * observable

        average_value = weighted_observable.value.sum(axis=0)
        average_gradients = [
            ParameterGradient(key=gradient.key,
                              value=gradient.value.sum(axis=0))
            for gradient in weighted_observable.gradients
        ]

        if return_type == Observable:

            average_value = average_value.item()
            average_gradients = [
                ParameterGradient(key=gradient.key,
                                  value=gradient.value.item())
                for gradient in average_gradients
            ]

        else:

            average_value = average_value.reshape(1, -1)
            average_gradients = [
                ParameterGradient(key=gradient.key,
                                  value=gradient.value.reshape(1, -1))
                for gradient in average_gradients
            ]

        if self.bootstrap_uncertainties is False:

            # Unfortunately we need to re-compute the average observable for now
            # as pymbar does not expose an easier way to compute the average
            # uncertainty.
            observable_dimensions = observable.value.shape[1]
            assert observable_dimensions == 1

            results = mbar.computeExpectations(
                observable.value.T.magnitude,
                target_reduced_potentials.value.T.magnitude,
                state_dependent=True,
            )

            uncertainty = results[1][-1] * observable.value.units
            average_value = average_value.plus_minus(uncertainty)

        return return_type(value=average_value, gradients=average_gradients)
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance


@pytest.mark.parametrize(
    "values",
    [
        [random.randint(1, 10) for _ in range(10)],
        [random.random() for _ in range(10)],
        [random.random() * unit.kelvin for _ in range(10)],
        [
            (random.random() * unit.kelvin).plus_minus(random.random() * unit.kelvin)
            for x in range(10)
        ],
        [
            ParameterGradient(
                ParameterGradientKey("a", "b", "c"), random.random() * unit.kelvin
            )
            for _ in range(10)
        ],
    ],
)
def test_add_values_protocol(values):

    with tempfile.TemporaryDirectory() as temporary_directory:

        add_quantities = AddValues("add")
        add_quantities.values = values

        add_quantities.execute(temporary_directory, ComputeResources())
        assert add_quantities.result == reduce(operator.add, values)