Example no. 1
    def __init__(
        self,
        sigma: unit.Quantity,
        point: unit.Quantity,
        radius: unit.Quantity,
        atom_idx: int,
        active_at: int = -1,
    ):
        """
        Flat well restraint that becomes active when atom moves outside of radius.
        Parameters
        ----------
        sigma : float, unit'd
        point : np.array, unit'd
        radius : float, unit'd
        atom_idx : list
            list of atoms idxs
        active_at : int
            Integer to indicccate at which state the restraint is fully active. Either 0 (for
            lambda 0), or 1 (for lambda 1) or -1 (always active)
        """

        assert type(sigma) == unit.Quantity
        assert type(point) == unit.Quantity
        super().__init__(sigma, point.value_in_unit(unit.angstrom), active_at)

        self.atom_idx = atom_idx
        self.cutoff_radius = radius.value_in_unit(unit.angstrom)  # cutoff radius in angstrom
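
A minimal usage sketch for this constructor. The enclosing class is not shown in the snippet, so the class name FlatBottomRestraint below is hypothetical, as are the concrete values:

from simtk import unit
import numpy as np

# FlatBottomRestraint is a hypothetical name for the class that owns the __init__ above.
restraint = FlatBottomRestraint(
    sigma=0.1 * unit.angstrom,
    point=np.array([0.0, 0.0, 0.0]) * unit.angstrom,
    radius=5.0 * unit.angstrom,
    atom_idx=0,
    active_at=-1,  # always active
)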
Example no. 2
    def calculate_energy(
        self,
        coordinate_list: unit.Quantity,
        lambda_value: float = 0.0,
        original_neural_network: bool = True,
        requires_grad_wrt_coordinates: bool = True,
        requires_grad_wrt_parameters: bool = True,
        include_restraint_energy_contribution: bool = True,
    ):
        """
        Given a coordinate set (x) the energy is calculated in kJ/mol.

        Parameters
        ----------
        x : list, [N][K][3] unit'd (distance unit)
            initial configuration
        lambda_value : float
            between 0.0 and 1.0 - at zero contributions of alchemical atoms are zero

        Returns
        -------
        NamedTuple
        """

        assert type(coordinate_list) == unit.Quantity
        assert 0.0 <= float(lambda_value) <= 1.0
        logger.debug(
            f"Including restraints: {include_restraint_energy_contribution}")

        logger.debug(f"Batch-size: {len(coordinate_list)}")

        coordinates = torch.tensor(
            coordinate_list.value_in_unit(unit.nanometer),
            requires_grad=requires_grad_wrt_coordinates,
            device=self.device,
            dtype=torch.float32,
        )
        logger.debug(f"coordinates tensor: {coordinates.size()}")

        energy_in_kT, restraint_energy_contribution_in_kT = self._calculate_energy(
            coordinates,
            lambda_value,
            original_neural_network,
            include_restraint_energy_contribution,
        )

        energy = np.array([e.item() for e in energy_in_kT]) * kT

        restraint_energy_contribution = (
            np.array([e.item() for e in restraint_energy_contribution_in_kT]) * kT
        )
        if requires_grad_wrt_parameters:
            return DecomposedEnergy(energy, restraint_energy_contribution,
                                    energy_in_kT)
        else:
            return DecomposedEnergy(energy, restraint_energy_contribution,
                                    energy_in_kT.detach())
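
A usage sketch for this method. The object name energy_function and the shapes below are assumptions; DecomposedEnergy is unpacked positionally in the order it is constructed above, since its field names are not shown in the snippet:

from simtk import unit
import numpy as np

# Hypothetical batch of 10 snapshots of a 22-atom system (values are placeholders).
coordinates = np.random.rand(10, 22, 3) * unit.nanometer

# `energy_function` is an assumed instance of the class that owns calculate_energy above.
result = energy_function.calculate_energy(
    coordinates,
    lambda_value=0.5,
    include_restraint_energy_contribution=True,
)
energy, restraint_contribution, energy_tensor_in_kT = result
print(energy)  # unit'd energies, one per snapshot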
Example no. 3
def sample_velocities(masses: Quantity, temperature: Quantity) -> np.ndarray:
    """Sample Maxwell-Boltzmann velocities ~ N(0, sqrt(kB T / m))"""
    n_particles = len(masses)
    spatial_dim = 3

    v_unscaled = np.random.randn(n_particles, spatial_dim)

    # intended to be consistent with timemachine.integrator:langevin_coefficients
    sigma = np.sqrt(BOLTZ * temperature.value_in_unit(kelvin)) * np.sqrt(
        1 / masses)
    v_scaled = v_unscaled * np.expand_dims(sigma, axis=1)

    assert v_scaled.shape == (n_particles, spatial_dim)

    return v_scaled
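
A usage sketch. The kelvin import below is an assumption about where the unit object used in the function comes from; despite the Quantity annotation, the function body computes 1 / masses directly, so plain NumPy masses in daltons are passed here:

import numpy as np
from simtk.unit import kelvin  # assumed source of the `kelvin` unit used above

masses = np.array([12.011, 1.008, 1.008, 1.008, 1.008])  # e.g. methane, in daltons
velocities = sample_velocities(masses, 300.0 * kelvin)
print(velocities.shape)  # (5, 3)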
Example no. 4
def _from_omm_quantity(val: simtk_unit.Quantity):
    """Helper function to convert float or array quantities tagged with SimTK/OpenMM units to
    a Pint-compatible quantity"""
    unit_ = val.unit
    val_ = val.value_in_unit(unit_)
    if type(val_) in {float, int}:
        return val_ * unit.Unit(str(unit_))
    elif type(val_) in {tuple, list, np.ndarray}:
        array = np.asarray(val_)
        return array * unit.Unit(str(unit_))
    else:
        raise UnitValidationError(
            "Found a simtk.unit.Unit wrapped around something other than a float-like "
            f"or np.ndarray-like. Found a unit wrapped around type {type(val_)}."
        )
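
A sketch of how this helper behaves on scalar and array inputs, assuming that `unit` in the snippet is the Pint-based unit registry of the surrounding module (the imports and values below are illustrative):

from simtk import unit as simtk_unit
import numpy as np

# Scalar: 4.0 femtoseconds tagged with SimTK units -> Pint-compatible quantity
print(_from_omm_quantity(4.0 * simtk_unit.femtosecond))

# Array: per-atom sigmas in nanometers
sigmas = np.array([0.3, 0.32, 0.31]) * simtk_unit.nanometer
print(_from_omm_quantity(sigmas))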
Example no. 5
    def calculate_energy(self, x: unit.Quantity):
        """
        Given a coordinate set (x) the energy is calculated in kJ/mol.
        Parameters
        ----------
        x : array of floats, unit'd (angstroms)
            initial configuration

        Returns
        -------
        energy : unit.quantity.Quantity
            energy in kJ/mol
        """

        assert type(x) in _allowable_quantities
        coordinates = torch.tensor([x.value_in_unit(unit.angstroms)],
                                   requires_grad=True, device=self.device, dtype=torch.float32)

        energy_in_hartrees = self._reform_as_energy_tensor(coordinates)
        energy = energy_in_hartrees.item() * self.hartree_to_kJ_per_mole * unit.kilojoule_per_mole
        return energy
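
A usage sketch, assuming `potential` is an instance of the class that owns this method and that a plain SimTK Quantity passes the _allowable_quantities check (both are assumptions):

from simtk import unit
import numpy as np

# Hypothetical water-like 3-atom configuration in angstroms.
x = np.array([[0.00, 0.00, 0.00],
              [0.96, 0.00, 0.00],
              [-0.24, 0.93, 0.00]]) * unit.angstrom

energy = potential.calculate_energy(x)
print(energy)  # unit.Quantity in kJ/mol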
Example no. 6
    def __init__(
        self,
        sigma: unit.Quantity,
        point: unit.Quantity,
        atom_idx: list,
        atoms: str,
        active_at: int = -1,
    ):
        """
        Center of mass restraint.

        Parameters
        ----------
        sigma : in angstrom
        point : np.array, unit'd
        atom_idx : list
            list of atoms idxs
        atoms: str
            Str of atoms to retrieve element information
        """
        assert type(sigma) == unit.Quantity
        assert type(point) == unit.Quantity
        super().__init__(sigma, point.value_in_unit(unit.angstrom), active_at)
        cutoff_radius = 0.1 * unit.angstrom
        self.cutoff_radius = cutoff_radius.value_in_unit(unit.angstrom)
        # only look at heavy atoms
        full_mass_list = [mass_dict_in_daltons[atoms[i]] for i in atom_idx]
        heavy_atoms_idx = [i for i, x in enumerate(full_mass_list) if x != 1.0]

        self.atom_idx = heavy_atoms_idx
        self.mass_list = [full_mass_list[i] for i in heavy_atoms_idx]

        scaled_masses = np.array(self.mass_list) / np.sum(self.mass_list)
        self.masses = torch.tensor(scaled_masses,
                                   dtype=torch.double,
                                   device=self.device,
                                   requires_grad=True)
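
A usage sketch for the center-of-mass restraint constructor. The class name CenterOfMassRestraint is hypothetical, and `atoms` is assumed to be a per-atom string of element symbols, which is how it is indexed above:

from simtk import unit
import numpy as np

# CenterOfMassRestraint is a hypothetical name for the class that owns the __init__ above.
com_restraint = CenterOfMassRestraint(
    sigma=0.2 * unit.angstrom,
    point=np.array([1.0, 2.0, 3.0]) * unit.angstrom,
    atom_idx=[0, 1, 2, 3, 4],
    atoms="CCOHH",   # element symbol for each atom in the system
    active_at=-1,
)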