Example No. 1
def test_OpenMMReporter():
    """
    test the OpenMMReporter object for its ability to make appropriate trajectory writes for particles,
    using the harmonic oscillator testsystem.

    NOTE : this test conducts dynamics on 5 particles defined by the harmonic oscillator testsystem with the coddiwomple.openmm.propagators.OMMBIP propagator
    equipped with the coddiwomple.openmm.integrators.OMMLI integrator, but does NOT explicitly conduct a full test of the propagators or integrators.
    """
    from coddiwomple.openmm.propagators import OMMBIP
    from coddiwomple.openmm.integrators import OMMLI

    temperature = 300 * unit.kelvin
    pressure = None
    from coddiwomple.openmm.states import OpenMMPDFState, OpenMMParticleState
    from coddiwomple.particles import Particle
    from coddiwomple.openmm.reporters import OpenMMReporter
    import shutil

    #create the default get_harmonic_testsystem
    testsystem, period, collision_rate, timestep, alchemical_functions = get_harmonic_testsystem(temperature = temperature)

    #create a particle state and 5 particles
    particles = []
    for i in range(5):
        particle_state = OpenMMParticleState(positions = testsystem.positions) #make a particle state
        particle = Particle(index = i, record_state=False, iteration = 0)
        particle.update_state(particle_state)
        particles.append(particle)

    #since we are copying over the positions, we need a simple assert statement to make sure that the hex(id(particle_state.positions)) values are separate in memory
    position_hexes = [hex(id(particle.state.positions)) for particle in particles]
    assert len(position_hexes) == len(set(position_hexes)), "positions are copied identically; this is a problem"

    #create a pdf_state
    pdf_state = OpenMMPDFState(system = testsystem.system, alchemical_composability = HarmonicAlchemicalState, temperature = temperature, pressure = pressure)

    #create an integrator
    integrator = OMMLI(temperature=temperature, collision_rate=collision_rate, timestep=timestep)

    #create a propagator
    propagator = OMMBIP(openmm_pdf_state = pdf_state, integrator = integrator)

    steps_per_application = 100

    #the only thing we want to do here is to run independent md for each of the particles and save trajectories; at the end, we will delete the directory and the traj files
    temp_traj_dir, temp_traj_prefix = os.path.join(os.getcwd(), 'test_dir'), 'traj_prefix'
    reporter = OpenMMReporter(trajectory_directory = 'test_dir', trajectory_prefix='traj_prefix', md_topology=testsystem.mdtraj_topology)
    assert reporter.write_traj

    num_applications=10
    for application_index in range(num_applications):
        returnables = [propagator.apply(particle.state, n_steps=steps_per_application, reset_integrator=True, apply_pdf_to_context=True, randomize_velocities=True) for particle in particles]
        _save = (application_index == num_applications - 1)
        reporter.record(particles, save_to_disk=_save)
    assert reporter.hex_counter == len(reporter.hex_dict)
    assert os.path.exists(temp_traj_dir)
    assert os.listdir(temp_traj_dir) is not None

    #then we can delete
    shutil.rmtree(temp_traj_dir)
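A minimal sketch of the kind of fixture assumed above: `get_harmonic_testsystem` is not shown in this listing, so the helper below is a hypothetical stand-in built on openmmtools.testsystems.HarmonicOscillator. The parameter choices (collision rate, timestep, the empty alchemical_functions placeholder) are assumptions, not the package's actual implementation.

# hypothetical stand-in for the get_harmonic_testsystem fixture used by the tests above
import numpy as np
from simtk import unit
from openmmtools import testsystems

def get_harmonic_testsystem(temperature=300 * unit.kelvin):
    testsystem = testsystems.HarmonicOscillator()                    # single particle in a harmonic well
    period = 2 * np.pi * unit.sqrt(testsystem.mass / testsystem.K)   # natural oscillation period
    collision_rate = 1.0 / unit.picoseconds                          # heuristic; the real helper may differ
    timestep = period / 20.0                                         # resolve the oscillation with ~20 steps per period
    alchemical_functions = {}                                        # placeholder; the real helper defines a lambda protocol
    return testsystem, period, collision_rate, timestep, alchemical_functions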
Example No. 2
def _before_integration(self, *args, **kwargs):
    """
    update the particle with the particle state
    """
    from coddiwomple.particles import Particle
    super()._before_integration(*args, **kwargs)
    particle_state = args[0]
    self.particle = Particle(index = 0, iteration = 0)
    if self._write_trajectory:
        particle_state.update_from_context(self.context, ignore_velocities=True)
        self.particle.update_state(particle_state)
        self.reporter.record([self.particle])
Example No. 3
def test_OpenMMParticleState():
    """
    conduct a class-wide test on coddiwomple.openmm.states.OpenMMParticleState with the `get_harmonic_testsystem` testsystem
    this will assert successes on __init__, as well as _all_ methods in the coddiwomple.particles.Particle class
    """
    temperature = 300 * unit.kelvin
    pressure = None
    from coddiwomple.openmm.states import OpenMMPDFState, OpenMMParticleState
    from coddiwomple.particles import Particle

    #create the default get_harmonic_testsystem
    testsystem, period, collision_rate, timestep, alchemical_functions = get_harmonic_testsystem(temperature = temperature)

    #test __init__ method
    particle_state = OpenMMParticleState(positions = testsystem.positions) #make a particle state
    particle = Particle(index = 0, record_state=False, iteration = 0)

    #test update_state
    assert particle.state is None
    assert not particle._record_states
    particle.update_state(particle_state)
    assert particle.state is not None

    #test update_iteration
    assert particle.iteration == 0
    particle.update_iteration()
    assert particle.iteration == 1

    #test update ancestry
    assert particle.ancestry == [0]
    particle.update_ancestry(1)
    assert particle.ancestry == [0,1]
Example No. 4
def propagator_testprep():
    """
    wrapper that outputs all necessary Propagator (and subclass) inputs for testing (the test system is butane solvated in tip3p)

    returns
        pdf_state : coddiwomple.OpenMMPDFState
            subclass of openmmtools.states.ThermodynamicState of the system object
        pdf_state_subset : coddiwomple.OpenMMPDFState
            subclass of openmmtools.states.ThermodynamicState of the system_subset object
        integrator : qmlify.propagation.Integrator
            integrator that equips the Propagator
        ani_handler : qmlify.ANI_force_and_energy
            handler of the ani components
        atom_map : dict
            index map of {md_top_atom_index : md_subset_top_atom_index} pairing each matching md_topology atom index (key) with the corresponding md_subset_topology atom index (value)
        particle : coddiwomple.particles.Particle
            particle containing a coddiwomple.openmm.OpenMMParticleState (subclass of openmmtools.states.SamplerState)
    """
    import mdtraj as md
    from coddiwomple.particles import Particle
    from coddiwomple.openmm.states import OpenMMParticleState
    from qmlify.utils import generate_propagator_inputs

    vac_sys_pos_top, sol_sys_pos_top = generate_testsystem()
    vac_system, vac_positions, vac_topology = vac_sys_pos_top
    sol_system, sol_positions, sol_topology = sol_sys_pos_top

    md_topology = md.Topology.from_openmm(sol_topology)
    md_subset_topology = md.Topology.from_openmm(vac_topology)
    pdf_state, pdf_state_subset, integrator, ani_handler, atom_map = generate_propagator_inputs(system = sol_system,
                                                                                                system_subset = vac_system,
                                                                                                md_topology = md_topology,
                                                                                                md_subset_topology = md_subset_topology)
    particle = Particle(0)
    box_vectors = sol_system.getDefaultPeriodicBoxVectors()
    particle_state = OpenMMParticleState(positions = sol_positions, box_vectors = box_vectors)
    particle.update_state(particle_state)

    return pdf_state, pdf_state_subset, integrator, ani_handler, atom_map, particle
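The `atom_map` documented above pairs solvated-system atom indices with vacuum-subset atom indices. As a rough illustration (an assumption about the mapping convention, since generate_propagator_inputs may build it differently), such a map can be sketched from the two mdtraj topologies when the solute atoms share ordering:

# illustrative sketch of an {md_top_atom_index : md_subset_top_atom_index} map
import mdtraj as md

def sketch_atom_map(md_topology, md_subset_topology):
    # assumes the vacuum (subset) topology lists the solute atoms in the same order as the solvated topology
    solute_indices = md_topology.select('not water')           # butane atom indices in the solvated system
    assert len(solute_indices) == md_subset_topology.n_atoms   # sanity check: same solute atom count
    return {int(full_index): subset_index for subset_index, full_index in enumerate(solute_indices)}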
Example No. 5
class OMMAISPR(OMMAISP):
    """
    OpenMMAISP Reportable

    OMMAISPR is a simple subclass of OMMAISP that equips an OpenMMReporter object and writes a trajectory to disk at specified iterations
    """
    def __init__(self,
                 openmm_pdf_state,
                 integrator,
                 record_state_work_interval = None,
                 reporter = None,
                 trajectory_write_interval = 1,
                 context_cache=None,
                 reassign_velocities=False,
                 n_restart_attempts=0):
        """
        see super (i.e. OMMAISP)

        arguments (new):
            reporter : coddiwomple.openmm.reporter.OpenMMReporter, default None
                a reporter object to write trajectories
            trajectory_write_interval : int, default 1
                write the trajectory every trajectory_write_interval intervals
        """
        super().__init__(openmm_pdf_state = openmm_pdf_state,
                         integrator = integrator,
                         record_state_work_interval = record_state_work_interval,
                         context_cache=context_cache,
                         reassign_velocities=reassign_velocities,
                         n_restart_attempts=n_restart_attempts)

        self._write_trajectory = False if reporter is None else True
        self.reporter = reporter
        self.particle = None
        self._trajectory_write_interval = trajectory_write_interval if self._write_trajectory else None

    def _before_integration(self, *args, **kwargs):
        """
        update the particle with the particle state
        """
        from coddiwomple.particles import Particle
        super()._before_integration(*args, **kwargs)
        particle_state = args[0]
        self.particle = Particle(index = 0, iteration = 0)
        if self._write_trajectory:
            particle_state.update_from_context(self.context, ignore_velocities=True)
            self.particle.update_state(particle_state)
            self.reporter.record([self.particle])

    def _during_integration(self, *args, **kwargs):
        """
        write trajectory if we are allowed and if we satisfy the interval criterion
        """
        super()._during_integration(*args, **kwargs)
        particle_state = args[0]
        if self._write_trajectory:
            integrator_variables = self._get_global_integrator_variables()
            iteration = integrator_variables['iteration']
            n_iterations = integrator_variables['niterations']
            if iteration % self._trajectory_write_interval == 0:
                particle_state.update_from_context(self.context, ignore_velocities=True)
                if iteration == n_iterations:
                    try:
                        self.reporter.record([self.particle], save_to_disk=True)
                    except Exception as e:
                        _logger.warning(f"{e}")
                    self.reporter.reset()
                else:
                    self.reporter.record([self.particle])

    def _after_integration(self, *args, **kwargs):
        """
        write trajectory if we are allowed and if we satisfy the interval criterion
        """
        super()._after_integration(*args, **kwargs)
        particle_state = args[0]
        if self._write_trajectory:
            particle_state.update_from_context(self.context, ignore_velocities=True)
            integrator_variables = self._get_global_integrator_variables()
            iteration = integrator_variables['iteration']
            if iteration % self._trajectory_write_interval == 0:
                pass ##do not record if the state work was recorded at the last `_during_integration` pass
            else:
                self.reporter.record([self.particle], save_to_disk = True)
                self.reporter.reset()
Example No. 6
def run(setup_dict):
    """
    execute a Propagator
    """
    import torchani
    from simtk import unit
    import sys
    import numpy as np
    import mdtraj as md
    from coddiwomple.particles import Particle
    from coddiwomple.openmm.states import OpenMMParticleState
    from qmlify.utils import load_yaml, deserialize_xml, position_extractor, generate_propagator_inputs, depickle

    #pull systems
    system = deserialize_xml(setup_dict['system'])
    system_subset = deserialize_xml(setup_dict['subset_system'])

    #load topologies
    md_topology = md.Topology.from_openmm(depickle(setup_dict['topology']))
    md_subset_topology = md.Topology.from_openmm(
        depickle(setup_dict['subset_topology']))

    #load positions and box vectors
    positions, box_vectors = position_extractor(
        positions_cache_filename=setup_dict['positions_cache_filename'],
        index_to_extract=setup_dict['position_extraction_index'])
    positions *= unit.nanometers
    if box_vectors is not None: box_vectors *= unit.nanometers

    #default integrator kwargs
    default_integrator_kwargs = {
        'temperature': 300.0 * unit.kelvin,
        'collision_rate': 1.0 / unit.picoseconds,
        'timestep': 1.0 * unit.femtoseconds,
        'splitting': "V R O R F",
        'constraint_tolerance': 1e-6,
        'pressure': 1.0 * unit.atmosphere
    }
    if 'integrator_kwargs' in setup_dict.keys():
        integrator_kwargs = setup_dict['integrator_kwargs']
        if integrator_kwargs is not None:
            if 'temperature' in integrator_kwargs.keys():
                integrator_kwargs['temperature'] *= unit.kelvin
            if 'collision_rate' in integrator_kwargs.keys():
                integrator_kwargs['collision_rate'] /= unit.picoseconds
            if 'timestep' in integrator_kwargs.keys():
                integrator_kwargs['timestep'] *= unit.femtoseconds
            if 'pressure' in integrator_kwargs.keys() and integrator_kwargs['pressure'] is not None:
                integrator_kwargs['pressure'] *= unit.atmosphere
            default_integrator_kwargs.update(integrator_kwargs)

    pdf_state, pdf_state_subset, integrator, ani_handler, atom_map = generate_propagator_inputs(
        system=system,
        system_subset=system_subset,
        md_topology=md_topology,
        md_subset_topology=md_subset_topology,
        ani_model=torchani.models.ANI2x(),
        integrator_kwargs=default_integrator_kwargs)

    if setup_dict['direction'] == 'forward':
        from qmlify.propagation import Propagator
        prop = Propagator
    elif setup_dict['direction'] == 'backward':
        from qmlify.propagation import BackwardPropagator
        prop = BackwardPropagator
    elif setup_dict['direction'] == 'ani_endstate':
        from qmlify.propagation import ANIPropagator
        prop = ANIPropagator
    else:
        raise Exception(
            f"{setup_dict['direction']} is not valid. allowed directions are 'forward', 'backward', 'ani_endstate'"
        )

    propagator = prop(openmm_pdf_state=pdf_state,
                      openmm_pdf_state_subset=pdf_state_subset,
                      subset_indices_map=atom_map,
                      integrator=integrator,
                      ani_handler=ani_handler,
                      context_cache=None,
                      reassign_velocities=True,
                      n_restart_attempts=0)

    particle = Particle(0)
    particle_state = OpenMMParticleState(positions=positions,
                                         box_vectors=box_vectors)
    particle.update_state(particle_state)
    particle_state, _return_dict = propagator.apply(
        particle_state,
        n_steps=setup_dict['num_steps'],
        reset_integrator=True,
        apply_pdf_to_context=True)
    if box_vectors is None:
        particle_state.box_vectors = None

    work_array = np.array(propagator.state_works[0])

    if particle_state.box_vectors is not None:
        np.savez(setup_dict['out_positions_npz'],
                 positions=np.array([
                     particle_state.positions.value_in_unit_system(
                         unit.md_unit_system)
                 ]),
                 box_vectors=np.array([
                     particle_state.box_vectors.value_in_unit_system(
                         unit.md_unit_system)
                 ]))
    else:
        np.savez(setup_dict['out_positions_npz'],
                 positions=particle_state.positions.value_in_unit_system(
                     unit.md_unit_system))
    np.savez(setup_dict['out_works_npz'], works=work_array)
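For orientation, `setup_dict` (typically loaded with `load_yaml`) needs at least the keys read above; a hypothetical example, with placeholder file names, could look like this:

# hypothetical setup_dict for run(); the paths are placeholders, the keys are the ones read above
setup_dict = {
    'system': 'complex_system.xml',               # serialized openmm.System of the full (MM) system
    'subset_system': 'ligand_system.xml',         # serialized openmm.System of the ANI-treated subset
    'topology': 'complex_topology.pkl',           # pickled openmm topology of the full system
    'subset_topology': 'ligand_topology.pkl',     # pickled openmm topology of the subset
    'positions_cache_filename': 'endstate_cache.npz',
    'position_extraction_index': 0,               # which cached frame to anneal
    'integrator_kwargs': None,                    # or a dict of plain floats; units are attached above
    'direction': 'forward',                       # 'forward', 'backward', or 'ani_endstate'
    'num_steps': 5000,
    'out_positions_npz': 'out.positions.npz',
    'out_works_npz': 'out.works.npz',
}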
Example No. 7
def ANI_endstate_sampler(
                         system,
                         system_subset,
                         subset_indices_map,
                         positions_cache_filename,
                         md_topology,
                         index_to_run,
                         steps_per_application,
                         integrator_kwargs = {'temperature': 300.0 * unit.kelvin,
                                                      'collision_rate': 1.0 / unit.picoseconds,
                                                      'timestep': 2.0 * unit.femtoseconds,
                                                      'splitting': "V R O R F",
                                                      'constraint_tolerance': 1e-6,
                                                      'pressure': 1.0 * unit.atmosphere},
                        position_extractor = None
                        ):
    """
    conduct ani endstate sampling

    arguments
        system : openmm.System
            system
        system_subset : openmm.System
            subset system
        subset_indices_map : dict
            dict of {openmm_pdf_state atom_index : openmm_pdf_state_subset atom index}
        positions_cache_filename : str
            path to the cached positions
        md_topology : mdtraj.Topology
            topology from which the species string of the subset atoms is extracted
        index_to_run : int
            index of the positions to anneal
        steps_per_application : int
            number of integration steps per application
        integrator_kwargs : dict, see default
            kwargs to pass to OMMLIAIS integrator
        position_extractor : function, default None
            function to extract appropriate positions from the cache
    """
    from coddiwomple.particles import Particle
    from coddiwomple.openmm.states import OpenMMParticleState, OpenMMPDFState
    #load the endstate cache
    traj = np.load(positions_cache_filename)
    positions = traj['positions'][index_to_run,:,:] * unit.nanometers
    if position_extractor is not None:
        positions = position_extractor(positions)

    try:
        box_vectors = traj['box_vectors'][index_to_run,:,:] * unit.nanometers
    except Exception as e:
        box_vectors = None

    species_str = ''.join([atom.element.symbol for atom in md_topology.subset(list(subset_indices_map.keys())).atoms])
    _logger.info(f"species string: {species_str}")
    ani_handler = ANI1_force_and_energy(model = torchani.models.ANI1ccx(),
                                                 atoms=species_str,
                                                 platform='cpu',
                                                 temperature=integrator_kwargs['temperature'])

    #make thermostates
    pressure = integrator_kwargs['pressure'] if box_vectors is not None else None
    pdf_state = ThermodynamicState(system = system, temperature = integrator_kwargs['temperature'], pressure=pressure)
    pdf_state_subset = ThermodynamicState(system = system_subset, temperature = integrator_kwargs['temperature'], pressure = None)


    #make an integrator
    integrator = Integrator(**integrator_kwargs)

    #make a propagator
    propagator = ANIPropagator(openmm_pdf_state = pdf_state,
                 openmm_pdf_state_subset = pdf_state_subset,
                 subset_indices_map = subset_indices_map,
                 integrator = integrator,
                 ani_handler = ani_handler,
                 context_cache=None,
                 reassign_velocities=True,
                 n_restart_attempts=0,
                 reporter = None)


    particle = Particle(0)
    particle_state = OpenMMParticleState(positions = positions, box_vectors = box_vectors)
    particle.update_state(particle_state)
    particle_state, _return_dict = propagator.apply(particle_state, n_steps = steps_per_application, reset_integrator=True, apply_pdf_to_context=True)
    if box_vectors is None:
        particle_state.box_vectors=None
    
    return particle_state, np.array(propagator.state_works[0])
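The positions cache consumed above is a plain .npz archive: a 'positions' array of shape (n_frames, n_atoms, 3) in nanometers and, for periodic systems, a 'box_vectors' array of shape (n_frames, 3, 3). A minimal sketch of writing a compatible cache (the array contents are placeholders):

# minimal sketch of a compatible positions cache; the values are placeholders
import numpy as np

n_frames, n_atoms = 10, 2632
positions = np.zeros((n_frames, n_atoms, 3))                  # nm, one frame per row
box_vectors = np.tile(np.eye(3) * 3.5, (n_frames, 1, 1))      # nm; omit this array for vacuum systems
np.savez('endstate_cache.npz', positions=positions, box_vectors=box_vectors)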
Example No. 8
def annealed_importance_sampling(direction,
                                 system,
                                 system_subset,
                                 subset_indices_map,
                                 positions_cache_filename,
                                 index_to_run,
                                 directory_name,
                                 trajectory_prefix,
                                 md_topology,
                                 steps_per_application,
                                 integrator_kwargs = {'temperature': 300.0 * unit.kelvin,
                                                      'collision_rate': 1.0 / unit.picoseconds,
                                                      'timestep': 1.0 * unit.femtoseconds,
                                                      'splitting': "V R O R F",
                                                      'constraint_tolerance': 1e-6,
                                                      'pressure': 1.0 * unit.atmosphere},
                                 save_indices = None,
                                 position_extractor = None,
                                 write_trajectory_interval=1
                                ):
    """
    conduct annealed importance sampling in the openmm regime; will write the accumulated work dictionary after each application

    arguments
        direction : str
            forward or backward
        system : openmm.System
            system
        system_subset : openmm.System
            subset system
        subset_indices_map : dict
            dict of {openmm_pdf_state atom_index : openmm_pdf_state_subset atom index}
        positions_cache_filename : str
            path to the cache positions
        index_to_run : int
            index of the positions to anneal
        directory_name : str
            directory that will be written to
        trajectory_prefix : str
            .pdb prefix
        md_topology : mdtraj.Topology
            topology that will write the trajectory
        steps_per_application : int
            number of integration steps per application
        integrator_kwargs : dict, see default
            kwargs to pass to OMMLIAIS integrator
        save_indices : list(int)
            list of indices of md_topology atoms to save to disk
        position_extractor : function, default None
            function to extract appropriate positions from the cache
        write_trajectory_interval : int
            frequency with which to write trajectory to disk
    """
    from coddiwomple.particles import Particle
    from coddiwomple.openmm.states import OpenMMParticleState, OpenMMPDFState
    #load the endstate cache
    traj = np.load(positions_cache_filename)
    positions = traj['positions'][index_to_run,:,:] * unit.nanometers
    if position_extractor is not None:
        positions = position_extractor(positions)

    try:
        box_vectors = traj['box_vectors'][index_to_run,:,:] * unit.nanometers
    except Exception as e:
        box_vectors = None

    assert direction in ['forward', 'backward']

    #make a handle object for ANI
    species_str = ''.join([atom.element.symbol for atom in md_topology.subset(list(subset_indices_map.keys())).atoms])
    _logger.info(f"species string: {species_str}")
    ani_handler = ANI1_force_and_energy(model = torchani.models.ANI1ccx(),
                                                 atoms=species_str,
                                                 platform='cpu',
                                                 temperature=integrator_kwargs['temperature'])

    #make thermostates
    pressure = integrator_kwargs['pressure'] if box_vectors is not None else None
    pdf_state = ThermodynamicState(system = system, temperature = integrator_kwargs['temperature'], pressure=pressure)
    pdf_state_subset = ThermodynamicState(system = system_subset, temperature = integrator_kwargs['temperature'], pressure = None)

    #make a reporter
    saveable_topology = md_topology.subset(save_indices)
    reporter = OpenMMReporter(directory_name, trajectory_prefix, saveable_topology, subset_indices = save_indices)


    #make an integrator
    integrator = Integrator(**integrator_kwargs)

    #make a propagator
    if direction == 'forward':
        propagator = Propagator(openmm_pdf_state = pdf_state,
                     openmm_pdf_state_subset = pdf_state_subset,
                     subset_indices_map = subset_indices_map,
                     integrator = integrator,
                     ani_handler = ani_handler,
                     context_cache=None,
                     reassign_velocities=True,
                     n_restart_attempts=0,
                     reporter = reporter,
                     write_trajectory_interval = write_trajectory_interval)
    else:
        propagator = BackwardPropagator(openmm_pdf_state = pdf_state,
                     openmm_pdf_state_subset = pdf_state_subset,
                     subset_indices_map = subset_indices_map,
                     integrator = integrator,
                     ani_handler = ani_handler,
                     context_cache=None,
                     reassign_velocities=True,
                     n_restart_attempts=0,
                     reporter = reporter,
                     write_trajectory_interval = write_trajectory_interval)



    particle = Particle(0)
    particle_state = OpenMMParticleState(positions = positions, box_vectors = box_vectors)
    particle.update_state(particle_state)
    particle_state, _return_dict = propagator.apply(particle_state, n_steps = steps_per_application, reset_integrator=True, apply_pdf_to_context=True)
    if box_vectors is None:
        particle_state.box_vectors=None


    return particle_state, np.array(propagator.state_works[0])
Example No. 9
    def __init__(self,
                 openmm_pdf_state,
                 openmm_pdf_state_subset,
                 subset_indices_map,
                 integrator,
                 ani_handler,
                 context_cache=None,
                 reassign_velocities=True,
                 n_restart_attempts=0,
                 reporter=None,
                 write_trajectory_interval = 1,
                 **kwargs):
        """
        arguments
            openmm_pdf_state : openmmtools.states.ThermodynamicState
                the pdf state of the propagator
            openmm_pdf_state_subset : openmmtools.states.ThermodynamicState
                the pdf state of the atom subset
            subset_indices_map : dict
                dict of {openmm_pdf_state atom_index : openmm_pdf_state_subset atom index}
            integrator : openmm.Integrator
                integrator of dynamics
            ani_handler : ANI1_force_and_energy
                handler for ani forces and potential energy
            context_cache : openmmtools.cache.ContextCache, optional default:None
                The ContextCache to use for Context creation. If None, the global cache
                openmmtools.cache.global_context_cache is used.
            reassign_velocities : bool, optional default:False
                If True, the velocities will be reassigned from the Maxwell-Boltzmann
                distribution at the beginning of the move.
            n_restart_attempts : int, optional default:0
                When greater than 0, if after the integration there are NaNs in energies,
                the move will restart. When the integrator has a random component, this
                may help recovering. On the last attempt, the ``Context`` is
                re-initialized in a slower process, but better than the simulation
                crashing. An IntegratorMoveError is raised after the given number of
                attempts if there are still NaNs.
            reporter : coddiwomple.openmm.reporter.OpenMMReporter, default None
                a reporter object to write trajectories
            write_trajectory_interval : int
                frequency of writing trajectory
        """
        super().__init__(openmm_pdf_state,
                 integrator,
                 context_cache,
                 reassign_velocities,
                 n_restart_attempts)
        #create a pdf state for the subset indices (usually a vacuum system)
        self.pdf_state_subset = openmm_pdf_state_subset
        assert self.pdf_state_subset.temperature == self.pdf_state.temperature, f"the temperatures of the pdf states do not match"

        #create a dictionary for subset indices
        self._subset_indices_map = subset_indices_map

        #create an ani handler attribute that can be referenced
        self.ani_handler = ani_handler

        #create a context for the subset atoms that can be referenced
        self.context_subset, _ = cache.global_context_cache.get_context(self.pdf_state_subset)

        #create a reporter for the accumulated works
        self._state_works = {}
        self._state_works_counter = 0

        #create a reporter
        self._write_trajectory = False if reporter is None else True
        self.reporter=reporter
        if self._write_trajectory:
            from coddiwomple.particles import Particle
            self.particle = Particle(0)
            self.write_trajectory_interval=write_trajectory_interval
        else:
            self.particle = None
            self.write_trajectory_interval=None
Example No. 10
class Propagator(OMMBIP):
    """
    Propagator pseudocode:
    Step 1: initialization--
        set iteration = 0, n_iterations = n_iterations, lambda  = 0 (i.e. iteration / n_iterations); work_accumulated = 0.0
        generate sample x_0 ~ e^(-p(x))
        evaluate work_incremental = 0 (i.e. u_mm(x_0) - g(x_0), but we presume that g = u_mm(.))
        work_accumulated <- work_accumulated + work_incremental
        x' = x_0
    Step 2: sampling
        for increment in range(n_iterations):
            x = x'
            ante_perturbation_potential =  (1 - lambda) * u_mm(x) + lambda * u_ani_mm_mix(x)
            set iteration <- iteration + 1.0; lambda <- iteration / n_iterations
            evaluate work_incremental = [(1 - lambda) * u_mm(x) + lambda * u_ani_mm_mix(x)] - ante_perturbation_potential
            work_accumulated <- work_accumulated + work_incremental
            create a modified force: modified_f = (1 - lambda) * f_mm + lambda * f_ani_mm_mix (where f_. = -grad(u_.) )
            x' =  V R O R (where V deterministic update is according to modified_f defined above) w.r.t x

    NOTE: in this regime, the last x' is propagated w.r.t. a propagator whose invariant distribution respects u_ani_mm_mix;
    this should _not_ be the case. The Step 2 for loop should break once the final work_incremental has been computed and added to work_accumulated.
    Regardless, the distribution of accumulated works is unaffected by this 'bug'; only expectations (as a function of x) w.r.t. these weights may be affected.
    (A standalone toy sketch of this work-accumulation scheme follows this example.)

    See: 3.1.1. of https://www.stats.ox.ac.uk/~doucet/delmoral_doucet_jasra_sequentialmontecarlosamplersJRSSB.pdf (esp. Remark 1.)
    """
    def __init__(self,
                 openmm_pdf_state,
                 openmm_pdf_state_subset,
                 subset_indices_map,
                 integrator,
                 ani_handler,
                 context_cache=None,
                 reassign_velocities=True,
                 n_restart_attempts=0,
                 reporter=None,
                 write_trajectory_interval = 1,
                 **kwargs):
        """
        arguments
            openmm_pdf_state : openmmtools.states.ThermodynamicState
                the pdf state of the propagator
            openmm_pdf_state_subset : openmmtools.states.ThermodynamicState
                the pdf state of the atom subset
            subset_indices_map : dict
                dict of {openmm_pdf_state atom_index : openmm_pdf_state_subset atom index}
            integrator : openmm.Integrator
                integrator of dynamics
            ani_handler : ANI1_force_and_energy
                handler for ani forces and potential energy
            context_cache : openmmtools.cache.ContextCache, optional default:None
                The ContextCache to use for Context creation. If None, the global cache
                openmmtools.cache.global_context_cache is used.
            reassign_velocities : bool, optional default:False
                If True, the velocities will be reassigned from the Maxwell-Boltzmann
                distribution at the beginning of the move.
            n_restart_attempts : int, optional default:0
                When greater than 0, if after the integration there are NaNs in energies,
                the move will restart. When the integrator has a random component, this
                may help recovering. On the last attempt, the ``Context`` is
                re-initialized in a slower process, but better than the simulation
                crashing. An IntegratorMoveError is raised after the given number of
                attempts if there are still NaNs.
            reporter : coddiwomple.openmm.reporter.OpenMMReporter, default None
                a reporter object to write trajectories
            write_trajectory_interval : int
                frequency of writing trajectory
        """
        super().__init__(openmm_pdf_state,
                 integrator,
                 context_cache,
                 reassign_velocities,
                 n_restart_attempts)
        #create a pdf state for the subset indices (usually a vacuum system)
        self.pdf_state_subset = openmm_pdf_state_subset
        assert self.pdf_state_subset.temperature == self.pdf_state.temperature, f"the temperatures of the pdf states do not match"

        #create a dictionary for subset indices
        self._subset_indices_map = subset_indices_map

        #create an ani handler attribute that can be referenced
        self.ani_handler = ani_handler

        #create a context for the subset atoms that can be referenced
        self.context_subset, _ = cache.global_context_cache.get_context(self.pdf_state_subset)

        #create a reporter for the accumulated works
        self._state_works = {}
        self._state_works_counter = 0

        #create a reporter
        self._write_trajectory = False if reporter is None else True
        self.reporter=reporter
        if self._write_trajectory:
            from coddiwomple.particles import Particle
            self.particle = Particle(0)
            self.write_trajectory_interval=write_trajectory_interval
        else:
            self.particle = None
            self.write_trajectory_interval=None

    def _initialize_state_works(self):
        """
        initialize an empty list and add 0.0 to it (state works)
        """
        self._current_state_works = [] #define an interim (auxiliary) list that will track the thermodynamic work of the current application
        self._current_state_works.append(0.0) #the first incremental work is always 0 since the importance function is identical to the first target distribution (i.e. fully interacting MM)

    def _initialize_iterations(self, n_iterations):
        """
        initialize the iteration counter
        """
        self._iteration = 0.0 #define the first iteration as 0
        self._n_iterations = n_iterations #the number of iterations in the protocol is equal to the number of steps in the application

    def _update_particle_state_substate(self, particle_state, new_state_subset=False):
        """
        update the particle state from the context, create a particle substate and update from context
        """
        #update the particle state and the particle state subset
        particle_state.update_from_context(self.context, ignore_velocities=True) #update the particle state from the context
        if new_state_subset:
            self.particle_state_subset = SamplerState(positions = particle_state.positions[list(self._subset_indices_map.keys())]) #create a particle state from the subset context
        else:
            self.particle_state_subset.positions = particle_state.positions[list(self._subset_indices_map.keys())] #update the particle subset positions appropriately
        self.particle_state_subset.apply_to_context(self.context_subset, ignore_velocities=True) #apply the subset particle state to its context
        self.particle_state_subset.update_from_context(self.context_subset, ignore_velocities=True) #update the subset particle state from its context to updated the potential energy

    def _update_current_state_works(self, particle_state):
        """
        update the current state and associated works
        """
        #get the reduced potential
        reduced_potential = self._compute_hybrid_potential(_lambda = self._iteration / self._n_iterations, particle_state = particle_state)
        perturbed_reduced_potential = self._compute_hybrid_potential(_lambda = (self._iteration + 1.0) / self._n_iterations, particle_state = particle_state)
        self._current_state_works.append(self._current_state_works[-1] + (perturbed_reduced_potential - reduced_potential))

    def _update_force(self, particle_state):
        """
        update the force
        """
        mm_force_matrix = self._compute_hybrid_forces(_lambda = (self._iteration + 1.0) / self._n_iterations, particle_state = particle_state).value_in_unit_system(unit.md_unit_system)
        self.integrator.setPerDofVariableByName('modified_force', mm_force_matrix)



    def _before_integration(self, *args, **kwargs):
        particle_state = args[0] #define the particle state
        n_iterations = args[1] #define the number of iterations

        self._initialize_state_works()
        self._initialize_iterations(n_iterations)

        #update the particle state and the particle state subset
        self._update_particle_state_substate(particle_state, new_state_subset=True)

        self._update_current_state_works(particle_state)

        self._update_force(particle_state)

        #report
        if self._write_trajectory: # the first state is always saved for processing purposes
            self.particle.update_state(particle_state)
            self.reporter.record([self.particle])


    def _during_integration(self, *args, **kwargs):
        particle_state = args[0]
        self._iteration += 1.0

        self._update_particle_state_substate(particle_state)

        #get the reduced potential
        if self._iteration < self._n_iterations:
            self._update_current_state_works(particle_state)
            self._update_force(particle_state)
        else:
            #we are done
            pass

        if self._write_trajectory and int(self._iteration) % self.write_trajectory_interval == 0:
            self.particle.update_state(particle_state)
            if self._iteration == self._n_iterations:
                self.reporter.record([self.particle], save_to_disk=True)
            else:
                self.reporter.record([self.particle], save_to_disk=False)



    def _after_integration(self, *args, **kwargs):
        self._state_works[self._state_works_counter] = deepcopy(self._current_state_works)
        self._state_works_counter += 1

        if self._write_trajectory:
            self.reporter.reset()
        #self._log_context_parameters()


    def _compute_hybrid_potential(self,_lambda, particle_state):
        """
        function to compute the hybrid reduced potential defined as follows:
        U(x_rec, x_lig) = u_mm,rec(x_rec) - lambda*u_mm,lig(x_lig) + lambda*u_ani,lig(x_lig)
        """
        reduced_potential = (self.pdf_state.reduced_potential(particle_state)
                             - _lambda * self.pdf_state_subset.reduced_potential(self.particle_state_subset)
                             + _lambda * self.ani_handler.calculate_energy(self.particle_state_subset.positions) * self.pdf_state.beta)
        return reduced_potential

    def _compute_hybrid_forces(self, _lambda, particle_state):
        """
        function to compute a hybrid force matrix of shape num_particles x 3
        in the spirit of the _compute_hybrid_potential, we compute the forces in the following way
            F(x_rec, x_lig) = F_mm(x_rec, x_lig) - lambda * F_mm(x_lig) + lambda * F_ani(x_lig)
        """
        # get the complex mm forces
        state = self.context.getState(getForces=True)
        mm_force_matrix = state.getForces(asNumpy=True) # returns forces in kJ/(nm mol)

        # get the ligand mm forces
        subset_state = self.context_subset.getState(getForces=True)
        mm_force_matrix_subset = subset_state.getForces(asNumpy=True)

        # get the ligand ani forces
        coords = self.particle_state_subset.positions
        subset_ani_force_matrix, energy = self.ani_handler.calculate_force(coords) # returns force in kJ/(A mol)
        #print(f"ani force matrix head: ",subset_ani_force_matrix[0])

        # now combine the ligand forces
        subset_force_matrix = _lambda * (subset_ani_force_matrix - mm_force_matrix_subset) #we are adding two Quantities with different units, but they are compatible
        #print(f"mm subset force matrix head", mm_force_matrix_subset[0])

        # and append to the complex forces...
        #print(f"mm force matrix head", mm_force_matrix[0])
        mm_force_matrix[list(self._subset_indices_map.keys()), :] += subset_force_matrix #and same, here...
        #print(f"mm force matrix head (after ani modification)", mm_force_matrix[0])

        return mm_force_matrix

    def _get_context_subset_parameters(self):
        """
        return a dictionary of the self.context_subset's parameters

        returns
            context_parameters : dict
            {parameter name <str> : parameter value value <float>}
        """
        swig_parameters = self.context_subset.getParameters()
        context_parameters = {q: swig_parameters[q] for q in swig_parameters}
        return context_parameters

    def _log_context_parameters(self):
        """
        log the context and context subset parameters
        """
        context_parameters = self._get_context_parameters()
        context_subset_parameters = self._get_context_subset_parameters()
        _logger.debug(f"\tcontext_parameters during integration:")
        for key, val in context_parameters.items():
            _logger.debug(f"\t\t{key}: {val}")

        _logger.debug(f"\tcontext subset parameters during integration:")
        for key, val in context_subset_parameters.items():
            _logger.debug(f"\t\t{key}: {val}")

    @property
    def state_works(self):
        return self._state_works
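The work bookkeeping described in the Propagator pseudocode can be reproduced with a plain toy model. The sketch below anneals a single 1D sample between two harmonic reduced potentials and accumulates incremental works the same way `_update_current_state_works` does; it illustrates the bookkeeping only, not the MM/ANI force modification (the potentials and the cheap Metropolis move are stand-ins).

# toy illustration of the AIS work accumulation used by Propagator (1D, no modified-force MD)
import numpy as np

def u_initial(x):        # stand-in for the fully interacting MM reduced potential
    return 0.5 * x ** 2

def u_target(x):         # stand-in for the MM/ANI-mixed reduced potential
    return 0.5 * (x - 1.0) ** 2 + 0.3

def hybrid(x, lam):      # (1 - lambda) * u_mm + lambda * u_ani_mm_mix
    return (1.0 - lam) * u_initial(x) + lam * u_target(x)

rng = np.random.default_rng(0)
n_iterations = 100
x = rng.normal(0.0, 1.0)             # x_0 ~ e^{-u_initial(x)}
state_works = [0.0]                  # the first incremental work is 0 by construction
for iteration in range(n_iterations):
    lam, next_lam = iteration / n_iterations, (iteration + 1) / n_iterations
    state_works.append(state_works[-1] + hybrid(x, next_lam) - hybrid(x, lam))
    # stand-in for the V R O R update under the modified force: one Metropolis move at next_lam
    proposal = x + rng.normal(0.0, 0.5)
    if rng.random() < np.exp(hybrid(x, next_lam) - hybrid(proposal, next_lam)):
        x = proposal
print(f"accumulated reduced work: {state_works[-1]:.3f}")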
Example No. 11
def endstate_equilibration(system,
                           endstate_positions,
                           box_vectors,
                           directory_name,
                           trajectory_prefix,
                           md_topology,
                           number_of_applications,
                           steps_per_application,
                           endstate_parameters = 0.0,
                           alchemical_composability = RelativeAlchemicalState):
    """
    conduct an endstate equilibration with pdf_state parameters defined by `endstate_parameters` with decorrelation
    that will be written to disk

    arguments
        system : openmm.System
            parameterizable system
        endstate_positions : np.ndarray(N,3) * unit.nanometers (or length units)
            starting positions that will be minimized and simulated
        box_vectors : np.ndarray(3,3) * unit.nanometers (or length units)
            starting box vectors
        directory_name : str
            directory that will be written to
        trajectory_prefix : str
            .pdb prefix
        md_topology : mdtraj.Topology
            topology that will write the trajectory
        number_of_applications : int
            number of applications of the propagator
        steps_per_application : int
            number of integration steps per application
        endstate_parameters : float
            endstate parameters
        alchemical_composability : openmmtools.alchemy.AlchemicalState
            composer for alchemical composability creation
    """
    from perses.dispersed.feptasks import minimize
    from perses.dispersed.utils import compute_timeseries
    import mdtraj.utils as mdtrajutils

    #determine pressure
    forces = {type(force).__name__: force for force in system.getForces()}
    if "MonteCarloBarostat" in list(forces.keys()):
        pressure = 1.0 * unit.atmosphere
    else:
        pressure = None
    print(f"pressure: {pressure}")

    particle_state = OpenMMParticleState(positions = endstate_positions,
                                         box_vectors = box_vectors,
                                        )
    pdf_state = OpenMMPDFState(system = system,
                               alchemical_composability = alchemical_composability,
                               pressure=pressure)

    #set the pdf_state endstate parameters
    pdf_state_parameters = pdf_state.get_parameters()
    reset_pdf_state_parameters = {key: endstate_parameters for key in pdf_state_parameters.keys()}
    pdf_state.set_parameters(reset_pdf_state_parameters)

    langevin_integrator = OMMLI(temperature = pdf_state.temperature,
                                timestep = 2.0 * unit.femtoseconds)

    endstate_propagator = OMMBIP(openmm_pdf_state = pdf_state,
                                 integrator = langevin_integrator)

    reporter = OpenMMReporter(directory_name, trajectory_prefix, md_topology = md_topology)

    particle = Particle(0)
    particle.update_state(particle_state)
    reporter.record([particle])

    minimize(endstate_propagator.pdf_state, particle_state)

    potentials = []
    for i in tqdm.trange(number_of_applications):
        particle_state, _return_dict = endstate_propagator.apply(particle_state, n_steps = steps_per_application)
        reporter.record([particle])
        potentials.append(_return_dict['new_pe'])


    [t0, g, Neff_max, A_t, uncorrelated_indices] = compute_timeseries(np.array(potentials))
    print(f"t0: {t0}, g: {g}, Neff_max: {Neff_max}, A_t: {A_t}, uncorrelated_indices:{uncorrelated_indices}")
    particle_hex = hex(id(particle))
    reporter.hex_dict[particle_hex] = [[reporter.hex_dict[particle_hex][0][q] for q in uncorrelated_indices],
                                       [reporter.hex_dict[particle_hex][1][q] for q in uncorrelated_indices],
                                       [reporter.hex_dict[particle_hex][2][q] for q in uncorrelated_indices]]

    try:
        reporter._write_trajectory(particle_hex, f"{reporter.neq_traj_filename}.{format(reporter.hex_to_index[particle_hex], '04')}.pdb")
    except Exception as e:
        print(e)
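The decorrelation step above leans on perses' compute_timeseries, which returns [t0, g, Neff_max, A_t, uncorrelated_indices] as unpacked in the code. A rough sketch of an equivalent analysis with pymbar's timeseries module (assuming the pymbar 3 API; the real helper may differ in details):

# rough sketch of the decorrelation analysis, assuming the pymbar 3 timeseries API
import numpy as np
from pymbar import timeseries

def sketch_compute_timeseries(reduced_potentials):
    A_t = np.array(reduced_potentials)
    t0, g, Neff_max = timeseries.detectEquilibration(A_t)            # burn-in, statistical inefficiency, effective samples
    uncorrelated = timeseries.subsampleCorrelatedData(A_t[t0:], g=g)
    uncorrelated_indices = [t0 + index for index in uncorrelated]    # map back onto the full series
    return [t0, g, Neff_max, A_t, uncorrelated_indices]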
Example No. 12
def mcmc_smc_resampler(system,
                       endstate_cache_filename,
                       directory_name,
                       trajectory_prefix,
                       md_topology,
                       number_of_particles,
                       parameter_sequence,
                       observable = nESS,
                       threshold = vanilla_floor_threshold,
                      alchemical_composability = RelativeAlchemicalState,
                      integrator_kwargs = {'temperature': 300.0 * unit.kelvin,
                                                      'collision_rate': 1.0 / unit.picoseconds,
                                                      'timestep': 2.0 * unit.femtoseconds,
                                                      'splitting': "V R O R V",
                                                      'constraint_tolerance': 1e-6},
                      record_state_work_interval = 1,
                      trajectory_write_interval = 1):
    """
    Conduct a single-direction annealing protocol in the AIS regime (wherein particle incremental weights are computed according to Eq. 31 of https://www.stats.ox.ac.uk/~doucet/delmoral_doucet_jasra_sequentialmontecarlosamplersJRSSB.pdf)
    with resampling.

    arguments
        system : openmm.System
            parameterizable system
        endstate_cache_filename : str
            path to the endstate cache pdb
        directory_name : str
            directory that will be written to
        trajectory_prefix : str
            .pdb prefix
        md_topology : mdtraj.Topology
            topology that will write the trajectory
        number_of_particles : int
            number of particles that will be used
        parameter_sequence : list of dict
            list of dictionary of parameters to be set
        observable : function
            function to compute observables on particles
        threshold : function
            function to compute a threshold on an observable
        alchemical_composability : openmmtools.alchemy.AlchemicalState
            composer for alchemical composability creation
        integrator_kwargs : dict, see default
            kwargs to pass to OMMLIAIS integrator
    """
    pressure = handle_pressure(system)
    print(f"pressure: {pressure}")

    traj = md.load(endstate_cache_filename)
    num_frames = traj.n_frames
    proposal_pdf_state = OpenMMPDFState(system = system, alchemical_composability = alchemical_composability, pressure=pressure)
    target_pdf_state = OpenMMPDFState(system = system, alchemical_composability = alchemical_composability, pressure = pressure)

    reporter = OpenMMReporter(directory_name, trajectory_prefix, md_topology = md_topology)

    integrator = OMMLI(**integrator_kwargs)
    propagator = OMMBIP(proposal_pdf_state, integrator)
    proposal_factory = ProposalFactory(parameter_sequence, propagator)
    target_factory = TargetFactory(target_pdf_state, parameter_sequence, termination_parameters = None)

    resampler = MultinomialResampler()

    particles = [Particle(index = idx) for idx in range(number_of_particles)]

    for idx in range(number_of_particles):
        random_frame = np.random.choice(range(num_frames))
        positions = traj.xyz[random_frame] * unit.nanometers
        box_vectors = traj.unitcell_vectors[random_frame]*unit.nanometers
        particle_state = OpenMMParticleState(positions=positions, box_vectors = box_vectors)
        proposal_factory.equip_initial_sample(particle = particles[idx],
                                              initial_particle_state = particle_state,
                                              generation_pdf = None)

    reporter.record(particles)

    while True: #be careful...
        try:
            [particle.update_iteration() for particle in particles]#update the iteration
            incremental_works = np.array([target_factory.compute_incremental_work(particle, neglect_proposal_work = True) for particle in particles]) #compute incremental works
            randomize_velocities = True if particles[0].iteration == 1 else False #whether to randomize velocities in the first iteration
            [proposal_factory.propagate(particle, randomize_velocities=randomize_velocities, apply_pdf_to_context=True) for particle in particles] #propagate particles (be sure to save to context)
            resampler.resample(particles, incremental_works, observable = observable, threshold = threshold, update_particle_indices = True) #attempt to resample particles

            #ask to terminate
            if all(target_factory.terminate(particle) for particle in particles):
                reporter.record(particles, save_to_disk=True)
                break
            else:
                #update the auxiliary works...
                [particle.zero_auxiliary_work() for particle in particles]
                [particle.update_auxiliary_work(-target_factory.pdf_state.reduced_potential(particle.state)) for particle in particles]
                reporter.record(particles)
        except Exception as e:
            print(f"error raised in iteration {particles[0].iteration}: {e}")
            break

    return particles
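The `observable = nESS` and `threshold = vanilla_floor_threshold` hooks steer when the resampler acts. Their implementations are not shown in this listing; the pair below is a hedged sketch of what such helpers typically compute (a normalized effective sample size over the particle weights and a fixed floor on it), under hypothetical names and signatures.

# hedged sketch of an nESS-style observable and a floor threshold for resampling decisions
import numpy as np
from scipy.special import logsumexp

def sketch_nESS(cumulative_reduced_works):
    # normalized effective sample size of the particle weights, in (0, 1]
    log_weights = -np.asarray(cumulative_reduced_works)              # reduced works -> log importance weights
    normalized_weights = np.exp(log_weights - logsumexp(log_weights))
    return 1.0 / (len(normalized_weights) * np.sum(normalized_weights ** 2))

def sketch_floor_threshold(observable_value, floor=0.5):
    # trigger resampling whenever the observable drops below a fixed floor
    return observable_value < floor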
Example No. 13
def annealed_importance_sampling(system,
                                 endstate_cache_filename,
                                 directory_name,
                                 trajectory_prefix,
                                 md_topology,
                                 alchemical_functions,
                                 number_of_applications,
                                 steps_per_application,
                                 endstate_parameters,
                                 alchemical_composability = RelativeAlchemicalState,
                                 integrator_kwargs = {'temperature': 300.0 * unit.kelvin,
                                                      'collision_rate': 1.0 / unit.picoseconds,
                                                      'timestep': 2.0 * unit.femtoseconds,
                                                      'splitting': "P I H N S V R O R V",
                                                      'constraint_tolerance': 1e-6},
                                record_state_work_interval = 1,
                                trajectory_write_interval = 1,
                                ):
    """
    conduct annealed importance sampling in the openmm regime

    arguments
        system : openmm.System
            parameterizable system
        endstate_cache_filename : str
            path to the endstate cache pdb
        directory_name : str
            directory that will be written to
        trajectory_prefix : str
            .pdb prefix
        md_topology : mdtraj.Topology
            topology that will write the trajectory
        alchemical_functions : dict
            {pdf_parameter <str>: function <str>, lepton-readable}
        number_of_applications : int
            number of applications of the propagator
        steps_per_application : int
            number of integration steps per application
        endstate_parameters : float
            endstate parameters
        alchemical_composability : openmmtools.alchemy.AlchemicalState
            composer for alchemical composability creation
        integrator_kwargs : dict, see default
            kwargs to pass to OMMLIAIS integrator
    """
    #determine pressure
    pressure = handle_pressure(system)

    print(f"pressure: {pressure}")

    traj = md.load(endstate_cache_filename)

    num_frames = traj.n_frames
    pdf_state = OpenMMPDFState(system = system, alchemical_composability = alchemical_composability, pressure=pressure)

    #set the pdf_state endstate parameters
    pdf_state_parameters = pdf_state.get_parameters()
    reset_pdf_state_parameters = {key: endstate_parameters for key in pdf_state_parameters.keys()}
    pdf_state.set_parameters(reset_pdf_state_parameters)

    reporter = OpenMMReporter(directory_name, trajectory_prefix, md_topology = md_topology)

    ais_integrator = OMMLIAIS(
                             alchemical_functions,
                             steps_per_application,
                             **integrator_kwargs)

    ais_propagator = OMMAISPR(openmm_pdf_state = pdf_state,
                              integrator = ais_integrator,
                              record_state_work_interval = record_state_work_interval,
                              reporter = reporter,
                              trajectory_write_interval = trajectory_write_interval,
                              context_cache=None,
                              reassign_velocities=True,
                              n_restart_attempts=0)

    frames = np.random.choice(range(num_frames), number_of_applications)

    particle = Particle(0)

    for i in tqdm.trange(number_of_applications):
        pdf_state.set_parameters(reset_pdf_state_parameters)
        particle_state = OpenMMParticleState(positions = traj.xyz[frames[i]] * unit.nanometers, box_vectors = traj.unitcell_vectors[frames[i]]*unit.nanometers)
        particle.update_state(particle_state)
        try:
            _, _return_dict = ais_propagator.apply(particle_state, n_steps = steps_per_application, reset_integrator=True, apply_pdf_to_context=True)
        except Exception as e:
            print(e)


    return ais_propagator.state_works
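By analogy with `Propagator.state_works` above, `state_works` here collects a per-application list of cumulative reduced works. The final entries can be turned into a simple EXP (exponential-averaging) estimate of the reduced free-energy difference; a post-processing sketch:

# post-processing sketch: EXP estimate of the reduced free-energy difference from state_works
import numpy as np
from scipy.special import logsumexp

def exp_free_energy_estimate(state_works):
    final_works = np.array([works[-1] for works in state_works.values()])   # final cumulative work per application
    return -(logsumexp(-final_works) - np.log(len(final_works)))            # reduced (unitless) free energy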
Example No. 14
def test_OMMLI():
    """
    test OMMLI (OpenMMLangevinIntegrator) in the baoab regime on the harmonic test system;
    Specifically, we run MD to convergence and assert that the mean reduced potential energy of the system lies within a specified tolerance of its theoretical value.
    We also check the accumulation of shadow and proposal works, as well as the ability to reset and initialize the integrator and to subsume it into an OMMBIP propagator.
    """
    from coddiwomple.openmm.propagators import OMMBIP
    from coddiwomple.openmm.integrators import OMMLI
    import tqdm

    temperature = 300 * unit.kelvin
    pressure = None
    from coddiwomple.openmm.states import OpenMMPDFState, OpenMMParticleState
    from coddiwomple.particles import Particle

    #create the default get_harmonic_testsystem
    testsystem, period, collision_rate, timestep, alchemical_functions = get_harmonic_testsystem(temperature = temperature)

    particle_state = OpenMMParticleState(positions = testsystem.positions) #make a particle state
    particle = Particle(index = 0, record_state=False, iteration = 0)
    particle.update_state(particle_state)

    num_applications = 100

    #create a pdf_state
    pdf_state = OpenMMPDFState(system = testsystem.system, alchemical_composability = HarmonicAlchemicalState, temperature = temperature, pressure = pressure)

    #create an integrator
    integrator = OMMLI(temperature=temperature, collision_rate=collision_rate, timestep=timestep)

    #create a propagator
    propagator = OMMBIP(openmm_pdf_state = pdf_state, integrator = integrator)

    #expected reduced potential
    mean_reduced_potential = testsystem.get_potential_expectation(pdf_state) * pdf_state.beta
    std_dev_reduced_potential = testsystem.get_potential_standard_deviation(pdf_state) * pdf_state.beta

    reduced_pe = []

    #some sanity checks for propagator:
    global_integrator_variables_before_integration = propagator._get_global_integrator_variables()
    print(f"starting integrator variables: {global_integrator_variables_before_integration}")

    #some sanity checks for integrator:
    start_proposal_work = propagator.integrator._get_energy_with_units('proposal_work', dimensionless=True)
    start_shadow_work = propagator.integrator._get_energy_with_units('shadow_work', dimensionless=True)
    assert start_proposal_work == global_integrator_variables_before_integration['proposal_work']
    assert start_shadow_work == global_integrator_variables_before_integration['shadow_work']

    for app_num in tqdm.trange(num_applications):
        particle_state, proposal_work = propagator.apply(particle_state, n_steps=20, reset_integrator=False, apply_pdf_to_context=False, randomize_velocities=True)
        assert proposal_work==0. #this must be the case since we did not pass a 'returnable_key'

        #sanity checks for inter-application methods
        assert propagator.integrator._get_energy_with_units('proposal_work', dimensionless=True) != 0. #this cannot be zero after a step of MD without resets
        assert propagator.integrator._get_energy_with_units('shadow_work', dimensionless=True) != 0. #this cannot be zero after a step of MD without resets
        reduced_pe.append(pdf_state.reduced_potential(particle_state))


    tol=6 * std_dev_reduced_potential
    calc_mean_reduced_pe = np.mean(reduced_pe)
    calc_stddev_reduced_pe = np.std(reduced_pe)
    assert abs(calc_mean_reduced_pe - mean_reduced_potential) < tol, f"the calculated mean reduced potential energy ({calc_mean_reduced_pe}) is outside the tolerance of the theoretical mean reduced potential energy {mean_reduced_potential} +/- {tol}"
    print(f"the calculated mean reduced energy/standard deviation is {calc_mean_reduced_pe, calc_stddev_reduced_pe}; the theoretical mean reduced energy is {mean_reduced_potential} with standard deviation {std_dev_reduced_potential}")

    #some cleanup of the integrator
    propagator.integrator.reset() #this should reset proposal, shadow, and ghmc statistics (we omit ghmc stats)
    assert propagator.integrator._get_energy_with_units('proposal_work', dimensionless=True) == 0. #this should be zero after a reset
    assert propagator.integrator._get_energy_with_units('shadow_work', dimensionless=True) == 0. #this should be zero after a reset