def _runtest(self, num_walkers, num_cycles, dimension, debug_prints=False):
    print("Random walk simulation with: ")
    print("Dimension =", dimension)
    print("Probability =", self.probability)
    print("Number of Walkers =", num_walkers)
    print("Number of Cycles =", num_cycles)

    # set up the initial state for the walkers
    positions = np.zeros((1, dimension))
    init_state = WalkerState(positions=positions, time=0.0)

    # create the list of initial walkers with uniform weights
    initial_weight = 1 / num_walkers
    init_walkers = [Walker(init_state, initial_weight)
                    for i in range(num_walkers)]

    # set up the runner for the system
    runner = RandomWalkRunner(dimension=dimension,
                              probability=self.probability)

    # make a dictionary of units for adding to the HDF5
    units = dict(UNIT_NAMES)

    # number of dynamics steps in each cycle segment
    segment_length = 10

    # set up the reporter
    randomwalk_system_top_json = self.generate_topology()
    hdf5_reporter = WepyHDF5Reporter(self.hdf5_reporter_path,
                                     mode='w',
                                     save_fields=SAVE_FIELDS,
                                     topology=randomwalk_system_top_json,
                                     resampler=self.resampler,
                                     units=dict(UNITS),
                                     n_dims=dimension)

    # set up the simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=self.resampler,
                          work_mapper=Mapper(),
                          reporters=[hdf5_reporter])

    # run a simulation with the manager for num_cycles cycles of
    # segment_length steps each
    steps = [segment_length for i in range(num_cycles)]

    print("Start simulation")
    sim_manager.run_simulation(num_cycles, steps, debug_prints=debug_prints)
    print("Finished Simulation")
ubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,
                  initial_state=init_state,
                  topology=json_str_top,
                  ligand_idxs=np.array(test_sys.ligand_indices),
                  receptor_idxs=np.array(test_sys.receptor_indices))

## Reporters

# make a dictionary of units for adding to the HDF5
units = dict(UNIT_NAMES)

# open the HDF5 file in truncate ('w') mode first, then switch to
# append mode after the first run
hdf5_reporter = WepyHDF5Reporter(file_path=hdf5_path,
                                 mode='w',
                                 save_fields=SAVE_FIELDS,
                                 resampler=resampler,
                                 boundary_conditions=ubc,
                                 topology=json_str_top,
                                 units=units)

dashboard_reporter = WExploreDashboardReporter(
    file_path='./outputs/wepy.dash.txt',
    mode='w',
    step_time=STEP_SIZE.value_in_unit(unit.second),
    max_n_regions=resampler.max_n_regions,
    max_region_sizes=resampler.max_region_sizes,
    bc_cutoff_distance=ubc.cutoff_distance)

reporters = [hdf5_reporter, dashboard_reporter]

## Work Mapper
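# (Sketch, not part of the original snippet) The section above stops at the
# "Work Mapper" heading. A work mapper analogous to the one used in the full
# sEH example below could follow here; n_workers is assumed to be defined
# elsewhere in the script.
work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                           num_workers=n_workers)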
def main(n_runs, n_cycles, steps, n_walkers, n_workers=1,
         debug_prints=False, seed=None):

    ## Load objects needed for various purposes

    # load a JSON string of the topology
    with open(json_top_path, mode='r') as rf:
        sEH_TPPU_system_top_json = rf.read()

    # an openmm.State object for setting up the initial walkers
    with open(omm_state_path, mode='rb') as rf:
        omm_state = pickle.load(rf)

    ## Set up the OpenMM Runner

    # load the PSF, which is needed for making a system in OpenMM with
    # CHARMM force fields
    psf = omma.CharmmPsfFile(charmm_psf_path)

    # set the box side lengths and angles
    lengths = [CUBE_LENGTH for i in range(3)]
    angles = [CUBE_ANGLE for i in range(3)]
    psf.setBox(*lengths, *angles)

    # CHARMM force field parameters
    params = omma.CharmmParameterSet(*charmm_param_paths)

    # create an OpenMM system from the topology, giving it the force
    # field parameters and the nonbonded method
    system = psf.createSystem(params,
                              nonbondedMethod=omma.CutoffPeriodic,
                              nonbondedCutoff=NONBONDED_CUTOFF,
                              constraints=omma.HBonds)

    # make this a constant temperature and pressure simulation at 1.0
    # atm, 300 K, with volume move attempts every 50 steps
    barostat = omm.MonteCarloBarostat(PRESSURE, TEMPERATURE, VOLUME_MOVE_FREQ)

    # add it as a "Force" to the system
    system.addForce(barostat)

    # make an integrator object that is constant temperature
    integrator = omm.LangevinIntegrator(TEMPERATURE, FRICTION_COEFFICIENT,
                                        STEP_SIZE)

    # set up the OpenMMRunner with the system
    runner = OpenMMRunner(system, psf.topology, integrator, platform=PLATFORM)

    # the initial state, which is used as a reference for many things
    init_state = OpenMMState(omm_state)

    ## Make the distance metric

    # load the crystal structure coordinates
    crystal_traj = mdj.load_pdb(pdb_path)

    # get the atoms in the binding site according to the crystal structure
    bs_idxs = binding_site_atoms(crystal_traj.top, LIG_RESID, crystal_traj.xyz[0])
    lig_idxs = ligand_idxs(crystal_traj.top, LIG_RESID)
    prot_idxs = protein_idxs(crystal_traj.top)

    # make the distance metric with the ligand and binding site indices
    # for selecting atoms for the image and for doing the alignments to
    # only the binding site. All images will be aligned to the
    # reference initial state.
    unb_distance = UnbindingDistance(lig_idxs, bs_idxs, init_state)

    ## Make the resampler

    # make a WExplore resampler with default parameters and our
    # distance metric
    resampler = WExploreResampler(distance=unb_distance,
                                  init_state=init_state,
                                  max_n_regions=MAX_N_REGIONS,
                                  max_region_sizes=MAX_REGION_SIZES,
                                  pmin=PMIN,
                                  pmax=PMAX)

    ## Make the boundary conditions

    # instantiate the unbinding boundary conditions using the crystal
    # structure topology and the ligand and receptor atom indices
    ubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,
                      initial_state=init_state,
                      topology=crystal_traj.topology,
                      ligand_idxs=lig_idxs,
                      receptor_idxs=prot_idxs)

    ## Make the reporters

    # WepyHDF5: open it in truncate ('w') mode first, then switch to
    # append mode after the first run
    hdf5_reporter = WepyHDF5Reporter(
        hdf5_path,
        mode='w',
        # the fields of the State that will be saved in the HDF5 file
        save_fields=SAVE_FIELDS,
        # the topology in a JSON format
        topology=sEH_TPPU_system_top_json,
        # the resampler and boundary conditions for getting data types
        # and shapes for saving
        resampler=resampler,
        boundary_conditions=ubc,
        # the units to save the fields in
        units=dict(UNITS),
        # sparse (in time) fields
        sparse_fields=dict(SPARSE_FIELDS),
        # the atom indices of the main (reduced) representation and the
        # save frequency for the all-atoms representation
        main_rep_idxs=np.concatenate((lig_idxs, prot_idxs)),
        all_atoms_rep_freq=ALL_ATOMS_SAVE_FREQ)

    dashboard_reporter = WExploreDashboardReporter(
        dashboard_path,
        mode='w',
        step_time=STEP_SIZE.value_in_unit(unit.second),
        max_n_regions=resampler.max_n_regions,
        max_region_sizes=resampler.max_region_sizes,
        bc_cutoff_distance=ubc.cutoff_distance)

    setup_reporter = SetupReporter(setup_state_path, mode='w')

    restart_reporter = RestartReporter(restart_state_path, mode='w')

    reporters = [hdf5_reporter, dashboard_reporter,
                 setup_reporter, restart_reporter]

    ## The work mapper

    # we use a mapper that runs each walker segment on a GPU worker
    work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                               num_workers=n_workers)

    ## Combine all these parts and set up the simulation manager

    # set up parameters for running the simulation

    # initial weights
    init_weight = 1.0 / n_walkers

    # a list of the initial walkers
    init_walkers = [Walker(OpenMMState(omm_state), init_weight)
                    for i in range(n_walkers)]

    # instantiate a simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=ubc,
                          work_mapper=work_mapper,
                          reporters=reporters)

    ### RUN the simulation
    for run_idx in range(n_runs):
        print("Starting run: {}".format(run_idx))
        sim_manager.run_simulation(n_cycles, steps, debug_prints=debug_prints)
        print("Finished run: {}".format(run_idx))
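# (Sketch, not part of the original script) A minimal command-line entry
# point for main(); the argument names, order, and the construction of the
# per-cycle steps list are assumptions for illustration.
if __name__ == "__main__":
    import sys

    n_runs = int(sys.argv[1])
    n_cycles = int(sys.argv[2])
    n_steps = int(sys.argv[3])
    n_walkers = int(sys.argv[4])
    n_workers = int(sys.argv[5])

    # one segment of n_steps dynamics steps per cycle
    steps = [n_steps for _ in range(n_cycles)]

    main(n_runs, n_cycles, steps, n_walkers, n_workers=n_workers)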
hdf5_path = osp.join(outputs_dir, hdf5_filename)

print("Number of steps: {}".format(n_steps))
print("Number of cycles: {}".format(n_cycles))

# create the initial walkers with uniform weights
init_weight = 1.0 / n_walkers
init_walkers = [Walker(OpenMMState(omm_states[i], activity=np.array([0.])),
                       init_weight)
                for i in range(n_walkers)]

hdf5_reporter = WepyHDF5Reporter(
    file_path=hdf5_path,
    mode='w',
    # save_fields set to None saves everything
    save_fields=None,
    resampler=resampler,
    boundary_conditions=None,
    topology=json_top,
    units=units)

reporters = [hdf5_reporter]

sim_manager = Manager(init_walkers,
                      runner=runner,
                      resampler=resampler,
                      boundary_conditions=None,
                      work_mapper=mapper,
                      reporters=reporters)

# make a number of steps for each cycle; in principle it could be
# different for each cycle
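# (Sketch, not part of the original snippet) The snippet stops at the comment
# above; a plausible continuation, following the pattern used in the other
# scripts in this section, would be:
steps = [n_steps for _ in range(n_cycles)]
sim_manager.run_simulation(n_cycles, steps)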
def _run(self, num_runs, num_cycles, num_walkers):
    """Runs a random walk simulation.

    Parameters
    ----------
    num_runs: int
        The number of independent simulations.

    num_cycles: int
        The number of cycles that will be run in the simulation.

    num_walkers: int
        The number of walkers.

    """
    print("Random walk simulation with: ")
    print("Dimension = {}".format(self.dimension))
    print("Probability = {}".format(self.probability))
    print("Number of Walkers = {}".format(num_walkers))
    print("Number of Cycles = {}".format(num_cycles))

    # set up the initial state for the walkers
    positions = np.zeros((1, self.dimension))
    init_state = WalkerState(positions=positions, time=0.0)

    # create the list of initial walkers with uniform weights
    initial_weight = 1 / num_walkers
    init_walkers = [Walker(init_state, initial_weight)
                    for i in range(num_walkers)]

    # set up the runner for the system
    runner = RandomWalkRunner(probability=self.probability)

    # make a dictionary of units for adding to the HDF5
    units = dict(UNIT_NAMES)

    # number of dynamics steps in each cycle segment
    segment_length = 10

    # set up the reporter
    randomwalk_system_top_json = self.generate_topology()
    hdf5_reporter = WepyHDF5Reporter(file_path=self.hdf5_filename,
                                     mode='w',
                                     save_fields=SAVE_FIELDS,
                                     topology=randomwalk_system_top_json,
                                     resampler=self.resampler,
                                     units=dict(UNITS),
                                     n_dims=self.dimension)

    # set up the simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=self.resampler,
                          work_mapper=Mapper(),
                          reporters=[hdf5_reporter])

    # run a simulation with the manager for num_cycles cycles of
    # segment_length steps each
    steps = [segment_length for i in range(num_cycles)]

    ### RUN the simulation
    for run_idx in range(num_runs):
        print("Starting run: {}".format(run_idx))
        sim_manager.run_simulation(num_cycles, steps)
        print("Finished run: {}".format(run_idx))

    print("Finished Simulation")
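# (Sketch, not part of the original code) Hypothetical usage of the _run()
# method above. The class name RandomwalkProfiler and its constructor
# arguments are assumptions inferred from the attributes the method accesses
# (self.dimension, self.probability, self.resampler, self.hdf5_filename).
#
#     profiler = RandomwalkProfiler(resampler=resampler,
#                                   dimension=5,
#                                   probability=0.25,
#                                   hdf5_filename="randomwalk_results.wepy.h5")
#     profiler._run(num_runs=1, num_cycles=100, num_walkers=10)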