def _runtest(self, num_walkers, num_cycles, dimension, debug_prints=False):
    print("Random walk simulation with: ")
    print("Dimension =", dimension)
    print("Probability =", self.probability)
    print("Number of Walkers =", num_walkers)
    print("Number of Cycles =", num_cycles)

    # set up the initial state for the walkers
    positions = np.zeros((1, dimension))
    init_state = WalkerState(positions=positions, time=0.0)

    # create the list of initial walkers with equal weights
    initial_weight = 1 / num_walkers
    init_walkers = [Walker(init_state, initial_weight)
                    for i in range(num_walkers)]

    # set up the runner for the system
    runner = RandomWalkRunner(dimension=dimension,
                              probability=self.probability)

    units = dict(UNIT_NAMES)

    # the number of dynamics steps per cycle
    segment_length = 10

    # set up the reporter
    randomwalk_system_top_json = self.generate_topology()
    hdf5_reporter = WepyHDF5Reporter(self.hdf5_reporter_path,
                                     mode='w',
                                     save_fields=SAVE_FIELDS,
                                     topology=randomwalk_system_top_json,
                                     resampler=self.resampler,
                                     units=dict(UNITS),
                                     n_dims=dimension)

    # set up the simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=self.resampler,
                          work_mapper=Mapper(),
                          reporters=[hdf5_reporter])

    # run the simulation for num_cycles cycles of segment_length steps each
    steps = [segment_length for i in range(num_cycles)]

    print("Start simulation")
    sim_manager.run_simulation(num_cycles, steps, debug_prints=debug_prints)
    print("Finished Simulation")
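# After a run like the one above, the resulting HDF5 file can be read
# back with wepy's WepyHDF5 class. This is a minimal sketch: the file
# path is a placeholder for whatever self.hdf5_reporter_path pointed
# to, and only the basic API is shown.
from wepy.hdf5 import WepyHDF5

wepy_h5 = WepyHDF5("randomwalk_results.wepy.h5", mode='r')
with wepy_h5:
    # each call to run_simulation adds a new run to the file
    print("number of runs:", wepy_h5.num_runs)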
n_walkers = int(sys.argv[3])

print("Number of steps: {}".format(n_steps))
print("Number of cycles: {}".format(n_cycles))

# create the initial walkers with equal weights
init_weight = 1.0 / n_walkers
init_walkers = [Walker(OpenMMState(init_sim_state), init_weight)
                for i in range(n_walkers)]

# initialize the simulation manager
sim_manager = Manager(init_walkers,
                      runner=runner,
                      resampler=resampler,
                      boundary_conditions=ubc,
                      work_mapper=mapper,
                      reporters=reporters)

# choose the number of dynamics steps for each cycle; in principle it
# could be different each cycle
steps = [n_steps for i in range(n_cycles)]

# actually run the simulation
print("Starting run: {}".format(0))
sim_manager.run_simulation(n_cycles, steps)
print("Finished run: {}".format(0))

print("Finished first file")
print("Number of steps: {}".format(n_steps)) print("Number of cycles: {}".format(n_cycles)) # create the initial walkers init_weight = 1.0 / n_walkers init_walkers = [ Walker(OpenMMState(init_sim_state), init_weight) for i in range(n_walkers) ] # initialize the simulation manager sim_manager = Manager(init_walkers, runner=runner, resampler=resampler, boundary_conditions=ubc, work_mapper=mapper, reporters=reporters) # make a number of steps for each cycle. In principle it could be # different each cycle steps = [n_steps for i in range(n_cycles)] # actually run the simulations print("Running Simulations") for run_idx in range(n_runs): print("Starting run: {}".format(run_idx)) sim_manager.run_simulation(n_cycles, steps, debug_prints=True) print("Finished run: {}".format(run_idx)) print("Finished first file")
def main(n_runs, n_cycles, steps, n_walkers, n_workers=1,
         debug_prints=False, seed=None):

    ## Load objects needed for various purposes

    # load a JSON string of the topology
    with open(json_top_path, mode='r') as rf:
        sEH_TPPU_system_top_json = rf.read()

    # an openmm.State object for setting up the initial walkers
    with open(omm_state_path, mode='rb') as rf:
        omm_state = pickle.load(rf)

    ## Set up the OpenMM Runner

    # load the PSF, which is needed for making a system in OpenMM with
    # CHARMM force fields
    psf = omma.CharmmPsfFile(charmm_psf_path)

    # set the box size lengths and angles
    lengths = [CUBE_LENGTH for i in range(3)]
    angles = [CUBE_ANGLE for i in range(3)]
    psf.setBox(*lengths, *angles)

    # CHARMM force field parameters
    params = omma.CharmmParameterSet(*charmm_param_paths)

    # create a system using the topology method, giving it a topology and
    # the method for calculation
    system = psf.createSystem(params,
                              nonbondedMethod=omma.CutoffPeriodic,
                              nonbondedCutoff=NONBONDED_CUTOFF,
                              constraints=omma.HBonds)

    # make this a constant temperature and pressure simulation at 1.0
    # atm, 300 K, with volume move attempts every 50 steps
    barostat = omm.MonteCarloBarostat(PRESSURE, TEMPERATURE, VOLUME_MOVE_FREQ)

    # add it as a "Force" to the system
    system.addForce(barostat)

    # make an integrator object that is constant temperature
    integrator = omm.LangevinIntegrator(TEMPERATURE,
                                        FRICTION_COEFFICIENT,
                                        STEP_SIZE)

    # set up the OpenMMRunner with the system
    runner = OpenMMRunner(system, psf.topology, integrator, platform=PLATFORM)

    # the initial state, which is used as a reference for many things
    init_state = OpenMMState(omm_state)

    ## Make the distance metric

    # load the crystal structure coordinates
    crystal_traj = mdj.load_pdb(pdb_path)

    # get the atoms in the binding site according to the crystal structure
    bs_idxs = binding_site_atoms(crystal_traj.top, LIG_RESID, crystal_traj.xyz[0])
    lig_idxs = ligand_idxs(crystal_traj.top, LIG_RESID)
    prot_idxs = protein_idxs(crystal_traj.top)

    # make the distance metric with the ligand and binding site indices
    # for selecting atoms for the image and for doing the alignments to
    # only the binding site. All images will be aligned to the reference
    # initial state.
    unb_distance = UnbindingDistance(lig_idxs, bs_idxs, init_state)

    ## Make the resampler

    # make a WExplore resampler with default parameters and our distance
    # metric
    resampler = WExploreResampler(distance=unb_distance,
                                  init_state=init_state,
                                  max_n_regions=MAX_N_REGIONS,
                                  max_region_sizes=MAX_REGION_SIZES,
                                  pmin=PMIN,
                                  pmax=PMAX)

    ## Make the boundary conditions

    # instantiate the unbinding boundary conditions using the ligand and
    # receptor (protein) atom indices
    ubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,
                      initial_state=init_state,
                      topology=crystal_traj.topology,
                      ligand_idxs=lig_idxs,
                      receptor_idxs=prot_idxs)

    ## Make the reporters

    # WepyHDF5: open it in truncate ('w') mode first, then switch to
    # append after the first run
    hdf5_reporter = WepyHDF5Reporter(
        hdf5_path,
        mode='w',
        # the fields of the State that will be saved in the HDF5 file
        save_fields=SAVE_FIELDS,
        # the topology in a JSON format
        topology=sEH_TPPU_system_top_json,
        # the resampler and boundary conditions for getting data types
        # and shapes for saving
        resampler=resampler,
        boundary_conditions=ubc,
        # the units to save the fields in
        units=dict(UNITS),
        # sparse (in time) fields
        sparse_fields=dict(SPARSE_FIELDS),
        # the atoms of the main (reduced) representation
        main_rep_idxs=np.concatenate((lig_idxs, prot_idxs)),
        # how often to save the full all-atom representation
        all_atoms_rep_freq=ALL_ATOMS_SAVE_FREQ)

    dashboard_reporter = WExploreDashboardReporter(
        dashboard_path,
        mode='w',
        step_time=STEP_SIZE.value_in_unit(unit.second),
        max_n_regions=resampler.max_n_regions,
        max_region_sizes=resampler.max_region_sizes,
        bc_cutoff_distance=ubc.cutoff_distance)

    setup_reporter = SetupReporter(setup_state_path, mode='w')

    restart_reporter = RestartReporter(restart_state_path, mode='w')

    reporters = [hdf5_reporter, dashboard_reporter,
                 setup_reporter, restart_reporter]

    ## The work mapper

    # we use a mapper that distributes walkers over GPUs
    work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                               num_workers=n_workers)

    ## Combine all these parts and set up the simulation manager

    # the initial weights
    init_weight = 1.0 / n_walkers

    # a list of the initial walkers
    init_walkers = [Walker(OpenMMState(omm_state), init_weight)
                    for i in range(n_walkers)]

    # instantiate a simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=ubc,
                          work_mapper=work_mapper,
                          reporters=reporters)

    ### RUN the simulation
    for run_idx in range(n_runs):
        print("Starting run: {}".format(run_idx))
        sim_manager.run_simulation(n_cycles, steps, debug_prints=True)
        print("Finished run: {}".format(run_idx))
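# A minimal command-line entry point for main() above, given only as a
# sketch: the positional argument order is an assumption and is not
# taken from the original script.
if __name__ == "__main__":
    import sys

    n_runs = int(sys.argv[1])
    n_cycles = int(sys.argv[2])
    n_steps = int(sys.argv[3])
    n_walkers = int(sys.argv[4])
    n_workers = int(sys.argv[5])

    # the same number of dynamics steps for every cycle
    steps = [n_steps for _ in range(n_cycles)]

    main(n_runs, n_cycles, steps, n_walkers, n_workers=n_workers)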
n_cycles = 1

# the number of MD dynamics steps for each cycle
n_steps = 1000000
steps = [n_steps for i in range(n_cycles)]

# number of parallel simulations
n_walkers = 10

# the work mapper
# work_mapper = ThreadMapper()
work_mapper = Mapper()

# create the initial walkers with equal weights
with start_action(action_type="Init Walkers") as ctx:
    init_weight = 1.0 / n_walkers
    init_walkers = [Walker(copy(init_state), init_weight)
                    for i in range(n_walkers)]

with start_action(action_type="Init Sim Manager") as ctx:
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          work_mapper=work_mapper)

# run the simulation and get the results
with start_action(action_type="Simulation") as ctx:
    final_walkers, _ = sim_manager.run_simulation(n_cycles, steps)
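# A short follow-up sketch for inspecting the returned walkers. It
# assumes run_simulation() returns the final walkers as the first
# element of a tuple (as unpacked above) and that each Walker exposes
# a .weight attribute.
with start_action(action_type="Inspect Results") as ctx:
    for walker_idx, walker in enumerate(final_walkers):
        print("walker {}: weight = {}".format(walker_idx, walker.weight))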
def run_sim(init_state_path, json_top_path, forcefield_paths,
            n_cycles, n_steps, n_workers, **kwargs):

    #### Wepy Orchestrator

    # load the wepy.OpenMMState
    with open(init_state_path, 'rb') as rf:
        init_state = pickle.load(rf)

    ### Apparatus

    # Runner components

    # load the JSON for the topology
    with open(json_top_path) as rf:
        json_top_str = rf.read()

    # load it with mdtraj and then convert to OpenMM
    mdj_top = json_to_mdtraj_topology(json_top_str)
    omm_topology = mdj_top.to_openmm()

    # we need to use the box vectors for setting the simulation up,
    # paying mind to the units
    box_vectors = init_state['box_vectors'] * init_state.box_vectors_unit

    # set the box to the last box size from equilibration
    omm_topology.setPeriodicBoxVectors(box_vectors)

    # force field parameters
    force_field = omma.ForceField(*forcefield_paths)

    # create a system using the topology method, giving it a topology and
    # the method for calculation
    runner_system = force_field.createSystem(omm_topology,
                                             nonbondedMethod=NONBONDED_METHOD,
                                             nonbondedCutoff=NONBONDED_CUTOFF,
                                             constraints=MD_CONSTRAINTS,
                                             rigidWater=RIGID_WATER,
                                             removeCMMotion=REMOVE_CM_MOTION,
                                             hydrogenMass=HYDROGEN_MASS)

    # barostat to keep the pressure constant
    runner_barostat = omm.MonteCarloBarostat(PRESSURE, TEMPERATURE,
                                             VOLUME_MOVE_FREQ)
    # add it to the system
    runner_system.addForce(runner_barostat)

    # instantiate an integrator for the runner
    runner_integrator = omm.LangevinIntegrator(TEMPERATURE,
                                               FRICTION_COEFFICIENT,
                                               STEP_TIME)

    ## Runner
    runner = OpenMMRunner(runner_system, omm_topology, runner_integrator,
                          platform=PLATFORM)

    ## Resampler

    # Distance Metric
    # TODO: set the distance metric
    distance_metric = None

    # TODO: set the resampler
    resampler = None

    ## Boundary Conditions
    # TODO optional: set the boundary conditions
    bc = None

    # apparatus = WepySimApparatus(runner, resampler=resampler,
    #                              boundary_conditions=bc)

    print("created apparatus")

    ## CONFIGURATION

    # the idxs of the main representation to save in the output files;
    # it is just the protein and the ligand
    # TODO optional: set the main representation atom indices
    main_rep_idxs = None

    # REPORTERS
    # list of reporter classes and partial kwargs for use in the
    # orchestrator
    hdf5_reporter_kwargs = {
        'main_rep_idxs': main_rep_idxs,
        'topology': json_top_str,
        'resampler': resampler,
        'boundary_conditions': bc,
        # general parameters
        'save_fields': SAVE_FIELDS,
        'units': dict(UNITS),
        'sparse_fields': dict(SPARSE_FIELDS),
        'all_atoms_rep_freq': ALL_ATOMS_SAVE_FREQ,
    }

    # get all the reporters together. Order is important since they
    # will get paired with the kwargs
    reporter_classes = [WepyHDF5Reporter]

    # collate the kwargs in the same order
    reporter_kwargs = [hdf5_reporter_kwargs]

    # make the configuration with all these reporters and the default
    # number of workers
    configuration = Configuration(n_workers=DEFAULT_N_WORKERS,
                                  reporter_classes=reporter_classes,
                                  reporter_partial_kwargs=reporter_kwargs,
                                  config_name="no-orch")

    # then instantiate the reporters
    reporters = configuration._gen_reporters()

    print("created configuration")

    ### Initial Walkers
    init_walkers = [Walker(deepcopy(init_state), INIT_WEIGHT)
                    for _ in range(N_WALKERS)]

    print("created init walkers")

    ### Orchestrator
    # orchestrator = Orchestrator(apparatus,
    #                             default_init_walkers=init_walkers,
    #                             default_configuration=configuration)

    ### Work Mapper
    if PLATFORM in ('OpenCL', 'CUDA'):
        # we use a mapper that uses GPUs
        work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                                   num_workers=n_workers)
    elif PLATFORM in ('Reference', 'CPU'):
        # we just use the standard serial mapper
        work_mapper = Mapper()

    ### Simulation Manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=bc,
                          work_mapper=work_mapper,
                          reporters=reporters)

    ### Run the simulation
    steps = [n_steps for _ in range(n_cycles)]
    sim_manager.run_simulation(n_cycles, steps)
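# One way the distance-metric and resampler TODOs above could be
# filled in, sketched from the fully worked-out variant of run_sim
# later in this file. These lines would replace the placeholders
# inside run_sim; they assume the same helper functions (ligand_idxs,
# protein_idxs, binding_site_idxs), the UnbindingDistance and
# WExploreResampler classes, and the CUTOFF / MAX_N_REGIONS /
# MAX_REGION_SIZES / PMIN / PMAX constants are available as in that
# variant.
#
#     positions = init_state['positions'] * init_state.positions_unit
#
#     lig_idxs = ligand_idxs(json_top_str)
#     prot_idxs = protein_idxs(json_top_str)
#     bs_idxs = binding_site_idxs(json_top_str, positions, box_vectors, CUTOFF)
#
#     distance_metric = UnbindingDistance(lig_idxs, bs_idxs, init_state)
#     resampler = WExploreResampler(distance=distance_metric,
#                                   init_state=init_state,
#                                   max_n_regions=MAX_N_REGIONS,
#                                   max_region_sizes=MAX_REGION_SIZES,
#                                   pmin=PMIN,
#                                   pmax=PMAX)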
def run_sim(init_state_path, json_top_path, forcefield_paths,
            n_cycles, n_steps, platform, n_workers, lig_ff=None, **kwargs):

    # add in the ligand force fields
    assert lig_ff is not None, "must give ligand forcefield"
    forcefield_paths.append(lig_ff)

    #### Wepy Orchestrator

    # load the wepy.OpenMMState
    with open(init_state_path, 'rb') as rf:
        init_state = pickle.load(rf)

    ### Apparatus

    # Runner components

    # load the JSON for the topology
    with open(json_top_path) as rf:
        json_top_str = rf.read()

    # load it with mdtraj and then convert to OpenMM
    mdj_top = json_to_mdtraj_topology(json_top_str)
    omm_topology = mdj_top.to_openmm()

    # we need to use the box vectors for setting the simulation up,
    # paying mind to the units
    box_vectors = init_state['box_vectors'] * init_state.box_vectors_unit
    positions = init_state['positions'] * init_state.positions_unit

    # set the box to the last box size from equilibration
    omm_topology.setPeriodicBoxVectors(box_vectors)

    # force field parameters
    force_field = omma.ForceField(*forcefield_paths)

    # create a system using the topology method, giving it a topology and
    # the method for calculation
    runner_system = force_field.createSystem(omm_topology,
                                             nonbondedMethod=NONBONDED_METHOD,
                                             nonbondedCutoff=NONBONDED_CUTOFF,
                                             constraints=MD_CONSTRAINTS,
                                             rigidWater=RIGID_WATER,
                                             removeCMMotion=REMOVE_CM_MOTION,
                                             hydrogenMass=HYDROGEN_MASS)

    # barostat to keep the pressure constant
    runner_barostat = omm.MonteCarloBarostat(PRESSURE, TEMPERATURE,
                                             VOLUME_MOVE_FREQ)
    # add it to the system
    runner_system.addForce(runner_barostat)

    # instantiate an integrator for the runner
    runner_integrator = omm.LangevinIntegrator(TEMPERATURE,
                                               FRICTION_COEFFICIENT,
                                               STEP_TIME)

    ## Runner
    runner = OpenMMRunner(runner_system, omm_topology, runner_integrator,
                          platform=platform)

    ## Resampler

    # Distance Metric
    lig_idxs = ligand_idxs(json_top_str)
    prot_idxs = protein_idxs(json_top_str)
    bs_idxs = binding_site_idxs(json_top_str, positions, box_vectors, CUTOFF)

    # set the distance metric
    distance_metric = UnbindingDistance(lig_idxs, bs_idxs, init_state)

    # set the resampler
    resampler = WExploreResampler(distance=distance_metric,
                                  init_state=init_state,
                                  max_n_regions=MAX_N_REGIONS,
                                  max_region_sizes=MAX_REGION_SIZES,
                                  pmin=PMIN,
                                  pmax=PMAX)

    ## Boundary Conditions
    # optional: set the boundary conditions
    bc = None

    ## CONFIGURATION

    # the idxs of the main representation to save in the output files;
    # here it is just the protein and the ligand. Set to None to save
    # all the atoms in the 'positions' field.
    main_rep_idxs = np.concatenate((lig_idxs, prot_idxs))

    # REPORTERS
    # list of reporter classes and partial kwargs for use in the
    # orchestrator
    hdf5_reporter_kwargs = {
        'main_rep_idxs': main_rep_idxs,
        'topology': json_top_str,
        'resampler': resampler,
        'boundary_conditions': bc,
        # general parameters
        'save_fields': SAVE_FIELDS,
        'units': dict(UNITS),
        'sparse_fields': dict(SPARSE_FIELDS),
        'all_atoms_rep_freq': ALL_ATOMS_SAVE_FREQ,
    }

    # get all the reporters together. Order is important since they
    # will get paired with the kwargs
    reporter_classes = [WepyHDF5Reporter]

    # collate the kwargs in the same order
    reporter_kwargs = [hdf5_reporter_kwargs]

    # make the configuration with all these reporters and the default
    # number of workers. Don't be thrown off by this, you don't strictly
    # need it: it is just a convenient way to dynamically name the
    # outputs of the reporters and to parametrize the workers and worker
    # mappers. This is mainly for use in the Orchestrator framework, but
    # it is useful here just for batch naming everything.
    configuration = Configuration(n_workers=DEFAULT_N_WORKERS,
                                  reporter_classes=reporter_classes,
                                  reporter_partial_kwargs=reporter_kwargs,
                                  config_name="no-orch",
                                  mode='w')

    # then instantiate the reporters from the configuration. This
    # localizes the file paths of the outputs and applies the keyword
    # arguments specified above.
    reporters = configuration._gen_reporters()

    print("created configuration")

    ### Initial Walkers
    init_walkers = [Walker(deepcopy(init_state), INIT_WEIGHT)
                    for _ in range(N_WALKERS)]

    print("created init walkers")

    ### Work Mapper
    if platform in ('OpenCL', 'CUDA'):
        # we use a mapper that uses GPUs
        work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                                   num_workers=n_workers)
    elif platform in ('CPU',):
        # for the CPU we can choose how many threads to use per walker
        worker_attributes = {'num_threads': N_CPU_THREADS}
        work_mapper = WorkerMapper(worker_type=OpenMMCPUWorker,
                                   worker_attributes=worker_attributes,
                                   num_workers=n_workers)
    elif platform in ('Reference',):
        # we just use the standard serial mapper
        work_mapper = Mapper()

    ### Simulation Manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=bc,
                          work_mapper=work_mapper,
                          reporters=reporters)

    ### Run the simulation
    steps = [n_steps for _ in range(n_cycles)]
    sim_manager.run_simulation(n_cycles, steps)
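# A possible command-line wrapper for run_sim() above, as a sketch.
# The positional argument order and the convention that the ligand
# force field comes last are assumptions for illustration, not taken
# from the original script.
if __name__ == "__main__":
    import sys

    init_state_path = sys.argv[1]
    json_top_path = sys.argv[2]
    n_cycles = int(sys.argv[3])
    n_steps = int(sys.argv[4])
    platform = sys.argv[5]
    n_workers = int(sys.argv[6])

    # the general force field files, followed by the ligand force field
    forcefield_paths = list(sys.argv[7:-1])
    lig_ff = sys.argv[-1]

    run_sim(init_state_path, json_top_path, forcefield_paths,
            n_cycles, n_steps, platform, n_workers, lig_ff=lig_ff)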
def print_sim_objs():
    print("init_walkers: ", get_size(init_walkers))
    print("runner: ", get_size(runner))
    print("resampler: ", get_size(resampler))
    print("ubc: ", get_size(ubc))
    print("mapper: ", get_size(mapper))
    print("hdf5_reporter: ", get_size(hdf5_reporter))
    print("dashboard_reporter: ", get_size(dashboard_reporter))
    print("sim_manager: ", get_size(sim_manager))
    print("steps: ", get_size(steps))

sim_monitor = SimMonitor(reporter_order=reporter_order)

# actually run the simulation
print("Starting run")
print("----------------------------------------")
print_sim_objs()
print("----------------------------------------\n")

sim_manager.run_simulation(n_cycles, steps, sim_monitor=sim_monitor)

print("Finished run")
print("----------------------------------------")
print_sim_objs()
print("----------------------------------------\n")
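# get_size() is used above but not defined in this snippet. A rough,
# hypothetical stand-in based on sys.getsizeof is sketched below; it
# recursively sums the sizes of contained objects and would need to be
# defined before print_sim_objs() is first called. The real helper may
# differ.
import sys

def get_size(obj, seen=None):
    """Recursively estimate the memory footprint of an object in bytes."""
    if seen is None:
        seen = set()

    # avoid double-counting objects reachable through multiple paths
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    seen.add(obj_id)

    size = sys.getsizeof(obj)
    if isinstance(obj, dict):
        size += sum(get_size(k, seen) + get_size(v, seen)
                    for k, v in obj.items())
    elif isinstance(obj, (list, tuple, set, frozenset)):
        size += sum(get_size(item, seen) for item in obj)
    elif hasattr(obj, '__dict__'):
        size += get_size(vars(obj), seen)

    return size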
def test_lj_sim_manager_openmm_integration_run(
        self,
        class_tmp_path_factory,
        boundary_condition_class,
        resampler_class,
        work_mapper_class,
        platform,
        lj_params,
        lj_omm_sys,
        lj_integrator,
        lj_reporter_classes,
        lj_reporter_kwargs,
        lj_init_walkers,
        lj_openmm_runner,
        lj_unbinding_bc,
        lj_wexplore_resampler,
        lj_revo_resampler,
):
    """Run all combinations of components in the fixtures for the
    smallest amount of time, just to make sure they all work together
    and don't give errors."""

    logging.getLogger().setLevel(logging.DEBUG)
    install_mp_handler()
    logging.debug("Starting the test")

    print("starting the test")

    # the configuration class gives us a convenient way to parametrize
    # our reporters for the locale
    from wepy.orchestration.configuration import Configuration

    # the runner
    from wepy.runners.openmm import OpenMMRunner

    # mappers
    from wepy.work_mapper.mapper import Mapper
    from wepy.work_mapper.worker import WorkerMapper
    from wepy.work_mapper.task_mapper import TaskMapper

    # the worker types for the WorkerMapper
    from wepy.work_mapper.worker import Worker
    from wepy.runners.openmm import OpenMMCPUWorker, OpenMMGPUWorker

    # the walker task types for the TaskMapper
    from wepy.work_mapper.task_mapper import WalkerTaskProcess
    from wepy.runners.openmm import OpenMMCPUWalkerTaskProcess, OpenMMGPUWalkerTaskProcess

    n_cycles = 1
    n_steps = 2
    num_workers = 2

    # generate the reporters and a temporary directory for this test
    # combination
    tmpdir_template = 'lj_fixture_{plat}-{wm}-{res}-{bc}'
    tmpdir_name = tmpdir_template.format(plat=platform,
                                         wm=work_mapper_class,
                                         res=resampler_class,
                                         bc=boundary_condition_class)

    # make a temporary directory for this configuration to work with
    tmpdir = str(class_tmp_path_factory.mktemp(tmpdir_name))

    # make a config so that the reporters get parametrized properly
    reporters = Configuration(
        work_dir=tmpdir,
        reporter_classes=lj_reporter_classes,
        reporter_partial_kwargs=lj_reporter_kwargs).reporters

    steps = [n_steps for _ in range(n_cycles)]

    # choose the components based on the parametrization
    boundary_condition = None
    resampler = None

    walker_fixtures = [lj_init_walkers]
    runner_fixtures = [lj_openmm_runner]
    boundary_condition_fixtures = [lj_unbinding_bc]
    resampler_fixtures = [lj_wexplore_resampler, lj_revo_resampler]

    walkers = lj_init_walkers

    boundary_condition = [
        boundary_condition
        for boundary_condition in boundary_condition_fixtures
        if type(boundary_condition).__name__ == boundary_condition_class
    ][0]
    resampler = [
        resampler
        for resampler in resampler_fixtures
        if type(resampler).__name__ == resampler_class
    ][0]

    assert boundary_condition is not None
    assert resampler is not None

    # generate the work mapper given the type and the platform
    work_mapper_classes = {
        mapper_class.__name__: mapper_class
        for mapper_class in [Mapper, WorkerMapper, TaskMapper]
    }

    # # select the right one given the option
    # work_mapper_type = [mapper_type for mapper_type in work_mapper_classes
    #                     if type(mapper_type).__name__ == work_mapper_class][0]

    # decide, based on the platform and the work mapper, which
    # platform-dependent components to build
    if work_mapper_class == 'Mapper':
        # then there are no settings
        work_mapper = Mapper()

    elif work_mapper_class == 'WorkerMapper':

        if platform == 'CUDA':
            work_mapper = WorkerMapper(
                num_workers=num_workers,
                worker_type=OpenMMGPUWorker,
                device_ids={'0': 0, '1': 1},
                proc_start_method='spawn')

        elif platform == 'OpenCL':
            work_mapper = WorkerMapper(
                num_workers=num_workers,
                worker_type=OpenMMGPUWorker,
                device_ids={'0': 0, '1': 1},
            )

        elif platform == 'CPU':
            work_mapper = WorkerMapper(
                num_workers=num_workers,
                worker_type=OpenMMCPUWorker,
                worker_attributes={'num_threads': 1})

        elif platform == 'Reference':
            work_mapper = WorkerMapper(
                num_workers=num_workers,
                worker_type=Worker,
            )

    elif work_mapper_class == 'TaskMapper':

        if platform == 'CUDA':
            work_mapper = TaskMapper(
                num_workers=num_workers,
                walker_task_type=OpenMMGPUWalkerTaskProcess,
                device_ids={'0': 0, '1': 1},
                proc_start_method='spawn')

        elif platform == 'OpenCL':
            work_mapper = TaskMapper(
                num_workers=num_workers,
                walker_task_type=OpenMMGPUWalkerTaskProcess,
                device_ids={'0': 0, '1': 1})

        elif platform == 'CPU':
            work_mapper = TaskMapper(
                num_workers=num_workers,
                walker_task_type=OpenMMCPUWalkerTaskProcess,
                worker_attributes={'num_threads': 1})

        elif platform == 'Reference':
            work_mapper = TaskMapper(
                num_workers=num_workers,
                walker_task_type=WalkerTaskProcess,
            )

    else:
        raise ValueError(
            "Work mapper class {} not recognized".format(work_mapper_class))

    # initialize the runner with the platform
    runner = OpenMMRunner(lj_omm_sys.system,
                          lj_omm_sys.topology,
                          lj_integrator,
                          platform=platform)

    logging.debug("Constructing the manager")

    manager = Manager(walkers,
                      runner=runner,
                      boundary_conditions=boundary_condition,
                      resampler=resampler,
                      work_mapper=work_mapper,
                      reporters=reporters)

    # different work mappers need different process start methods on
    # different platforms (e.g. CUDA needs 'spawn' rather than the Linux
    # default 'fork'), so the appropriate one was chosen for each case
    # above.

    logging.debug("Starting the simulation")
    walkers, filters = manager.run_simulation(n_cycles,
                                              steps,
                                              num_workers=num_workers)
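# A small sanity check one might append after the run, given here only
# as a sketch: resampling and (for UnbindingBC) warping conserve total
# probability, so the weights of the returned walkers should still sum
# to one up to floating-point error.
total_weight = sum(walker.weight for walker in walkers)
assert abs(total_weight - 1.0) < 1e-6, \
    "walker weights should sum to 1, got {}".format(total_weight)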
def _run(self, num_runs, num_cycles, num_walkers):
    """Runs a random walk simulation.

    Parameters
    ----------
    num_runs : int
        The number of independent simulations.

    num_cycles : int
        The number of cycles that will be run in the simulation.

    num_walkers : int
        The number of walkers.

    """

    print("Random walk simulation with: ")
    print("Dimension = {}".format(self.dimension))
    print("Probability = {}".format(self.probability))
    print("Number of Walkers = {}".format(num_walkers))
    print("Number of Cycles = {}".format(num_cycles))

    # set up the initial state for the walkers
    positions = np.zeros((1, self.dimension))
    init_state = WalkerState(positions=positions, time=0.0)

    # create the list of initial walkers with equal weights
    initial_weight = 1 / num_walkers
    init_walkers = [Walker(init_state, initial_weight)
                    for i in range(num_walkers)]

    # set up the runner for the system
    runner = RandomWalkRunner(probability=self.probability)

    units = dict(UNIT_NAMES)

    # the number of dynamics steps per cycle
    segment_length = 10

    # set up the reporter
    randomwalk_system_top_json = self.generate_topology()
    hdf5_reporter = WepyHDF5Reporter(file_path=self.hdf5_filename,
                                     mode='w',
                                     save_fields=SAVE_FIELDS,
                                     topology=randomwalk_system_top_json,
                                     resampler=self.resampler,
                                     units=dict(UNITS),
                                     n_dims=self.dimension)

    # set up the simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=self.resampler,
                          work_mapper=Mapper(),
                          reporters=[hdf5_reporter])

    # each run is num_cycles cycles of segment_length steps each
    steps = [segment_length for i in range(num_cycles)]

    ### RUN the simulation
    for run_idx in range(num_runs):
        print("Starting run: {}".format(run_idx))
        sim_manager.run_simulation(num_cycles, steps)
        print("Finished run: {}".format(run_idx))

    print("Finished Simulation")
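# A hypothetical driver for the _run() method above. The class name
# `RandomwalkProfiler` and its constructor arguments are illustrative
# placeholders (the class definition is not shown here); substitute the
# actual class and a configured resampler instance.
#
# profiler = RandomwalkProfiler(resampler=my_resampler,
#                               dimension=5,
#                               probability=0.25,
#                               hdf5_filename="randomwalk_results.wepy.h5")
# profiler._run(num_runs=1, num_cycles=100, num_walkers=10)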