def run_orch(log, n_workers, checkpoint_freq, job_dir, job_name, narration,
             configuration, n_cycle_steps, run_time, start_hash, orchestrator):
    """Run a snapshot, by time, from an existing orchestrator database.

    Opens the orchestrator database read-only, runs the snapshot identified
    by `start_hash` for `run_time`, then reports the (start, end) hash pair
    of the run to the log and the terminal.

    \b
    Parameters
    ----------
    log :
        Log level; passed to `set_loglevel`.
    n_workers :
        Forwarded to `settle_run_options`; presumably the number of workers
        to run with — confirm against `settle_run_options`.
    checkpoint_freq :
        Checkpointing frequency, forwarded to
        `orchestrate_snapshot_run_by_time`.
    job_dir :
        Directory to run the job in (settled by `settle_run_options`).
    job_name :
        Name for the job/config (settled by `settle_run_options`).
    narration :
        Narration option (settled by `settle_run_options`).
    configuration :
        Configuration input, resolved by `settle_run_options` into the
        `config` actually used for the run.
    n_cycle_steps :
        Forwarded to `orchestrate_snapshot_run_by_time`.
    run_time :
        How long to run for, forwarded to `orchestrate_snapshot_run_by_time`.
    start_hash :
        Hash of the starting snapshot in the orchestrator database.
    orchestrator :
        Path/handle of the orchestrator database to open (mode 'r').

    \b
    Returns
    -------
    None

    """

    set_loglevel(log)

    # settle what the defaults etc. are for the different options as they
    # are interdependent
    job_dir, job_name, narration, config = settle_run_options(
        n_workers=n_workers,
        job_dir=job_dir,
        job_name=job_name,
        narration=narration,
        configuration=configuration,
        start_hash=start_hash,
    )

    # Open a wrapper around the orchestrator database that provides
    # the inputs for the simulation
    orch = Orchestrator(orchestrator, mode='r')

    logging.info("Orchestrator loaded")
    logging.info("Running snapshot by time")

    # NOTE: named `result_orch` (was `run_orch`) so it no longer shadows
    # this function's own name
    result_orch = orch.orchestrate_snapshot_run_by_time(
        start_hash,
        run_time,
        n_cycle_steps,
        checkpoint_freq=checkpoint_freq,
        work_dir=job_dir,
        config_name=job_name,
        narration=narration,
        configuration=config,
    )

    logging.info("Finished running snapshot by time")

    # only one run was performed in this invocation, so take the first
    # (start, end) hash pair
    start_hash, end_hash = result_orch.run_hashes()[0]

    logging.info("Closing the resultant orchestrator")
    result_orch.close()

    # write the run tuple out to the log
    run_line_str = "Run start and end hashes: {}, {}".format(start_hash, end_hash)

    # log it
    logging.info(run_line_str)
    # also put it to the terminal
    click.echo(run_line_str)

    logging.info("Closing the orchestrating orch")
    orch.close()
def run_snapshot(log, n_workers, checkpoint_freq, job_dir, job_name, narration,
                 n_cycle_steps, run_time, configuration, snapshot):
    """Run a snapshot, by time, starting from a serialized snapshot file.

    Reads the snapshot, builds a fresh in-memory orchestrator database,
    adds the snapshot to it, runs it for `run_time`, then reports the
    (start, end) hash pair of the run to the log and the terminal.

    \b
    Parameters
    ----------
    log :
        Log level; passed to `set_loglevel`.
    n_workers :
        Forwarded to `settle_run_options`; presumably the number of workers
        to run with — confirm against `settle_run_options`.
    checkpoint_freq :
        Checkpointing frequency, forwarded to
        `orchestrate_snapshot_run_by_time`.
    job_dir :
        Directory to run the job in (settled by `settle_run_options`).
    job_name :
        Name for the job/config (settled by `settle_run_options`).
    narration :
        Narration option (settled by `settle_run_options`).
    n_cycle_steps :
        Forwarded to `orchestrate_snapshot_run_by_time`.
    run_time :
        How long to run for, forwarded to `orchestrate_snapshot_run_by_time`.
    configuration :
        Configuration input, resolved by `settle_run_options` into the
        `config` actually used for the run.
    snapshot :
        Readable file-like object containing the serialized starting
        snapshot (only `.read()` is called on it).

    \b
    Returns
    -------
    None

    """

    set_loglevel(log)

    logging.info("Loading the starting snapshot file")

    # read the config and snapshot in
    serial_snapshot = snapshot.read()

    logging.info("Creating orchestrating orch database")

    # make the orchestrator for this simulation in memory to start
    orch = Orchestrator()

    logging.info("Adding the starting snapshot to database")

    start_hash = orch.add_serial_snapshot(serial_snapshot)

    # settle what the defaults etc. are for the different options as they
    # are interdependent
    job_dir, job_name, narration, config = settle_run_options(
        n_workers=n_workers,
        job_dir=job_dir,
        job_name=job_name,
        narration=narration,
        configuration=configuration,
        start_hash=start_hash,
    )

    # TODO(review): the parametrized configuration is intentionally not
    # added to the orchestrator here — confirm whether this is still wanted:
    # config_hash = orch.add_serial_configuration(config)

    logging.info("Orchestrator loaded")
    logging.info("Running snapshot by time")

    # NOTE: named `result_orch` (was `run_orch`) so it no longer shadows
    # the sibling module-level function of that name
    result_orch = orch.orchestrate_snapshot_run_by_time(
        start_hash,
        run_time,
        n_cycle_steps,
        checkpoint_freq=checkpoint_freq,
        work_dir=job_dir,
        config_name=job_name,
        narration=narration,
        configuration=config,
    )

    logging.info("Finished running snapshot by time")

    # only one run was performed in this invocation, so take the first
    # (start, end) hash pair
    start_hash, end_hash = result_orch.run_hashes()[0]

    result_orch.close()
    logging.info("Closed the resultant orch")

    # write the run tuple out to the log
    run_line_str = "Run start and end hashes: {}, {}".format(start_hash, end_hash)

    # log it
    logging.info(run_line_str)
    # also put it to the terminal
    click.echo(run_line_str)

    orch.close()
    logging.info("closed the orchestrating orch database")