def get_run(output, end_hash, start_hash, orchestrator):
    """Copy a single run (start_hash -> end_hash) out of an orchestrator
    database into a new orchestrator database at `output`.

    Parameters
    ----------
    output : str or None
        Path for the new orchestrator database. When None a default
        name of the form '{start_hash}-{end_hash}.orch.sqlite' in the
        cwd is used.
    end_hash : str
        Hash of the run's end snapshot.
    start_hash : str
        Hash of the run's start snapshot.
    orchestrator : str
        Path to the source orchestrator database.

    Raises
    ------
    OSError
        If no output path was specified and the default path already
        exists (we refuse to implicitly overwrite it).
    """

    # if the output is None we automatically generate a file in the cwd
    # named from the hashes of the run's snapshots
    if output is None:
        output = "{}-{}.orch.sqlite".format(start_hash, end_hash)

        # check that it doesn't exist, and fail if it does, since we
        # don't want to implicitly overwrite stuff
        if osp.exists(output):
            raise OSError("No output path was specified and default already exists, exiting.")

    orch = Orchestrator(orchestrator, mode='r')
    # close the source database even if copying fails partway through
    try:
        start_serial_snapshot = orch.snapshot_kv[start_hash]
        end_serial_snapshot = orch.snapshot_kv[end_hash]

        # get the record values for this run
        rec_d = {field: value
                 for field, value in zip(Orchestrator.RUN_SELECT_FIELDS,
                                         orch.get_run_record(start_hash, end_hash))}

        config = orch.configuration_kv[rec_d['config_hash']]

        # create a new orchestrator at the output location and copy the
        # snapshots, configuration, and run record into it
        new_orch = Orchestrator(output, mode='w')
        try:
            _ = new_orch.add_serial_snapshot(start_serial_snapshot)
            _ = new_orch.add_serial_snapshot(end_serial_snapshot)

            config_hash = new_orch.add_serial_configuration(config)

            new_orch.register_run(start_hash, end_hash, config_hash,
                                  rec_d['last_cycle_idx'])
        finally:
            new_orch.close()
    finally:
        orch.close()
def add_config(configuration, orchestrator):
    """Add a serialized configuration to an orchestrator database and
    echo the resulting configuration hash.

    Parameters
    ----------
    configuration : file-like
        Open file containing the serialized configuration payload.
    orchestrator : str
        Path to the orchestrator database to add the configuration to.
    """

    orch = Orchestrator(orchestrator, mode='r+')
    try:
        serial_config = configuration.read()
        # BUG FIX: the configuration was previously stored via
        # add_serial_snapshot, which files it in the snapshot store;
        # use add_serial_configuration instead, matching how get_run
        # copies configurations.
        config_hash = orch.add_serial_configuration(serial_config)
    finally:
        orch.close()

    click.echo(config_hash)
def add_snapshot(snapshot, orchestrator):
    """Add a serialized snapshot to an orchestrator database and echo
    the resulting snapshot hash.

    Parameters
    ----------
    snapshot : file-like
        Open file containing the serialized snapshot payload.
    orchestrator : str
        Path to the orchestrator database to add the snapshot to.
    """

    database = Orchestrator(orchestrator, mode='r+')

    serialized = snapshot.read()
    snapshot_hash = database.add_serial_snapshot(serialized)

    database.close()

    click.echo(snapshot_hash)
def run_snapshot(log, n_workers, checkpoint_freq, job_dir, job_name, narration,
                 n_cycle_steps, run_time, configuration, snapshot):
    """Run a serialized snapshot for a fixed amount of wall-clock time.

    \b
    Reads the snapshot file, stages it in a fresh in-memory
    orchestrator, settles the interdependent run options, then
    orchestrates a run by time and echoes the resulting
    (start, end) hash pair.

    \b
    Parameters
    ----------
    log :
        Log level name passed to set_loglevel.
    n_workers :
        Number of workers for the run.
    checkpoint_freq :
        Cycle frequency at which checkpoints are written.
    job_dir :
        Working directory for the job (default settled if not given).
    job_name :
        Name for the job (default settled if not given).
    narration :
        Narration string for output naming (default settled if not given).
    n_cycle_steps :
        Number of steps per cycle.
    run_time :
        Wall-clock time budget for the run.
    configuration :
        Configuration input used to parametrize the run.
    snapshot :
        Open file containing the serialized starting snapshot.
    """

    set_loglevel(log)

    logging.info("Loading the starting snapshot file")
    serial_snapshot = snapshot.read()

    logging.info("Creating orchestrating orch database")
    # stage everything in an in-memory orchestrator to start
    orch = Orchestrator()

    logging.info("Adding the starting snapshot to database")
    start_hash = orch.add_serial_snapshot(serial_snapshot)

    # the options are interdependent, so resolve their defaults together
    job_dir, job_name, narration, config = settle_run_options(
        n_workers=n_workers,
        job_dir=job_dir,
        job_name=job_name,
        narration=narration,
        configuration=configuration,
        start_hash=start_hash,
    )

    logging.info("Orchestrator loaded")

    logging.info("Running snapshot by time")
    run_orch = orch.orchestrate_snapshot_run_by_time(
        start_hash,
        run_time,
        n_cycle_steps,
        checkpoint_freq=checkpoint_freq,
        work_dir=job_dir,
        config_name=job_name,
        narration=narration,
        configuration=config,
    )
    logging.info("Finished running snapshot by time")

    start_hash, end_hash = run_orch.run_hashes()[0]

    run_orch.close()
    logging.info("Closed the resultant orch")

    # report the run's identifying hashes to both the log and the terminal
    run_line_str = "Run start and end hashes: {}, {}".format(start_hash, end_hash)
    logging.info(run_line_str)
    click.echo(run_line_str)

    orch.close()
    logging.info("closed the orchestrating orch database")