def create(self, generator: BatchExpDefGenerator) -> None:
    utils.dir_create_checked(self.batch_input_root,
                             self.cmdopts['exp_overwrite'])

    # Scaffold the batch experiment, creating experiment directories and
    # writing template XML input files for each experiment in the batch with
    # changes from the batch criteria added.
    exp_def = xml.XMLLuigi(input_fpath=self.batch_config_template,
                           write_config=xml.XMLWriterConfig({'.': ''}))

    self.criteria.scaffold_exps(exp_def, self.cmdopts)

    # Pickle experiment definitions in the actual batch experiment
    # directory for later retrieval.
    self.criteria.pickle_exp_defs(self.cmdopts)

    # Run batch experiment generator (must be after scaffolding so the
    # per-experiment template files are in place).
    defs = generator.generate_defs()

    assert len(defs) > 0, "No XML modifications generated?"

    # The experiment directory names do not depend on the loop variable, so
    # compute them once rather than twice per iteration.
    exp_dirnames = self.criteria.gen_exp_dirnames(self.cmdopts)

    for i, defi in enumerate(defs):
        self.logger.debug(
            "Applying generated scenario+controller changes to exp%s", i)
        exp_output_root = os.path.join(self.batch_output_root,
                                       exp_dirnames[i])
        exp_input_root = os.path.join(self.batch_input_root,
                                      exp_dirnames[i])

        ExpCreator(self.cmdopts,
                   self.criteria,
                   self.batch_config_template,
                   exp_input_root,
                   exp_output_root,
                   i).from_def(defi)
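# A minimal driver sketch showing how create() above might be invoked during
# batch generation. The class name 'BatchExpCreator' and its constructor
# arguments are assumptions for illustration; only create()'s signature comes
# from the code above.
def stage1_scaffold(cmdopts: types.Cmdopts,
                    criteria: bc.BatchCriteria,
                    generator: BatchExpDefGenerator) -> None:
    creator = BatchExpCreator(cmdopts, criteria)  # hypothetical constructor
    creator.create(generator)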
def __init__(self,
             main_config: types.YAMLDict,
             cmdopts: types.Cmdopts) -> None:
    self.main_config = main_config
    self.cmdopts = cmdopts

    self.batch_stat_collate_root = self.cmdopts['batch_stat_collate_root']
    utils.dir_create_checked(self.batch_stat_collate_root, exist_ok=True)
def __call__(self,
             main_config: types.YAMLDict,
             criteria: bc.IConcreteBatchCriteria) -> None:
    exp_to_run = utils.exp_range_calc(self.cmdopts,
                                      self.cmdopts['batch_output_root'],
                                      criteria)
    exp_dirnames = criteria.gen_exp_dirnames(self.cmdopts)

    for exp in exp_to_run:
        exp = os.path.split(exp)[1]
        exp_index = exp_dirnames.index(exp)

        cmdopts = copy.deepcopy(self.cmdopts)
        cmdopts["exp0_output_root"] = os.path.join(
            self.cmdopts["batch_output_root"], exp_dirnames[0])
        cmdopts["exp0_stat_root"] = os.path.join(
            self.cmdopts["batch_stat_root"], exp_dirnames[0])

        cmdopts["exp_input_root"] = os.path.join(
            self.cmdopts['batch_input_root'], exp)
        cmdopts["exp_output_root"] = os.path.join(
            self.cmdopts['batch_output_root'], exp)
        cmdopts["exp_graph_root"] = os.path.join(
            self.cmdopts['batch_graph_root'], exp)
        cmdopts["exp_stat_root"] = os.path.join(
            self.cmdopts["batch_stat_root"], exp)
        cmdopts["exp_model_root"] = os.path.join(
            cmdopts['batch_model_root'], exp)

        utils.dir_create_checked(cmdopts['exp_model_root'], exist_ok=True)

        for model in self.models:
            if not model.run_for_exp(criteria, cmdopts, exp_index):
                self.logger.debug(
                    "Skip running intra-experiment model '%s' for exp%s",
                    str(model), exp_index)
                continue

            # Run the model
            self.logger.debug("Run intra-experiment model '%s' for exp%s",
                              str(model), exp_index)
            dfs = model.run(criteria, exp_index, cmdopts)

            for df, csv_stem in zip(dfs, model.target_csv_stems()):
                path_stem = os.path.join(cmdopts['exp_model_root'], csv_stem)

                # Write model legend file so the generated graph can find
                # it. 'df' is an element of 'dfs', so an identity check
                # finds its index.
                with open(path_stem + '.legend', 'w') as f:
                    for j, search in enumerate(dfs):
                        if search is df:
                            f.write(model.legend_names()[j])
                            break

                # Write model .csv file
                storage.DataFrameWriter('storage.csv')(df,
                                                       path_stem + '.model',
                                                       index=False)
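# A sketch of the duck-typed model interface the loop above relies on:
# run_for_exp(), run(), target_csv_stems(), and legend_names(). The class
# name and its constant outputs are illustrative only; the argument types
# follow the call sites above.
import pandas as pd

class ConstantIntraExpModel:
    """Toy intra-experiment model: predicts a constant value per timestep."""

    def run_for_exp(self, criteria, cmdopts, exp_index: int) -> bool:
        # Applicable to every experiment in the batch.
        return True

    def target_csv_stems(self) -> list:
        # One stem per dataframe returned by run(); determines the
        # .model/.legend filenames written into exp_model_root.
        return ['toy-metric']

    def legend_names(self) -> list:
        return ['Toy constant model']

    def run(self, criteria, exp_index: int, cmdopts) -> list:
        # One dataframe per target stem, in the same order.
        return [pd.DataFrame({'model': [1.0] * 100})]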
def __call__(self, exp_input_root: str, exp_num: int) -> None:
    """Executes experimental runs for a single experiment in parallel.

    Arguments:

        exp_input_root: Absolute path to the root directory of the
                        experiment's generated input files.

        exp_num: Index of the experiment in the batch.
    """
    self.logger.info("Running exp%s in '%s'", exp_num, exp_input_root)
    sys.stdout.flush()

    wd = os.path.relpath(exp_input_root, os.path.expanduser("~"))
    start = time.time()

    _, exp = os.path.split(exp_input_root)
    scratch_root = os.path.join(self.cmdopts['batch_scratch_root'], exp)
    utils.dir_create_checked(scratch_root, exist_ok=True)

    assert self.cmdopts['exec_jobs_per_node'] is not None, \
        "# parallel jobs can't be None"

    exec_opts = {
        'exp_input_root': exp_input_root,
        'work_dir': wd,
        'scratch_dir': scratch_root,
        'cmdfile_stem_path': os.path.join(exp_input_root,
                                          config.kGNUParallel['cmdfile_stem']),
        'cmdfile_ext': config.kGNUParallel['cmdfile_ext'],
        'exec_resume': self.cmdopts['exec_resume'],
        'n_jobs': self.cmdopts['exec_jobs_per_node'],
        'nodefile': self.cmdopts['nodefile']
    }
    for spec in self.generator.exec_exp_cmds(exec_opts):
        if not self.shell.run_from_spec(spec):
            self.logger.error("Check outputs in %s for full details",
                              exec_opts['scratch_dir'])

    elapsed = int(time.time() - start)
    sec = datetime.timedelta(seconds=elapsed)
    self.logger.info('Exp%s elapsed time: %s', exp_num, sec)

    with open(self.exec_times_fpath, 'a') as f:
        f.write('exp' + str(exp_num) + ': ' + str(sec) + '\n')
def __init__(self,
             cmdopts: types.Cmdopts,
             criteria: bc.BatchCriteria) -> None:
    self.cmdopts = cmdopts
    self.criteria = criteria

    self.batch_exp_root = os.path.abspath(self.cmdopts['batch_input_root'])
    self.batch_stat_root = os.path.abspath(self.cmdopts['batch_stat_root'])
    self.batch_stat_exec_root = os.path.join(self.batch_stat_root, 'exec')
    self.batch_scratch_root = os.path.abspath(
        self.cmdopts['batch_scratch_root'])
    self.exec_exp_range = self.cmdopts['exp_range']

    self.logger = logging.getLogger(__name__)

    utils.dir_create_checked(self.batch_stat_exec_root, exist_ok=True)
    utils.dir_create_checked(self.batch_scratch_root, exist_ok=True)
def _scaffold_expi(self,
                   expi_def: xml.XMLLuigi,
                   modsi: tp.Union[xml.XMLAttrChangeSet, xml.XMLTagAddList],
                   i: int,
                   cmdopts: types.Cmdopts) -> None:
    exp_dirname = self.gen_exp_dirnames(cmdopts)[i]
    self.logger.debug("Applying %s XML modifications from '%s' for exp%s in %s",
                      len(modsi),
                      self.cli_arg,
                      i,
                      exp_dirname)

    exp_input_root = os.path.join(self.batch_input_root, str(exp_dirname))
    utils.dir_create_checked(exp_input_root,
                             exist_ok=cmdopts['exp_overwrite'])

    for mod in modsi:
        if isinstance(mod, xml.XMLAttrChange):
            expi_def.attr_change(mod.path, mod.attr, mod.value)
        elif isinstance(mod, xml.XMLTagAdd):
            expi_def.tag_add(mod.path, mod.tag, mod.attr, mod.allow_dup)
        else:
            assert False, \
                "Batch criteria can only change attributes or add tags"

    # This will be the "template" input file used to generate the input
    # files for each experimental run in the experiment.
    wr_config = xml.XMLWriterConfig([{'src_parent': None,
                                      'src_tag': '.',
                                      'opath_leaf': None,
                                      'create_tags': None,
                                      'dest_parent': None}])
    expi_def.write_config_set(wr_config)
    opath = utils.batch_template_path(cmdopts,
                                      self.batch_input_root,
                                      exp_dirname)
    expi_def.write(opath)
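# Illustrative examples of the two modification types _scaffold_expi()
# accepts. The field names (path/attr/value and path/tag/attr/allow_dup)
# come from the isinstance() branches above; the constructor argument order
# shown here is an assumption.
chg = xml.XMLAttrChange('.//experiment',  # XPath to the target element
                        'length',         # attribute to modify
                        '1000')           # new value
add = xml.XMLTagAdd('.//arena',           # XPath to the parent element
                    'distribute',         # tag to add under the parent
                    {},                   # attributes for the new tag
                    False)                # allow_dup: reject duplicate tags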
def __call__(self,
             main_config: types.YAMLDict,
             criteria: bc.IConcreteBatchCriteria) -> None:
    cmdopts = copy.deepcopy(self.cmdopts)
    utils.dir_create_checked(cmdopts['batch_model_root'], exist_ok=True)
    utils.dir_create_checked(cmdopts['batch_graph_collate_root'],
                             exist_ok=True)

    for model in self.models:
        if not model.run_for_batch(criteria, cmdopts):
            self.logger.debug("Skip running inter-experiment model '%s'",
                              str(model))
            continue

        # Run the model
        self.logger.debug("Run inter-experiment model '%s'", str(model))
        dfs = model.run(criteria, cmdopts)

        for df, csv_stem in zip(dfs, model.target_csv_stems()):
            path_stem = os.path.join(cmdopts['batch_model_root'], csv_stem)

            # Write model .csv file
            storage.DataFrameWriter('storage.csv')(df,
                                                   path_stem + '.model',
                                                   index=False)

            # 1D dataframe -> line graph with legend
            if len(df.index) == 1:
                # Write model legend file so the generated graph can find
                # it. 'df' is an element of 'dfs', so an identity check
                # finds its index.
                with open(path_stem + '.legend', 'w') as f:
                    for i, search in enumerate(dfs):
                        if search is df:
                            f.write(model.legend_names()[i])
                            break
def for_exp_run(self, exp_input_root: str, run_output_root: str) -> None:
    if self.cmdopts['platform_vc']:
        frames_fpath = os.path.join(run_output_root,
                                    config.kARGoS['frames_leaf'])
        utils.dir_create_checked(frames_fpath, exist_ok=True)