def run_parsers(self):
    """Re-run all parser resources in every run directory.

    Copies the parsers from their sources into the experiment
    directory again (so that edits to parser code are picked up),
    deletes each run's stale "properties" file and then executes
    every parser inside each run directory.

    Fix: the method previously lacked the ``self`` parameter even
    though the body references ``self`` — calling it as a method
    raised ``TypeError``.
    """
    if not os.path.isdir(self.path):
        logging.critical(f"{self.path} is missing or not a directory")
    # Copy all parsers from their source to their destination again.
    self._build_resources(only_parsers=True)
    run_dirs = sorted(glob(os.path.join(self.path, "runs-*-*", "*")))
    total_dirs = len(run_dirs)
    logging.info(f"Parsing properties in {total_dirs:d} run directories")
    for index, run_dir in enumerate(run_dirs, start=1):
        # Remove old results so every parser starts from a clean slate.
        props_path = os.path.join(run_dir, "properties")
        if os.path.exists(props_path):
            tools.remove_path(props_path)
        # Log only every 100th run at INFO level to keep output short.
        loglevel = logging.INFO if index % 100 == 0 else logging.DEBUG
        logging.log(loglevel, f"Parsing run: {index:6d}/{total_dirs:d}")
        for resource in self.resources:
            if resource.is_parser:
                parser_filename = self.env_vars_relative[resource.name]
                rel_parser = os.path.join("../../", parser_filename)
                # Since parsers often produce output which we would
                # rather not want to see for each individual run, we
                # suppress it here.
                subprocess.check_call(
                    [tools.get_python_executable(), rel_parser],
                    cwd=run_dir,
                    stdout=subprocess.DEVNULL,
                )
def _get_run_job_body(self):
    """Return the run-job file contents, filled from the template."""
    order = " ".join(map(str, self._get_task_order()))
    return tools.fill_template(
        self.RUN_JOB_BODY_TEMPLATE_FILE,
        task_order=order,
        exp_path="../" + self.exp.name,
        python=tools.get_python_executable(),
    )
def __init__(self, exp, algo, task):
    """Create a run that solves *task* with *algo* in experiment *exp*.

    If the task comes as a translated SAS file (no domain file), a
    single "task" resource is added; otherwise the PDDL domain and
    problem files are added. The planner command is then assembled
    from the driver options, the input files and the component
    options.

    Fix: dropping "--validate" now uses a filter instead of
    ``list.remove()``, which raised ``ValueError`` whenever the
    option had not been passed in the first place.
    """
    Run.__init__(self, exp)
    self.algo = algo
    self.task = task
    # Copy the driver options so per-run modifications below don't
    # leak back into the shared algorithm object.
    self.driver_options = algo.driver_options[:]
    if self.task.domain_file is None:
        self.add_resource("task", self.task.problem_file, "task.sas", symlink=True)
        input_files = ["{task}"]
        # Without PDDL input files, we can't validate the solution.
        self.driver_options = [
            opt for opt in self.driver_options if opt != "--validate"
        ]
    else:
        self.add_resource(
            "domain", self.task.domain_file, "domain.pddl", symlink=True
        )
        self.add_resource(
            "problem", self.task.problem_file, "problem.pddl", symlink=True
        )
        input_files = ["{domain}", "{problem}"]
    self.add_command(
        "planner",
        [tools.get_python_executable()]
        + ["{" + _get_solver_resource_name(algo.cached_revision) + "}"]
        + self.driver_options
        + input_files
        + algo.component_options,
    )
    self._set_properties()
def __init__(self, exp, algo, task):
    """Set up a run that lets *algo* solve *task* inside *exp*."""
    Run.__init__(self, exp)
    self.algo = algo
    self.task = task
    self._set_properties()
    # Linking to instead of copying the PDDL files makes building
    # the experiment twice as fast.
    self.add_resource("domain", self.task.domain_file, "domain.pddl", symlink=True)
    self.add_resource("problem", self.task.problem_file, "problem.pddl", symlink=True)
    solver = "{" + _get_solver_resource_name(algo.cached_revision) + "}"
    planner_cmd = [tools.get_python_executable(), solver]
    planner_cmd += algo.driver_options
    planner_cmd += ["{domain}", "{problem}"]
    planner_cmd += algo.component_options
    self.add_command("planner", planner_cmd)
def _get_step_job_body(self, step):
    """Return the job-file body for executing *step* on the grid."""
    template_args = {
        "cwd": os.getcwd(),
        "python": tools.get_python_executable(),
        "script": sys.argv[0],
        "step_name": step.name,
    }
    return tools.fill_template(self.STEP_JOB_BODY_TEMPLATE_FILE, **template_args)
def add_parser(self, path_to_parser):
    """
    Add a parser to each run of the experiment.

    Add the parser as a resource to the experiment and add a command
    that executes the parser to each run. Since commands are executed
    in the order they are added, parsers should be added after all
    other commands. If you need to change your parsers and execute
    them again you can use the :meth:`.add_parse_again_step` method.

    *path_to_parser* must be the path to a Python script. The script
    is executed in the run directory and manipulates the run's
    "properties" file. The last part of the filename (without the
    extension) is used as a resource name. Therefore, it must be
    unique among all parsers and other resources. Also, it must start
    with a letter and contain only letters, numbers, underscores and
    dashes (which are converted to underscores automatically).

    For information about how to write parsers see :ref:`parsing`.

    """
    # Derive the resource name from the file name; dashes are not
    # valid in resource names, so map them to underscores.
    stem, _ = os.path.splitext(os.path.basename(path_to_parser))
    name = stem.replace("-", "_")
    self._check_alias(name)
    if not os.path.isfile(path_to_parser):
        logging.critical(f"Parser {path_to_parser} could not be found.")
    dest = os.path.basename(path_to_parser)
    self.env_vars_relative[name] = dest
    parser_resource = _Resource(
        name, path_to_parser, dest, symlink=False, is_parser=True
    )
    self.resources.append(parser_resource)
    self.add_command(name, [tools.get_python_executable(), f"{{{name}}}"])
def _get_run_job_body(self, run_step):
    """Return the Slurm run-job file contents for *run_step*."""
    num_runs = len(self.exp.runs)
    num_tasks = self._get_num_tasks(run_step)
    logging.info(f"Grouping {num_runs} runs into {num_tasks} Slurm tasks.")
    order = self._get_task_order(num_tasks)
    return tools.fill_template(
        self.RUN_JOB_BODY_TEMPLATE_FILE,
        exp_path="../" + self.exp.name,
        num_runs=num_runs,
        python=tools.get_python_executable(),
        runs_per_task=self._get_num_runs_per_task(),
        task_order=" ".join(str(task_id) for task_id in order),
    )
def __init__(self, exp, algo, task):
    """Build a run in which *algo* is applied to the PDDL *task*."""
    Run.__init__(self, exp)
    self.algo = algo
    self.task = task
    self._set_properties()
    # Linking to instead of copying the PDDL files makes building
    # the experiment twice as fast.
    self.add_resource(
        "domain", self.task.domain_file, "domain.pddl", symlink=True)
    self.add_resource(
        "problem", self.task.problem_file, "problem.pddl", symlink=True)
    solver = "{" + algo.cached_revision.get_planner_resource_name() + "}"
    command = (
        [tools.get_python_executable(), solver]
        + algo.driver_options
        + ["{domain}", "{problem}"]
        + algo.component_options
    )
    self.add_command("planner", command)
def start_runs(self):
    """Execute the experiment's run script inside the experiment directory."""
    cmd = [tools.get_python_executable(), self.EXP_RUN_SCRIPT]
    tools.run_command(cmd, cwd=self.exp.path)