def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO,
                             combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        exp.steps.insert(0, Step('rm-eval-dir', shutil.rmtree, exp.eval_dir,
                                 ignore_errors=True))
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree,
                          exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree,
                          exp.preprocessed_tasks_dir))
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')

    exp()
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO,
                             combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.preprocess_exp_path, exp.path, exp.eval_dir]
            if os.path.exists(path)]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree,
                          exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree,
                          exp.preprocessed_tasks_dir))
        exp.add_step(Step('rm-eval-dir', shutil.rmtree, exp.eval_dir))

    exp()
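# Both main() variants above rely on module-level constants and helpers
# that are not shown in this excerpt. A minimal sketch of plausible
# definitions; every name and value here is illustrative, not from the
# original:
import os

EXPS_DIR = os.path.expanduser('~/experiments')   # assumption
REPO = os.path.expanduser('~/downward')          # local Fast Downward clone
BASELINE = 'e9845528763d'                        # baseline global revision id


def get_exp_dir(name, test):
    # Hypothetical helper matching the calls above: one experiment
    # directory per (revision name, test suite) pair.
    return os.path.join(EXPS_DIR, '%s-%s' % (name, test))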
def build_combo_with_name(repo, trans_rev, preprocess_rev, search_rev):
    """Generate a tuple (combination, name) for the given revisions.

    combination is a (Translator, Preprocessor, Planner) tuple and name
    is the name that lab uses to refer to it."""
    # TODO: In the future, it would be nice if we didn't need the name
    # information any more, as it is somewhat of an implementation
    # detail.
    combo = (Translator(repo, trans_rev),
             Preprocessor(repo, preprocess_rev),
             Planner(repo, search_rev))
    if trans_rev == preprocess_rev == search_rev:
        name = str(search_rev)
    else:
        name = "%s-%s-%s" % (trans_rev, preprocess_rev, search_rev)
    return combo, name
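# For illustration, this is how the helper behaves (assuming REPO points
# at a local Fast Downward clone):

# Same revision everywhere -> the revision itself is the name.
combo, name = build_combo_with_name(REPO, 'tip', 'tip', 'tip')
assert name == 'tip'

# Mixed revisions -> a composite name.
combo, name = build_combo_with_name(REPO, 'default', 'default', 'issue123')
assert name == 'default-default-issue123'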
def __init__(self, path, repo, environment=None, combinations=None,
             compact=True, limits=None, cache_dir=None):
    """
    The experiment will be built at *path*.

    *repo* must be the path to a Fast Downward repository. Among other
    things this repository is used to search for benchmark files.

    *environment* must be an :ref:`Environment <environments>` instance.
    By default the experiment is run locally.

    If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
    tuples of the form (Translator, Preprocessor, Planner). If
    combinations is None (default), perform an experiment with the
    working copy in *repo*.

    The *compact* parameter is only relevant for the search stage. If
    *compact* is ``False``, the preprocessed task and the two PDDL files
    are **copied** into the respective run directories for all
    configurations. This requires a lot of space (tens of GB), so it is
    strongly recommended to use the default (``compact=True``) which only
    references these files. Use ``compact=False`` only if you really need
    a portable experiment.

    If *limits* is given, it must be a dictionary that maps a subset of
    the keys below to seconds and MiB. It will be used to overwrite the
    default limits::

        default_limits = {
            'translate_time': 7200,
            'translate_memory': 8192,
            'preprocess_time': 7200,
            'preprocess_memory': 8192,
            'search_time': 1800,
            'search_memory': 2048,
        }

    *cache_dir* is used to cache Fast Downward clones and preprocessed
    tasks. By default it points to ``~/lab``.

    .. note::

        The directory *cache_dir* can grow very large (tens of GB).

    Example: ::

        repo = '/path/to/downward-repo'
        env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
        combos = [(Translator(repo, rev=123),
                   Preprocessor(repo, rev='e2a018c865f7'),
                   Planner(repo, rev='tip'))]
        exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                 combinations=combos,
                                 limits={'search_time': 30,
                                         'search_memory': 1024})

    """
    Experiment.__init__(self, path, environment=environment,
                        cache_dir=cache_dir)

    #if not repo or not os.path.isdir(repo):
    #    logging.critical('The path "%s" is not a local Fast Downward '
    #                     'repository.' % repo)
    self.repo = repo
    self.orig_path = self.path
    self.search_exp_path = self.path
    self.preprocess_exp_path = self.path + '-p'
    self._path_to_python = None
    Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
    self.preprocessed_tasks_dir = os.path.join(self.cache_dir,
                                               'preprocessed-tasks')
    tools.makedirs(self.preprocessed_tasks_dir)

    self.combinations = (combinations or
                         [(Translator(repo), Preprocessor(repo),
                           Planner(repo))])
    self.compact = compact
    self.suites = defaultdict(list)
    self._algorithms = []
    self._portfolios = []

    limits = limits or {}
    for key, value in limits.items():
        if key not in LIMITS:
            logging.critical('Unknown limit: %s' % key)
    # Copy the defaults so we don't mutate the module-level LIMITS dict.
    self.limits = dict(LIMITS)
    self.limits.update(limits)

    # Save whether this is a compact experiment, i.e. one where
    # preprocessed tasks are only referenced, not copied.
    self.set_property('compact', compact)

    # TODO: Integrate this into the API.
    self.include_preprocess_results_in_search_runs = True

    self.compilation_options = ['-j%d' % self._jobs]

    self._search_parsers = []
    self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR,
                                        'search_parser.py'))

    # Remove the default experiment steps.
    self.steps = Sequence()

    self.add_step(Step('build-preprocess-exp', self.build,
                       stage='preprocess'))
    self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
    self.add_fetcher(src=self.preprocess_exp_path,
                     dest=self.preprocessed_tasks_dir,
                     name='fetch-preprocess-results',
                     copy_all=True,
                     write_combined_props=False)
    self.add_step(Step('build-search-exp', self.build, stage='search'))
    # New fetcher that copies the preprocessed PAC results into the
    # search experiment.
    # TODO: Make the hard-coded source path a parameter.
    self.add_PAC_fetcher(
        src='/home/sternron/gal-dreiman/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',
        dest=self.search_exp_path,
        name='fetch-preprocess-results',
        copy_all=True,
        write_combined_props=False)
    self.add_step(Step('run-search-exp', self.run, stage='search'))
    self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
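# Usage sketch for the constructor above (the path is illustrative): the
# registered step sequence runs the preprocess stage, fetches its results
# into the cache, copies the preprocessed PAC output into the search
# experiment, then runs the search stage and fetches the results.
exp = DownwardExperiment('/tmp/pac-exp', REPO)
exp()  # As in the main() scripts above, this dispatches over the steps.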
def __init__(self, configs, suite, grid_priority=None, path=None,
             repo=None, revisions=None, search_revisions=None,
             test_suite=None, **kwargs):
    """Create a DownwardExperiment with some convenience features.

    *configs* must be a non-empty dict of {nick: cmdline} pairs
    that sets the planner configurations to test. ::

        IssueExperiment(configs={
            "lmcut": ["--search", "astar(lmcut())"],
            "ipdb": ["--search", "astar(ipdb())"]})

    *suite* sets the benchmarks for the experiment. It must be a
    single string or a list of strings specifying domains or
    tasks. The downward.suites module has many predefined
    suites. ::

        IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

        from downward import suites
        IssueExperiment(suite=suites.suite_all())
        IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
        IssueExperiment(suite=suites.suite_optimal())

    Use *grid_priority* to set the job priority for cluster
    experiments. It must be in the range [-1023, 0] where 0 is the
    highest priority. By default the priority is 0. ::

        IssueExperiment(grid_priority=-500)

    If *path* is specified, it must be the path to where the
    experiment should be built (e.g.
    /home/john/experiments/issue123/exp01/). If omitted, the
    experiment path is derived automatically from the main
    script's filename. Example::

        script = experiments/issue123/exp01.py -->
        path = experiments/issue123/data/issue123-exp01/

    If *repo* is specified, it must be the path to the root of a
    local Fast Downward repository. If omitted, the repository
    is derived automatically from the main script's path. Example::

        script = /path/to/fd-repo/experiments/issue123/exp01.py -->
        repo = /path/to/fd-repo

    If *revisions* is specified, it should be a non-empty list of
    revisions, which specify which planner versions to use in the
    experiment. The same versions are used for translator,
    preprocessor and search. ::

        IssueExperiment(revisions=["issue123", "4b3d581643"])

    If *search_revisions* is specified, it should be a non-empty
    list of revisions, which specify which search component
    versions to use in the experiment. All runs use the translator
    and preprocessor component of the first revision. ::

        IssueExperiment(search_revisions=["default", "issue123"])

    If you really need to specify the (translator, preprocessor,
    planner) triples manually, use the *combinations* parameter
    from the base class (might be deprecated soon). The options
    *revisions*, *search_revisions* and *combinations* can be
    freely mixed, but at least one of them must be given.

    Specify *test_suite* to set the benchmarks for experiment test
    runs. By default the first gripper task is used. ::

        IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])

    """
    if is_test_run():
        kwargs["environment"] = LocalEnvironment()
        suite = test_suite or self.DEFAULT_TEST_SUITE
    elif "environment" not in kwargs:
        kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

    if path is None:
        path = get_data_dir()

    if repo is None:
        repo = get_repo_base()

    kwargs.setdefault("combinations", [])

    if not any([revisions, search_revisions, kwargs["combinations"]]):
        raise ValueError('At least one of "revisions", "search_revisions" '
                         'or "combinations" must be given')

    if revisions:
        kwargs["combinations"].extend([
            (Translator(repo, rev),
             Preprocessor(repo, rev),
             Planner(repo, rev))
            for rev in revisions])

    if search_revisions:
        base_rev = search_revisions[0]
        # Use the same nick for all parts to get a short revision nick.
kwargs["combinations"].extend([(Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite)
def __init__(self, configs=None, grid_priority=None, path=None,
             repo=None, revisions=None, search_revisions=None,
             suite=None, parsers=None, **kwargs):
    """Create a DownwardExperiment with some convenience features.

    If "configs" is specified, it should be a dict of {nick: cmdline}
    pairs that sets the planner configurations to test.

    If "grid_priority" is specified and no environment is specifically
    requested in **kwargs, use the maia environment with the specified
    priority.

    If "path" is not specified, the experiment data path is derived
    automatically from the main script's filename.

    If "repo" is not specified, the repository base is derived
    automatically from the main script's path.

    If "revisions" is specified, it should be a non-empty list of
    revisions, which specify which planner versions to use in the
    experiment. The same versions are used for translator, preprocessor
    and search.

    If "search_revisions" is specified, it should be a non-empty list of
    revisions, which specify which search component versions to use in
    the experiment. All experiments use the translator and preprocessor
    component of the first revision.

    If "suite" is specified, it should specify a problem suite.

    If "parsers" is specified, it should be a list of paths to parsers
    that should be run in addition to search_parser.py.

    Options "combinations" (from the base class), "revisions" and
    "search_revisions" are mutually exclusive."""
    if grid_priority is not None and "environment" not in kwargs:
        kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

    if path is None:
        path = get_data_dir()

    if repo is None:
        repo = get_repo_base()

    num_rev_opts_specified = (
        int(revisions is not None) +
        int(search_revisions is not None) +
        int(kwargs.get("combinations") is not None))
    if num_rev_opts_specified > 1:
        raise ValueError('must specify exactly one of "revisions", '
                         '"search_revisions" or "combinations"')

    # See add_comparison_table_step for more on this variable.
    self._HACK_revisions = revisions

    if revisions is not None:
        if not revisions:
            raise ValueError("revisions cannot be empty")
        combinations = [(Translator(repo, rev),
                         Preprocessor(repo, rev),
                         Planner(repo, rev))
                        for rev in revisions]
        kwargs["combinations"] = combinations

    if search_revisions is not None:
        if not search_revisions:
            raise ValueError("search_revisions cannot be empty")
        base_rev = search_revisions[0]
        translator = Translator(repo, base_rev)
        preprocessor = Preprocessor(repo, base_rev)
        combinations = [(translator, preprocessor, Planner(repo, rev))
                        for rev in search_revisions]
        kwargs["combinations"] = combinations

    self._additional_parsers = parsers or []

    DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

    if configs is not None:
        for nick, config in configs.items():
            self.add_config(nick, config)

    if suite is not None:
        self.add_suite(suite)

    self._report_prefix = get_experiment_name()
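# Unlike the variant above, this constructor forbids mixing "revisions",
# "search_revisions" and "combinations". A sketch under that constraint
# (revision names are illustrative):
exp = IssueExperiment(
    configs={"blind": ["--search", "astar(blind())"]},
    suite="gripper:prob01.pddl",
    # Translator and preprocessor come from "default", the first entry.
    search_revisions=["default", "issue123"],
)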
import os

from lab.environments import LocalEnvironment

from downward.checkouts import Translator, Preprocessor, Planner
from downward.experiments import DownwardExperiment
from downward.reports.compare import CompareConfigsReport
from downward.reports.timeout import TimeoutReport

import standard_exp


DIR = os.path.dirname(os.path.abspath(__file__))
EXPNAME = 'showcase-options'
EXPPATH = 'exp-lmcut-showcase'
REPO = os.path.expanduser('~/downward')
ENV = LocalEnvironment(processes=1)
ATTRIBUTES = ['coverage']
LIMITS = {'search_time': 100}
COMBINATIONS = [(Translator(repo=REPO), Preprocessor(repo=REPO),
                 Planner(repo=REPO))]

exp = DownwardExperiment(EXPPATH, repo=REPO, environment=ENV,
                         combinations=COMBINATIONS, limits=LIMITS,
                         cache_dir=standard_exp.CACHE_DIR)
exp.set_path_to_python(standard_exp.PYTHON)

exp.add_suite('gripper:prob01.pddl')
exp.add_suite('zenotravel:pfile1',
              benchmark_dir=os.path.join(REPO, 'benchmarks'))
exp.add_config('iter-hadd', [
    '--heuristic', 'hadd=add()',
    '--search',
    'iterated([lazy_greedy([hadd]),lazy_wastar([hadd])],repeat_last=true)'])
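# The original script is truncated above. A plausible ending, assuming
# lab's standard AbsoluteReport (the report name is illustrative, not
# from the original):
from downward.reports.absolute import AbsoluteReport

exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), name='report-abs')
exp()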
def __init__(self, path, repo, opt_or_sat, rev, base_rev=None,
             use_core_configs=True, use_ipc_configs=True,
             use_extended_configs=False, **kwargs):
    """
    See :py:class:`DownwardExperiment
    <downward.experiments.DownwardExperiment>` for inherited parameters.

    The experiment will be built at *path*.

    *repo* must be the path to a Fast Downward repository. This
    repository is used to search for problem files.

    If *opt_or_sat* is 'opt', configurations for optimal planning will
    be tested on all domains suited for optimal planning. If it is
    'sat', configurations for satisficing planning will be tested on
    the satisficing suite.

    *rev* determines the new revision to test.

    If *base_rev* is None (default), the latest revision on the branch
    default that is an ancestor of *rev* will be used.

    *use_core_configs* determines if the most common configurations are
    tested (default: True).

    *use_ipc_configs* determines if the configurations used in the IPCs
    are tested (default: True).

    *use_extended_configs* determines if some less common configurations
    are tested (default: False).
    """
    if base_rev is None:
        base_rev = checkouts.get_common_ancestor(repo, rev)
    combos = [(Translator(repo, rev=r),
               Preprocessor(repo, rev=r),
               Planner(repo, rev=r))
              for r in (base_rev, rev)]
    DownwardExperiment.__init__(self, path, repo, combinations=combos,
                                **kwargs)

    # ------ suites and configs ------------------------------------

    if opt_or_sat == 'opt':
        self.add_suite(suite_optimal_with_ipc11())
        configs = default_configs_optimal(use_core_configs,
                                          use_ipc_configs,
                                          use_extended_configs)
    elif opt_or_sat == 'sat':
        self.add_suite(suite_satisficing_with_ipc11())
        configs = default_configs_satisficing(use_core_configs,
                                              use_ipc_configs,
                                              use_extended_configs)
    else:
        logging.critical("Select either 'opt' or 'sat' configurations.")

    for nick, command in configs.items():
        self.add_config(nick, command)

    # ------ reports -----------------------------------------------

    comparison = CompareRevisionsReport(base_rev, rev,
                                        attributes=COMPARED_ATTRIBUTES)
    self.add_report(comparison, name='report-compare-scores',
                    outfile='report-compare-scores.html')

    for nick in configs.keys():
        config_before = '%s-%s' % (base_rev, nick)
        config_after = '%s-%s' % (rev, nick)
        for attribute in SCATTER_PLOT_ATTRIBUTES:
            name = 'scatter-%s-%s' % (attribute, nick)
            self.add_report(
                ScatterPlotReport(
                    filter_config=[config_before, config_after],
                    attributes=[attribute],
                    get_category=lambda run1, run2: run1['domain']),
                outfile=name)
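# Hypothetical usage of the constructor above. The excerpt does not show
# the enclosing class, so CheckRevisionExperiment, the path and the
# revision below are all placeholders:
import os

exp = CheckRevisionExperiment(
    '/tmp/check-exp',                   # *path*
    os.path.expanduser('~/downward'),   # *repo*
    'opt',                              # test optimal-planning configs
    'issue123')                         # the new revision under test
exp()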