Example #1
    def _make_search_runs(self):
        DownwardExperiment._make_search_runs(self)
        # Register each additional parser as a resource and run it after
        # the standard search parser in every run.
        for i, parser in enumerate(self._additional_parsers):
            parser_alias = 'ADDITIONALPARSER%d' % i
            self.add_resource(parser_alias, parser, os.path.basename(parser))
            for run in self.runs:
                run.require_resource(parser_alias)
                run.add_command('additional-parser-%d' % i, [parser_alias])
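
For context, a minimal usage sketch: the hook above is driven by the constructor's "parsers" argument (see Example #4). The class name and parser path below are placeholders, not names from the source.

# Hypothetical usage; "MyExperiment" stands for whichever subclass
# defines _make_search_runs above.
exp = MyExperiment(
    suite='gripper:prob01.pddl',
    configs={'lmcut': ['--search', 'astar(lmcut())']},
    parsers=['parsers/custom-parser.py'])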
Example #3
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test),
                             repo=REPO,
                             combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        exp.steps.insert(
            0,
            Step('rm-eval-dir',
                 shutil.rmtree,
                 exp.eval_dir,
                 ignore_errors=True))
        exp.add_step(
            Step('fetch-baseline-results', Fetcher(),
                 get_exp_dir('baseline', args.test) + '-eval', exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_step(
            Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(
            Step('rm-preprocessed-tasks', shutil.rmtree,
                 exp.preprocessed_tasks_dir))
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')

    exp()
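
The script assumes a get_exp_dir(name, test) helper that is not shown. A minimal sketch, assuming it only needs to map the pair to a unique directory:

import os

def get_exp_dir(name, test):
    # Hypothetical layout: keep all experiment data under ./data/,
    # keyed by revision name and test run, e.g. data/baseline-nightly.
    return os.path.join('data', '%s-%s' % (name, test))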
Example #4
    def __init__(self, configs=None, grid_priority=None, path=None,
                 repo=None, revisions=None, search_revisions=None,
                 suite=None, parsers=None, **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If "configs" is specified, it should be a dict of {nick:
        cmdline} pairs that sets the planner configurations to test.

        If "grid_priority" is specified and no environment is
        specifically requested in **kwargs, use the maia environment
        with the specified priority.

        If "path" is not specified, the experiment data path is
        derived automatically from the main script's filename.

        If "repo" is not specified, the repository base is derived
        automatically from the main script's path.

        If "revisions" is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search.

        If "search_revisions" is specified, it should be a non-empty
        list of revisions, which specify which search component
        versions to use in the experiment. All experiments use the
        translator and preprocessor component of the first
        revision.

        If "suite" is specified, it should specify a problem suite.

        If "parsers" is specified, it should be a list of paths to 
        parsers that should be run in addition to search_parser.py.

        Options "combinations" (from the base class), "revisions" and
        "search_revisions" are mutually exclusive."""

        if grid_priority is not None and "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        if path is None:
            path = get_data_dir()

        if repo is None:
            repo = get_repo_base()

        num_rev_opts_specified = (
            int(revisions is not None) +
            int(search_revisions is not None) +
            int(kwargs.get("combinations") is not None))

        if num_rev_opts_specified > 1:
            raise ValueError('must specify exactly one of "revisions", '
                             '"search_revisions" or "combinations"')

        # See add_comparison_table_step for more on this variable.
        self._HACK_revisions = revisions

        if revisions is not None:
            if not revisions:
                raise ValueError("revisions cannot be empty")
            combinations = [(Translator(repo, rev),
                             Preprocessor(repo, rev),
                             Planner(repo, rev))
                            for rev in revisions]
            kwargs["combinations"] = combinations

        if search_revisions is not None:
            if not search_revisions:
                raise ValueError("search_revisions cannot be empty")
            base_rev = search_revisions[0]
            translator = Translator(repo, base_rev)
            preprocessor = Preprocessor(repo, base_rev)
            combinations = [(translator, preprocessor, Planner(repo, rev))
                            for rev in search_revisions]
            kwargs["combinations"] = combinations

        self._additional_parsers = parsers or []

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        if configs is not None:
            for nick, config in configs.items():
                self.add_config(nick, config)

        if suite is not None:
            self.add_suite(suite)

        self._report_prefix = get_experiment_name()
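
A hypothetical instantiation based on the docstring above; the class name, revisions and parser path are placeholders:

exp = MyExperiment(  # stands for the class that defines this __init__
    configs={'blind': ['--search', 'astar(blind())']},
    suite='gripper:prob01.pddl',
    search_revisions=['default', 'issue123'],
    parsers=['parsers/custom-parser.py'],
    grid_priority=-500)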
Example #5
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO, combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [path for path in [exp.preprocess_exp_path, exp.path, exp.eval_dir]
                       if os.path.exists(path)]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree, exp.preprocessed_tasks_dir))
        exp.add_step(Step('rm-eval-dir', shutil.rmtree, exp.eval_dir))

    exp()
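
parse_custom_args() is assumed by these scripts but not shown. A plausible argparse sketch; the argument names follow the usage above, while the defaults are guesses:

import argparse

def parse_custom_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('revision', nargs='?', default='',
                        help='global revision, "baseline", or empty for WORK')
    parser.add_argument('--test', choices=sorted(SUITES), default='nightly',
                        help='which benchmark suite and config set to use')
    return parser.parse_args()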
Example #7
    def __init__(self, configs, suite, grid_priority=None, path=None,
                 repo=None, revisions=None, search_revisions=None,
                 test_suite=None, **kwargs):
        """Create a DownwardExperiment with some convenience features.

        *configs* must be a non-empty dict of {nick: cmdline} pairs
        that sets the planner configurations to test. ::

            IssueExperiment(configs={
                "lmcut": ["--search", "astar(lmcut())"],
                "ipdb":  ["--search", "astar(ipdb())"]})

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(suite=suites.suite_all())
            IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        If *repo* is specified, it must be the path to the root of a
        local Fast Downward repository. If omitted, the repository
        is derived automatically from the main script's path. Example::

            script = /path/to/fd-repo/experiments/issue123/exp01.py -->
            repo = /path/to/fd-repo

        If *revisions* is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"])

        If *search_revisions* is specified, it should be a non-empty
        list of revisions, which specify which search component
        versions to use in the experiment. All runs use the
        translator and preprocessor component of the first
        revision. ::

            IssueExperiment(search_revisions=["default", "issue123"])

        If you really need to specify the (translator, preprocessor,
        planner) triples manually, use the *combinations* parameter
        from the base class (might be deprecated soon). The options
        *revisions*, *search_revisions* and *combinations* can be
        freely mixed, but at least one of them must be given.

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])

        """

        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        if path is None:
            path = get_data_dir()

        if repo is None:
            repo = get_repo_base()

        kwargs.setdefault("combinations", [])

        if not any([revisions, search_revisions, kwargs["combinations"]]):
            raise ValueError('At least one of "revisions", "search_revisions" '
                             'or "combinations" must be given')

        if revisions:
            kwargs["combinations"].extend([
                (Translator(repo, rev),
                 Preprocessor(repo, rev),
                 Planner(repo, rev))
                for rev in revisions])

        if search_revisions:
            base_rev = search_revisions[0]
            # Use the same nick for all parts to get short revision nick.
            kwargs["combinations"].extend([
                (Translator(repo, base_rev, nick=rev),
                 Preprocessor(repo, base_rev, nick=rev),
                 Planner(repo, rev, nick=rev))
                for rev in search_revisions])

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        self._config_nicks = []
        for nick, config in configs.items():
            self.add_config(nick, config)

        self.add_suite(suite)
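
is_test_run() is assumed but not shown. One plausible implementation, borrowing the grid-detection rule from Example #10 (a node name ending in ".cluster" marks a grid machine):

import platform

def is_test_run():
    # Assumption: anything that is not a grid node counts as a local
    # test machine.
    return not platform.node().endswith('.cluster')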
Example #8
    def add_config(self, nick, config, timeout=None):
        DownwardExperiment.add_config(self, nick, config, timeout=timeout)
        # Remember the nick so later steps and reports can refer to it.
        self._config_nicks.append(nick)
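
Recording the nicks makes per-configuration reports straightforward; a hypothetical use, assuming the report class accepts a run filter:

# Hypothetical: one report per configuration, filtered by its nick.
for nick in exp._config_nicks:
    exp.add_report(
        AbsoluteReport(filter=lambda run, nick=nick:
                       run['config_nick'] == nick),
        name='report-%s' % nick)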
Example #10
    def __init__(self, configs=None, grid_priority=None, path=None,
                 repo=None, revisions=None, search_revisions=None,
                 combinations=None, suite=None, do_test_run="auto",
                 test_suite=DEFAULT_TEST_SUITE, **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If "configs" is specified, it should be a dict of {nick:
        cmdline} pairs that sets the planner configurations to test.

        If "grid_priority" is specified and no environment is
        specifically requested in **kwargs, use the maia environment
        with the specified priority.

        If "path" is not specified, the experiment data path is
        derived automatically from the main script's filename.

        If "repo" is not specified, the repository base is derived
        automatically from the main script's path.

        If "combinations" is specified, it should be a non-empty list
        of revision triples of the form (translator_rev,
        preprocessor_rev, search_rev).

        If "revisions" is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search.

        If "search_revisions" is specified, it should be a non-empty
        list of revisions, which specify which search component
        versions to use in the experiment. All experiments use the
        translator and preprocessor component of the first
        revision.

        It is possible to specify a mixture of "combinations",
        "revisions" and "search_revisions".

        If "suite" is specified, it should specify a problem suite.

        If "do_test_run" is true, the "grid_priority" and
        "environment" (from the base class) arguments are ignored and
        a local experiment with default arguments is run instead. In
        this case, the "suite" argument is replaced by the "test_suite"
        argument.

        If "do_test_run" is the string "auto" (the default), then
        do_test_run is set to False when run on a grid machine and
        to True otherwise. A grid machine is identified as one whose
        node name ends with ".cluster".
        """

        if do_test_run == "auto":
            do_test_run = not is_on_grid()

        if do_test_run:
            # In a test run, overwrite certain arguments.
            grid_priority = None
            kwargs.pop("environment", None)
            suite = test_suite

        if grid_priority is not None and "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        if path is None:
            path = get_data_dir()

        if repo is None:
            repo = get_repo_base()

        combinations, self._combination_names = build_combos_with_names(
            repo=repo,
            combinations=combinations,
            revisions=revisions,
            search_revisions=search_revisions)
        kwargs["combinations"] = combinations

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        if configs is not None:
            for nick, config in configs.items():
                self.add_config(nick, config)

        if suite is not None:
            self.add_suite(suite)

        self._report_prefix = get_experiment_name()
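
build_combos_with_names() is not shown; a sketch that matches the docstring (revision triples pass through, "revisions" expand to identical triples, and "search_revisions" reuse the first revision for translator and preprocessor):

def build_combos_with_names(repo, combinations, revisions, search_revisions):
    combos, names = [], []
    for trans_rev, preprocess_rev, search_rev in combinations or []:
        combos.append((Translator(repo, trans_rev),
                       Preprocessor(repo, preprocess_rev),
                       Planner(repo, search_rev)))
        names.append('%s-%s-%s' % (trans_rev, preprocess_rev, search_rev))
    for rev in revisions or []:
        combos.append((Translator(repo, rev),
                       Preprocessor(repo, rev),
                       Planner(repo, rev)))
        names.append(rev)
    for rev in search_revisions or []:
        base_rev = search_revisions[0]
        combos.append((Translator(repo, base_rev),
                       Preprocessor(repo, base_rev),
                       Planner(repo, rev)))
        names.append(rev)
    return combos, names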
Example #13
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO, combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        exp.steps.insert(0, Step('rm-eval-dir', shutil.rmtree, exp.eval_dir, ignore_errors=True))
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree, exp.preprocessed_tasks_dir))

    exp()