Example #1
0
    def __init__(self, revisions=None, configs=None, path=None, **kwargs):
        """

        You can either specify both *revisions* and *configs* or none
        of them. If they are omitted, you will need to call
        exp.add_algorithm() manually.

        If *revisions* is given, it must be a non-empty list of
        revision identifiers, which specify which planner versions to
        use in the experiment. The same versions are used for
        translator, preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        If *configs* is given, it must be a non-empty list of
        IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        Raises ValueError if exactly one of *revisions* and *configs*
        is given.

        """

        # Fail fast: validate the argument combination *before* the
        # parent constructor runs and creates any experiment state.
        if bool(revisions) != bool(configs):
            raise ValueError(
                "please provide either both or none of revisions and configs")

        path = path or get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        revisions = revisions or []
        configs = configs or []

        # Hoist the repo lookup out of the loop (it is invariant) and
        # register one algorithm per (revision, config) combination.
        repo = get_repo_base()
        for rev in revisions:
            for config in configs:
                self.add_algorithm(get_algo_nick(rev, config.nick),
                                   repo,
                                   rev,
                                   config.component_options,
                                   build_options=config.build_options,
                                   driver_options=config.driver_options)

        self._revisions = revisions
        self._configs = configs
Example #2
0
 def __init__(self, soft_limit=1024, hard_limit=10240, *args, **kwargs):
     """Initialize the experiment and register all result parsers.

     *soft_limit* and *hard_limit* are stored on the instance for
     later use; all remaining arguments are forwarded to
     FastDownwardExperiment.
     """
     FastDownwardExperiment.__init__(self, *args, **kwargs)
     self.soft_limit = soft_limit
     self.hard_limit = hard_limit
     # Register the parsers that ship with the framework.
     for builtin_parser in (self.EXITCODE_PARSER,
                            self.TRANSLATOR_PARSER,
                            self.SINGLE_SEARCH_PARSER,
                            self.PLANNER_PARSER):
         self.add_parser(builtin_parser)
     # Register the project-specific parsers that live next to this file.
     script_dir = os.path.dirname(os.path.abspath(__file__))
     for custom_parser in ("start-parser.py",
                           "split-time-parser.py",
                           "average-split-parser.py"):
         self.add_parser(os.path.join(script_dir, custom_parser))
Example #3
0
    def __init__(self,
                 revisions,
                 configs,
                 suite,
                 grid_priority=None,
                 path=None,
                 test_suite=None,
                 email=None,
                 **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If *revisions* is specified, it should be a non-empty list of
        revisions, which specify which planner versions to use in the
        experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        *configs* must be a non-empty list of IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(..., suite=suites.suite_all())
            IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(..., suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(..., grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])

        If *email* is specified, it should be an email address. This
        email address will be notified upon completion of the
        experiments if it is run on the cluster.
        """

        # Local test runs use the local machine and a small benchmark
        # set; real runs default to the Maia grid unless the caller
        # provided an environment explicitly.
        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(
                priority=grid_priority, email=email)

        FastDownwardExperiment.__init__(
            self, path=path or get_data_dir(), **kwargs)

        repo = get_repo_base()
        # Register one algorithm per (revision, config) combination.
        for revision in revisions:
            for conf in configs:
                self.add_algorithm(
                    get_algo_nick(revision, conf.nick),
                    repo,
                    revision,
                    conf.component_options,
                    build_options=conf.build_options,
                    driver_options=conf.driver_options)

        self.add_suite(os.path.join(repo, "benchmarks"), suite)

        self._revisions = revisions
        self._configs = configs
    def __init__(self, revisions, configs, suite, grid_priority=None,
                 path=None, test_suite=None, email=None, processes=1,
                 **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If *revisions* is specified, it should be a non-empty list of
        revisions, which specify which planner versions to use in the
        experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        *configs* must be a non-empty list of IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(..., suite=suites.suite_all())
            IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(..., suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(..., grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])

        *processes* sets the number of local worker processes used
        when the experiment runs as a test on the local machine.

        If *email* is specified, it should be an email address. This
        email address will be notified upon completion of the
        experiments if it is run on the cluster.
        """

        # Pick the execution environment: local (with *processes*
        # workers) for test runs, otherwise Maia unless the caller
        # already supplied one.
        if is_test_run():
            kwargs["environment"] = LocalEnvironment(processes=processes)
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(
                priority=grid_priority, email=email)

        experiment_path = path or get_data_dir()
        FastDownwardExperiment.__init__(self, path=experiment_path, **kwargs)

        repo = get_repo_base()
        # Every revision is paired with every configuration.
        for rev in revisions:
            for cfg in configs:
                nick = get_algo_nick(rev, cfg.nick)
                self.add_algorithm(
                    nick,
                    repo,
                    rev,
                    cfg.component_options,
                    build_options=cfg.build_options,
                    driver_options=cfg.driver_options)

        self.add_suite(os.path.join(repo, "benchmarks"), suite)

        self._revisions = revisions
        self._configs = configs