Example #1
    def __init__(self, revisions=None, configs=None, path=None, **kwargs):
        """

        You can either specify both *revisions* and *configs* or none
        of them. If they are omitted, you will need to call
        exp.add_algorithm() manually.

        If *revisions* is given, it must be a non-empty list of
        revision identifiers, which specify which planner versions to
        use in the experiment. The same versions are used for
        translator, preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        If *configs* is given, it must be a non-empty list of
        IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        """

        path = path or get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        if (revisions and not configs) or (not revisions and configs):
            raise ValueError(
                "please provide either both or none of revisions and configs")

        revisions = revisions or []
        configs = configs or []

        for rev in revisions:
            for config in configs:
                self.add_algorithm(get_algo_nick(rev, config.nick),
                                   get_repo_base(),
                                   rev,
                                   config.component_options,
                                   build_options=config.build_options,
                                   driver_options=config.driver_options)

        self._revisions = revisions
        self._configs = configs
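
    # A minimal usage sketch for the constructor above (the revision ids
    # and config names are placeholders, not taken from the original):
    #
    #     exp = IssueExperiment(
    #         revisions=["issue123-base", "issue123-v1"],
    #         configs=[IssueConfig("lmcut", ["--search", "astar(lmcut())"])],
    #     )
    #
    # Omitting both arguments is also valid; planner versions must then be
    # added with exp.add_algorithm() manually.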
Example #2
    def _add_runs(self):
        """
        Example showing how to modify the automatically generated runs.

        This uses private members, so it might break between different
        versions of Lab.

        """
        FastDownwardExperiment._add_runs(self)
        for run in self.runs:
            command = run.commands["planner"]
            # Slightly raise soft limit for output to stdout.
            command[1]["soft_stdout_limit"] = 1.5 * 1024
Example #3
    def __init__(self, soft_limit=1024, hard_limit=10240, *args, **kwargs):
        FastDownwardExperiment.__init__(self, *args, **kwargs)
        self.soft_limit = soft_limit
        self.hard_limit = hard_limit
        # Add built-in parsers to the experiment.
        self.add_parser(self.EXITCODE_PARSER)
        self.add_parser(self.TRANSLATOR_PARSER)
        self.add_parser(self.SINGLE_SEARCH_PARSER)
        self.add_parser(self.PLANNER_PARSER)
        # Add custom parsers to the experiment.
        DIR = os.path.dirname(os.path.abspath(__file__))
        self.add_parser(os.path.join(DIR, "start-parser.py"))
        self.add_parser(os.path.join(DIR, "split-time-parser.py"))
        self.add_parser(os.path.join(DIR, "average-split-parser.py"))
Example #4
        return min_cost / cost

    def store_costs(self, run):
        cost = run.get("cost")
        if cost is not None:
            assert run["coverage"]
            self.tasks_to_costs[self._get_task(run)].append(cost)
        return True

    def add_quality(self, run):
        run["quality"] = self._compute_quality(
            run.get("cost"), self.tasks_to_costs[self._get_task(run)])
        return run


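# The two methods above are meant to serve as report filters: one pass
# collects the cost of every solved task, a second pass computes the IPC
# quality score min_cost/cost. A hypothetical wiring (the class name
# QualityFilters is assumed; the class definition is truncated above):
#
#     filters = QualityFilters()
#     exp.add_report(AbsoluteReport(
#         attributes=["cost", "quality"],
#         filter=[filters.store_costs, filters.add_quality]))
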
exp = FastDownwardExperiment(environment=ENV, revision_cache=REV_CACHE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, ["gripper:prob01.pddl", "miconic:s1-0.pddl"])
exp.add_algorithm(
    "iter-hadd",
    REPO,
    REV,
    [
        "--heuristic",
        "hadd=add()",
        "--search",
        "iterated([lazy_greedy([hadd]),lazy_wastar([hadd])],repeat_last=true)",
    ],
)
Example #5
if NODE.endswith(".scicore.unibas.ch") or NODE.endswith(".cluster.bc2.ch"):
    # Create bigger suites with suites.py from the downward-benchmarks repo.
    SUITE = ["depot", "freecell", "gripper", "zenotravel"]
    ENV = BaselSlurmEnvironment(email="*****@*****.**")
else:
    SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "mystery:prob07.pddl"]
    ENV = LocalEnvironment(processes=2)
# Use path to your Fast Downward repository.
REPO = os.environ["DOWNWARD_REPO"]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# If REVISION_CACHE is None, the default ./data/revision-cache is used.
REVISION_CACHE = os.environ.get("DOWNWARD_REVISION_CACHE")
VCS = cached_revision.get_version_control_system(REPO)
REV = "default" if VCS == cached_revision.MERCURIAL else "main"

exp = FastDownwardExperiment(environment=ENV, revision_cache=REVISION_CACHE)

# Add built-in parsers to the experiment.
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_algorithm("blind", REPO, REV, ["--search", "astar(blind())"])
exp.add_algorithm("lmcut", REPO, REV, ["--search", "astar(lmcut())"])

# Add step that writes experiment files to disk.
exp.add_step("build", exp.build)

# Add step that executes all runs.
exp.add_step("start", exp.start_runs)
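
# Typical closing steps, following the pattern of the other examples on
# this page (a sketch, not part of this snippet):
#
#     exp.add_fetcher(name="fetch")
#     exp.add_report(AbsoluteReport(attributes=["coverage", "cost"]))
#     exp.run_steps()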
Example #6
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = args.revision
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.path, exp.eval_dir] if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # If the regression check above aborts, the following cleanup steps
        # are skipped, so the directories are kept for inspection.
        exp.add_step('rm-exp-dir', shutil.rmtree, exp.path)
        exp.add_step('rm-eval-dir', shutil.rmtree, exp.eval_dir)

    exp.run_steps()
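
# Hypothetical shape of the get_exp_dir() helper called above (the name
# appears in the snippet, the body is an assumption):
#
#     def get_exp_dir(name, test):
#         return os.path.join("data", "%s-%s" % (name, test))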
Example #7
    def __init__(self,
                 revisions,
                 configs,
                 suite,
                 grid_priority=None,
                 path=None,
                 test_suite=None,
                 email=None,
                 **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If *revisions* is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        *configs* must be a non-empty list of IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(..., suite=suites.suite_all())
            IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(..., suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(..., grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])

        If *email* is specified, it should be an email address. When the
        experiment is run on the cluster, a notification is sent to this
        address once the experiment finishes.
        """

        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
                                                    email=email)

        path = path or get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        repo = get_repo_base()
        for rev in revisions:
            for config in configs:
                self.add_algorithm(get_algo_nick(rev, config.nick),
                                   repo,
                                   rev,
                                   config.component_options,
                                   build_options=config.build_options,
                                   driver_options=config.driver_options)

        self.add_suite(os.path.join(repo, "benchmarks"), suite)

        self._revisions = revisions
        self._configs = configs
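
    # A combined usage sketch for this constructor (all values are
    # illustrative):
    #
    #     exp = IssueExperiment(
    #         revisions=["issue123-base", "issue123-v1"],
    #         configs=[IssueConfig("ff", ["--search", "eager_greedy(ff())"])],
    #         suite=["gripper:prob01.pddl"],
    #         grid_priority=-500,
    #         email="*****@*****.**",
    #     )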
Example #8
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = cached_revision.get_global_rev(REPO, rev=args.revision)
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:

        def result_handler(success):
            regression_test_handler(args.test, rev, success)

        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS,
                                             result_handler),
                       name='regression-check')

    exp.run_steps()
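
# Hypothetical shape of the regression_test_handler callback wrapped above.
# RegressionCheckReport invokes result_handler with a single boolean, which
# the closure forwards together with the test name and revision:
#
#     def regression_test_handler(test, rev, success):
#         if not success:
#             print("Regression in revision %s for test %s" % (rev, test))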
Example #9
        return min_cost / cost

    def store_costs(self, run):
        cost = run.get('cost')
        if cost is not None:
            assert run['coverage']
            self.tasks_to_costs[self._get_task(run)].append(cost)
        return True

    def add_quality(self, run):
        run['quality'] = self._compute_quality(
            run.get('cost'), self.tasks_to_costs[self._get_task(run)])
        return run


exp = FastDownwardExperiment(environment=ENV, revision_cache=REV_CACHE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, ['gripper:prob01.pddl', 'miconic:s1-0.pddl'])
exp.add_algorithm('iter-hadd', REPO, REV, [
    '--heuristic', 'hadd=add()', '--search',
    'iterated([lazy_greedy([hadd]),lazy_wastar([hadd])],repeat_last=true)'
])
exp.add_algorithm('ipdb',
                  REPO,
                  REV, ["--search", "astar(ipdb())"],
                  driver_options=['--search-time-limit', 10])
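# Note: "--search-time-limit" is forwarded to the Fast Downward driver and
# limits only the search component; a bare number is interpreted as seconds.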
Example #10
    def __init__(self, revisions, configs, suite, grid_priority=None,
                 path=None, test_suite=None, email=None, processes=1,
                 **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If *revisions* is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        *configs* must be a non-empty list of IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(..., suite=suites.suite_all())
            IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(..., suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(..., grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])

        If *email* is specified, it should be an email address. When the
        experiment is run on the cluster, a notification is sent to this
        address once the experiment finishes.
        """

        if is_test_run():
            kwargs["environment"] = LocalEnvironment(processes=processes)
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(
                priority=grid_priority, email=email)

        path = path or get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        repo = get_repo_base()
        for rev in revisions:
            for config in configs:
                self.add_algorithm(
                    get_algo_nick(rev, config.nick),
                    repo,
                    rev,
                    config.component_options,
                    build_options=config.build_options,
                    driver_options=config.driver_options)

        self.add_suite(os.path.join(repo, "benchmarks"), suite)

        self._revisions = revisions
        self._configs = configs
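
    # Unlike the otherwise similar constructor in Example #7, this variant
    # accepts *processes* for local test runs. Usage sketch (values are
    # illustrative):
    #
    #     exp = IssueExperiment(revisions=["issue123"], configs=CONFIGS,
    #                           suite=["gripper"], processes=4)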
Example #11
VANILLAREPO = '/mnt/data_server/schaefer/fast-downward'
BENCHMARK = '/mnt/data_server/schaefer/asnetsfastdownward/benchmarks/evaluation_domains'
ENV = OracleGridEngineEnvironment(queue='all.q@@fai0x')
REVISION_CACHE = os.path.expanduser('~/lab/revision-cache')

SUITE = [
    'turnandopen', 'tyreworld', 'sokoban', 'hanoi', 'floortile', 'blocksworld',
    'elevator', 'parcprinter'
]

ATTRIBUTES = [
    'unsolvable', 'memory', 'total_search_time', 'total_time', 'plan_length',
    'cost', 'coverage', 'error'
]

exp = FastDownwardExperiment(environment=ENV, revision_cache=REVISION_CACHE)
exp.add_suite(BENCHMARK, SUITE)

# baseline planners:
# baseline 1: LAMA-2011 (executed with vanilla fast-downward)
exp.add_algorithm(
    "lama",
    VANILLAREPO,
    "default", [],
    build_options=["release64"],
    driver_options=["--build", "release64", "--alias", "seq-sat-lama-2011"])

# baseline 2: A* with LM-Cut
exp.add_algorithm("astar_lmcut",
                  NEURALREPO,
                  "default", ["--search", "astar(lmcut())"],
Example #12
    def _add_runs(self):
        FastDownwardExperiment._add_runs(self)
        for run in self.runs:
            command = run.commands["planner"]
            command[1]["soft_stdout_limit"] = self.soft_limit
            command[1]["hard_stdout_limit"] = self.hard_limit
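
    # Hypothetical usage, combining this override with the __init__ from
    # Example #3 (the subclass name is assumed):
    #
    #     exp = StdoutLimitExperiment(soft_limit=2048, hard_limit=20 * 1024)
    #     exp.add_step("build", exp.build)
    #     exp.run_steps()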