Example #1
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = args.revision
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.path, exp.eval_dir] if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # If the regression check above fails, run_steps() aborts, so the
        # following cleanup steps only run when there is no regression.
        exp.add_step('rm-exp-dir', shutil.rmtree, exp.path)
        exp.add_step('rm-eval-dir', shutil.rmtree, exp.eval_dir)

    exp.run_steps()
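Both this snippet and Example #2 call parse_custom_args(), which the listing does not show. A minimal hypothetical sketch, assuming the script only needs the revision and the test-suite name (the option names and choices here are guesses):

import argparse

def parse_custom_args():
    # Hypothetical reconstruction; the real helper may expose more options.
    parser = argparse.ArgumentParser()
    parser.add_argument("revision", help="planner revision to test, or 'baseline'")
    parser.add_argument(
        "--test", choices=["nightly", "weekly"], default="nightly",
        help="which benchmark suite and configurations to use")
    return parser.parse_args()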
Example #2
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = cached_revision.get_global_rev(REPO,
                                             vcs=cached_revision.MERCURIAL,
                                             rev=args.revision)
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:

        def result_handler(success):
            regression_test_handler(args.test, rev, success)

        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS,
                                             result_handler),
                       name='regression-check')

    exp.run_steps()
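Unlike Example #1, which registers explicit cleanup steps, this variant hands the outcome to regression_test_handler(), defined outside the snippet. A hypothetical sketch that mirrors Example #1's behavior, assuming RegressionCheckReport passes success=True when no regression is found:

import logging
import shutil

def regression_test_handler(test, rev, success):
    # Hypothetical sketch; directory names follow the convention visible
    # above (eval dir = experiment dir + "-eval").
    exp_dir = get_exp_dir(rev, test)
    if success:
        shutil.rmtree(exp_dir)
        shutil.rmtree(exp_dir + "-eval")
    else:
        logging.error(
            "Regression found for revision %s on the %s tests. "
            "Keeping %s for inspection.", rev, test, exp_dir)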
Example #3
        return True

    def add_quality(self, run):
        run["quality"] = self._compute_quality(
            run.get("cost"), self.tasks_to_costs[self._get_task(run)])
        return run


exp = FastDownwardExperiment(environment=ENV, revision_cache=REV_CACHE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, ["gripper:prob01.pddl", "miconic:s1-0.pddl"])
exp.add_algorithm(
    "iter-hadd",
    REPO,
    REV,
    [
        "--heuristic",
        "hadd=add()",
        "--search",
        "iterated([lazy_greedy([hadd]),lazy_wastar([hadd])],repeat_last=true)",
    ],
)
exp.add_algorithm(
    "ipdb",
    REPO,
    REV,
    ["--search", "astar(ipdb())"],
    driver_options=["--search-time-limit", 10],
)
Example #4
REPO = os.environ["DOWNWARD_REPO"]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# If REVISION_CACHE is None, the default ./data/revision-cache is used.
REVISION_CACHE = os.environ.get("DOWNWARD_REVISION_CACHE")
VCS = cached_revision.get_version_control_system(REPO)
REV = "default" if VCS == cached_revision.MERCURIAL else "main"

exp = FastDownwardExperiment(environment=ENV, revision_cache=REVISION_CACHE)

# Add built-in parsers to the experiment.
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_algorithm("blind", REPO, REV, ["--search", "astar(blind())"])
exp.add_algorithm("lmcut", REPO, REV, ["--search", "astar(lmcut())"])

# Add step that writes experiment files to disk.
exp.add_step("build", exp.build)

# Add step that executes all runs.
exp.add_step("start", exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

# Add report step (AbsoluteReport is the standard report).
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile="report.html")

# Parse the commandline and show or run experiment steps.
exp.run_steps()
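This snippet reads ENV, SUITE, and ATTRIBUTES from a preamble the listing omits. A minimal sketch of such a preamble, assuming a local run; all values are placeholders, and the cached_revision module path may differ between Lab versions:

import os

from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from lab import cached_revision
from lab.environments import LocalEnvironment

ENV = LocalEnvironment(processes=2)  # Placeholder: use a cluster environment on a grid.
SUITE = ["gripper:prob01.pddl", "miconic:s1-0.pddl"]  # Placeholder suite.
ATTRIBUTES = ["coverage", "error", "expansions", "total_time"]  # Placeholder attributes.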
Example #5
        return True

    def add_quality(self, run):
        run["quality"] = self._compute_quality(
            run.get("cost"), self.tasks_to_costs[self._get_task(run)])
        return run


exp = FastDownwardExperiment(environment=ENV, revision_cache=REV_CACHE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, ["gripper:prob01.pddl", "miconic:s1-0.pddl"])
exp.add_suite(DIR / ".." / "tests" / "benchmarks",
              ["blocks", "gripper:p01.sas"])
exp.add_algorithm(
    "iter-hadd",
    REPO,
    REV,
    [
        "--heuristic",
        "hadd=add()",
        "--search",
        "iterated([lazy_greedy([hadd]),lazy_wastar([hadd])],repeat_last=true)",
    ],
)
exp.add_algorithm(
    "ipdb",
    REPO,
    REV,
    ["--search", "astar(ipdb())"],
    driver_options=["--search-time-limit", 10],
)
exp.add_algorithm("ff", REPO, REV, ["--search", "eager_greedy([ff()])"])
exp.add_algorithm(
    "lama11",
    REPO,
    REV,
    [],
    driver_options=["--alias", "seq-sat-lama-2011", "--plan-file", "sas_plan"],
)
exp.add_algorithm("sat-fdss-1",
Example #6
BENCHMARK = '/mnt/data_server/schaefer/asnetsfastdownward/benchmarks/evaluation_domains'
ENV = OracleGridEngineEnvironment(queue='all.q@@fai0x')
REVISION_CACHE = os.path.expanduser('~/lab/revision-cache')

SUITE = [
    'turnandopen', 'tyreworld', 'sokoban', 'hanoi', 'floortile', 'blocksworld',
    'elevator', 'parcprinter'
]

ATTRIBUTES = [
    'unsolvable', 'memory', 'total_search_time', 'total_time', 'plan_length',
    'cost', 'coverage', 'error'
]

exp = FastDownwardExperiment(environment=ENV, revision_cache=REVISION_CACHE)
exp.add_suite(BENCHMARK, SUITE)

# baseline planners:
# baseline 1: LAMA-2011 (executed with vanilla fast-downward)
exp.add_algorithm(
    "lama",
    VANILLAREPO,
    "default", [],
    build_options=["release64"],
    driver_options=["--build", "release64", "--alias", "seq-sat-lama-2011"])

# baseline 2: A* with LM-Cut
exp.add_algorithm("astar_lmcut",
                  NEURALREPO,
                  "default", ["--search", "astar(lmcut())"],
                  build_options=["release64dynamic"],