Example #1
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = args.revision
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.path, exp.eval_dir] if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step('rm-exp-dir', shutil.rmtree, exp.path)
        exp.add_step('rm-eval-dir', shutil.rmtree, exp.eval_dir)

    exp.run_steps()
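The helper get_exp_dir() used above is defined elsewhere in the original script. A minimal sketch of what it might look like, assuming experiment directories live in a 'data' directory next to the script (the layout and names are assumptions, not part of the original):

import os

def get_exp_dir(name, test):
    # Hypothetical helper: place experiment directories under <script dir>/data,
    # named after the revision and the test suite, e.g. "baseline-nightly".
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(script_dir, 'data', name + '-' + test)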
Example #2
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = cached_revision.get_global_rev(REPO,
                                             vcs=cached_revision.MERCURIAL,
                                             rev=args.revision)
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)

    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:

        def result_handler(success):
            regression_test_handler(args.test, rev, success)

        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        merge=True,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS,
                                             result_handler),
                       name='regression-check')

    exp.run_steps()
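Both examples call parse_custom_args(), and this one also passes regression_test_handler() to the report; neither function is shown. A minimal sketch of both, assuming Lab exposes its argument parser as lab.experiment.ARGPARSER (the option names and the handler body are assumptions):

import logging

from lab.experiment import ARGPARSER


def parse_custom_args():
    # Extend Lab's own parser so these options coexist with the step
    # arguments that exp.run_steps() understands.
    ARGPARSER.add_argument('--test', choices=['nightly', 'weekly'],
                           default='nightly',
                           help='which benchmark suite and configs to use')
    ARGPARSER.add_argument('revision',
                           help="Fast Downward revision or 'baseline'")
    return ARGPARSER.parse_args()


def regression_test_handler(test, rev, success):
    # Hypothetical callback: only log the outcome of the regression check.
    if success:
        logging.info('No regression found for %s (%s test).' % (rev, test))
    else:
        logging.error('Regression found for %s (%s test).' % (rev, test))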
Example #3
# (The snippet starts in the middle of an exp.add_algorithm() call; these
# are its trailing arguments, which select a portfolio via driver_options.)
    [],
    driver_options=[
        "--portfolio",
        os.path.join(REPO, "driver", "portfolios", "seq_opt_fdss_1.py"),
    ],
)

# Add step that writes experiment files to disk.
exp.add_step("build", exp.build)

# Add step that executes all runs.
exp.add_step("start", exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

exp.add_parse_again_step()


# Define a filter.
def only_two_algorithms(run):
    return run["algorithm"] in ["lama11", "iter-hadd"]


# Showcase some fetcher options.


def eval_dir(num):
    return os.path.join(exp.eval_dir, "test%d" % num)
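The filter and the eval_dir() helper defined above are only used further down in the original script. A sketch of how they could be passed to fetcher steps via Lab's filter and filter_* options (the step names are assumptions; "lama11" is taken from the filter above):

# Fetch only the runs accepted by the filter into a separate eval dir.
exp.add_fetcher(dest=eval_dir(1), filter=only_two_algorithms,
                name="fetcher-test1")

# Fetch only the runs of a single algorithm.
exp.add_fetcher(dest=eval_dir(2), filter_algorithm="lama11",
                name="fetcher-test2")

# Merge the filtered results back into the default eval dir.
exp.add_fetcher(src=eval_dir(1), merge=True, name="fetcher-test3")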
Example #4
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_algorithm("blind", REPO, REV, ["--search", "astar(blind())"])
exp.add_algorithm("lmcut", REPO, REV, ["--search", "astar(lmcut())"])

# Add step that writes experiment files to disk.
exp.add_step("build", exp.build)

# Add step that executes all runs.
exp.add_step("start", exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

# Add report step (AbsoluteReport is the standard report).
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile="report.html")

# Add scatter plot report step.
exp.add_report(
    ScatterPlotReport(attributes=["expansions"], filter_algorithm=["blind", "lmcut"]),
    outfile="scatterplot.png",
)

# Parse the commandline and show or run experiment steps.
exp.run_steps()
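The snippet starts after the experiment object has been created. A minimal sketch of the setup it assumes; the repository and benchmark paths, revision, suite, and attribute list are placeholders, not part of the original:

import os

from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.scatter import ScatterPlotReport
from lab.environments import LocalEnvironment

# Placeholder values; the original script defines its own.
REPO = os.environ["DOWNWARD_REPO"]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REV = "main"
SUITE = ["gripper:prob01.pddl", "miconic:s1-0.pddl"]
ATTRIBUTES = ["coverage", "expansions", "total_time"]

exp = FastDownwardExperiment(environment=LocalEnvironment(processes=2),
                             revision_cache=None)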
Example #5
# Add built-in parsers to the experiment.
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_algorithm('blind', REPO, 'default', ['--search', 'astar(blind())'])
exp.add_algorithm('lmcut', REPO, 'default', ['--search', 'astar(lmcut())'])

# Add step that writes experiment files to disk.
exp.add_step('build', exp.build)

# Add step that executes all runs.
exp.add_step('start', exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name='fetch')

# Add report step (AbsoluteReport is the standard report).
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile='report.html')

# Add scatter plot report step.
exp.add_report(ScatterPlotReport(attributes=["expansions"],
                                 filter_algorithm=["blind", "lmcut"]),
               outfile='scatterplot.png')

# Parse the commandline and show or run experiment steps.
exp.run_steps()
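Once the script is saved, exp.run_steps() lets the steps be selected from the command line. A typical session might look like the following, assuming Lab's usual step selection by name, number, or --all (the file name is a placeholder):

# ./experiment.py build       # write the experiment files to disk
# ./experiment.py start       # execute all runs
# ./experiment.py fetch       # collect results into the *-eval directory
# ./experiment.py --all       # run every step, including the reports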
Example #6
# (The snippet starts in the middle of an exp.add_algorithm() call; these
# are its trailing arguments, which select a portfolio via driver_options.)
                  REV, [],
                  driver_options=[
                      '--portfolio',
                      os.path.join(REPO, 'driver', 'portfolios',
                                   'seq_opt_fdss_1.py')
                  ])

# Add step that writes experiment files to disk.
exp.add_step('build', exp.build)

# Add step that executes all runs.
exp.add_step('start', exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name='fetch')

exp.add_parse_again_step()


# Define a filter.
def only_two_algorithms(run):
    return run['algorithm'] in ['lama11', 'iter-hadd']


# Showcase some fetcher options.


def eval_dir(num):
    return os.path.join(exp.eval_dir, 'test%d' % num)
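As in Example #3, the snippet stops before the filter and eval_dir() are used. A sketch of how the same filter could also be applied to report steps; the attribute list and step names are assumptions:

# Report over the default eval dir, restricted to the two algorithms.
exp.add_report(AbsoluteReport(attributes=['coverage', 'cost'],
                              filter=only_two_algorithms),
               name='report-filtered')

# Report over one of the extra eval dirs filled by a filtered fetcher.
exp.add_report(AbsoluteReport(attributes=['coverage', 'cost']),
               eval_dir=eval_dir(1),
               name='report-test1')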