Ejemplo n.º 1
0
def main():
    """Build and run a regression-test experiment for one revision.

    Resolves the revision named on the command line (the literal
    'baseline' maps to the pinned BASELINE hash), sets up a
    FastDownwardExperiment with the standard parsers and reports, and
    for non-baseline runs additionally fetches the stored baseline
    results and adds a comparison report plus a regression check.
    """
    args = parse_custom_args()

    # Map the requested revision to a cached global revision hash.
    if args.revision.lower() == 'baseline':
        rev, name = BASELINE, 'baseline'
    else:
        rev = cached_revision.get_global_rev(
            REPO, vcs=cached_revision.MERCURIAL, rev=args.revision)
        name = rev

    exp = FastDownwardExperiment(
        path=get_exp_dir(name, args.test), revision_cache=REVISION_CACHE)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_algorithm("{}-{}".format(rev, nick), REPO, rev, config)

    # Attach the standard Fast Downward result parsers.
    for parser in (exp.EXITCODE_PARSER, exp.TRANSLATOR_PARSER,
                   exp.SINGLE_SEARCH_PARSER, exp.PLANNER_PARSER):
        exp.add_parser(parser)

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')
    exp.add_report(
        AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:

        def result_handler(success):
            regression_test_handler(args.test, rev, success)

        exp.add_fetcher(
            src=get_exp_dir('baseline', args.test) + '-eval',
            dest=exp.eval_dir,
            merge=True,
            name='fetch-baseline-results')
        exp.add_report(
            AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
            name='comparison')
        exp.add_report(
            RegressionCheckReport(BASELINE, RELATIVE_CHECKS, result_handler),
            name='regression-check')

    exp.run_steps()
Ejemplo n.º 2
0
from lab.experiment import ARGPARSER

from downward import cached_revision
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport

from regression_test import Check, RegressionCheckReport

# Directory containing this script.
# NOTE(review): `os` is used below but no `import os` is visible in this
# chunk — presumably imported further up; confirm.
DIR = os.path.dirname(os.path.abspath(__file__))
# Fast Downward repository root, two levels above this script.
REPO = os.path.abspath(os.path.join(DIR, '../../'))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
EXPERIMENTS_DIR = os.path.expanduser('~/experiments')
REVISION_CACHE = os.path.expanduser('~/lab/revision-cache')

# Pinned revision that candidate revisions are compared against.
BASELINE = cached_revision.get_global_rev(REPO, rev='e5e39fcb7a71')
# Maps a test name (e.g. 'nightly') to a list of (nick, driver args) pairs.
CONFIGS = {}
CONFIGS['nightly'] = [
    ('lmcut', ['--search', 'astar(lmcut())']),
    ('lazy-greedy-ff',
     ['--heuristic', 'h=ff()', '--search', 'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-cea',
     ['--heuristic', 'h=cea()', '--search',
      'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-ff-cea', [
        '--heuristic', 'hff=ff()', '--heuristic', 'hcea=cea()', '--search',
        'lazy_greedy([hff, hcea], preferred=[hff, hcea])'
    ]),
    ('blind', ['--search', 'astar(blind())']),
    # TODO: Revert to optimal=true.
    ('lmcount-optimal', [
Ejemplo n.º 3
0
from downward import cached_revision
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport

from regression_test import Check, RegressionCheckReport

# Directory containing this script.
# NOTE(review): `os` and `tools` are used below without visible imports in
# this chunk — presumably imported further up; confirm.
DIR = os.path.dirname(os.path.abspath(__file__))
# Fast Downward repository root, two levels above this script.
REPO = os.path.abspath(os.path.join(DIR, '../../'))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
# Base directory defaults to the script location but can be overridden by
# the buildbot via the BUILDBOT_EXP_BASE_DIR environment variable.
DEFAULT_BASE_DIR = os.path.dirname(tools.get_script_path())
BASE_DIR = os.getenv("BUILDBOT_EXP_BASE_DIR", DEFAULT_BASE_DIR)
EXPERIMENTS_DIR = os.path.join(BASE_DIR, 'data')
REVISION_CACHE = os.path.join(BASE_DIR, 'revision-cache')
REGRESSIONS_DIR = os.path.join(BASE_DIR, 'regressions')

# Pinned revision that candidate revisions are compared against.
BASELINE = cached_revision.get_global_rev(REPO, rev='9e8be78bb8e5')
# Maps a test name (e.g. 'nightly') to a list of (nick, driver args) pairs.
CONFIGS = {}
CONFIGS['nightly'] = [
    ('lmcut', ['--search', 'astar(lmcut())']),
    ('lazy-greedy-ff',
     ['--evaluator', 'h=ff()', '--search', 'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-cea',
     ['--evaluator', 'h=cea()', '--search',
      'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-ff-cea', [
        '--evaluator', 'hff=ff()', '--heuristic', 'hcea=cea()', '--search',
        'lazy_greedy([hff, hcea], preferred=[hff, hcea])'
    ]),
    ('blind', ['--search', 'astar(blind())']),
    # TODO: Revert to optimal=true.
    ('lmcount-optimal', [
Ejemplo n.º 4
0
from downward import cached_revision
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport

from regression_test import Check, RegressionCheckReport

# Directory containing this script.
# NOTE(review): `os` and `tools` are used below without visible imports in
# this chunk — presumably imported further up; confirm.
DIR = os.path.dirname(os.path.abspath(__file__))
# Fast Downward repository root, two levels above this script.
REPO = os.path.abspath(os.path.join(DIR, '../../'))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
# Base directory defaults to the script location but can be overridden by
# the buildbot via the BUILDBOT_EXP_BASE_DIR environment variable.
DEFAULT_BASE_DIR = os.path.dirname(tools.get_script_path())
BASE_DIR = os.getenv("BUILDBOT_EXP_BASE_DIR", DEFAULT_BASE_DIR)
EXPERIMENTS_DIR = os.path.join(BASE_DIR, 'data')
REVISION_CACHE = os.path.join(BASE_DIR, 'revision-cache')
REGRESSIONS_DIR = os.path.join(BASE_DIR, 'regressions')

# Pinned revision that candidate revisions are compared against.
BASELINE = cached_revision.get_global_rev(REPO, rev='0b4344f8f5a8')
# Maps a test name (e.g. 'nightly') to a list of (nick, driver args) pairs.
CONFIGS = {}
CONFIGS['nightly'] = [
    ('lmcut', ['--search', 'astar(lmcut())']),
    ('lazy-greedy-ff',
     ['--evaluator', 'h=ff()', '--search', 'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-cea',
     ['--evaluator', 'h=cea()', '--search',
      'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-ff-cea', [
        '--evaluator', 'hff=ff()', '--heuristic', 'hcea=cea()', '--search',
        'lazy_greedy([hff, hcea], preferred=[hff, hcea])'
    ]),
    ('blind', ['--search', 'astar(blind())']),
    # TODO: Revert to optimal=true.
    ('lmcount-optimal', [
Ejemplo n.º 5
0
from lab.experiment import ARGPARSER

from downward import cached_revision
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport

from regression_test import Check, RegressionCheckReport


# Directory containing this script.
# NOTE(review): `os` is used below but no `import os` is visible in this
# chunk — presumably imported further up; confirm.
DIR = os.path.dirname(os.path.abspath(__file__))
# Fast Downward repository root, two levels above this script.
REPO = os.path.abspath(os.path.join(DIR, '../../'))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
EXPERIMENTS_DIR = os.path.expanduser('~/experiments')
CACHE_DIR = os.path.expanduser('~/lab')

# Pinned revision that candidate revisions are compared against.
BASELINE = cached_revision.get_global_rev(REPO, rev='eb9f8c86918f')
# Maps a test name ('nightly'/'weekly') to a list of (nick, driver args)
# pairs used as the experiment's algorithms.
CONFIGS = {}
CONFIGS['nightly'] = [
    ('lmcut', ['--search', 'astar(lmcut())']),
    ('lazy-greedy-ff', ['--heuristic', 'h=ff()', '--search', 'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-cea', ['--heuristic', 'h=cea()', '--search', 'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-ff-cea', ['--heuristic', 'hff=ff()', '--heuristic',  'hcea=cea()',
                            '--search', 'lazy_greedy([hff, hcea], preferred=[hff, hcea])']),
    ('blind', ['--search', 'astar(blind())']),
    # TODO: Revert to optimal=true.
    ('lmcount-optimal', ['--search',
        'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=false))']),
]
# The weekly run uses the same configurations as the nightly run.
CONFIGS['weekly'] = CONFIGS['nightly']

SUITES = {
Ejemplo n.º 6
0
from lab.experiment import ARGPARSER

from downward import cached_revision
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport

from regression_test import Check, RegressionCheckReport

# Directory containing this script.
# NOTE(review): `os` is used below but no `import os` is visible in this
# chunk — presumably imported further up; confirm.
DIR = os.path.dirname(os.path.abspath(__file__))
# Fast Downward repository root, two levels above this script.
REPO = os.path.abspath(os.path.join(DIR, '../../'))
BENCHMARKS_DIR = os.path.join(REPO, "misc", "tests", "benchmarks")
EXPERIMENTS_DIR = os.path.expanduser('~/experiments')
REVISION_CACHE = os.path.expanduser('~/lab/revision-cache')

# Pinned revision that candidate revisions are compared against.
BASELINE = cached_revision.get_global_rev(REPO, rev='8bf3979d39d4')
# Maps a test name (e.g. 'nightly') to a list of (nick, driver args) pairs.
CONFIGS = {}
CONFIGS['nightly'] = [
    ('lmcut', ['--search', 'astar(lmcut())']),
    ('lazy-greedy-ff',
     ['--evaluator', 'h=ff()', '--search', 'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-cea',
     ['--evaluator', 'h=cea()', '--search',
      'lazy_greedy([h], preferred=[h])']),
    ('lazy-greedy-ff-cea', [
        '--evaluator', 'hff=ff()', '--heuristic', 'hcea=cea()', '--search',
        'lazy_greedy([hff, hcea], preferred=[hff, hcea])'
    ]),
    ('blind', ['--search', 'astar(blind())']),
    # TODO: Revert to optimal=true.
    ('lmcount-optimal', [