def main(revisions=None):
    """Run a comparison experiment for the cea/cg/lmcount eager-greedy configs.

    *revisions* is forwarded to IssueExperiment; the experiment is a grid
    run (grid_priority=-10) with a single-task local test suite.
    """
    SUITE = suites.suite_satisficing_with_ipc11()
    CONFIGS = {
        'cea': ['--search', 'eager_greedy(cea())'],
        'cg': ['--search', 'eager_greedy(cg())'],
        'lmcount': ['--search', 'eager_greedy(lmcount(lm_rhw()))'],
    }
    exp = common_setup.IssueExperiment(
        revisions=revisions,
        configs=CONFIGS,
        suite=SUITE,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
        grid_priority=-10,
    )
    # Bug fix: the original appended directly to exp.DEFAULT_TABLE_ATTRIBUTES,
    # which is a shared (class-level) list -- every append permanently mutated
    # the default for all experiments. Copy it before extending.
    attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)
    attributes.append('landmarks')
    attributes.append('landmarks_generation_time')
    exp.add_comparison_table_step(attributes=attributes)
    exp()
def main(revisions=None):
    """Build and run the issue627 base-vs-v5 satisficing experiment.

    Adds a comparison table plus relative scatter plots (memory and
    total_time) for each configuration.
    """
    suite = suites.suite_satisficing_with_ipc11()
    configs = {
        IssueConfig('lazy-greedy-ff', [
            '--heuristic', 'h=ff()',
            '--search', 'lazy_greedy(h, preferred=h)',
        ]),
        IssueConfig('lama-first', [],
                    driver_options=['--alias', 'lama-first']),
        IssueConfig('eager_greedy_cg', [
            '--heuristic', 'h=cg()',
            '--search', 'eager_greedy(h, preferred=h)',
        ]),
        IssueConfig('eager_greedy_cea', [
            '--heuristic', 'h=cea()',
            '--search', 'eager_greedy(h, preferred=h)',
        ]),
    }

    exp = IssueExperiment(
        benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )

    exp.add_comparison_table_step()

    # One scatter plot per (config, attribute) pair; the memory plot is
    # emitted before the total_time plot for each config, as before.
    for config in configs:
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "issue627-v3-base-%s" % config.nick,
                        "issue627-v5-%s" % config.nick,
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue627_base_v5_sat_%s_%s.png' % (
                    attribute, config.nick))

    exp()
def main(revisions=None):
    """Build and run the issue416 base-vs-v2 experiment.

    Adds a comparison table plus relative scatter plots (memory and
    total_time) for each configuration.
    """
    suite = suites.suite_satisficing_with_ipc11()
    configs = {
        IssueConfig('seq_sat_lama_2011', [],
                    driver_options=['--alias', 'seq-sat-lama-2011']),
        IssueConfig('lama_first', [],
                    driver_options=['--alias', 'lama-first']),
        IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']),
    }

    exp = IssueExperiment(
        benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )

    exp.add_comparison_table_step()

    # One scatter plot per (config, attribute) pair; memory first, then
    # total_time, matching the original emission order.
    for config in configs:
        nick = config.nick
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "issue416-v2-base-%s" % nick,
                        "issue416-v2-%s" % nick,
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue416_base_v2_%s_%s.png' % (attribute, nick))

    exp()
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup


REVS = ["issue544-base", "issue544-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()

# Cross eager/lazy greedy search with the add and ff heuristics; this
# produces the same four configurations as spelling them out by hand
# (eager_greedy_add, eager_greedy_ff, lazy_greedy_add, lazy_greedy_ff).
CONFIGS = {}
for search in ("eager", "lazy"):
    for heuristic in ("add", "ff"):
        CONFIGS["%s_greedy_%s" % (search, heuristic)] = [
            "--heuristic", "h=%s()" % heuristic,
            "--search", "%s_greedy(h, preferred=h)" % search,
        ]

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step()

exp()
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites, configs from downward.reports.compare import CompareConfigsReport import common_setup REVISIONS = ["issue462-base", "issue462-v1"] exp = common_setup.IssueExperiment( search_revisions=REVISIONS, configs=configs.default_configs_satisficing(), suite=suites.suite_satisficing_with_ipc11(), limits={"search_time": 300}, ) exp.add_absolute_report_step() exp.add_comparison_table_step() def grouped_configs_to_compare(config_nicks): grouped_configs = [] for config_nick in config_nicks: col_names = ["%s-%s" % (r, config_nick) for r in REVISIONS] grouped_configs.append((col_names[0], col_names[1], "Diff - %s" % config_nick)) return grouped_configs exp.add_report( CompareConfigsReport( compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()),
from downward.configs import default_configs_satisficing
from downward.reports.scatter import ScatterPlotReport

import common_setup


REVS = ["issue214-base", "issue214-v2"]
CONFIGS = default_configs_satisficing()

# Toggle between a quick local smoke test and the full grid experiment.
TEST_RUN = True

if TEST_RUN:
    SUITE = "gripper:prob01.pddl"
    PRIORITY = None  # "None" means local experiment
else:
    # NOTE(review): suite_satisficing_with_ipc11 is not imported in the
    # visible import block -- this branch would raise NameError as shown.
    # Presumably needs `from downward.suites import
    # suite_satisficing_with_ipc11`; confirm against the full file.
    SUITE = suite_satisficing_with_ipc11()
    PRIORITY = 0  # number means maia experiment

exp = common_setup.MyExperiment(
    grid_priority=PRIORITY,
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    # Extra parser collects the state-size attributes used in the table below.
    parsers=['state_size_parser.py'],
)

exp.add_comparison_table_step(
    attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ['bytes_per_state', 'variables', 'state_var_t_size']
)
)] = [ "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=ONE,cost_type=ONE))", "--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))", "--search", "iterated([" "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,cost_type=ONE,reopen_closed=false)," "lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,reopen_closed=false)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)]," "repeat_last=true,continue_on_fail=true)" % locals() ] SUITE = sorted( set(suites.suite_satisficing_with_ipc11()) & set(suites.suite_diverse_costs())) exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_absolute_report_step() exp()
REVS = ["issue392-v2"]
LIMITS = {"search_time": 300}

# LAMA-style iterated search template; %(randomize)s / %(pref_first)s are
# filled in per configuration below.
_SEARCH_TEMPLATE = (
    "iterated(["
    "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,cost_type=ONE,reopen_closed=false),"
    "lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,reopen_closed=false),"
    "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5),"
    "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3),"
    "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2),"
    "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)],"
    "repeat_last=true,continue_on_fail=true)")

# One configuration per combination of successor randomization and
# preferred-successors-first ordering.
CONFIGS = {}
for randomize in ["false", "true"]:
    for pref_first in ["false", "true"]:
        subst = {"randomize": randomize, "pref_first": pref_first}
        nick = ("lama-nonunit-randomize-%(randomize)s"
                "-pref_first-%(pref_first)s" % subst)
        CONFIGS[nick] = [
            "--heuristic",
            "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=ONE,cost_type=ONE))",
            "--heuristic",
            "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))",
            "--search",
            _SEARCH_TEMPLATE % subst,
        ]

# Restrict to satisficing tasks that also have diverse action costs.
SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) &
               set(suites.suite_diverse_costs()))

exp = common_setup.IssueExperiment(
    revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS)
exp.add_absolute_report_step()
exp()
def __init__(self, path, repo, opt_or_sat, rev, base_rev=None,
             use_core_configs=True, use_ipc_configs=True,
             use_extended_configs=False, **kwargs):
    """
    See :py:class:`DownwardExperiment
    <downward.experiments.DownwardExperiment>` for inherited parameters.

    The experiment will be built at *path*.

    *repo* must be the path to a Fast Downward repository. This repository
    is used to search for problem files.

    If *opt_or_sat* is 'opt', configurations for optimal planning will be
    tested on all domains suited for optimal planning. If it is 'sat',
    configurations for satisficing planning will be tested on the
    satisficing suite.

    *rev* determines the new revision to test.

    If *base_rev* is None (default), the latest revision on the branch
    default that is an ancestor of *rev* will be used.

    *use_core_configs* determines if the most common configurations are
    tested (default: True).

    *use_ipc_configs* determines if the configurations used in the IPCs
    are tested (default: True).

    *use_extended_configs* determines if some less common configurations
    are tested (default: False).
    """
    # Bug fix: the docstring promises that an explicitly supplied base_rev
    # is honored, but the original unconditionally overwrote it with the
    # common ancestor. Only compute the ancestor when none was given.
    if base_rev is None:
        base_rev = checkouts.get_common_ancestor(repo, rev)

    # Build translator/preprocessor/planner combos for both revisions.
    combos = [(Translator(repo, rev=r),
               Preprocessor(repo, rev=r),
               Planner(repo, rev=r))
              for r in (base_rev, rev)]
    DownwardExperiment.__init__(self, path, repo, combinations=combos,
                                **kwargs)

    # ------ suites and configs ------------------------------------

    if opt_or_sat == 'opt':
        self.add_suite(suite_optimal_with_ipc11())
        configs = default_configs_optimal(use_core_configs,
                                          use_ipc_configs,
                                          use_extended_configs)
    elif opt_or_sat == 'sat':
        self.add_suite(suite_satisficing_with_ipc11())
        configs = default_configs_satisficing(use_core_configs,
                                              use_ipc_configs,
                                              use_extended_configs)
    else:
        # NOTE(review): assumes lab's logging.critical aborts execution;
        # otherwise `configs` below would be unbound -- confirm.
        logging.critical('Select to test either \'opt\' or \'sat\' configurations')

    for nick, command in configs.items():
        self.add_config(nick, command)

    # ------ reports -----------------------------------------------

    # Overview table comparing the base and new revision per attribute.
    comparison = CompareRevisionsReport(base_rev, rev,
                                        attributes=COMPARED_ATTRIBUTES)
    self.add_report(comparison,
                    name='report-compare-scores',
                    outfile='report-compare-scores.html')

    # One scatter plot per (attribute, config) pair, base vs. new revision.
    for nick in configs.keys():
        config_before = '%s-%s' % (base_rev, nick)
        config_after = '%s-%s' % (rev, nick)
        for attribute in SCATTER_PLOT_ATTRIBUTES:
            name = 'scatter-%s-%s' % (attribute, nick)
            self.add_report(
                ScatterPlotReport(
                    filter_config=[config_before, config_after],
                    attributes=[attribute],
                    get_category=lambda run1, run2: run1['domain']),
                outfile=name)
#! /usr/bin/env python from standard_experiment import REMOTE, get_exp from downward import suites #from lab.reports import Attribute, avg import os.path # Set the following variables for the experiment REPO_NAME = 'fd-issue123' # revisions, e.g. ['3d6c1ccacdce'] REVISIONS = ['issue123-base'] # suites, e.g. ['gripper:prob01.pddl', 'zenotravel:pfile1'] or suites.suite_satisficing_with_ipc11() LOCAL_SUITE = ['depot:pfile1'] GRID_SUITE = suites.suite_satisficing_with_ipc11() # configs, e.g. '--search', 'astar(lmcut())' for config CONFIGS = { 'lama-2011': [ "--if-unit-cost", "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", "--search", "iterated([" " lazy_greedy([hff,hlm],preferred=[hff,hlm])," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)" " ],repeat_last=true,continue_on_fail=true)", "--if-non-unit-cost",