def main(revisions=None):
    """Fetch issue627 v3 and v5 evaluation data and build scatter plots.

    No planner runs are started here: results are fetched from the two
    already-evaluated data directories, "base" runs are dropped, and one
    relative scatter plot per (attribute, configuration) pair compares
    v3 against v5.
    """
    exp = IssueExperiment(benchmarks_dir=".", suite=[])
    # Fetch both evaluation datasets. BUG FIX: the original used
    # "filter=lambda(run): ...", the Python-2-only tuple-parameter lambda
    # form, which is a SyntaxError under Python 3; "lambda run:" is the
    # portable spelling with identical behavior.
    for eval_dir in ("issue627-v3-eval", "issue627-v5-eval"):
        exp.add_fetcher(
            os.path.join(get_script_dir(), "data", eval_dir),
            filter=lambda run: "base" not in run["config"],
        )
    for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb',
                        'astar-cegar-original', 'astar-cegar-lm-goals']:
        # One plot per attribute; outfile names are identical to the
        # original hard-coded pair ("..._memory_..." / "..._total_time_...").
        for attribute in ["memory", "total_time"]:
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=["issue627-v3-%s" % config_nick,
                                   "issue627-v5-%s" % config_nick],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue627_v3_v5_%s_%s.png' % (attribute, config_nick)
            )
    exp()
def main(revisions=None):
    """Satisficing-track experiment for issue627: v3-base vs. v5."""
    benchmark_suite = suites.suite_satisficing_with_ipc11()
    configs = {
        IssueConfig('lazy-greedy-ff', [
            '--heuristic', 'h=ff()',
            '--search', 'lazy_greedy(h, preferred=h)']),
        IssueConfig('lama-first', [],
                    driver_options=['--alias', 'lama-first']),
        IssueConfig('eager_greedy_cg', [
            '--heuristic', 'h=cg()',
            '--search', 'eager_greedy(h, preferred=h)']),
        IssueConfig('eager_greedy_cea', [
            '--heuristic', 'h=cea()',
            '--search', 'eager_greedy(h, preferred=h)']),
    }
    exp = IssueExperiment(
        benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
        revisions=revisions,
        configs=configs,
        suite=benchmark_suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    # One relative scatter plot per (configuration, attribute) pair,
    # comparing issue627-v3-base against issue627-v5.
    for config in configs:
        for attribute in ("memory", "total_time"):
            report = RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=[
                    "issue627-v3-base-%s" % config.nick,
                    "issue627-v5-%s" % config.nick,
                ],
                get_category=lambda run1, run2: run1.get("domain"),
            )
            exp.add_report(
                report,
                outfile='issue627_base_v5_sat_%s_%s.png'
                        % (attribute, config.nick))
    exp()
def main(revisions=None):
    """Optimal-track experiment for issue627: v3-base vs. v3."""
    benchmark_suite = suites.suite_optimal_with_ipc11()
    configs = {
        IssueConfig('astar-blind', ['--search', 'astar(blind())']),
        IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
        IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
        IssueConfig('astar-cegar-original', [
            '--search',
            'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']),
        IssueConfig('astar-cegar-lm-goals', [
            '--search',
            'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']),
    }
    exp = IssueExperiment(
        benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
        revisions=revisions,
        configs=configs,
        suite=benchmark_suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    # Scatter plots: base vs. v3 of the v3 branch for each configuration.
    for config in configs:
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "issue627-v3-base-%s" % config.nick,
                        "issue627-v3-%s" % config.nick,
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue627_base_v3_%s_%s.png'
                        % (attribute, config.nick))
    exp()
def main(revisions=None):
    """Optimal-track experiment for issue416: v2-base vs. v2."""
    benchmark_suite = suites.suite_optimal_with_ipc11()
    configs = {
        IssueConfig('astar-blind', ['--search', 'astar(blind())']),
        IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
        IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']),
        IssueConfig('astar-seq_opt_bjolp', [
            '--search',
            'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true), mpd=true)']),
    }
    exp = IssueExperiment(
        benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
        revisions=revisions,
        configs=configs,
        suite=benchmark_suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    for config in configs:
        nick = config.nick
        # Memory and total-time scatter plots, base vs. v2.
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=["issue416-v2-base-%s" % nick,
                                   "issue416-v2-%s" % nick],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue416_base_v2_%s_%s.png' % (attribute, nick))
    exp()
def main(revisions=None):
    """Compare blind search with and without stubborn-set pruning.

    Configurations: plain blind A*, blind A* with simple stubborn sets,
    and blind A* with EC stubborn sets, over the optimal STRIPS suite.
    """
    benchmarks_dir = os.path.expanduser('~/projects/downward/benchmarks')
    suite = suites.suite_optimal_strips()
    configs = {
        IssueConfig('astar-blind', ['--search', 'astar(blind())']),
        IssueConfig('astar-blind-sss', [
            '--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
        IssueConfig('astar-blind-ssec', [
            '--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
    }
    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    # Scatter plots are only generated when matplotlib is importable
    # (the module-level flag is set elsewhere in this file).
    if matplotlib:
        for attribute in ["memory", "total_time"]:
            for config in configs:
                exp.add_report(
                    RelativeScatterPlotReport(
                        attributes=[attribute],
                        filter_config=["{}-{}".format(rev, config.nick)
                                       for rev in revisions],
                        get_category=lambda run1, run2: run1.get("domain"),
                    ),
                    outfile="{}-{}-{}.png".format(
                        exp.name, attribute, config.nick)
                )
    exp()
def main(revisions=None):
    """Timing experiment: blind A* under a 5-minute search time limit."""
    configs = {
        IssueConfig(
            'astar-blind',
            ['--search', 'astar(blind())'],
            driver_options=['--search-time-limit', '5m']),
    }
    exp = IssueExperiment(
        benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks'),
        revisions=revisions,
        configs=configs,
        suite=suites.suite_optimal_with_ipc11(),
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    # Single scatter plot: total_time of the two revisions of astar-blind.
    attribute = "total_time"
    config_nick = 'astar-blind'
    algorithms = ["{}-{}".format(rev, config_nick) for rev in revisions]
    scatter = RelativeScatterPlotReport(
        attributes=[attribute],
        filter_config=algorithms,
        get_category=lambda run1, run2: run1.get("domain"),
    )
    exp.add_report(
        scatter,
        outfile="{}-{}-{}.png".format(exp.name, attribute, config_nick))
    exp()
def main(revisions=None):
    """Compare issue499 base and v1 on A* with the LM-Cut heuristic."""
    configs = {
        IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
    }
    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suites.suite_optimal_with_ipc11(),
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    # All three plots compare the same algorithm pair; only the plotted
    # attribute (and hence the outfile name) varies.
    compared = ["issue499-base-astar-lmcut", "issue499-v1-astar-lmcut"]
    for attribute in ("memory", "total_time", "expansions_until_last_jump"):
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=compared,
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile='issue499_base_v1_%s.png' % attribute
        )
    exp()
def main(revisions=None):
    """Satisficing-track experiment for issue416: v2-base vs. v2."""
    configs = {
        IssueConfig('seq_sat_lama_2011', [],
                    driver_options=['--alias', 'seq-sat-lama-2011']),
        IssueConfig('lama_first', [],
                    driver_options=['--alias', 'lama-first']),
        IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']),
    }
    exp = IssueExperiment(
        benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/",
        revisions=revisions,
        configs=configs,
        suite=suites.suite_satisficing_with_ipc11(),
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    for config in configs:
        nick = config.nick
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=["issue416-v2-base-%s" % nick,
                                   "issue416-v2-%s" % nick],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue416_base_v2_%s_%s.png' % (attribute, nick))
    exp()
def main(revisions=None):
    """CEGAR experiment for issue627: base vs. v1."""
    configs = {
        IssueConfig(
            'astar-cegar-original',
            ['--search', 'astar(cegar(subtasks=[original()]))']),
        IssueConfig(
            'astar-cegar-lm-goals',
            ['--search', 'astar(cegar(subtasks=[landmarks(),goals()]))']),
    }
    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suites.suite_optimal_with_ipc11(),
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    for config in configs:
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=["issue627-base-%s" % config.nick,
                                   "issue627-v1-%s" % config.nick],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue627_base_v1_%s_%s.png'
                        % (attribute, config.nick))
    exp()
def main(revisions=None):
    """Satisficing experiment over a hand-picked suite of ADL domains."""
    benchmarks_dir = os.path.expanduser('~/projects/downward/benchmarks')
    # Domains with conditional effects / derived predicates, chosen
    # explicitly rather than via a suites.* helper.
    suite = [
        'assembly', 'miconic-fulladl', 'openstacks',
        'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers',
        'psr-large', 'psr-middle', 'trucks',
    ]
    configs = {
        IssueConfig('lazy-greedy-ff', [
            '--heuristic', 'h=ff()',
            '--search', 'lazy_greedy(h, preferred=h)'
        ]),
        IssueConfig('lama-first', [],
                    driver_options=['--alias', 'lama-first']),
        IssueConfig('eager_greedy_cg', [
            '--heuristic', 'h=cg()',
            '--search', 'eager_greedy(h, preferred=h)'
        ]),
        IssueConfig('eager_greedy_cea', [
            '--heuristic', 'h=cea()',
            '--search', 'eager_greedy(h, preferred=h)'
        ]),
    }
    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_comparison_table_step()
    # Scatter plots only when matplotlib is available.
    if matplotlib:
        for attribute in ["memory", "total_time"]:
            for config in configs:
                exp.add_report(RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "{}-{}".format(rev, config.nick)
                        for rev in revisions
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ), outfile="{}-{}-{}.png".format(
                    exp.name, attribute, config.nick))
    exp()
def addScatterPlot(attrib, algorithm, compare="random"):
    """Add a relative scatter plot comparing *algorithm* against *compare*.

    The output filename encodes the attribute and the algorithm; the
    comparison algorithm is appended only when it differs from the
    default "random".
    """
    name_parts = ['scatter', attrib, algorithm]
    if compare != "random":
        name_parts.append(compare)
    filename = '-'.join(name_parts)
    report = RelativeScatterPlotReport(
        attributes=[attrib],
        filter_algorithm=[compare, algorithm],
        xlim_left=1e-1,
        ylim_bottom=1e-4,
        ylim_top=1e4,
        tick_size=14,
        label_size=20,
        title_size=24)
    exp.add_report(report, outfile=filename + '.png')
def main(revisions=None):
    """Short-time-limit smoke experiment over the full benchmark suite."""
    benchmarks_dir = os.path.expanduser('~/repos/downward/benchmarks')
    suite = suites.suite_all()
    configs = {
        IssueConfig('blind', ['--search', 'astar(blind())'],
                    driver_options=['--search-time-limit', '60s']),
        IssueConfig('lama-first', [], driver_options=[
            '--alias', 'lama-first', '--search-time-limit', '60s'
        ]),
    }
    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    # NOTE(review): this appends to the shared class-level default list
    # in place, and `attributes` is never passed to any report below —
    # presumably add_comparison_table_step() picks up the mutated default.
    # Confirm against IssueExperiment before relying on it.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.append('translator_*')
    exp.add_comparison_table_step()
    if matplotlib:
        for attribute in ["memory", "total_time"]:
            for config in configs:
                exp.add_report(RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "{}-{}".format(rev, config.nick)
                        for rev in revisions
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ), outfile="{}-{}-{}.png".format(
                    exp.name, attribute, config.nick))
    exp()
if rev.endswith("base") and config.nick.endswith("dynamic"): continue exp.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() for attribute in ["total_time"]: for algo1, algo2 in [("issue662-base-astar-lmcut-static", "issue662-v1-astar-lmcut-static"), ("issue662-v1-astar-lmcut-static", "issue662-v1-astar-lmcut-dynamic")]: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=[algo1, algo2], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, algo1, algo2) ) exp.run_steps()
import common_setup

REVS = ["issue560-base", "issue560-v1"]
SUITE = suites.suite_all()

# We are only interested in the preprocessing here and will only run the
# first steps of the experiment.
CONFIGS = {
    "astar_blind": ["--search", "astar(blind())"],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
)

# Relative scatter plot of preprocessor wall-clock time, base vs. v1,
# categorized by domain.
preprocess_plot = RelativeScatterPlotReport(
    attributes=["preprocess_wall_clock_time"],
    get_category=lambda run1, run2: run1.get("domain"),
)
exp.add_report(
    preprocess_plot,
    outfile='issue560_base_v1_preprocess_wall_clock_time.png'
)
exp.add_absolute_report_step(attributes=["preprocess_wall_clock_time"])

exp()
# Single CEGAR configuration, compared between the issue632 base and v1.
configs = [
    IssueConfig(
        "cegar-10K-original",
        ["--search",
         "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]),
]
revisions = ["issue632-base", "issue632-v1"]

exp = IssueExperiment(
    revisions=revisions,
    configs=configs,
    suite=suites.suite_optimal_with_ipc11(),
    test_suite=["depot:pfile1"],
    email="*****@*****.**",
)
exp.add_comparison_table_step()

# One scatter plot per (attribute, configuration) combination.
for attribute in ["memory", "total_time"]:
    for config in configs:
        algorithms = ["{}-{}".format(rev, config.nick) for rev in revisions]
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=algorithms,
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
        )
exp()
def main(revisions=None):
    """Merge-and-shrink experiment for issue604 (base vs. v1).

    Nine M&S configurations: three merge strategies (reverse level,
    CG/goal level, DFP) crossed with three shrink strategies
    (bisimulation-50k, greedy bisimulation-unbounded, f-preserving-50k).
    """
    suite = suites.suite_optimal_with_ipc11()
    configs = {
        IssueConfig('rl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('cggl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('dfp-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('rl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('cggl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('dfp-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('rl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
        IssueConfig('cggl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
        IssueConfig('dfp-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
    }
    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    # Extra parser for merge-and-shrink statistics in the search output.
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])
    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
    actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
    search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    # NOTE(review): this extends the shared class-level attribute list in
    # place; `attributes` itself is not passed to the comparison step —
    # presumably the step reads the mutated default. Confirm.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.extend(extra_attributes)
    exp.add_comparison_table_step()
    # Memory scatter plots for the two greedy-bisimulation configs only.
    exp.add_report(RelativeScatterPlotReport(
        attributes=["memory"],
        filter_config=["issue604-base-dfp-ginf", "issue604-v1-dfp-ginf"],
        get_category=lambda run1, run2: run1.get("domain"),
    ), outfile='issue604_base_v1_memory_dfp.png')
    exp.add_report(RelativeScatterPlotReport(
        attributes=["memory"],
        filter_config=["issue604-base-rl-ginf", "issue604-v1-rl-ginf"],
        get_category=lambda run1, run2: run1.get("domain"),
    ), outfile='issue604_base_v1_memory_rl.png')
    exp()
# Local test setup for issue925: run the default test suite with the
# standard Lab parsers and compare all revision pairs.
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

# Standard Lab parsers: exit codes, translator stats, search stats,
# overall planner stats.
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_comparison_table_step()

# One total-time scatter plot per revision pair and per LP-based config.
for r1, r2 in combinations(REVISIONS, 2):
    for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]:
        exp.add_report(RelativeScatterPlotReport(
            attributes=["total_time"],
            filter_algorithm=["%s-%s" % (r, nick) for r in [r1, r2]],
            get_category=lambda run1, run2: run1["domain"]),
            outfile="issue925-v1-total-time-%s-%s-%s.png" % (r1, r2, nick))

exp.run_steps()
def main(revisions=None):
    """DFP merge-and-shrink experiment over the optimal STRIPS suite.

    Three DFP configurations: bisimulation-50k, greedy bisimulation
    (unbounded states), and f-preserving shrinking with 50k states.
    """
    benchmarks_dir = os.path.expanduser('~/repos/downward/benchmarks')
    suite = suites.suite_optimal_strips()
    configs = {
        IssueConfig('dfp-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'
        ]),
        IssueConfig('dfp-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'
        ]),
        IssueConfig('dfp-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))'
        ]),
    }
    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    # Extra parser for merge-and-shrink statistics.
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])
    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
    actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
    search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    # NOTE(review): extends the shared class-level default list in place;
    # `attributes` is not passed on explicitly. Confirm the comparison
    # step reads the mutated default.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.extend(extra_attributes)
    exp.add_comparison_table_step()
    if matplotlib:
        for attribute in ["memory", "total_time"]:
            for config in configs:
                exp.add_report(RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "{}-{}".format(rev, config.nick)
                        for rev in revisions
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ), outfile="{}-{}-{}.png".format(
                    exp.name, attribute, config.nick))
    exp()
# -*- coding: utf-8 -*-

from downward import suites

import common_setup
from relativescatter import RelativeScatterPlotReport

REVS = ["issue67-v4-base", "issue67-v4"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    "astar_blind": ["--search", "astar(blind())"],
    "astar_lmcut": ["--search", "astar(lmcut())"],
    "astar_lm_zg": [
        "--search",
        "astar(lmcount(lm_zg(), admissible=true, optimal=true))"],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
)
exp.add_comparison_table_step()

# Aggregate total-time scatter plot over all configurations, with one
# point category per domain.
total_time_plot = RelativeScatterPlotReport(
    attributes=["total_time"],
    get_category=lambda run1, run2: run1.get("domain"),
)
exp.add_report(total_time_plot, outfile='issue67-v4-total-time.png')

exp()
search, build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="*****@*****.**") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_report(RelativeScatterPlotReport( attributes=["search_time"], filter_algorithm=["issue688-v3-base-blind-release32", "issue688-v3-blind-release32"], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-blind-search_time.png".format(exp.name)) exp.run_steps()
def main(revisions=None):
    """Compare LP-solver builds (OSI x CPLEX versions) on potential heuristics.

    Builds every compatible (OSI, CPLEX) combination for three potential
    heuristics, then plots OSI103-vs-OSI107 (at CPLEX 12.6.3) and
    CPLEX1251-vs-1263 (at OSI 1.03) for each heuristic.
    """
    benchmarks_dir = os.path.expanduser('~/projects/downward/benchmarks')
    suite = suites.suite_optimal()
    configs = []
    for osi in ['103', '107']:
        for cplex in ['1251', '1263']:
            if osi == '107' and cplex == '1251':
                # incompatible versions
                continue
            configs += [
                IssueConfig(
                    'astar_initial_state_potential_OSI%s_CPLEX%s' % (osi, cplex),
                    ['--search', 'astar(initial_state_potential())'],
                    build_options=[
                        'issue680_OSI%s_CPLEX%s' % (osi, cplex)
                    ],
                    driver_options=[
                        '--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)
                    ]),
                IssueConfig(
                    'astar_sample_based_potentials_OSI%s_CPLEX%s' % (osi, cplex),
                    ['--search', 'astar(sample_based_potentials())'],
                    build_options=[
                        'issue680_OSI%s_CPLEX%s' % (osi, cplex)
                    ],
                    driver_options=[
                        '--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)
                    ]),
                IssueConfig(
                    'astar_all_states_potential_OSI%s_CPLEX%s' % (osi, cplex),
                    ['--search', 'astar(all_states_potential())'],
                    build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)],
                    driver_options=[
                        '--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)
                    ]),
            ]
    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    # NOTE(review): `attributes` is assigned but never used below —
    # possibly leftover from a removed report step.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    # Restrict reports to STRIPS domains even though the run suite is wider.
    domains = suites.suite_optimal_strips()
    exp.add_absolute_report_step(filter_domain=domains)
    for attribute in ["memory", "total_time"]:
        for config in [
            'astar_initial_state_potential',
            'astar_sample_based_potentials',
            'astar_all_states_potential'
        ]:
            # OSI 1.03 vs. 1.07, both at CPLEX 12.6.3.
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=[
                    "{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi)
                    for osi in ['103', '107']
                ],
                filter_domain=domains,
                get_category=lambda run1, run2: run1.get("domain"),
            ), outfile="{}-{}-{}_CPLEX1263.png".format(
                exp.name, attribute, config))
            # CPLEX 12.5.1 vs. 12.6.3, both at OSI 1.03.
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=[
                    "{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex)
                    for cplex in ['1251', '1263']
                ],
                filter_domain=domains,
                get_category=lambda run1, run2: run1.get("domain"),
            ), outfile="{}-{}-{}_OSI103.png".format(
                exp.name, attribute, config))
    exp()
# issue585: algorithms are registered individually (empty revisions/configs)
# so that each nick can point at its own repository revision.
exp = common_setup.IssueExperiment(
    revisions=[],
    configs={},
    suite=SUITE,
)
for nick, (rev, cmd) in ALGORITHMS.items():
    exp.add_algorithm(nick, REPO, rev, cmd)

# Pairwise comparison table for the algorithm pairs declared above.
exp.add_report(
    CompareConfigsReport(
        COMPARED_ALGORITHMS,
        attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES))

# Total-time scatter plots: base vs. v3 for iPDB and for genetic PDBs.
exp.add_report(RelativeScatterPlotReport(
    attributes=["total_time"],
    filter_config=["astar_ipdb_base", "astar_ipdb_v3"],
    get_category=lambda run1, run2: run1.get("domain"),
), outfile='issue585_ipdb_base_v3_total_time.png')
exp.add_report(RelativeScatterPlotReport(
    attributes=["total_time"],
    filter_config=["astar_gapdb_base", "astar_gapdb_v3"],
    get_category=lambda run1, run2: run1.get("domain"),
), outfile='issue585_gapdb_base_v3_total_time.png')

exp()
driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment(priority=0, email="*****@*****.**") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_report(RelativeScatterPlotReport( attributes=["search_time"], filter_algorithm=[ "issue688-v2-base-blind-release32", "issue688-v2-blind-release32" ], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-blind-search_time.png".format(exp.name)) exp.run_steps()
# issue547: successor-generator timing for A* with iPDB, base vs. v2.
SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    'astar_ipdb': ['--search', 'astar(ipdb())'],
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
)
# Custom parser extracts successor_generator_time and
# reopened_until_last_jump from the search log.
exp.add_search_parser("custom-parser.py")

# BUG FIX: the original read "attributes = attributes = ..." — a
# duplicated assignment target; a single assignment is intended.
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + [
    "successor_generator_time", "reopened_until_last_jump"
]
exp.add_comparison_table_step(attributes=attributes)

# Memory and search-time scatter plots per configuration, base vs. v2.
for conf in CONFIGS:
    for attr in ("memory", "search_time"):
        exp.add_report(RelativeScatterPlotReport(
            attributes=[attr],
            get_category=lambda run1, run2: run1.get("domain"),
            filter_config=["issue547-base-%s" % conf,
                           "issue547-v2-%s" % conf]),
            outfile='issue547_base_v2_%s_%s.png' % (conf, attr))

exp()
# Shrink the experiment to a local smoke test when not on the grid.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

# Scatter plots for every attribute of interest; currently a single
# revision pair (base, v1) and a single config (ehc_ff), but written as
# loops so more can be added.
for attr in ["total_time", "search_time", "memory"]:
    for rev1, rev2 in [("base", "v1")]:
        for config_nick in ["ehc_ff"]:
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attr],
                filter_algorithm=[
                    "issue700-%s-%s" % (rev1, config_nick),
                    "issue700-%s-%s" % (rev2, config_nick)
                ],
                get_category=lambda r1, r2: r1["domain"],
            ), outfile="issue700-%s-%s-%s-%s.png"
               % (config_nick, attr, rev1, rev2))

exp.run_steps()
def main(revisions=None):
    """Report-only experiment for issue655: fetch old data and compare.

    The configs carry empty command lines — they only exist so that the
    comparison report can match algorithm names in the fetched data.
    """
    benchmarks_dir = os.path.expanduser('~/repos/downward/benchmarks')
    suite = suites.suite_optimal_strips()

    # dummy configs with correct names so that comparison report works
    configs = {
        IssueConfig('rl-b50k', []),
        IssueConfig('cggl-b50k', []),
        IssueConfig('dfp-b50k', []),
        IssueConfig('rl-ginf', []),
        IssueConfig('cggl-ginf', []),
        IssueConfig('dfp-ginf', []),
        IssueConfig('rl-f50k', []),
        IssueConfig('cggl-f50k', []),
        IssueConfig('dfp-f50k', []),
    }
    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    # Merge-and-shrink statistics parser (attributes declared below).
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])
    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
    actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
    search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    # NOTE(review): extends the shared class-level default list in place;
    # `attributes` is not passed on explicitly. Confirm the comparison
    # step reads the mutated default.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.extend(extra_attributes)
    # Pull in the previously evaluated base and v1 result sets.
    exp.add_fetcher('data/issue655-base-eval')
    exp.add_fetcher('data/issue655-v1-eval')
    exp.add_comparison_table_step()
    if matplotlib:
        for attribute in ["memory", "total_time"]:
            for config in configs:
                exp.add_report(RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=[
                        "{}-{}".format(rev, config.nick)
                        for rev in revisions
                    ],
                    get_category=lambda run1, run2: run1.get("domain"),
                ), outfile="{}-{}-{}.png".format(
                    exp.name, attribute, config.nick))
    exp()
# Shrink to a local smoke test when not running on the grid.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

# Standard Lab parsers.
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()

# CPLEX vs. SoPlex total-time scatter plot per LP-based configuration.
for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]:
    exp.add_report(RelativeScatterPlotReport(
        attributes=["total_time"],
        filter_algorithm=["issue752-v2-%s-%s" % (nick, solver)
                          for solver in ["cplex", "soplex"]],
        get_category=lambda r1, r2: r1["domain"]),
        outfile="issue752-v2-scatter-total-time-%s.png" % nick)

exp.run_steps()
ENVIRONMENT = BaselSlurmEnvironment(email="*****@*****.**",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

# Shrink to a local smoke test when not running on the grid.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()

# CPLEX vs. SoPlex scatter plot per config and revision.
# NOTE(review): the outfile name does not include `rev`, so when REVISIONS
# has more than one entry each later revision's plot overwrites the
# earlier one's file — likely unintended; confirm before reuse.
for attribute in ["total_time"]:
    for config in ["astar-seq-pho", "astar-seq-lmcut"]:
        for rev in REVISIONS:
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=[
                    "{}-{}-{}".format(rev, config, solver)
                    for solver in ["cplex", "soplex"]
                ],
                get_category=lambda run1, run2: run1.get("domain"),
            ), outfile="{}-{}-{}.png".format(
                exp.name, attribute, config))

exp.run_steps()
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="*****@*****.**",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

# Shrink to a local smoke test when not running on the grid.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

#exp.add_absolute_report_step()
exp.add_comparison_table_step()

# One scatter plot per (attribute, config); both revisions are encoded
# in the outfile name via *REVISIONS (assumes exactly two revisions).
for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(RelativeScatterPlotReport(
            attributes=[attribute],
            filter_algorithm=[
                "{}-{}".format(rev, config.nick) for rev in REVISIONS
            ],
            get_category=lambda run1, run2: run1.get("domain"),
        ), outfile="{}-{}-{}-{}-{}.png".format(
            exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()
# Pull in the previously evaluated v4 data before building reports.
exp.add_fetcher('data/issue705-v4-eval')
exp.add_comparison_table_step()


def add_sg_peak_mem_diff_per_task_size(run):
    """Derive successor-generator memory per task-size unit for *run*.

    Adds "sg_peak_mem_diff_per_task_size" when both source attributes
    are present and non-zero; always returns the (possibly augmented)
    run so it can be used as a report filter.
    """
    mem = run.get("sg_peak_mem_diff")
    size = run.get("translator_task_size")
    if mem and size:
        # float() guards against Python 2 integer division.
        run["sg_peak_mem_diff_per_task_size"] = mem / float(size)
    return run


# Scatter plots for each attribute and each revision pair of interest
# (base vs. v7 and v6 vs. v7), always on the astar-blind configuration.
for attr in [
    "total_time", "search_time", "sg_construction_time", "memory",
    "sg_peak_mem_diff_per_task_size"
]:
    for rev1, rev2 in [("base", "v7"), ("v6", "v7")]:
        exp.add_report(RelativeScatterPlotReport(
            attributes=[attr],
            filter_algorithm=[
                "issue705-%s-astar-blind" % rev1,
                "issue705-%s-astar-blind" % rev2
            ],
            # The filter both selects runs and injects the derived attribute.
            filter=add_sg_peak_mem_diff_per_task_size,
            get_category=lambda r1, r2: r1["domain"],
        ), outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2))

exp.run_steps()