"h{h2}={h2}".format(**locals()), "--search", "{search}(h{h1}, h{h2}, preferred=[h{h1},h{h2}])".format( **locals()) ], driver_options=["--search-time-limit", "1m"]) rev = "issue714-v1" config_nick = "-".join([search, h1, h2]) algo2 = common_setup.get_algo_nick(rev, config_nick) exp.add_algorithm( algo2, common_setup.get_repo_base(), rev, [ "--heuristic", "h{h1}={h1}".format(**locals()), "--heuristic", "h{h2}={h2}".format(**locals()), "--search", "{search}([h{h1},h{h2}], preferred=[h{h1},h{h2}])".format( **locals()) ], driver_options=["--search-time-limit", "1m"]) compared_algorithms.append( [algo1, algo2, "Diff ({config_nick})".format(**locals())]) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_report(compare.ComparativeReport( compared_algorithms, attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), name=common_setup.get_experiment_name() + "-comparison") exp.run_steps()
# -*- coding: utf-8 -*-

import itertools
import os
import subprocess

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

EXPNAME = common_setup.get_experiment_name()

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-v1"]

# Search configurations as (config nick, planner component options).
SEARCHES = [
    ("bjolp-silent", [
        "--evaluator",
        "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
        "--search",
        "astar(lmc,lazy_evaluator=lmc, verbosity=silent)"]),
    ("blind-silent", ["--search", "astar(blind(), verbosity=silent)"]),
    ("cegar-silent", ["--search", "astar(cegar(), verbosity=silent)"]),
    # ("divpot", ["--search", "astar(diverse_potentials(), verbosity=silent)"]),
    ("ipdb-silent", ["--search", "astar(ipdb(), verbosity=silent)"]),
    ("lmcut-silent", ["--search", "astar(lmcut(), verbosity=silent)"]),
    ("mas-silent", [