def main(revisions=None):
    """Run the MCO14 benchmark experiment over several satisficing configs."""
    suite = SUITE_MCO14

    # Baseline goal-count config first, then greedy best-first search with
    # four heuristics (eager) and three (lazy), then three portfolio
    # aliases.  The generated list matches the original hand-written order.
    configs = [
        IssueConfig("astar_goalcount", [
            "--search",
            "astar(goalcount)"]),
    ]
    for mode, heuristic_nicks in (
            ("eager", ["ff", "add", "cg", "cea"]),
            ("lazy", ["ff", "add", "cg"])):
        for nick in heuristic_nicks:
            configs.append(IssueConfig(
                "%s_greedy_%s" % (mode, nick), [
                    "--heuristic",
                    "h=%s()" % nick,
                    "--search",
                    "%s_greedy(h, preferred=h)" % mode]))
    for alias in ["seq-sat-lama-2011", "seq-sat-fdss-1", "seq-sat-fdss-2"]:
        configs.append(IssueConfig(
            alias.replace("-", "_"), [],
            driver_options=["--alias", alias]))

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=[
            #'cavediving-sat14-adl:testing01_easy.pddl',
            #'childsnack-sat14-strips:child-snack_pfile05.pddl',
            #'citycar-sat14-adl:p3-2-2-0-1.pddl',
            #'ged-sat14-strips:d-3-6.pddl',
            'hiking-sat14-strips:ptesting-1-2-7.pddl',
            #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl',
            #'tetris-sat14-strips:p020.pddl',
            #'thoughtful-sat14-strips:bootstrap-typed-01.pddl',
            #'transport-sat14-strips:p01.pddl',
        ],
        processes=4,
        email='*****@*****.**',
    )

    exp.add_absolute_report_step()

    exp()
Beispiel #2
0
                                      absolute=True,
                                      min_wins=False)
# Merge-and-shrink and search statistics for the report table.
# min_wins=True: smaller values are better.  absolute=True: always report
# the attribute even when it is not present for every algorithm.
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory',
                                 absolute=True,
                                 min_wins=True)
search_out_of_time = Attribute('search_out_of_time',
                               absolute=True,
                               min_wins=True)

# Attributes added on top of the default table attributes below.
# NOTE(review): perfect_heuristic, ms_construction_time, etc. must be
# defined earlier in the file (outside this view) — confirm they exist.
extra_attributes = [
    perfect_heuristic,
    ms_construction_time,
    ms_atomic_construction_time,
    ms_abstraction_constructed,
    ms_atomic_fts_constructed,
    ms_final_size,
    ms_out_of_memory,
    ms_out_of_time,
    search_out_of_memory,
    search_out_of_time,
]
# NOTE(review): extend() mutates the list object that
# exp.DEFAULT_TABLE_ATTRIBUTES refers to (no copy is made) — confirm that
# no other report step relies on the unmodified defaults.
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)

exp.add_absolute_report_step(attributes=attributes)

exp.run_steps()
Beispiel #3
0
]
# One config per (search, build) pair, plus one lama-first alias config.
CONFIGS = [
    IssueConfig("{nick}-{build}".format(**locals()),
                search,
                build_options=[build],
                driver_options=["--build", build]) for nick, search in SEARCHES
    for build in BUILDS
] + [
    # NOTE(review): this second list has no "for build in BUILDS" clause, so
    # both "build" below and the locals() lookup rely on the comprehension
    # variable still being bound after the first list.  That only works under
    # Python 2, where list-comprehension variables leak into the enclosing
    # scope (it would be a NameError/KeyError under Python 3) — confirm
    # which build this is meant to use.
    IssueConfig("lama-first-{build}".format(**locals()), [],
                build_options=[build],
                driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(priority=0,
                              email="*****@*****.**")

# Use a small local setup instead of the grid when this is a test run.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
Beispiel #4
0
def main(revisions=None):
    """Compare potential heuristics across OSI/CPLEX build combinations.

    Builds one config per (potential heuristic, OSI version, CPLEX version)
    combination, runs them on the optimal suite, and produces an absolute
    report plus relative scatter plots that compare LP-solver versions.
    """
    benchmarks_dir = os.path.expanduser('~/projects/downward/benchmarks')
    suite = suites.suite_optimal()

    configs = []

    for osi in ['103', '107']:
        for cplex in ['1251', '1263']:
            if osi == '107' and cplex == '1251':
                # incompatible versions
                continue
            # Three potential-heuristic variants, all built against the
            # same issue680 build for this OSI/CPLEX pair.
            configs += [
                IssueConfig('astar_initial_state_potential_OSI%s_CPLEX%s' %
                            (osi, cplex),
                            ['--search', 'astar(initial_state_potential())'],
                            build_options=[
                                'issue680_OSI%s_CPLEX%s' % (osi, cplex)
                            ],
                            driver_options=[
                                '--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)
                            ]),
                IssueConfig('astar_sample_based_potentials_OSI%s_CPLEX%s' %
                            (osi, cplex),
                            ['--search', 'astar(sample_based_potentials())'],
                            build_options=[
                                'issue680_OSI%s_CPLEX%s' % (osi, cplex)
                            ],
                            driver_options=[
                                '--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)
                            ]),
                IssueConfig(
                    'astar_all_states_potential_OSI%s_CPLEX%s' % (osi, cplex),
                    ['--search', 'astar(all_states_potential())'],
                    build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)],
                    driver_options=[
                        '--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)
                    ]),
            ]

    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'],
        processes=4,
        email='*****@*****.**',
    )

    # NOTE(review): "attributes" is never used after this assignment.
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES

    # Restrict reports to the STRIPS subset of the optimal suite.
    domains = suites.suite_optimal_strips()

    exp.add_absolute_report_step(filter_domain=domains)

    # Scatter plots per attribute and heuristic: first compare the two OSI
    # versions at fixed CPLEX 1263, then the two CPLEX versions at fixed
    # OSI 103.
    for attribute in ["memory", "total_time"]:
        for config in [
                'astar_initial_state_potential',
                'astar_sample_based_potentials', 'astar_all_states_potential'
        ]:
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=[
                    "{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi)
                    for osi in ['103', '107']
                ],
                filter_domain=domains,
                get_category=lambda run1, run2: run1.get("domain"),
            ),
                           outfile="{}-{}-{}_CPLEX1263.png".format(
                               exp.name, attribute, config))
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=[
                    "{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex)
                    for cplex in ['1251', '1263']
                ],
                filter_domain=domains,
                get_category=lambda run1, run2: run1.get("domain"),
            ),
                           outfile="{}-{}-{}_OSI103.png".format(
                               exp.name, attribute, config))

    exp()
Beispiel #5
0
# Absolute report over the successor-generator statistics.  Raw counts,
# times, and size estimates are aggregated with the arithmetic mean; the
# *_rel attributes are ratios and therefore use the geometric mean.  Lower
# is better for every attribute (min_wins=True).  The name lists preserve
# the original attribute order.
_SG_ARITHMETIC_MEAN_ATTRS = [
    "sg_construction_time",
    "sg_peak_mem_diff",
    "sg_counts_empty",
    "sg_counts_leaf_empty",
    "sg_counts_leaf_more",
    "sg_counts_leaf_single",
    "sg_counts_leaves",
    "sg_counts_switch_empty",
    "sg_counts_switch_more",
    "sg_counts_switch_single",
    "sg_counts_switches",
    "sg_counts_forks",
    "sg_counts_immediates",
    "sg_size_estimate_default_generator",
    "sg_size_estimate_operators",
    "sg_size_estimate_overhead",
    "sg_size_estimate_switch_var",
    "sg_size_estimate_total",
    "sg_size_estimate_value_generator",
    "sg_size_estimate_next_generator",
]
_SG_GEOMETRIC_MEAN_ATTRS = [
    "sg_counts_empty_rel",
    "sg_counts_leaf_empty_rel",
    "sg_counts_leaf_more_rel",
    "sg_counts_leaf_single_rel",
    "sg_counts_leaves_rel",
    "sg_counts_switch_empty_rel",
    "sg_counts_switch_more_rel",
    "sg_counts_switch_single_rel",
    "sg_counts_switches_rel",
    "sg_counts_forks_rel",
    "sg_counts_immediates_rel",
    "sg_size_estimate_default_generator_rel",
    "sg_size_estimate_operators_rel",
    "sg_size_estimate_overhead_rel",
    "sg_size_estimate_switch_var_rel",
    "sg_size_estimate_value_generator_rel",
    "sg_size_estimate_next_generator_rel",
]
exp.add_absolute_report_step(attributes=(
    [Attribute(name, functions=[arithmetic_mean], min_wins=True)
     for name in _SG_ARITHMETIC_MEAN_ATTRS]
    + [Attribute(name, functions=[geometric_mean], min_wins=True)
       for name in _SG_GEOMETRIC_MEAN_ATTRS]
    + ["error", "run_dir"]))
Beispiel #6
0
    for min_ratio in [0.2]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="*****@*****.**",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

# Use a small local setup instead of the cluster when this is a test run.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

# Parsers: lab driver, exit codes, and single-search output, plus the
# issue-specific pruning parser that lives next to this script.
exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py"))

# Report the default table columns plus the pruning-specific timing.
exp.add_absolute_report_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])
#exp.add_comparison_table_step()

exp.run_steps()
Beispiel #7
0
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v3"]
# Translator-only configs: plain translation, and translation under a
# time limit and a memory limit.
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig('translate-time-limit', [],
                driver_options=['--translate-time-limit', '5s',
                                '--translate']),
    IssueConfig(
        'translate-memory-limit', [],
        driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="*****@*****.**",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

# Use two sample tasks locally when this is a test run.
if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
# No search is run, so drop the search-output parsing command.
del exp.commands['parse-search']
# Only translator statistics and the error column are of interest.
exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
Beispiel #8
0
def main(revisions=None):
    """Run the seq-sat-fdss-2018 portfolio, with h2-mutexes preprocessing,
    on the satisficing benchmark suite, locally on 48 cores."""
    benchmarks_dir = os.environ["DOWNWARD_BENCHMARKS"]
    suite = [
        "agricola-sat18-strips", "airport", "barman-sat11-strips",
        "barman-sat14-strips", "blocks", "childsnack-sat14-strips",
        "data-network-sat18-strips", "depot", "driverlog",
        "elevators-sat08-strips", "elevators-sat11-strips",
        "floortile-sat11-strips", "floortile-sat14-strips", "freecell",
        "ged-sat14-strips", "grid", "gripper", "hiking-sat14-strips",
        "logistics00", "logistics98", "miconic", "movie", "mprime", "mystery",
        "nomystery-sat11-strips", "openstacks-sat08-strips",
        "openstacks-sat11-strips", "openstacks-sat14-strips",
        "openstacks-strips", "organic-synthesis-sat18-strips",
        "organic-synthesis-split-sat18-strips", "parcprinter-08-strips",
        "parcprinter-sat11-strips", "parking-sat11-strips",
        "parking-sat14-strips", "pathways", "pegsol-08-strips",
        "pegsol-sat11-strips", "pipesworld-notankage", "pipesworld-tankage",
        "psr-small", "rovers", "satellite", "scanalyzer-08-strips",
        "scanalyzer-sat11-strips", "snake-sat18-strips",
        "sokoban-sat08-strips", "sokoban-sat11-strips", "spider-sat18-strips",
        "storage", "termes-sat18-strips", "tetris-sat14-strips",
        "thoughtful-sat14-strips", "tidybot-sat11-strips", "tpp",
        "transport-sat08-strips", "transport-sat11-strips",
        "transport-sat14-strips", "trucks-strips", "visitall-sat11-strips",
        "visitall-sat14-strips", "woodworking-sat08-strips",
        "woodworking-sat11-strips", "zenotravel"
    ]
    # suite = ["elevators-sat08-strips:p01.pddl"]
    environment = LocalEnvironment(processes=48)

    # NOTE(review): build_options elsewhere in these scripts holds just the
    # build name (e.g. ["release64"]); "--build" looks like a driver flag —
    # confirm the build step accepts it.
    BUILD_OPTIONS = ["--build", "release64"]
    DRIVER_OPTIONS = [
        "--transform-task", "builds/h2-mutexes/bin/preprocess",
        "--overall-time-limit", "30m", "--overall-memory-limit", "4096M",
        "--alias", "seq-sat-fdss-2018"
    ]

    # A list (was a set literal): keeps config order deterministic and is
    # consistent with how configs are built in the other scripts.
    configs = [
        IssueConfig("fdss", [],
                    build_options=BUILD_OPTIONS,
                    driver_options=DRIVER_OPTIONS)
    ]

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        environment=environment,
    )
    exp.add_suite(benchmarks_dir, suite)

    # Only the single-search parser is needed for this portfolio report.
    #exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
    #exp.add_parser(exp.LAB_DRIVER_PARSER)
    #exp.add_parser(exp.EXITCODE_PARSER)
    #exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    #exp.add_parser(exp.PLANNER_PARSER)

    attributes = exp.DEFAULT_TABLE_ATTRIBUTES

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')

    # exp.add_comparison_table_step(attributes=attributes)

    exp.add_absolute_report_step(attributes=attributes)

    exp.run_steps()
Beispiel #9
0
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="*****@*****.**",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

# Use a small local setup instead of the cluster when this is a test run.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step(filter_algorithm=["lama-first"])
exp.add_comparison_table_step()

# One relative scatter plot per (attribute, revision pair, config),
# categorized by domain, comparing base against v1 for issue700.
for attr in ["total_time", "search_time", "memory"]:
    for rev1, rev2 in [("base", "v1")]:
        for config_nick in ["lama-first", "ehc_ff"]:
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attr],
                filter_algorithm=[
                    "issue700-%s-%s" % (rev1, config_nick),
                    "issue700-%s-%s" % (rev2, config_nick)
                ],
                get_category=lambda r1, r2: r1["domain"],
            ),
                           outfile="issue700-%s-%s-%s-%s.png" %
                           (config_nick, attr, rev1, rev2))
Beispiel #10
0
def main(revisions=None):
    """Run the rb-ce-cerberus config on the IPC 2018 satisficing domains,
    publish the report, and compare against an earlier revision's data."""
    benchmarks_dir = os.environ["DOWNWARD_BENCHMARKS_IPC2018"]
    # optimal union satisficing
    suite = []
    #suite.extend(['briefcaseworld', 'cavediving-14-adl', 'citycar-sat14-adl', 'fsc-blocks', 'fsc-grid-a1', 'fsc-grid-a2', 'fsc-grid-r', 'fsc-hall', 'fsc-visualmarker', 'gedp-ds2ndp', 'miconic-simpleadl', 't0-adder', 't0-coins', 't0-comm', 't0-grid-dispose', 't0-grid-push', 't0-grid-trash', 't0-sortnet', 't0-sortnet-alt', 't0-uts'])
    suite.extend([
        "agricola-sat18", "caldera-sat18", "caldera-split-sat18",
        "data-network-sat18", "flashfill-sat18", "nurikabe-sat18",
        "organic-synthesis-sat18", "organic-synthesis-split-sat18",
        "settlers-sat18", "snake-sat18", "spider-sat18", "termes-sat18"
    ])

    environment = OracleGridEngineEnvironment(queue='all.q')

    BUILD_OPTIONS = ["release64"]
    DRIVER_OPTIONS = [
        "--build", "release64", "--overall-time-limit", "30m",
        "--overall-memory-limit", "4096M"
    ]

    # NOTE(review): this is a set literal with one element; a list would be
    # the consistent container (compare the other scripts) — confirm
    # IssueExperiment accepts any iterable here.
    configs = {
        IssueConfig('rb-ce-cerberus', [
            '--heuristic', 'hrb=RB(dag=from_coloring, extract_plan=true)',
            '--search', 'lazy_greedy([hrb],reopen_closed=false)'
        ],
                    build_options=BUILD_OPTIONS,
                    driver_options=DRIVER_OPTIONS),
    }

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        environment=environment,
    )
    exp.add_suite(benchmarks_dir, suite)

    exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
    exp.add_parser(exp.LAB_DRIVER_PARSER)
    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    #exp.add_parser(exp.PLANNER_PARSER)

    attributes = exp.DEFAULT_TABLE_ATTRIBUTES

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')

    exp.add_absolute_report_step(attributes=attributes)
    # Copy the generated HTML report to a shared location and print its URL.
    report_name = os.path.basename(exp.path.rstrip('/'))
    exp.add_step('copy_report', subprocess.call, [
        'cp',
        os.path.join(exp.eval_dir, '%s.html' % report_name),
        '/storage/US1J6721/EXTERNAL'
    ])

    exp.add_step('print_dest', subprocess.call, [
        'echo',
        os.path.join("https://syss063.pok.stglabs.ibm.com/users/mkatz/storage",
                     '%s.html' % report_name)
    ])

    algorithm_nicks = ['rb-ce-cerberus']

    # Pull in previously computed results for the same algorithms from an
    # older revision and build a pairwise comparison report.
    OTHER_REV = '79675435c191'
    exp.add_fetcher(
        'data/AAAI2018-conditional-effects-2018-07-08-ipc2018-eval',
        filter_algorithm=[
            '{}-{}'.format(OTHER_REV, x) for x in algorithm_nicks
        ])

    # NOTE(review): REVISION (singular) is not defined in this function; it
    # is presumably a module-level constant — confirm it matches the entry
    # of the "revisions" parameter used for this run.
    exp.add_report(
        ComparativeReport(
            algorithm_pairs=[('{}-{}'.format(OTHER_REV,
                                             x), '{}-{}'.format(REVISION, x))
                             for x in algorithm_nicks],
            attributes=attributes,
        ),
        outfile=os.path.join(exp.eval_dir, 'a' + exp.name + '-compare.html'),
    )

    exp.run_steps()
Beispiel #11
0
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue714-base", "issue714-v1"]
# One config per portfolio alias; the alias name doubles as the nick.
CONFIGS = [
    IssueConfig(alias, [], driver_options=["--alias", alias])
    for alias in [
        "seq-sat-fdss-1", "seq-sat-fdss-2", "seq-sat-fdss-2014",
        "seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="*****@*****.**")

# Use a small local setup instead of the grid when this is a test run.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)
# Portfolio runs get the portfolio-specific attribute set in both reports.
exp.add_absolute_report_step(attributes=IssueExperiment.PORTFOLIO_ATTRIBUTES)
exp.add_comparison_table_step(attributes=IssueExperiment.PORTFOLIO_ATTRIBUTES)

exp.run_steps()
Beispiel #12
0
        # Don't filter this run, yet.
        return True

    def filter_tasks_with_equal_values(self, run):
        """Keep *run* only if its task produced more than one distinct value."""
        task_values = self._tasks_to_values[self._get_task(run)]
        distinct_values = set(task_values)
        return len(distinct_values) != 1


# Standard experiment steps: compile, run, re-parse, and collect results.
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_parse_again_step()
exp.add_fetcher(name='fetch')

ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
# Full report of translator attributes.
# NOTE(review): EXPNAME is expected to be bound at module level (outside
# this view) for the format(**locals()) calls below — confirm.
exp.add_absolute_report_step(outfile=os.path.join(
    exp.eval_dir, "{EXPNAME}.html".format(**locals())),
                             attributes=ATTRIBUTES)
# Second report restricted to tasks whose translator output hash differs
# between algorithms.  (Renamed from the misspelled "same_value_flters".)
same_value_filters = SameValueFilters("translator_output_sas_hash")
exp.add_absolute_report_step(
    outfile=os.path.join(exp.eval_dir,
                         "{EXPNAME}-filtered.html".format(**locals())),
    attributes=ATTRIBUTES,
    filter=[
        same_value_filters.store_values,
        same_value_filters.filter_tasks_with_equal_values
    ])
# CSV listing of the runs whose output differs, for manual inspection.
exp.add_report(TranslatorDiffReport(
    attributes=["domain", "problem", "algorithm", "run_dir"]),
               outfile="different_output_sas.csv")

exp.run_steps()