Example 1
from downward import suites

import common_setup


def main(revisions=None):
    SUITE = suites.suite_satisficing_with_ipc11()

    CONFIGS = {
        'cea': ['--search', 'eager_greedy(cea())'],
        'cg': ['--search', 'eager_greedy(cg())'],
        'lmcount': ['--search', 'eager_greedy(lmcount(lm_rhw()))'],
    }

    exp = common_setup.IssueExperiment(
        revisions=revisions,
        configs=CONFIGS,
        suite=SUITE,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
        grid_priority=-10,
    )

    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.append('landmarks')
    attributes.append('landmarks_generation_time')

    exp.add_comparison_table_step(attributes=attributes)

    exp()
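
The experiment here is wrapped in a main(revisions=None) entry point rather than built at module level, so a small driver script can supply the revisions to compare. A minimal sketch of such a driver, assuming the example above is saved as v1.py next to it (the module name and the revision names are placeholders, not part of the original):

# Hypothetical driver script; "v1" and the revision names are placeholders.
from v1 import main

main(revisions=['issueXXX-base', 'issueXXX-v1'])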
Example 2
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup

REVS = ["issue544-base", "issue544-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
    "eager_greedy_add":
    ["--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"],
    "eager_greedy_ff":
    ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"],
    "lazy_greedy_add":
    ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"],
    "lazy_greedy_ff":
    ["--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step()

exp()
Example 3
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup

exp = common_setup.IssueExperiment(
    search_revisions=["issue422-base", "issue422-v1"],
    configs={"lmcut": ["--search", "astar(lmcut())"]},
    suite=suites.suite_optimal_with_ipc11(),
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step()

exp()
Example 4
from downward import suites
from lab.reports import Attribute, gm

import common_setup


def main(revisions=None):
    SUITE = suites.suite_optimal_with_ipc11()

    B_CONFIGS = {
        'rl-b50k': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'
        ],
        'cggl-b50k': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'
        ],
        'dfp-b50k': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'
        ],
    }
    G_CONFIGS = {
        'rl-ginf': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'
        ],
        'cggl-ginf': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'
        ],
        'dfp-ginf': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'
        ],
    }
    F_CONFIGS = {
        'rl-f50k': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'
        ],
        'cggl-f50k': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'
        ],
        'dfp-f50k': [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'
        ],
    }
    CONFIGS = dict(B_CONFIGS)
    CONFIGS.update(G_CONFIGS)
    CONFIGS.update(F_CONFIGS)

    exp = common_setup.IssueExperiment(
        revisions=revisions,
        configs=CONFIGS,
        suite=SUITE,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])

    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic',
                                  absolute=True,
                                  min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability',
                                     absolute=True,
                                     min_wins=False)
    actual_search_time = Attribute('actual_search_time',
                                   absolute=False,
                                   min_wins=True,
                                   functions=[gm])

    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time',
                                     absolute=False,
                                     min_wins=True,
                                     functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                           absolute=True,
                                           min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory',
                                 absolute=True,
                                 min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory',
                                     absolute=True,
                                     min_wins=True)
    search_out_of_time = Attribute('search_out_of_time',
                                   absolute=True,
                                   min_wins=True)

    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.extend(extra_attributes)

    exp.add_comparison_table_step(attributes=attributes)

    exp()
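
The ms-parser.py resource registered above is a custom log parser whose contents are not part of this example; it is expected to extract the merge-and-shrink attributes declared in the example (ms_construction_time, ms_final_size, and so on) from the planner logs. A minimal sketch of what such a parser could look like with lab's Parser API; the regular expressions are assumptions, not the planner's actual output format:

#! /usr/bin/env python
# Hypothetical sketch of ms-parser.py; the patterns are placeholders, not
# the real merge-and-shrink log format.
from lab.parser import Parser

parser = Parser()
parser.add_pattern('ms_construction_time',
                   r'Merge-and-shrink construction time: (.+)s',
                   type=float, required=False)
parser.add_pattern('ms_final_size',
                   r'Final abstraction size: (\d+)',
                   type=int, required=False)
parser.parse()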
Example 5
        'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'
    ],
    'dfp-f50k': [
        '--search',
        'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'
    ],
}
CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
    test_suite=['depot:pfile1'],
    processes=4,
    email='*****@*****.**',
)

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic',
                              absolute=True,
                              min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability',
                                 absolute=True,
                                 min_wins=False)
actual_search_time = Attribute('actual_search_time',
                               absolute=False,
                               min_wins=True,
Example 6
import os

import common_setup

FILE = os.path.abspath(__file__)
DIR = os.path.dirname(FILE)

FILENAME = os.path.splitext(os.path.basename(__file__))[0]
EXPS = os.path.join(DIR, "data")
EXPPATH = os.path.join(EXPS, FILENAME)

def remove_file(filename):
    try:
        os.remove(filename)
    except OSError:
        pass

exp = common_setup.IssueExperiment()
exp.steps = []
exp.add_step(
    'remove-combined-properties',
    remove_file,
    os.path.join(exp.eval_dir, "properties"))

exp.add_fetcher(os.path.join(EXPS, "issue781-v2-eval"), merge=True)
exp.add_fetcher(os.path.join(EXPS, "issue781-v3-queue-ratio-eval"), merge=True)

ATTRIBUTES = [
    "cost", "error", "run_dir", "search_start_time",
    "search_start_memory", "coverage", "expansions_until_last_jump",
    "total_time", "initial_h_value", "search_time", "abstractions",
    "stored_heuristics", "stored_values", "stored_lookup_tables",
]
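
The original script continues past this excerpt. Purely as a hedged illustration of how such an attribute list is typically consumed, the sketch below builds one report over the fetched data; it assumes the AbsoluteReport class from downward.reports.absolute and a placeholder output filename, and it is not the original continuation:

# Illustrative sketch only; AbsoluteReport would normally be imported at
# the top of the script, and the outfile name is a placeholder.
from downward.reports.absolute import AbsoluteReport

exp.add_report(
    AbsoluteReport(attributes=ATTRIBUTES),
    outfile=os.path.join(EXPPATH, "report.html"))

exp()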
Example 7
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False)

# The following lines remove some configs that we don't currently
# support.

DISABLED = []
for key, value in list(CONFIGS.items()):
    if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")):
        del CONFIGS[key]
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v3", "issue77-v4"],
    configs=CONFIGS,
    suite=SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
Example 8
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup

CONFIGS = {
    'astar_ipdb': ['--search', 'astar(ipdb())'],
    'astar_pdb': ['--search', 'astar(pdb())'],
    'astar_gapdb': ['--search', 'astar(gapdb())'],
}

exp = common_setup.IssueExperiment(
    search_revisions=["issue488-base", "issue488-v1"],
    configs=CONFIGS,
    suite=suites.suite_optimal_with_ipc11(),
)

exp.add_comparison_table_step()

exp()
Example 9
from downward import suites
from downward.reports.compare import CompareConfigsReport

import common_setup

REPO = common_setup.get_repo_base()
REV_BASE = 'issue585-base'
REV_V1 = 'issue585-v2'
SUITE = suites.suite_optimal_with_ipc11()
ALGORITHMS = {
    'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']),
    'astar_ipdb_v2': (REV_V1, ['--search', 'astar(ipdb())']),
}
COMPARED_ALGORITHMS = [
    ('astar_ipdb_base', 'astar_ipdb_v2', 'Diff (ipdb)'),
]

exp = common_setup.IssueExperiment(
    revisions=[],
    configs={},
    suite=SUITE,
)

for nick, (rev, cmd) in ALGORITHMS.items():
    exp.add_algorithm(nick, REPO, rev, cmd)

exp.add_report(
    CompareConfigsReport(
        COMPARED_ALGORITHMS,
        attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES))

exp()
Example 10
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites, configs
from downward.reports.compare import CompareConfigsReport

import common_setup

REVISIONS = ["issue462-base", "issue462-v1"]
CONFIGS = configs.default_configs_optimal()

# remove config that is disabled in this branch
del CONFIGS['astar_selmax_lmcut_lmcount']

exp = common_setup.IssueExperiment(search_revisions=REVISIONS,
                                   configs=CONFIGS,
                                   suite=suites.suite_optimal_with_ipc11(),
                                   limits={"search_time": 300})
exp.add_absolute_report_step()
exp.add_comparison_table_step()


def grouped_configs_to_compare(config_nicks):
    grouped_configs = []
    for config_nick in config_nicks:
        col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
        grouped_configs.append(
            (col_names[0], col_names[1], 'Diff - %s' % config_nick))
    return grouped_configs


exp.add_report(CompareConfigsReport(
Example 11
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup

CONFIGS = {
    "cg-lazy-nopref": [
        "--heuristic", "h=cg()",
        "--search", "lazy_greedy(h)"
        ],
    "cg-lazy-pref": [
        "--heuristic", "h=cg()",
        "--search", "lazy_greedy(h, preferred=[h])"
        ],
    }

exp = common_setup.IssueExperiment(
    search_revisions=["issue470-base", "issue470-v1"],
    configs=CONFIGS,
    suite=suites.suite_satisficing_with_ipc11(),
    )

exp.add_comparison_table_step()

exp()
Example 12
from downward import suites
from downward.reports.scatter import ScatterPlotReport

import common_setup
from relativescatter import RelativeScatterPlotReport

SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    'astar_ipdb': ['--search', 'astar(ipdb())'],
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
)
exp.add_search_parser("custom-parser.py")

attributes = exp.DEFAULT_TABLE_ATTRIBUTES + [
    "successor_generator_time", "reopened_until_last_jump"
]
exp.add_comparison_table_step(attributes=attributes)

for conf in CONFIGS:
    for attr in ("memory", "search_time"):
        exp.add_report(RelativeScatterPlotReport(
            attributes=[attr],
            get_category=lambda run1, run2: run1.get("domain"),
            filter_config=["issue547-base-%s" % conf,
Example 13
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites
from lab.reports import Attribute

import common_setup

import os

exp = common_setup.IssueExperiment(
    search_revisions=["issue479-v2"],
    configs={
        'dfp-b-50k': [
            '--search',
            'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(max_states=100000,threshold=1,greedy=false),merge_strategy=merge_dfp(),label_reduction=label_reduction(before_shrinking=true, before_merging=false)))'
        ],
        'blind': ['--search', 'astar(blind())'],
    },
    suite=['airport'],
    limits={"search_time": 300},
)

exp.add_absolute_report_step(attributes=['coverage', 'error', 'run_dir'])

exp()
Example 14
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites, configs
from downward.reports.compare import CompareConfigsReport

import common_setup

REVISIONS = ["issue425-base", "issue425-v1"]

exp = common_setup.IssueExperiment(
    search_revisions=REVISIONS,
    configs=configs.default_configs_satisficing(),
    suite=suites.suite_satisficing_with_ipc11(),
    limits={"search_time": 300})
exp.add_absolute_report_step()
exp.add_comparison_table_step()


def grouped_configs_to_compare(config_nicks):
    grouped_configs = []
    for config_nick in config_nicks:
        col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
        grouped_configs.append(
            (col_names[0], col_names[1], 'Diff - %s' % config_nick))
    return grouped_configs


exp.add_report(CompareConfigsReport(
    compared_configs=grouped_configs_to_compare(
        configs.configs_satisficing_core()),
Example 15
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites
from lab.reports import Attribute
from lab.suites import suite_all

import common_setup

import os

exp = common_setup.IssueExperiment(
    search_revisions=["issue469-base", "issue469-v1"],
    configs={"astar_blind": ["--search", "astar(blind())"]},
    suite=suite_all(),
)

parser = os.path.join(common_setup.get_script_dir(), 'raw_memory_parser.py')
exp.add_search_parser(parser)


def add_unexplained_errors_as_int(run):
    if run.get('error').startswith('unexplained'):
        run['unexplained_errors'] = 1
    else:
        run['unexplained_errors'] = 0
    return run


exp.add_absolute_report_step(
    attributes=['raw_memory',