Code example #1
File: v4.py (project: Eldeeqq/bi-zum)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('parser.py')

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
attributes.extend([
    Attribute('generator_computation_time',
              absolute=False,
              min_wins=True,
              functions=[geometric_mean]),
    Attribute('cpdbs_computation_time',
              absolute=False,
              min_wins=True,
              functions=[geometric_mean]),
    Attribute('dominance_pruning_time',
              absolute=False,
              min_wins=True,
              functions=[geometric_mean]),
])

#exp.add_absolute_report_step()
exp.add_comparison_table_step(attributes=attributes)
# Plot the three custom time attributes defined above.
exp.add_scatter_plot_step(relative=True,
                          attributes=['generator_computation_time',
                                      'cpdbs_computation_time',
                                      'dominance_pruning_time'])
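
The script registers a custom parser.py alongside lab's built-in parsers. A minimal sketch of what such a parser could look like with lab's Parser API; the attribute names come from the script above, but the log regexes are invented for illustration:

from lab.parser import Parser

# Hypothetical parser.py: the patterns below are placeholders, not the
# project's real log format.
parser = Parser()
parser.add_pattern('generator_computation_time',
                   r'Generator computation time: (.+)s', type=float)
parser.add_pattern('cpdbs_computation_time',
                   r'CPDBs computation time: (.+)s', type=float)
parser.add_pattern('dominance_pruning_time',
                   r'Dominance pruning time: (.+)s', type=float)
parser.parse()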
Code example #2
File: v1-sat-reparse.py (project: yanxi0830/downward)
        "--heuristic", "hff=ff(cost_type=one)", "--search",
        "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])]),"
        "preferred=[hff],cost_type=one)"
    ]),
]

exp = IssueExperiment(revisions=REVS,
                      configs=CONFIGS,
                      suite=SUITE,
                      email="*****@*****.**")

exp.add_fetcher('data/issue648-v1-sat-test', parsers=['parser.py'])

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic',
                              absolute=True,
                              min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability',
                                 absolute=True,
                                 min_wins=False)
out_of_memory = Attribute('out_of_memory', absolute=True, min_wins=True)
out_of_time = Attribute('out_of_time', absolute=True, min_wins=True)

extra_attributes = [
    perfect_heuristic,
    proved_unsolvability,
    out_of_memory,
    out_of_time,
]
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
attributes.extend(extra_attributes)
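
Outcome attributes such as out_of_memory and out_of_time are usually produced by the custom parser rather than by the planner itself. A sketch of how parser.py might derive them with a parse function; the exact log messages are assumptions:

from lab.parser import Parser

def parse_outcome(content, props):
    # The log messages below are assumed, not taken from the project.
    props['out_of_memory'] = int('Memory limit has been reached' in content)
    props['out_of_time'] = int('Time limit has been reached' in content)

parser = Parser()
parser.add_function(parse_outcome)
parser.parse()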
Code example #3
File: v6-debug.py (project: yanxi0830/downward)
                driver_options=["--alias", "lama-first", "--build", build])
    for build in BUILDS
]
SUITE = set(common_setup.DEFAULT_OPTIMAL_SUITE +
            common_setup.DEFAULT_SATISFICING_SUITE)
ENVIRONMENT = BaselSlurmEnvironment(priority=0,
                                    email="*****@*****.**")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
    Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
    Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
]

exp.add_comparison_table_step(attributes=attributes)

exp.run_steps()
Code example #4
        "barman", "blocksworld_3", "blocksworld_4", "childsnack", "delivery",
        "gripper", "miconic", "reward", "spanner", "visitall"
    ]
    TIME_LIMIT = 3 * 3600
else:
    ENV = LocalEnvironment(processes=16)
    SUITE = [
        "blocksworld_3:p-3-0.pddl", "childsnack:p-2-1.0-0.0-1-0.pddl",
        "delivery:instance_2_1_0.pddl", "gripper:p-1-0.pddl",
        "miconic:p-2-2-0.pddl", "reward:instance_2x2_0.pddl",
        "visitall:p-1-0.5-2-0.pddl"
    ]
    TIME_LIMIT = 180
ATTRIBUTES = [
    Attribute("generate_time_complexity_10",
              absolute=True,
              min_wins=True,
              scale="linear"),
    Attribute("generate_memory_complexity_10",
              absolute=True,
              min_wins=True,
              scale="linear"),
    Attribute("num_generated_features_complexity_10",
              absolute=True,
              min_wins=True,
              scale="linear"),
    Attribute("num_novel_features_complexity_10",
              absolute=True,
              min_wins=True,
              scale="linear"),
    Attribute("num_states", absolute=True, min_wins=False, scale="linear"),
    Attribute("num_dynamic_atoms",
Code example #5
File: v1.py (project: yanxi0830/downward)
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])

exp.add_absolute_report_step(attributes=[
    Attribute(
        "sg_construction_time", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_peak_mem_diff", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_empty", functions=[arithmetic_mean], min_wins=True),
    Attribute(
        "sg_counts_leaf_empty", functions=[arithmetic_mean], min_wins=True),
    Attribute(
        "sg_counts_leaf_more", functions=[arithmetic_mean], min_wins=True),
    Attribute(
        "sg_counts_leaf_single", functions=[arithmetic_mean], min_wins=True),
    Attribute("sg_counts_leaves", functions=[arithmetic_mean], min_wins=True),
    Attribute(
        "sg_counts_switch_empty", functions=[arithmetic_mean], min_wins=True),
    Attribute(
        "sg_counts_switch_more", functions=[arithmetic_mean], min_wins=True),
    Attribute(
        "sg_counts_switch_single", functions=[arithmetic_mean], min_wins=True),
])
Code example #6
def main(revisions=None):
    suite = suites.suite_optimal_with_ipc11()

    configs = {}

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])

    # planner outcome attributes
    search_out_of_memory = Attribute('search_out_of_memory',
                                     absolute=True,
                                     min_wins=True)
    search_out_of_time = Attribute('search_out_of_time',
                                   absolute=True,
                                   min_wins=True)
    perfect_heuristic = Attribute('perfect_heuristic',
                                  absolute=True,
                                  min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability',
                                     absolute=True,
                                     min_wins=False)

    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time',
                                     absolute=False,
                                     min_wins=True,
                                     functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                           absolute=True,
                                           min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory',
                                 absolute=True,
                                 min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    ms_memory_delta = Attribute('ms_memory_delta',
                                absolute=False,
                                min_wins=True)

    extra_attributes = [
        search_out_of_memory,
        search_out_of_time,
        perfect_heuristic,
        proved_unsolvability,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        ms_memory_delta,
    ]
    attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
    attributes.extend(extra_attributes)

    exp.add_fetcher('data/issue604-v1-eval',
                    filter_config=[
                        'issue604-base-rl-b50k',
                        'issue604-base-cggl-b50k',
                        'issue604-base-dfp-b50k',
                        'issue604-base-rl-ginf',
                        'issue604-base-cggl-ginf',
                        'issue604-base-dfp-ginf',
                        'issue604-base-rl-f50k',
                        'issue604-base-cggl-f50k',
                        'issue604-base-dfp-f50k',
                    ])

    exp.add_fetcher('data/issue604-v7-eval',
                    filter_config=[
                        'issue604-v7-rl-b50k',
                        'issue604-v7-cggl-b50k',
                        'issue604-v7-dfp-b50k',
                        'issue604-v7-rl-ginf',
                        'issue604-v7-cggl-ginf',
                        'issue604-v7-dfp-ginf',
                        'issue604-v7-rl-f50k',
                        'issue604-v7-cggl-f50k',
                        'issue604-v7-dfp-f50k',
                    ])

    exp.add_fetcher('data/issue604-v7-rest-eval',
                    filter_config=[
                        'issue604-v7-rl-b50k',
                        'issue604-v7-cggl-b50k',
                        'issue604-v7-dfp-b50k',
                        'issue604-v7-rl-ginf',
                        'issue604-v7-cggl-ginf',
                        'issue604-v7-dfp-ginf',
                        'issue604-v7-rl-f50k',
                        'issue604-v7-cggl-f50k',
                        'issue604-v7-dfp-f50k',
                    ])

    exp.add_report(CompareConfigsReport(compared_configs=[
        ('issue604-base-rl-b50k', 'issue604-v7-rl-b50k'),
        ('issue604-base-cggl-b50k', 'issue604-v7-cggl-b50k'),
        ('issue604-base-dfp-b50k', 'issue604-v7-dfp-b50k'),
        ('issue604-base-rl-ginf', 'issue604-v7-rl-ginf'),
        ('issue604-base-cggl-ginf', 'issue604-v7-cggl-ginf'),
        ('issue604-base-dfp-ginf', 'issue604-v7-dfp-ginf'),
        ('issue604-base-rl-f50k', 'issue604-v7-rl-f50k'),
        ('issue604-base-cggl-f50k', 'issue604-v7-cggl-f50k'),
        ('issue604-base-dfp-f50k', 'issue604-v7-dfp-f50k'),
    ],
                                        attributes=attributes),
                   outfile=os.path.join(exp.eval_dir,
                                        'issue604-base-v7-comparison.html'))

    exp()
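
The nine compared_configs pairs above differ only in their nick, so they could equally be built programmatically, which avoids copy-paste slips (an equivalent sketch):

NICKS = ['rl-b50k', 'cggl-b50k', 'dfp-b50k',
         'rl-ginf', 'cggl-ginf', 'dfp-ginf',
         'rl-f50k', 'cggl-f50k', 'dfp-f50k']
compared_configs = [('issue604-base-%s' % nick, 'issue604-v7-%s' % nick)
                    for nick in NICKS]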
Code example #7
File: v1.py (project: yanxi0830/downward)
def main(revisions=None):
    suite = suites.suite_optimal_with_ipc11()

    configs = {
        IssueConfig('rl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('cggl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('dfp-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('rl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('cggl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('dfp-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('rl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
        IssueConfig('cggl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
        IssueConfig('dfp-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
    }

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])

    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic',
                                  absolute=True,
                                  min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability',
                                     absolute=True,
                                     min_wins=False)
    actual_search_time = Attribute('actual_search_time',
                                   absolute=False,
                                   min_wins=True,
                                   functions=[gm])

    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time',
                                     absolute=False,
                                     min_wins=True,
                                     functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                           absolute=True,
                                           min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory',
                                 absolute=True,
                                 min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory',
                                     absolute=True,
                                     min_wins=True)
    search_out_of_time = Attribute('search_out_of_time',
                                   absolute=True,
                                   min_wins=True)

    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
    attributes.extend(extra_attributes)

    exp.add_comparison_table_step()

    exp.add_report(RelativeScatterPlotReport(
        attributes=["memory"],
        filter_config=["issue604-base-dfp-ginf", "issue604-v1-dfp-ginf"],
        get_category=lambda run1, run2: run1.get("domain"),
    ),
                   outfile='issue604_base_v1_memory_dfp.png')

    exp.add_report(RelativeScatterPlotReport(
        attributes=["memory"],
        filter_config=["issue604-base-rl-ginf", "issue604-v1-rl-ginf"],
        get_category=lambda run1, run2: run1.get("domain"),
    ),
                   outfile='issue604_base_v1_memory_rl.png')

    exp()
Code example #8
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")

log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]

exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()

sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec)

exp.run_steps()
Code example #9
from lab.suites import suite_all

import common_setup

import os

exp = common_setup.IssueExperiment(
    search_revisions=["issue469-base", "issue469-v1"],
    configs={"astar_blind": ["--search", "astar(blind())"]},
    suite=suite_all(),
)

parser = os.path.join(common_setup.get_script_dir(), 'raw_memory_parser.py')
exp.add_search_parser(parser)


def add_unexplained_errors_as_int(run):
    if run.get('error', '').startswith('unexplained'):
        run['unexplained_errors'] = 1
    else:
        run['unexplained_errors'] = 0
    return run


exp.add_absolute_report_step(
    attributes=['raw_memory',
                Attribute('unexplained_errors', absolute=True)],
    filter=add_unexplained_errors_as_int)

exp()
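
Filters like add_unexplained_errors_as_int receive one run dictionary at a time: returning the (possibly modified) run or True keeps it, while returning a falsy value drops it. A hypothetical filter that removes runs from the report instead of annotating them:

def only_solved(run):
    # Keep only runs that solved the task; 'coverage' is the standard
    # lab attribute for this.
    return run.get('coverage') == 1

Such a filter is passed the same way, via the filter keyword of add_absolute_report_step.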
Code example #10
File: cegar-ocp.py (project: mzumsteg/downward-lab)
from histogram_report import HistogramReport
from domain_comparison_report import (DomainComparisonReport,
                                      OptimalStrategyEvaluator,
                                      IdealProblemsEvaluator,
                                      AttributeStatisticsEvaluator)
from h_stats_report import HeuristicStatisticsReport


def mean(values):
    return sum(values) / len(values)


ATTRIBUTES = [
    "coverage", "error", "expansions_until_last_jump", "initial_h_value",
    "search_start_time", "search_start_memory", "split_time",
    Attribute("average_split_options", functions=mean, min_wins=False),
    Attribute("average_distinct_rated", functions=mean, min_wins=False)
]

NODE = platform.node()
if NODE.endswith(".scicore.unibas.ch") or NODE.endswith(".cluster.bc2.ch"):
    SUITE = [
        'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
        'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
        'data-network-opt18-strips', 'depot', 'driverlog',
        'elevators-opt08-strips', 'elevators-opt11-strips',
        'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
        'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
        'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery',
        'nomystery-opt11-strips', 'openstacks-opt08-strips',
        'openstacks-opt11-strips', 'openstacks-opt14-strips',
Code example #11
File: v4.py (project: Eldeeqq/bi-zum)
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('ms-parser.py')

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic',
                              absolute=True,
                              min_wins=False)

# m&s attributes
ms_construction_time = Attribute('ms_construction_time',
                                 absolute=False,
                                 min_wins=True,
                                 functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time',
                                        absolute=False,
                                        min_wins=True,
                                        functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                       absolute=True,
                                       min_wins=False)
# Flags below assumed by analogy with ms_abstraction_constructed.
ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed',
                                      absolute=True,
                                      min_wins=False)
Code example #12
File: __init__.py (project: bernardobgam/blocmarket)
class PlanningReport(Report):
    """
    This is the base class for planner reports.

    The :py:attr:`~INFO_ATTRIBUTES` and :py:attr:`~ERROR_ATTRIBUTES`
    class members hold attributes for Fast Downward experiments by
    default. You may want to adjust the two lists in derived classes.

    """
    #: List of predefined :py:class:`~Attribute` instances. If
    #: PlanningReport receives ``attributes=['coverage']``, it converts
    #: the plain string ``'coverage'`` to the attribute instance
    #: ``Attribute('coverage', absolute=True, min_wins=False, scale='linear')``.
    #: The list can be overridden in subclasses.
    PREDEFINED_ATTRIBUTES = [
        Attribute('cost', scale='linear'),
        Attribute('coverage', absolute=True, min_wins=False, scale='linear'),
        Attribute('dead_ends', min_wins=False),
        Attribute('evaluations', function=geometric_mean),
        Attribute('expansions', function=geometric_mean),
        Attribute('generated', function=geometric_mean),
        Attribute('initial_h_value',
                  min_wins=False,
                  scale='linear',
                  function=reports.finite_sum),
        Attribute('plan_length', scale='linear'),
        Attribute('planner_time', function=geometric_mean),
        Attribute('quality', absolute=True, min_wins=False),
        Attribute('score_*', min_wins=False, digits=4),
        Attribute('search_time', function=geometric_mean),
        Attribute('total_time', function=geometric_mean),
        Attribute('unsolvable', absolute=True, min_wins=False),
    ]

    #: Attributes shown in the algorithm info table. Can be overridden in
    #: subclasses.
    INFO_ATTRIBUTES = [
        'local_revision', 'global_revision', 'revision_summary',
        'build_options', 'driver_options', 'component_options'
    ]

    #: Attributes shown in the unexplained-errors table. Can be overridden
    #: in subclasses.
    ERROR_ATTRIBUTES = [
        'domain', 'problem', 'algorithm', 'unexplained_errors', 'error',
        'planner_wall_clock_time', 'raw_memory', 'node'
    ]

    def __init__(self, **kwargs):
        """
        See :class:`~lab.reports.Report` for inherited parameters.

        You can filter and modify runs for a report with
        :py:class:`filters <.Report>`. For example, you can include only
        a subset of algorithms or compute new attributes. If you provide
        a list for *filter_algorithm*, it will be used to determine the
        order of algorithms in the report.

        >>> # Use a filter function to select algorithms.
        >>> def only_blind_and_lmcut(run):
        ...     return run['algorithm'] in ['blind', 'lmcut']
        >>> report = PlanningReport(filter=only_blind_and_lmcut)

        >>> # Use "filter_algorithm" to select and *order* algorithms.
        >>> r = PlanningReport(filter_algorithm=['lmcut', 'blind'])

        :py:class:`Filters <.Report>` can be very helpful so we
        recommend reading up on them to use their full potential.

        """
        # Set non-default options for some attributes.
        attributes = tools.make_list(kwargs.get('attributes'))
        kwargs['attributes'] = [
            self._prepare_attribute(attr) for attr in attributes
        ]

        # Remember the algorithm order if it is given via the
        # *filter_algorithm* keyword argument.
        self.filter_algorithm = tools.make_list(kwargs.get('filter_algorithm'))

        Report.__init__(self, **kwargs)

    def _prepare_attribute(self, attr):
        predefined = {str(attr): attr for attr in self.PREDEFINED_ATTRIBUTES}
        if not isinstance(attr, Attribute):
            if attr in predefined:
                return predefined[attr]
            for pattern in predefined.values():
                if fnmatch(attr, pattern):
                    return pattern.copy(attr)
        return Report._prepare_attribute(self, attr)

    def _scan_data(self):
        self._scan_planning_data()
        Report._scan_data(self)

    def _scan_planning_data(self):
        problems = set()
        self.domains = defaultdict(list)
        self.problem_runs = defaultdict(list)
        self.domain_algorithm_runs = defaultdict(list)
        self.runs = {}
        for run in self.props.values():
            domain, problem, algo = (run['domain'], run['problem'],
                                     run['algorithm'])
            problems.add((domain, problem))
            self.problem_runs[(domain, problem)].append(run)
            self.domain_algorithm_runs[(domain, algo)].append(run)
            self.runs[(domain, problem, algo)] = run
        for domain, problem in problems:
            self.domains[domain].append(problem)

        self.algorithms = self._get_algorithm_order()

        if len(problems) * len(self.algorithms) != len(self.runs):
            logging.warning(
                'Not every algorithm has been run on every task. '
                'However, if you applied a filter this is to be '
                'expected. If not, there might be old properties in the '
                'eval-dir that got included in the report. '
                'Algorithms (%d): %s, problems (%d), domains (%d): %s, runs (%d)'
                %
                (len(self.algorithms), self.algorithms, len(problems),
                 len(self.domains), list(self.domains.keys()), len(self.runs)))

        # Sort each entry in problem_runs by algorithm.
        algo_to_index = {
            algorithm: index
            for index, algorithm in enumerate(self.algorithms)
        }

        def run_key(run):
            return algo_to_index[run['algorithm']]

        for problem_runs in self.problem_runs.values():
            problem_runs.sort(key=run_key)

        self.algorithm_info = self._scan_algorithm_info()

    def _scan_algorithm_info(self):
        info = {}
        for runs in self.problem_runs.values():
            for run in runs:
                info[run['algorithm']] = {
                    attr: run.get(attr, '?')
                    for attr in self.INFO_ATTRIBUTES
                }
            # We only need to scan the algorithms for one task.
            break
        return info

    def _get_node_names(self):
        return {
            run.get("node", "<attribute 'node' missing>")
            for run in self.runs.values()
        }

    def _get_warnings_text_and_table(self):
        """
        Return a :py:class:`Table <lab.reports.Table>` containing one line for
        each run where an unexplained error occurred.
        """
        if not self.ERROR_ATTRIBUTES:
            logging.critical('The list of error attributes must not be empty.')

        table = reports.Table(title='Unexplained errors')
        table.set_column_order(self.ERROR_ATTRIBUTES)

        wrote_to_slurm_err = any(
            'output-to-slurm.err' in run.get('unexplained_errors', [])
            for run in self.runs.values())

        num_unexplained_errors = 0
        for run in self.runs.values():
            error_message = tools.get_unexplained_errors_message(run)
            if error_message:
                logging.error(error_message)
                num_unexplained_errors += 1
                for attr in self.ERROR_ATTRIBUTES:
                    table.add_cell(run['run_dir'], attr, run.get(attr, '?'))

        if num_unexplained_errors:
            logging.error(
                'There were {num_unexplained_errors} runs with unexplained'
                ' errors.'.format(**locals()))

        errors = []

        if wrote_to_slurm_err:
            src_dir = self.eval_dir.rstrip('/')[:-len('-eval')]
            slurm_err_file = src_dir + '-grid-steps/slurm.err'
            try:
                slurm_err_content = tools.get_slurm_err_content(src_dir)
            except IOError:
                slurm_err_content = (
                    'The slurm.err file was missing while creating the report.'
                )
            else:
                slurm_err_content = tools.filter_slurm_err_content(
                    slurm_err_content)

            logging.error(
                'There was output to {slurm_err_file}.'.format(**locals()))

            errors.append(
                ' Contents of {slurm_err_file} without "memory cg"'
                ' errors:\n```\n{slurm_err_content}\n```'.format(**locals()))

        if table:
            errors.append(str(table))

        infai_1_nodes = {
            'ase{:02d}.cluster.bc2.ch'.format(i)
            for i in range(1, 25)
        }
        infai_2_nodes = {
            'ase{:02d}.cluster.bc2.ch'.format(i)
            for i in range(31, 55)
        }
        nodes = self._get_node_names()
        if nodes & infai_1_nodes and nodes & infai_2_nodes:
            errors.append(
                'Report combines runs from infai_1 and infai_2 partitions.')

        return '\n'.join(errors)

    def _get_algorithm_order(self):
        """
        Return a list of algorithms in the order determined by the user.

        If 'filter_algorithm' is given, algorithms are sorted in that
        order. Otherwise, they are sorted alphabetically.

        You can use the order of algorithms in your own custom report
        subclasses by accessing self.algorithms which is calculated in
        self._scan_planning_data.

        """
        all_algos = {run['algorithm'] for run in self.props.values()}
        if self.filter_algorithm:
            # Other filters may have changed the set of available algorithms by either
            # removing all runs for one algorithm or changing run['algorithm'] for a run.
            # Maintain the original order of algorithms and only keep algorithms that
            # still have runs after filtering. Then add all new algorithms
            # sorted naturally at the end.
            algo_order = (
                [c for c in self.filter_algorithm if c in all_algos] +
                tools.natural_sort(all_algos - set(self.filter_algorithm)))
        else:
            algo_order = tools.natural_sort(all_algos)
        return algo_order
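
As the class docstring notes, derived classes customize reports by overriding the class-level attribute lists. A minimal sketch of such a subclass; the attribute name is invented for illustration:

class MyPlanningReport(PlanningReport):
    # Show fewer columns in the algorithm info table.
    INFO_ATTRIBUTES = ['local_revision', 'build_options']
    # Register a custom attribute with non-default report options.
    PREDEFINED_ATTRIBUTES = PlanningReport.PREDEFINED_ATTRIBUTES + [
        Attribute('my_metric', min_wins=False, scale='linear'),
    ]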
Code example #13
File: showcase-options.py (project: silvansievers/lab)
                name="fetcher-test1",
                filter=only_two_algorithms)
exp.add_fetcher(dest=eval_dir(2),
                name="fetcher-test2",
                filter_algorithm="lama11")

# Add report steps.
exp.add_report(AbsoluteReport(attributes=["coverage", "cost"]),
               name="report-abs-d")
quality_filters = QualityFilters()
exp.add_report(
    AbsoluteReport(
        attributes=[
            "coverage",
            "cost",
            Attribute("quality", function=reports.arithmetic_mean),
        ],
        filter=[quality_filters.store_costs, quality_filters.add_quality],
    ),
    name="report-abs-builtin-filters",
)
exp.add_report(
    AbsoluteReport(attributes=["coverage"], filter=only_two_algorithms),
    name="report-abs-p-filter",
)
exp.add_report(
    AbsoluteReport(attributes=["coverage", "error"], format="tex"),
    outfile="report-abs-combined.tex",
)
exp.add_report(
    AbsoluteReport(attributes=["coverage", "error"], format="html"),
)
Code example #14
File: mas-refetch.py (project: yanxi0830/downward)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
    test_suite=['depot:pfile1'],
    processes=4,
    email='*****@*****.**',
)

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic',
                              absolute=True,
                              min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability',
                                 absolute=True,
                                 min_wins=False)
actual_search_time = Attribute('actual_search_time',
                               absolute=False,
                               min_wins=True,
                               functions=[gm])

# m&s attributes
ms_construction_time = Attribute('ms_construction_time',
                                 absolute=False,
                                 min_wins=True,
                                 functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                       absolute=True,
                                       min_wins=False)
Code example #15
File: v1.py (project: yanxi0830/downward)
if is_test_run():
    SUITE = [
        'depot:p01.pddl', 'depot:p02.pddl',
        'parcprinter-opt11-strips:p01.pddl',
        'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'
    ]
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_resource('ipdb_parser', 'ipdb-parser.py', dest='ipdb-parser.py')
exp.add_command('ipdb-parser', ['{ipdb_parser}'])
exp.add_suite(BENCHMARKS_DIR, SUITE)

# ipdb attributes
extra_attributes = [
    Attribute('hc_iterations', absolute=True, min_wins=True),
    Attribute('hc_num_patters', absolute=True, min_wins=True),
    Attribute('hc_size', absolute=True, min_wins=True),
    Attribute('hc_num_generated', absolute=True, min_wins=True),
    Attribute('hc_num_rejected', absolute=True, min_wins=True),
    Attribute('hc_max_pdb_size', absolute=True, min_wins=True),
    Attribute('hc_hill_climbing_time', absolute=False, min_wins=True, functions=[geometric_mean]),
    Attribute('hc_total_time', absolute=False, min_wins=True, functions=[geometric_mean]),
    Attribute('cpdbs_time', absolute=False, min_wins=True, functions=[geometric_mean]),
]
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
attributes.extend(extra_attributes)

exp.add_comparison_table_step(attributes=attributes)
exp.add_scatter_plot_step()
Code example #16
File: v1.py (project: yanxi0830/downward)
def main(revisions=None):
    benchmarks_dir = os.path.expanduser('~/repos/downward/benchmarks')
    suite = suites.suite_optimal_strips()

    configs = {
        IssueConfig('rl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'
        ]),
        IssueConfig('cggl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'
        ]),
        IssueConfig('dfp-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'
        ]),
        IssueConfig('rl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'
        ]),
        IssueConfig('cggl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'
        ]),
        IssueConfig('dfp-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'
        ]),
        IssueConfig('rl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))'
        ]),
        IssueConfig('cggl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))'
        ]),
        IssueConfig('dfp-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))'
        ]),
    }

    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])

    # planner outcome attributes
    perfect_heuristic = Attribute('perfect_heuristic',
                                  absolute=True,
                                  min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability',
                                     absolute=True,
                                     min_wins=False)
    actual_search_time = Attribute('actual_search_time',
                                   absolute=False,
                                   min_wins=True,
                                   functions=[gm])

    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time',
                                     absolute=False,
                                     min_wins=True,
                                     functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                           absolute=True,
                                           min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory',
                                 absolute=True,
                                 min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    search_out_of_memory = Attribute('search_out_of_memory',
                                     absolute=True,
                                     min_wins=True)
    search_out_of_time = Attribute('search_out_of_time',
                                   absolute=True,
                                   min_wins=True)

    extra_attributes = [
        perfect_heuristic,
        proved_unsolvability,
        actual_search_time,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        search_out_of_memory,
        search_out_of_time,
    ]
    attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
    attributes.extend(extra_attributes)

    exp.add_comparison_table_step()

    # if matplotlib:
    #     for attribute in ["memory", "total_time"]:
    #         for config in configs:
    #             exp.add_report(
    #                 RelativeScatterPlotReport(
    #                     attributes=[attribute],
    #                     filter_config=["{}-{}".format(rev, config.nick)
    #                                    for rev in revisions],
    #                     get_category=lambda run1, run2: run1.get("domain"),
    #                 ),
    #                 outfile="{}-{}-{}.png".format(exp.name, attribute,
    #                                               config.nick),
    #             )

    exp()
Code example #17
File: __init__.py (project: prost-planner/prostlab)
class PlanningReport(Report):
    """
    This is the base class for Prost planner reports.

    The :py:attr:`~INFO_ATTRIBUTES` and :py:attr:`~ERROR_ATTRIBUTES`
    class members hold attributes for Prost experiments by default.
    You may want to adjust the two lists in derived classes.

    """

    #: List of predefined :py:class:`~Attribute` instances. If
    #: PlanningReport receives ``attributes=['coverage']``, it converts
    #: the plain string ``'coverage'`` to the attribute instance
    #: ``Attribute('coverage', absolute=True, min_wins=False, scale='linear')``.
    #: The list can be overridden in subclasses.
    PREDEFINED_ATTRIBUTES = [
        Attribute("ipc_score", absolute=True, min_wins=False),
        
        # Attributes from prost_parser
        Attribute("total_time", function=geometric_mean),
        Attribute("total_reward", min_wins=False),
        Attribute("average_reward", min_wins=False),
        Attribute("round_reward", min_wins=False, function=elementwise_sum),
        
        # Attributes from thts_parser
        Attribute("entries_prob_state_value_cache", function=elementwise_max),
        Attribute("buckets_prob_state_value_cache", function=elementwise_max),
        Attribute("entries_prob_applicable_actions_cache", function=elementwise_max),
        Attribute("buckets_prob_applicable_actions_cache", function=elementwise_max),
        Attribute("rem_steps_first_solved_state", function=elementwise_arithmetic_mean, min_wins=False),
        Attribute("trials_first_relevant_state", function=elementwise_geometric_mean, min_wins=False),
        Attribute("search_nodes_first_relevant_state", function=elementwise_geometric_mean, min_wins=False),
        Attribute("perc_exploration_first_relevant_state", function=elementwise_sum),
        
        # Attributes from ids_parser
        Attribute("ids_learned_search_depth", min_wins=False),
        Attribute("entries_det_state_value_cache", function=elementwise_max),
        Attribute("buckets_det_state_value_cache", function=elementwise_max),
        Attribute("entries_det_applicable_actions_cache", function=elementwise_max),
        Attribute("buckets_det_applicable_actions_cache", function=elementwise_max),
        Attribute("entries_ids_reward_cache", function=elementwise_max),
        Attribute("buckets_ids_reward_cache", function=elementwise_max),
        Attribute("ids_avg_search_depth_first_relevant_state", function=elementwise_sum, min_wins=False),
        Attribute("ids_total_num_runs", function=elementwise_sum, min_wins=False),
        Attribute("ids_avg_search_depth_total", function=elementwise_sum, min_wins=False),
    ]

    #: Attributes shown in the algorithm info table. Can be overridden in
    #: subclasses.
    INFO_ATTRIBUTES = [
        "local_revision",
        "global_revision",
        "build_options",
        "parser_options",
        "driver_options",
        "search_engine",
    ]

    #: Attributes shown in the unexplained-errors table. Can be overridden
    #: in subclasses.
    ERROR_ATTRIBUTES = [
        "domain",
        "problem",
        "algorithm",
        "unexplained_errors",
        "planner_wall_clock_time",
        "raw_memory",
        "node",
    ]

    ERROR_LOG_MAX_LINES = 100

    def __init__(self, **kwargs):
        """
        See :class:`~lab.reports.Report` for inherited parameters.

        You can filter and modify runs for a report with
        :py:class:`filters <.Report>`. For example, you can include only
        a subset of algorithms or compute new attributes. If you provide
        a list for *filter_algorithm*, it will be used to determine the
        order of algorithms in the report.

        >>> # Use a filter function to select algorithms.
        >>> def only_prost2011_and_prost2014(run):
        ...     return run['algorithm'] in ['prost2011', 'prost2014']
        >>> report = PlanningReport(filter=only_prost2011_and_prost2014)

        >>> # Use "filter_algorithm" to select and *order* algorithms.
        >>> r = PlanningReport(filter_algorithm=['prost2014', 'prost2011'])

        :py:class:`Filters <.Report>` can be very helpful so we
        recommend reading up on them to use their full potential.

        """
        # Set non-default options for some attributes.
        attributes = tools.make_list(kwargs.get("attributes"))
        kwargs["attributes"] = [self._prepare_attribute(attr) for attr in attributes]

        # Remember the algorithm order if it is given via the
        # *filter_algorithm* keyword argument.
        self.filter_algorithm = tools.make_list(kwargs.get("filter_algorithm"))

        super().__init__(**kwargs)

    def _prepare_attribute(self, attr):
        predefined = {str(attr): attr for attr in self.PREDEFINED_ATTRIBUTES}
        if not isinstance(attr, Attribute):
            if attr in predefined:
                return predefined[attr]
            for pattern in predefined.values():
                if fnmatch(attr, pattern):
                    return pattern.copy(attr)
        return super()._prepare_attribute(attr)

    def _apply_filter(self):
        super()._apply_filter()
        if "ipc_score" in self.attributes:
            self._compute_ipc_scores()

    def _compute_ipc_scores(self):
        max_rewards = dict()
        for run in self.props.values():
            if run["max_reward"] is None and "average_reward" in run:
                reward = run["average_reward"]
                domain_name = run["domain"]
                problem_name = run["problem"]
                if (domain_name, problem_name) not in max_rewards:
                    max_rewards[(domain_name, problem_name)] = reward
                else:
                    max_rewards[(domain_name, problem_name)] = max(max_rewards[(domain_name, problem_name)], reward)
        for run in self.props.values():
            domain_name = run["domain"]
            problem_name = run["problem"]
            if (domain_name, problem_name) in max_rewards:
                run["max_reward"] = max_rewards[(domain_name, problem_name)]

            if "average_reward" not in run:
                run["ipc_score"] = 0.0
                continue
            avg_reward = run["average_reward"]
            min_reward = run["min_reward"]
            max_reward = run["max_reward"]
            dist = avg_reward - min_reward
            if dist > 0.0:
                span = max_reward - min_reward
                assert span > 0.0
                run["ipc_score"] =  dist / span
            else:
                run["ipc_score"] = 0.0

    def _scan_data(self):
        self._scan_planning_data()
        super()._scan_data()

    def _scan_planning_data(self):
        problems = set()
        self.domains = defaultdict(list)
        self.problem_runs = defaultdict(list)
        self.domain_algorithm_runs = defaultdict(list)
        self.runs = {}
        for run in self.props.values():
            domain, problem, algo = run["domain"], run["problem"], run["algorithm"]
            problems.add((domain, problem))
            self.problem_runs[(domain, problem)].append(run)
            self.domain_algorithm_runs[(domain, algo)].append(run)
            self.runs[(domain, problem, algo)] = run
        for domain, problem in problems:
            self.domains[domain].append(problem)

        self.algorithms = self._get_algorithm_order()

        num_unexplained_errors = sum(
            int(bool(tools.get_unexplained_errors_message(run)))
            for run in self.runs.values()
        )
        func = logging.info if num_unexplained_errors == 0 else logging.error
        func(
            "Report contains {num_unexplained_errors} runs with unexplained"
            " errors.".format(**locals())
        )

        if len(problems) * len(self.algorithms) != len(self.runs):
            logging.warning(
                f"Not every algorithm has been run on every task. "
                f"However, if you applied a filter this is to be "
                f"expected. If not, there might be old properties in the "
                f"eval-dir that got included in the report. "
                f"Algorithms ({len(self.algorithms)}): {self.algorithms},"
                f"problems ({len(problems)}), domains ({len(self.domains)}): "
                f"{list(self.domains.keys())}, runs ({len(self.runs)})"
            )

        # Sort each entry in problem_runs by algorithm.
        algo_to_index = {
            algorithm: index for index, algorithm in enumerate(self.algorithms)
        }

        def run_key(run):
            return algo_to_index[run["algorithm"]]

        for problem_runs in self.problem_runs.values():
            problem_runs.sort(key=run_key)

        self.algorithm_info = self._scan_algorithm_info()

    def _scan_algorithm_info(self):
        info = {}
        for runs in self.problem_runs.values():
            for run in runs:
                info[run["algorithm"]] = {
                    attr: run.get(attr, "?") for attr in self.INFO_ATTRIBUTES
                }
            # We only need to scan the algorithms for one task.
            break
        return info

    def _get_node_names(self):
        return {
            run.get("node", "<attribute 'node' missing>") for run in self.runs.values()
        }

    def _format_unexplained_errors(self, errors):
        """
        Preserve line breaks and white space. If text has more than
        ERROR_LOG_MAX_LINES lines, omit lines in the middle of the text.
        """
        linebreak = "\\\\"
        text = f"''{errors}''".replace("\\n", linebreak).replace(
            " ", markup.ESCAPE_WHITESPACE
        )
        lines = text.split(linebreak)
        if len(lines) <= self.ERROR_LOG_MAX_LINES:
            return text
        index = (self.ERROR_LOG_MAX_LINES - 2) // 2
        text = linebreak.join(lines[:index] + ["", "[...]", ""] + lines[-index:])
        assert text.startswith("''") and text.endswith("''"), text
        return text

    def _get_warnings_text_and_table(self):
        """
        Return a :py:class:`Table <lab.reports.Table>` containing one line for
        each run where an unexplained error occurred.
        """
        if not self.ERROR_ATTRIBUTES:
            logging.critical("The list of error attributes must not be empty.")

        table = Table(title="Unexplained errors")
        table.set_column_order(self.ERROR_ATTRIBUTES)

        wrote_to_slurm_err = any(
            "output-to-slurm.err" in run.get("unexplained_errors", [])
            for run in self.runs.values()
        )

        for run in self.runs.values():
            error_message = tools.get_unexplained_errors_message(run)
            if error_message:
                logging.error(error_message)
                run_dir = run["run_dir"]
                for attr in self.ERROR_ATTRIBUTES:
                    value = run.get(attr, "?")
                    if attr == "unexplained_errors":
                        value = self._format_unexplained_errors(value)
                        # Use formatted value as-is.
                        table.cell_formatters[run_dir][attr] = CellFormatter()
                    table.add_cell(run_dir, attr, value)

        errors = []

        if wrote_to_slurm_err:
            src_dir = self.eval_dir.rstrip("/")[: -len("-eval")]
            slurm_err_file = src_dir + "-grid-steps/slurm.err"
            try:
                slurm_err_content = tools.get_slurm_err_content(src_dir)
            except OSError:
                slurm_err_content = (
                    "The slurm.err file was missing while creating the report."
                )
            else:
                slurm_err_content = tools.filter_slurm_err_content(slurm_err_content)

            logging.error("There was output to {slurm_err_file}.".format(**locals()))

            errors.append(
                ' Contents of {slurm_err_file} without "memory cg"'
                " errors:\n```\n{slurm_err_content}\n```".format(**locals())
            )

        if table:
            errors.append(str(table))

        infai_1_nodes = {f"ase{i:02d}.cluster.bc2.ch" for i in range(1, 25)}
        infai_2_nodes = {f"ase{i:02d}.cluster.bc2.ch" for i in range(31, 55)}
        nodes = self._get_node_names()
        if nodes & infai_1_nodes and nodes & infai_2_nodes:
            errors.append("Report combines runs from infai_1 and infai_2 partitions.")

        return "\n".join(errors)

    def _get_algorithm_order(self):
        """
        Return a list of algorithms in the order determined by the user.

        If 'filter_algorithm' is given, algorithms are sorted in that
        order. Otherwise, they are sorted alphabetically.

        You can use the order of algorithms in your own custom report
        subclasses by accessing self.algorithms which is calculated in
        self._scan_planning_data.

        """
        all_algos = {run["algorithm"] for run in self.props.values()}
        if self.filter_algorithm:
            # Other filters may have changed the set of available algorithms by either
            # removing all runs for one algorithm or changing run['algorithm'] for a run.
            # Maintain the original order of algorithms and only keep algorithms that
            # still have runs after filtering. Then add all new algorithms
            # sorted naturally at the end.
            algo_order = [
                c for c in self.filter_algorithm if c in all_algos
            ] + tools.natural_sort(all_algos - set(self.filter_algorithm))
        else:
            algo_order = tools.natural_sort(all_algos)
        return algo_order
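
_compute_ipc_scores above implements the usual IPC reward normalization, score = (average_reward - min_reward) / (max_reward - min_reward), clamped to 0 when the average does not beat the minimum. A toy check with invented numbers:

# Invented values: the per-task minimum and maximum rewards over all
# algorithms, and one algorithm's average reward on that task.
min_reward, max_reward = -40.0, -10.0
avg_reward = -16.0
ipc_score = (avg_reward - min_reward) / (max_reward - min_reward)
assert abs(ipc_score - 0.8) < 1e-9  # 24 / 30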
Code example #18
File: project.py (project: FlorianPommerening/lab)
def parse_args():
    ARGPARSER.add_argument("--tex",
                           action="store_true",
                           help="produce LaTeX output")
    ARGPARSER.add_argument("--relative",
                           action="store_true",
                           help="make relative scatter plots")
    return ARGPARSER.parse_args()


ARGS = parse_args()
TEX = ARGS.tex
RELATIVE = ARGS.relative

EVALUATIONS_PER_TIME = Attribute("evaluations_per_time",
                                 min_wins=False,
                                 function=geometric_mean,
                                 digits=1)
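
evaluations_per_time is a derived attribute: the planner does not report it, so it is typically computed by a run filter from the standard evaluations and search_time attributes. A sketch; the threshold of 100 evaluations is an assumption to keep noisy ratios from tiny runs out of the geometric mean:

def add_evaluations_per_time(run):
    evaluations = run.get("evaluations")
    time = run.get("search_time")
    if evaluations is not None and evaluations >= 100 and time:
        run["evaluations_per_time"] = evaluations / time
    return run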

# Generated by "./suites.py satisficing" in aibasel/downward-benchmarks repo.
# fmt: off
SUITE_SATISFICING = [
    "agricola-sat18-strips",
    "airport",
    "assembly",
    "barman-sat11-strips",
    "barman-sat14-strips",
    "blocks",
    "caldera-sat18-adl",
    "caldera-split-sat18-adl",
    "cavediving-14-adl",
    "childsnack-sat14-strips",
Code example #19
File: v5.py (project: yanxi0830/downward)
def main(revisions=None):
    suite = suites.suite_optimal_with_ipc11()

    configs = {
        IssueConfig('rl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('cggl-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('dfp-b50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('rl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('cggl-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('dfp-ginf', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'
        ]),
        IssueConfig('rl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
        IssueConfig('cggl-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
        IssueConfig('dfp-f50k', [
            '--search',
            'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'
        ]),
    }

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        suite=suite,
        test_suite=['depot:pfile1'],
        processes=4,
        email='*****@*****.**',
    )
    exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
    exp.add_command('ms-parser', ['ms_parser'])

    # planner outcome attributes
    search_out_of_memory = Attribute('search_out_of_memory',
                                     absolute=True,
                                     min_wins=True)
    search_out_of_time = Attribute('search_out_of_time',
                                   absolute=True,
                                   min_wins=True)
    perfect_heuristic = Attribute('perfect_heuristic',
                                  absolute=True,
                                  min_wins=False)
    proved_unsolvability = Attribute('proved_unsolvability',
                                     absolute=True,
                                     min_wins=False)

    # m&s attributes
    ms_construction_time = Attribute('ms_construction_time',
                                     absolute=False,
                                     min_wins=True,
                                     functions=[gm])
    ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                           absolute=True,
                                           min_wins=False)
    ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
    ms_out_of_memory = Attribute('ms_out_of_memory',
                                 absolute=True,
                                 min_wins=True)
    ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
    ms_memory_delta = Attribute('ms_memory_delta',
                                absolute=False,
                                min_wins=True)

    extra_attributes = [
        search_out_of_memory,
        search_out_of_time,
        perfect_heuristic,
        proved_unsolvability,
        ms_construction_time,
        ms_abstraction_constructed,
        ms_final_size,
        ms_out_of_memory,
        ms_out_of_time,
        ms_memory_delta,
    ]
    attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class default
    attributes.extend(extra_attributes)

    exp.add_fetcher('data/issue604-v4-eval')

    exp.add_report(CompareConfigsReport(compared_configs=[
        ('issue604-v3-rl-b50k', 'issue604-v5-rl-b50k'),
        ('issue604-v3-cggl-b50k', 'issue604-v5-cggl-b50k'),
        ('issue604-v3-dfp-b50k', 'issue604-v5-dfp-b50k'),
        ('issue604-v3-rl-ginf', 'issue604-v5-rl-ginf'),
        ('issue604-v3-cggl-ginf', 'issue604-v5-cggl-ginf'),
        ('issue604-v3-dfp-ginf', 'issue604-v5-dfp-ginf'),
        ('issue604-v3-rl-f50k', 'issue604-v5-rl-f50k'),
        ('issue604-v3-cggl-f50k', 'issue604-v5-cggl-f50k'),
        ('issue604-v3-dfp-f50k', 'issue604-v5-dfp-f50k'),
    ],
                                        attributes=attributes),
                   outfile=os.path.join(exp.eval_dir,
                                        'issue604-v3-v5-comparison.html'))

    exp.add_report(CompareConfigsReport(compared_configs=[
        ('issue604-v4-rl-b50k', 'issue604-v5-rl-b50k'),
        ('issue604-v4-cggl-b50k', 'issue604-v5-cggl-b50k'),
        ('issue604-v4-dfp-b50k', 'issue604-v5-dfp-b50k'),
        ('issue604-v4-rl-ginf', 'issue604-v5-rl-ginf'),
        ('issue604-v4-cggl-ginf', 'issue604-v5-cggl-ginf'),
        ('issue604-v4-dfp-ginf', 'issue604-v5-dfp-ginf'),
        ('issue604-v4-rl-f50k', 'issue604-v5-rl-f50k'),
        ('issue604-v4-cggl-f50k', 'issue604-v5-cggl-f50k'),
        ('issue604-v4-dfp-f50k', 'issue604-v5-dfp-f50k'),
    ],
                                        attributes=attributes),
                   outfile=os.path.join(exp.eval_dir,
                                        'issue604-v4-v5-comparison.html'))

    exp()
Code example #20
        'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'
    ]
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['{ms_parser}'])
exp.add_suite(BENCHMARKS_DIR, SUITE)

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic',
                              absolute=True,
                              min_wins=False)

# m&s attributes
ms_construction_time = Attribute('ms_construction_time',
                                 absolute=False,
                                 min_wins=True,
                                 functions=[geometric_mean])
ms_atomic_construction_time = Attribute('ms_atomic_construction_time',
                                        absolute=False,
                                        min_wins=True,
                                        functions=[geometric_mean])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed',
                                       absolute=True,
                                       min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
Code example #21
from prost_plots import ListPlot, PlotAttribute, PlotAlgorithm, PlotDomain, PlotProblem


# Create custom report class with suitable info and error attributes.
class ProstBaseReport(AbsoluteReport):
    """Base report for Prost with information attributes that are present in any run
    and in any algorithm.

    """

    INFO_ATTRIBUTES = ["time_limit", "memory_limit"]


# Attributes to be displayed in the report.
ATTRIBUTES = [
    Attribute("ipc_score", min_wins=False, functions=arithmetic_mean),
    Attribute("num_runs", min_wins=False),
    Attribute("reward_step-all", min_wins=False),
    Attribute("round_reward-all", min_wins=False),
    Attribute("total_reward", min_wins=False),
    Attribute("average_reward", min_wins=False),
    Attribute("time", min_wins=True),
]

if len(sys.argv) < 2:
    print("Usage: ./reports.py [EXP PATH] [STEPS]\n")
    print(
        "Run the script only with a valid experiment path to see the steps in detail."
    )
    print(
        "(Note that the generic usage reported by Lab is different from the one for this script.)"
    )