Example No. 1
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites, configs
from downward.reports.compare import CompareConfigsReport

import common_setup

REVISIONS = ["issue462-base", "issue462-v1"]

exp = common_setup.IssueExperiment(
    search_revisions=REVISIONS,
    configs=configs.default_configs_satisficing(),
    suite=suites.suite_satisficing_with_ipc11(),
    limits={"search_time": 300},
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()


def grouped_configs_to_compare(config_nicks):
    grouped_configs = []
    for config_nick in config_nicks:
        col_names = ["%s-%s" % (r, config_nick) for r in REVISIONS]
        grouped_configs.append((col_names[0], col_names[1], "Diff - %s" % config_nick))
    return grouped_configs
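
# Illustration (hypothetical config nick): with REVISIONS = ["issue462-base",
# "issue462-v1"] and a config nick "cfg", the function yields the tuple
# ("issue462-base-cfg", "issue462-v1-cfg", "Diff - cfg").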


exp.add_report(
    CompareConfigsReport(
        compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()),
        # remaining report arguments are truncated in the original listing
    ))
Example No. 2
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward.suites import suite_satisficing_with_ipc11
from downward.configs import default_configs_satisficing
from downward.reports.scatter import ScatterPlotReport

import common_setup


REVS = ["issue214-base", "issue214-v2"]
CONFIGS = default_configs_satisficing()

TEST_RUN = True

if TEST_RUN:
    SUITE = "gripper:prob01.pddl"
    PRIORITY = None  # "None" means local experiment
else:
    SUITE = suite_satisficing_with_ipc11()
    PRIORITY = 0     # number means maia experiment


exp = common_setup.MyExperiment(
    grid_priority=PRIORITY,
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    parsers=['state_size_parser.py'],
    )
Example No. 3
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites, configs
from downward.reports.compare import CompareConfigsReport

import common_setup

REVISIONS = ["issue425-base", "issue425-v1"]

exp = common_setup.IssueExperiment(
    search_revisions=REVISIONS,
    configs=configs.default_configs_satisficing(),
    suite=suites.suite_satisficing_with_ipc11(),
    limits={"search_time": 300})
exp.add_absolute_report_step()
exp.add_comparison_table_step()


def grouped_configs_to_compare(config_nicks):
    grouped_configs = []
    for config_nick in config_nicks:
        col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS]
        grouped_configs.append(
            (col_names[0], col_names[1], 'Diff - %s' % config_nick))
    return grouped_configs


exp.add_report(CompareConfigsReport(
    compared_configs=grouped_configs_to_compare(
        configs.configs_satisficing_core()),
    # remaining report arguments are truncated in the original listing
))
Example No. 4
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward.suites import suite_satisficing_with_ipc11
from downward.configs import default_configs_satisficing
from downward.reports.scatter import ScatterPlotReport

import common_setup

REVS = ["issue214-base", "issue214-v2"]
CONFIGS = default_configs_satisficing()

TEST_RUN = True

if TEST_RUN:
    SUITE = "gripper:prob01.pddl"
    PRIORITY = None  # "None" means local experiment
else:
    SUITE = suite_satisficing_with_ipc11()
    PRIORITY = 0  # number means maia experiment

exp = common_setup.MyExperiment(
    grid_priority=PRIORITY,
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    parsers=['state_size_parser.py'],
)

exp.add_comparison_table_step(
    attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES
    # + [...]: the extra attributes appended here are truncated in the original listing
)

Example No. 5
    def __init__(self, path, repo, opt_or_sat, rev, base_rev=None,
                 use_core_configs=True, use_ipc_configs=True, use_extended_configs=False,
                 **kwargs):
        """
        See :py:class:`DownwardExperiment <downward.experiments.DownwardExperiment>`
        for inherited parameters.

        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. This repository
        is used to search for problem files.

        If *opt_or_sat* is 'opt', configurations for optimal planning will be
        tested on all domains suited for optimal planning. If it is 'sat',
        configurations for satisficing planning will be tested on the
        satisficing suite.

        *rev* determines the new revision to test.

        If *base_rev* is None (default), the latest revision on the "default"
        branch that is an ancestor of *rev* is used.

        *use_core_configs* determines whether the most common configurations are
        tested (default: True).

        *use_ipc_configs* determines whether the configurations used in the IPCs
        are tested (default: True).

        *use_extended_configs* determines whether some less common configurations
        are tested (default: False).

        """
        if base_rev is None:
            base_rev = checkouts.get_common_ancestor(repo, rev)
        combos = [(Translator(repo, rev=r),
                   Preprocessor(repo, rev=r),
                   Planner(repo, rev=r))
                  for r in (base_rev, rev)]
        DownwardExperiment.__init__(self, path, repo, combinations=combos, **kwargs)

        # ------ suites and configs ------------------------------------

        if opt_or_sat == 'opt':
            self.add_suite(suite_optimal_with_ipc11())
            configs = default_configs_optimal(use_core_configs,
                                              use_ipc_configs,
                                              use_extended_configs)
        elif opt_or_sat == 'sat':
            self.add_suite(suite_satisficing_with_ipc11())
            configs = default_configs_satisficing(use_core_configs,
                                                  use_ipc_configs,
                                                  use_extended_configs)
        else:
            logging.critical('opt_or_sat must be either "opt" or "sat"')

        for nick, command in configs.items():
            self.add_config(nick, command)

        # ------ reports -----------------------------------------------

        comparison = CompareRevisionsReport(base_rev,
                                            rev,
                                            attributes=COMPARED_ATTRIBUTES)
        self.add_report(comparison,
                        name='report-compare-scores',
                        outfile='report-compare-scores.html')

        for nick in configs.keys():
            config_before = '%s-%s' % (base_rev, nick)
            config_after = '%s-%s' % (rev, nick)
            for attribute in SCATTER_PLOT_ATTRIBUTES:
                name = 'scatter-%s-%s' % (attribute, nick)
                self.add_report(
                    ScatterPlotReport(
                        filter_config=[config_before, config_after],
                        attributes=[attribute],
                        get_category=lambda run1, run2: run1['domain']),
                    outfile=name)
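
A minimal usage sketch for the constructor documented in Example No. 5, assuming it is exposed from common_setup; the class name StandardIssueExperiment, the paths, and the revision label below are placeholders, not taken from the listing.

import common_setup

# Hypothetical usage of the constructor above; the class name, the paths and
# the revision label are assumptions for illustration only.
exp = common_setup.StandardIssueExperiment(
    path="data/issue-exp",          # directory in which the experiment is built
    repo="/path/to/fast-downward",  # Fast Downward repository, also used for problem files
    opt_or_sat="sat",               # test the satisficing configurations on the satisficing suite
    rev="issueXYZ-v1",              # new revision; base_rev=None compares against the branch point
)
exp()  # parse the command line and run the requested experiment steps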