Exemplo n.º 1
0
 def make_scatter_plot(config_nick, rev1, rev2, attribute):
     """Write one scatter plot comparing *rev1* vs. *rev2* for
     *config_nick* on *attribute*, categorized by domain.

     Uses ``self``, ``scatter_dir`` and ``os`` from the enclosing scope.
     """
     name = "-".join([self.name, rev1, rev2, attribute, config_nick])
     # Bug fix: the original Python-2-only print statement is a syntax
     # error under Python 3; %-formatting keeps the output identical on
     # both versions ("Make scatter plot for <name>").
     print("Make scatter plot for %s" % name)
     algo1 = "%s-%s" % (rev1, config_nick)
     algo2 = "%s-%s" % (rev2, config_nick)
     report = ScatterPlotReport(
         filter_config=[algo1, algo2],
         attributes=[attribute],
         get_category=lambda run1, run2: run1["domain"],
         legend_location=(1.3, 0.5))
     # Plots are grouped into one subdirectory per revision pair.
     report(self.eval_dir,
            os.path.join(scatter_dir, rev1 + "-" + rev2, name))
Exemplo n.º 2
0
 def make_scatter_plots():
     """Generate one scatter plot per (config nick, attribute) pair,
     comparing the first two revisions against each other.

     Relies on ``self``, ``revisions``, ``attributes``, ``scatter_dir``
     and ``os`` from the enclosing scope.
     """
     for nick in (conf[0] for conf in self.configs):
         old_algo = "%s-%s" % (revisions[0], nick)
         new_algo = "%s-%s" % (revisions[1], nick)
         for attribute in attributes:
             plot_name = "%s-%s-%s" % (self._report_prefix, attribute, nick)
             plot = ScatterPlotReport(
                 filter_config=[old_algo, new_algo],
                 attributes=[attribute],
                 get_category=lambda run1, run2: run1["domain"],
                 legend_location=(1.3, 0.5))
             plot(self.eval_dir, os.path.join(scatter_dir, plot_name))
Exemplo n.º 3
0
 def __init__(self,
              show_missing=True,
              get_category=None,
              xlim_left=None,
              xlim_right=None,
              ylim_bottom=None,
              ylim_top=None,
              tick_size=None,
              label_size=None,
              title_size=None,
              **kwargs):
     """Scatter plot report with configurable axis limits and font sizes.

     All extra keyword arguments are forwarded to ScatterPlotReport.
     """
     ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
     # Remember axis-limit options for the plot writer.
     self.xlim_left, self.xlim_right = xlim_left, xlim_right
     self.ylim_bottom, self.ylim_top = ylim_bottom, ylim_top
     # Font-size options (None means "use the backend default").
     self.tick_size = tick_size
     self.label_size = label_size
     self.title_size = title_size
     # Choose the writer backend matching the requested output format.
     writer_by_format = {'tex': RelativeScatterPgfPlots}
     self.writer = writer_by_format.get(
         self.output_format, RelativeScatterMatplotlib)
Exemplo n.º 4
0
 def make_scatter_plots():
     """Create scatter plots for every revision pair and configuration;
     portfolio configurations are restricted to their valid attributes.

     Uses ``self``, ``attributes``, ``scatter_dir``, ``is_portfolio``,
     ``os`` and ``itertools`` from the enclosing scope.
     """
     for config_nick in self._config_nicks:
         for rev1, rev2 in itertools.combinations(
                 self.revision_nicks, 2):
             algo1 = "%s-%s" % (rev1, config_nick)
             algo2 = "%s-%s" % (rev2, config_nick)
             if is_portfolio(config_nick):
                 # Portfolios only report the subset of attributes listed
                 # in PORTFOLIO_ATTRIBUTES.
                 valid_attributes = [
                     attr for attr in attributes
                     if attr in self.PORTFOLIO_ATTRIBUTES
                 ]
             else:
                 valid_attributes = attributes
             for attribute in valid_attributes:
                 name = "-".join([rev1, rev2, attribute, config_nick])
                 # Bug fix: the original Python-2-only print statement is
                 # a syntax error under Python 3; %-formatting keeps the
                 # output identical on both versions.
                 print("Make scatter plot for %s" % name)
                 report = ScatterPlotReport(
                     filter_config=[algo1, algo2],
                     attributes=[attribute],
                     get_category=lambda run1, run2: run1["domain"],
                     legend_location=(1.3, 0.5))
                 report(self.eval_dir, os.path.join(scatter_dir, name))
Exemplo n.º 5
0
    outfile="report-abs-combined.tex",
)
# Combined coverage/error report rendered as HTML.
exp.add_report(
    AbsoluteReport(attributes=["coverage", "error"], format="html"),
    outfile="report-abs-combined.html",
)
# Dump the filtered properties file under <eval_dir>/filter-eval/.
exp.add_report(FilterReport(),
               outfile=os.path.join(exp.eval_dir, "filter-eval", "properties"))


def get_domain(run1, run2):
    """Category function for scatter plots: group runs by domain.

    Only the first run is consulted; *run2* is accepted to satisfy the
    two-argument ``get_category`` callback interface.
    """
    domain = run1["domain"]
    return domain


# Absolute scatter plot of plan cost for the two selected algorithms.
exp.add_report(
    ScatterPlotReport(attributes=["cost"],
                      filter_algorithm=["iter-hadd", "lama11"]),
    name="report-scatter",
    outfile=os.path.join("plots", "scatter.png"),
)

# Same algorithm pair, but plotting relative cost on a log scale.
exp.add_report(
    ScatterPlotReport(
        relative=True,
        attributes=["cost"],
        filter_algorithm=["iter-hadd", "lama11"],
        scale="log",
    ),
    name="report-relative-scatter",
    outfile=os.path.join("plots", "relative-scatter.png"),
)
Exemplo n.º 6
0
 def __init__(self, show_missing=True, get_category=None, **kwargs):
     """Relative scatter plot report; only the matplotlib backend is
     supported.

     Raises NotImplementedError when 'tex' output is requested.
     """
     ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
     if self.output_format == 'tex':
         # Bug fix: raising a plain string is itself a TypeError in
         # Python >= 2.6; raise a real exception instead.
         raise NotImplementedError("not supported")
     else:
         self.writer = RelativeScatterMatplotlib
Exemplo n.º 7
0
# Built-in parsers that extract exit codes, translator, search and
# overall planner statistics from the run logs.
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

# Benchmarks and the two A* configurations (blind vs. lmcut) to compare.
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_algorithm("blind", REPO, REV, ["--search", "astar(blind())"])
exp.add_algorithm("lmcut", REPO, REV, ["--search", "astar(lmcut())"])

# Add step that writes experiment files to disk.
exp.add_step("build", exp.build)

# Add step that executes all runs.
exp.add_step("start", exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

# Add report step (AbsoluteReport is the standard report).
exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile="report.html")

# Add scatter plot report step.
exp.add_report(
    ScatterPlotReport(attributes=["expansions"], filter_algorithm=["blind", "lmcut"]),
    outfile="scatterplot.png",
)

# Parse the commandline and show or run experiment steps.
exp.run_steps()
Exemplo n.º 8
0
    if run['domain'] in invalid_domains:
        return False
    else:
        return True


# NOTE(review): hard-coded absolute paths — this script only works on the
# original author's machine.
exp = Experiment('/home/blaas/work/projects/grounder/experiments/combine-with-clingo/data/ipc')
# Merge previously computed results from two earlier experiments into
# this experiment's evaluation directory.
exp.add_fetcher('/home/blaas/work/projects/asp-grounding-planning/experiments/clingo-exp/data/ipc-eval',
                name='clingo')
exp.add_fetcher('/home/blaas/work/projects/grounder/experiments/first-experiment/data/ipc-eval',
                name='new-grounder-and-fd-grounder')
# Table of total_time after applying the filter functions.
exp.add_report(BaseReport(attributes=['total_time'],
                           filter=[remove_timeouts, create_same_attr]),
               outfile='ipc.html')
# Pairwise total_time scatter plots (symlog scale, LaTeX output).
exp.add_report(ScatterPlotReport(attributes=['total_time'],
                           filter_algorithm=['new-grounder', 'clingo'],
                           filter=[remove_timeouts, create_same_attr, get_valid],
                           scale='symlog',
                           format='tex'),
               outfile='ipc-new-grounder-vs-clingo.tex')
exp.add_report(ScatterPlotReport(attributes=['total_time'],
                           filter_algorithm=['new-grounder', 'fd-grounder'],
                           filter=[remove_timeouts, create_same_attr, get_valid],
                           scale='symlog',
                           format='tex'),
               outfile='ipc-new-grounder-vs-fd-grounder.tex')



exp.run_steps()
Exemplo n.º 9
0
# Make a basic table report with IPC scores.
ipc_scores = IPCScores()

exp.add_report(
    ProstBaseReport(
        attributes=ATTRIBUTES, filter=[ipc_scores.store_rewards, ipc_scores.add_score]
    ),
    outfile="report{}.html".format(suffix),
)

# Make a scatter plot report.
# Compares average reward of the two IPC baselines, one point per task,
# colored by domain via the get_category callback.
exp.add_report(
    ScatterPlotReport(
        attributes=["average_reward"],
        filter_algorithm=["IPC2011", "IPC2014"],
        xscale="linear",
        yscale="linear",
        get_category=domain_as_category,
    ),
    outfile="scatterplot{}.png".format(suffix),
)

# Make a scatter plot report for IPC scores using filters.
exp.add_report(
    ScatterPlotReport(
        attributes=["ipc_score"],
        filter_algorithm=["IPC2011", "IPC2014"],
        filter=[ipc_scores.store_rewards, ipc_scores.add_score],
        xscale="linear",
        yscale="linear",
        get_category=domain_as_category,
Exemplo n.º 10
0
# Benchmarks: one gripper problem and one zenotravel problem.
exp.add_suite(['gripper:prob01.pddl'])
exp.add_suite('zenotravel:pfile2')
# Two single-heuristic lazy-search configurations plus a portfolio.
exp.add_config('ff', ['--search', 'lazy(single(ff()))'])
exp.add_config('add', ['--search', 'lazy(single(add()))'])
exp.add_portfolio(
    os.path.join(REPO, 'src', 'search', 'downward-seq-sat-fdss-1.py'))

# Per-problem absolute report.
exp.add_report(AbsoluteReport('problem'),
               name='make-report',
               outfile='report-abs-p.html')


def solved(run):
    """Filter predicate: keep only runs that solved their task
    (i.e. whose coverage attribute equals 1)."""
    coverage_value = run['coverage']
    return coverage_value == 1


# Write a suite file listing only the solved runs.
exp.add_step(
    Step('suite', SuiteReport(filter=solved), exp.eval_dir,
         os.path.join(exp.eval_dir, 'suite.py')))

# PNG scatter plot of expansions for the 'ff' and 'add' configurations.
exp.add_step(
    Step(
        'scatter',
        ScatterPlotReport(filter_config_nick=['ff', 'add'],
                          attributes='expansions',
                          format='png'), exp.eval_dir,
        os.path.join(exp.eval_dir, 'scatter')))

# Parse the command line and show or run the experiment steps.
exp()
Exemplo n.º 11
0
# Experiment comparing the given revisions, with an extra parser that
# extracts state-size statistics.
exp = common_setup.MyExperiment(
    grid_priority=PRIORITY,
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    parsers=['state_size_parser.py'],
)

# Comparison table extended with the state-size attributes.
exp.add_comparison_table_step(
    attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ['bytes_per_state', 'variables', 'state_var_t_size'])
exp.add_scatter_plot_step()

# Scatter plot of bytes_per_state for the blind-search configuration.
exp.add_report(ScatterPlotReport(
    attributes=['bytes_per_state'],
    filter_config_nick='astar_blind',
),
               outfile='issue214_bytes_per_state.png')

# Memory and runtime scatter plots for each configuration.
for config_nick in [
        'astar_blind', 'astar_lmcut', 'astar_merge_and_shrink_bisim',
        'astar_ipdb'
]:
    for attr in ['memory', 'total_time']:
        exp.add_report(ScatterPlotReport(
            attributes=[attr],
            filter_config_nick=config_nick,
        ),
                       outfile='issue214_%s_%s.png' % (attr, config_nick))

exp()
Exemplo n.º 12
0

def sat_vs_opt(run):
    """Category function mapping a run to {category: [(config, expansions)]}.

    The category ('sat' or 'opt') is picked by the first known nick (in
    declaration order) that occurs as a substring of the run's
    config_nick.  Returns None when no nick matches.
    """
    nick_to_category = {
        'lama11': 'sat',
        'iter-hadd': 'sat',
        'ipdb': 'opt',
        'fdss': 'opt'
    }
    config_nick = run['config_nick']
    for nick, category in nick_to_category.items():
        if nick not in config_nick:
            continue
        # expansions may be missing for some runs, hence .get().
        return {category: [(run['config'], run.get('expansions'))]}


# Scatter plot of expansions for the optimal FDSS portfolio vs. lama11.
exp.add_report(ScatterPlotReport(
    attributes=['expansions'],
    filter_config_nick=['downward-seq-opt-fdss-1.py', 'lama11']),
               name='report-scatter',
               outfile=os.path.join('plots', 'scatter.png'))

params = {
    'font.family': 'serif',
    'font.weight': 'normal',
    'font.size': 20,  # Only used if the more specific sizes are not set.
    'axes.labelsize': 20,
    'axes.titlesize': 30,
    'legend.fontsize': 22,
    'xtick.labelsize': 10,
    'ytick.labelsize': 10,
    'lines.markersize': 10,
    'lines.markeredgewidth': 0.25,
Exemplo n.º 13
0
# Coverage table restricted to two algorithms by a filter function.
exp.add_report(AbsoluteReport(attributes=['coverage'],
                              filter=only_two_algorithms),
               name='report-abs-p-filter')
# Combined coverage/error report in both LaTeX and HTML formats.
exp.add_report(AbsoluteReport(attributes=['coverage', 'error'], format='tex'),
               outfile='report-abs-combined.tex')
exp.add_report(AbsoluteReport(attributes=['coverage', 'error'], format='html'),
               outfile='report-abs-combined.html')
# Dump the filtered properties file under <eval_dir>/filter-eval/.
exp.add_report(FilterReport(),
               outfile=os.path.join(exp.eval_dir, 'filter-eval', 'properties'))


def get_domain(run1, run2):
    """Return the domain of the first run.

    *run2* is ignored; it exists only because get_category callbacks
    receive both runs of a comparison.
    """
    first_run = run1
    return first_run['domain']


# Scatter plot of plan cost comparing the two selected algorithms.
exp.add_report(ScatterPlotReport(attributes=['cost'],
                                 filter_algorithm=['iter-hadd', 'lama11']),
               name='report-scatter',
               outfile=os.path.join('plots', 'scatter.png'))

matplotlib_options = {
    'font.family': 'serif',
    'font.weight': 'normal',
    'font.size': 20,  # Only used if the more specific sizes are not set.
    'axes.labelsize': 20,
    'axes.titlesize': 30,
    'legend.fontsize': 22,
    'xtick.labelsize': 10,
    'ytick.labelsize': 10,
    'lines.markersize': 10,
    'lines.markeredgewidth': 0.25,
    'lines.linewidth': 1,
    def __init__(self, path, repo, opt_or_sat, rev, base_rev=None,
                 use_core_configs=True, use_ipc_configs=True, use_extended_configs=False,
                 **kwargs):
        """
        See :py:class:`DownwardExperiment <downward.experiments.DownwardExperiment>`
        for inherited parameters.

        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. This repository
        is used to search for problem files.

        If *opt_or_sat* is 'opt', configurations for optimal planning will be
        tested on all domains suited for optimal planning. If it is 'sat',
        configurations for satisficing planning will be tested on the
        satisficing suite.

        *rev* determines the new revision to test.

        If *base_rev* is None (default), the latest revision on the branch default
        that is an ancestor of *rev* will be used.

        *use_core_configs* determines if the most common configurations are tested
        (default: True).

        *use_ipc_configs* determines if the configurations used in the IPCs are tested
        (default: True).

        *use_extended_configs* determines if some less common configurations are tested
        (default: False).

        """
        # Bug fix: only derive the common ancestor when the caller did not
        # pass *base_rev*.  The old code unconditionally overwrote the
        # parameter, contradicting the documented behavior above.
        if base_rev is None:
            base_rev = checkouts.get_common_ancestor(repo, rev)
        # One translator/preprocessor/planner combo per revision.
        combos = [(Translator(repo, rev=r),
                   Preprocessor(repo, rev=r),
                   Planner(repo, rev=r))
                  for r in (base_rev, rev)]
        DownwardExperiment.__init__(self, path, repo, combinations=combos, **kwargs)

        # ------ suites and configs ------------------------------------

        if opt_or_sat == 'opt':
            self.add_suite(suite_optimal_with_ipc11())
            configs = default_configs_optimal(use_core_configs,
                                              use_ipc_configs,
                                              use_extended_configs)
        elif opt_or_sat == 'sat':
            self.add_suite(suite_satisficing_with_ipc11())
            configs = default_configs_satisficing(use_core_configs,
                                                  use_ipc_configs,
                                                  use_extended_configs)
        else:
            logging.critical('Select to test either \'opt\' or \'sat\' configurations')

        for nick, command in configs.items():
            self.add_config(nick, command)

        # ------ reports -----------------------------------------------

        # Table comparing the scores of the base and the new revision.
        comparison = CompareRevisionsReport(base_rev,
                                            rev,
                                            attributes=COMPARED_ATTRIBUTES)
        self.add_report(comparison,
                        name='report-compare-scores',
                        outfile='report-compare-scores.html')

        # One scatter plot per (config, attribute) pair, categorized by domain.
        for nick in configs.keys():
            config_before = '%s-%s' % (base_rev, nick)
            config_after = '%s-%s' % (rev, nick)
            for attribute in SCATTER_PLOT_ATTRIBUTES:
                name = 'scatter-%s-%s' % (attribute, nick)
                self.add_report(
                    ScatterPlotReport(
                        filter_config=[config_before, config_after],
                        attributes=[attribute],
                        get_category=lambda run1, run2: run1['domain']),
                    outfile=name)
 def __init__(self, show_missing=True, get_category=None, **kwargs):
     """Relative scatter plot report; only the matplotlib backend is
     supported.

     Raises NotImplementedError when 'tex' output is requested.
     """
     ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
     if self.output_format == 'tex':
         # Bug fix: raising a plain string is itself a TypeError in
         # Python >= 2.6; raise a real exception instead.
         raise NotImplementedError("not supported")
     else:
         self.writer = RelativeScatterMatplotlib
Exemplo n.º 16
0
from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport

import common_setup

# Revisions under comparison and the single LM-cut A* configuration.
SEARCH_REVS = ["issue528-base", "issue528-v3"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {"astar_lmcut": ["--search", "astar(lmcut())"]}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
)

# Standard absolute report and base-vs-v3 comparison table.
exp.add_absolute_report_step()
exp.add_comparison_table_step()

# Memory and runtime scatter plots comparing base against v3.
for attr in ("memory", "total_time"):
    exp.add_report(ScatterPlotReport(
        attributes=[attr],
        filter_config=[
            "issue528-base-astar_lmcut",
            "issue528-v3-astar_lmcut",
        ],
    ),
                   outfile='issue528_base_v3_%s.png' % attr)

# Parse the command line and show or run the experiment steps.
exp()