def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev), Preprocessor(REPO, rev=rev), Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO, combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        exp.steps.insert(0, Step('rm-eval-dir', shutil.rmtree, exp.eval_dir, ignore_errors=True))
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='comparison')
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree, exp.preprocessed_tasks_dir))
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')

    exp()
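
# --- Hedged sketch (not part of the original script) ------------------------
# The snippet above relies on two helpers that are not shown here:
# parse_custom_args() and get_exp_dir(). One plausible minimal version,
# assuming argparse; the option names, suite choices and directory layout
# below are assumptions made for illustration only.
import argparse
import os

def parse_custom_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('revision', nargs='?', default='',
                        help='global revision, "baseline", or empty for the '
                             'current working copy')
    parser.add_argument('--test', choices=['nightly', 'weekly'],
                        default='nightly')
    return parser.parse_args()

def get_exp_dir(name, test):
    # Assumed layout: one experiment directory per revision name and suite.
    return os.path.join(os.path.expanduser('~/experiments'),
                        'regression-%s-%s' % (name, test))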
Example #2
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev),
              Preprocessor(REPO, rev=rev),
              Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test),
                             repo=REPO,
                             combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.preprocess_exp_path, exp.path, exp.eval_dir]
            if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_step(
            Step('fetch-baseline-results', Fetcher(),
                 get_exp_dir('baseline', args.test) + '-eval', exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step(
            Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(
            Step('rm-preprocessed-tasks', shutil.rmtree,
                 exp.preprocessed_tasks_dir))
        exp.add_step(Step('rm-eval-dir', shutil.rmtree, exp.eval_dir))

    exp()
Example #3
    def add_fetcher(self, src=None, dest=None, name=None, **kwargs):
        """
        Add a step that fetches results from experiment or
        evaluation directories into a new or existing evaluation
        directory. Use this method to combine results from multiple
        experiments.

        *src* can be an experiment or evaluation directory. It defaults to
        ``exp.path``.

        *dest* must be a new or existing evaluation directory. It
        defaults to ``exp.eval_dir``. If *dest* already contains
        data, the old and new data will be merged, not replaced.

        If no *name* is given, call this step "fetch-``basename(src)``".

        Valid keyword args:

        If *copy_all* is True (default: False), copy all files from the run
        dirs to a new directory tree at *dest*. Without this option only
        the combined properties file is written to disk.

        If *write_combined_props* is True (default), write the combined
        properties file.

        You can include only specific domains or configurations by using
        :py:class:`filters <.Report>`.

        *parsers* can be a list of paths to parser scripts. If given, each
        parser is called in each run directory and the results are added to
        the properties file which is fetched afterwards. This option is
        useful if you forgot to parse some attributes during the experiment.

        Examples:

        Merge the results from "other-exp" into this experiment's results::

            exp.add_fetcher(src='/path/to/other-exp-eval')

        Merge two evaluation directories at the location of the second one::

            exp.add_fetcher(src=eval_dir1, dest=combined_eval_dir, name='merge')

        Fetch only the runs for certain configurations from an older experiment::

            exp.add_fetcher(src='/path/to/eval-dir',
                            filter_config_nick=['config_1', 'config_5'])
        """
        src = src or self.path
        dest = dest or self.eval_dir
        name = name or 'fetch-%s' % os.path.basename(src)
        self.add_step(Step(name, Fetcher(), src, dest, **kwargs))
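
# Usage sketch for add_fetcher() above (added for illustration; the parser
# path is an assumption). Re-parse attributes that were missed during the
# experiment: each parser runs in every run directory and the refreshed
# properties are then fetched into the default evaluation directory.
exp.add_fetcher(src=exp.path, name='refetch',
                parsers=['path/to/custom-parser.py'])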
Example #4
    def add_fetcher(
        self, src=None, dest=None, merge=None, name=None, filter=None, **kwargs
    ):
        """
        Add a step that fetches results from experiment or evaluation
        directories into a new or existing evaluation directory.

        You can use this method to combine results from multiple
        experiments.

        *src* can be an experiment or evaluation directory. It defaults
        to ``exp.path``.

        *dest* must be a new or existing evaluation directory. It
        defaults to ``exp.eval_dir``. If *dest* already contains
        data and *merge* is None, the user is prompted whether to
        overwrite the existing data or to merge the old and new data.
        Setting *merge* to True merges the old data with the new data,
        and setting it to False replaces the old data; in both cases
        the user is not prompted.

        If no *name* is given, call this step "fetch-``basename(src)``".

        You can fetch only a subset of runs (e.g., runs for specific
        domains or algorithms) by passing :py:class:`filters <.Report>`
        with the *filter* argument.

        Example setup:

        >>> exp = Experiment("/tmp/exp")

        Fetch all results and write a single combined properties file
        to the default evaluation directory (this step is added by
        default):

        >>> exp.add_fetcher(name="fetch")

        Merge the results from "other-exp" into this experiment's
        results:

        >>> exp.add_fetcher(src="/path/to/other-exp-eval")

        Fetch only the runs for certain algorithms:

        >>> exp.add_fetcher(filter_algorithm=["algo_1", "algo_5"])

        """
        src = src or self.path
        dest = dest or self.eval_dir
        name = name or f"fetch-{os.path.basename(src.rstrip('/'))}"
        self.add_step(name, Fetcher(), src, dest, merge=merge, filter=filter, **kwargs)
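
# Usage sketch for the newer API above (added for illustration; the source
# path is an assumption). Pull an older experiment's results into this
# experiment's evaluation directory and merge them with any existing data,
# without prompting.
exp.add_fetcher(src="/path/to/older-exp-eval", merge=True, name="fetch-older")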
Example #5
def filter_and_transform(run):
    """Remove "WORK-" from the configs and only include certain configurations.

    This also demonstrates a nested filter (one that calls another filter)."""
    if not only_two_configs(run):
        return False
    return remove_work_tag(run)
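
# Hedged sketches of the two filters used above; their definitions are not
# part of this snippet, so the attribute values checked here are assumptions.
# A lab filter may return True/False to keep or drop a run, or return a
# (possibly modified) run dictionary.
def only_two_configs(run):
    return run['config_nick'] in ('lama11', 'iter-hadd')

def remove_work_tag(run):
    # Strip the "WORK-" revision prefix from the config attribute.
    run['config'] = run['config'].replace('WORK-', '')
    return run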


# Check that the various fetcher options work.
def eval_dir(num):
    return os.path.join(exp.eval_dir, 'test%d' % num)


exp.add_step(
    Step('fetcher-test1', Fetcher(), exp.path, eval_dir(1), copy_all=True))
exp.add_step(
    Step('fetcher-test2',
         Fetcher(),
         exp.path,
         eval_dir(2),
         copy_all=True,
         write_combined_props=True))
exp.add_step(
    Step('fetcher-test3',
         Fetcher(),
         exp.path,
         eval_dir(3),
         filter_config_nick='lama11'))
exp.add_step(
    Step('fetcher-test4',
Example #6
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    'astar_blind': ['--search', 'astar(blind())'],
    'astar_ipdb': ['--search', 'astar(ipdb())'],
    'astar_lmcut': ['--search', 'astar(lmcut())'],
    'astar_pdb': ['--search', 'astar(pdb())'],
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
exp.add_step(Step('refetch', Fetcher(), exp.path,
                  parsers=['custom-parser.py']))

attributes = exp.DEFAULT_TABLE_ATTRIBUTES + [
    "successor_generator_time", "reopened_until_last_jump"
]
exp.add_comparison_table_step(attributes=attributes)

for conf in CONFIGS:
    for attr in ("memory", "search_time"):
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attr],
                get_category=lambda run1, run2: run1.get("domain"),
                filter_config=["issue547-base-%s" % conf,
                               "issue547-v1-%s" % conf]),
            outfile='issue547_base_v1_%s_%s.png' % (conf, attr))