Example #1
    def add_absolute_report_step(self, **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't
        compare revisions.

        The report is written to the experiment evaluation directory.

        All *kwargs* will be passed to the AbsoluteReport class. If
        the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = AbsoluteReport(**kwargs)
        # outfile is of the form <rev1>-<rev2>-...-<revn>.<format>
        outfile = "%s.%s" % ("-".join(self.revisions), report.output_format)
        self.add_report(report, outfile=outfile)
        self.add_step(
            Step('publish-absolute-report', subprocess.call,
                 ['publish', outfile]))
    def add_step(self, name, function, *args, **kwargs):
        """Add a step to the list of experiment steps.

        Use this method to add experiment steps like writing the
        experiment file to disk, removing directories and publishing
        results. To add fetch and report steps, use the convenience
        methods :meth:`.add_fetcher` and :meth:`.add_report`.

        *name* is a descriptive name for the step. When selecting steps
        on the command line, you may either use step names or their
        indices.

        *function* must be a callable Python object, e.g., a function
        or a class implementing `__call__`.

        *args* and *kwargs* will be passed to *function* when the step
        is executed.

        >>> import shutil
        >>> import subprocess
        >>> from lab.experiment import Experiment
        >>> exp = Experiment('/tmp/myexp')
        >>> exp.add_step('build', exp.build)
        >>> exp.add_step('start', exp.start_runs)
        >>> exp.add_step('rm-eval-dir', shutil.rmtree, exp.eval_dir)
        >>> exp.add_step('greet', subprocess.call, ['echo', 'Hello'])

        """
        if not isinstance(name, tools.string_type):
            logging.critical('Step name must be a string: {}'.format(name))
        if not name:
            logging.critical('Step name must not be empty')
        if any(step.name == name for step in self.steps):
            raise ValueError("Step names must be unique: {}".format(name))
        self.steps.append(Step(name, function, *args, **kwargs))
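
A hedged usage sketch for ``add_absolute_report_step`` above: assuming the
experiment's revisions are ``["base", "v1"]`` and the report format is HTML,
the step writes ``base-v1.html`` to the evaluation directory and then calls
``publish`` on it. ::

    exp.add_absolute_report_step(attributes=["coverage"])
    # -> writes <eval_dir>/base-v1.html, then runs: publish base-v1.html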
Example #3
    def add_scatter_plot_step(self, attributes=None):
        if attributes is None:
            attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
        revisions = self._HACK_revisions
        if revisions is None:
            # TODO: See add_comparison_table_step.
            raise NotImplementedError(
                "only supported when specifying revisions in __init__")
        if len(revisions) != 2:
            # TODO: Should generalize this, too, by offering a general
            # grouping function and then comparing any pair of
            # settings in the same group.
            raise NotImplementedError("need two revisions")
        scatter_dir = os.path.join(self.eval_dir, "scatter")

        def make_scatter_plots():
            configs = [conf[0] for conf in self.configs]
            for nick in configs:
                config_before = "%s-%s" % (revisions[0], nick)
                config_after = "%s-%s" % (revisions[1], nick)
                for attribute in attributes:
                    name = "%s-%s-%s" % (self._report_prefix, attribute, nick)
                    report = ScatterPlotReport(
                        filter_config=[config_before, config_after],
                        attributes=[attribute],
                        get_category=lambda run1, run2: run1["domain"],
                        legend_location=(1.3, 0.5))
                    report(self.eval_dir, os.path.join(scatter_dir, name))

        self.add_step(Step("make-scatter-plots", make_scatter_plots))
Example #4
    def add_comparison_table_step(self, **kwargs):
        """Add a step that makes pairwise revision comparisons.

        Create comparative reports for all pairs of Fast Downward
        revision triples. Each report pairs up the runs of the same
        config and lists the two absolute attribute values and their
        difference for all attributes in kwargs["attributes"].

        All *kwargs* will be passed to the CompareRevisionsReport
        class. If the keyword argument *attributes* is not
        specified, a default list of attributes is used. ::

            exp.add_comparison_table_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)

        def make_comparison_tables():
            for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
                report = CompareRevisionsReport(rev1, rev2, **kwargs)
                outfile = os.path.join(self.eval_dir,
                                       "%s-%s-compare.html" % (rev1, rev2))
                report(self.eval_dir, outfile)

        self.add_step(Step("make-comparison-tables", make_comparison_tables))
Example #5
    def add_scatter_plot_step(self, attributes=None, relative=False):
        """Add a step that creates scatter plots for all revision pairs.

        Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
        specified, a list of common scatter plot attributes is used.
        For portfolios all attributes except "cost", "coverage" and
        "plan_length" will be ignored. ::

            exp.add_scatter_plot_step(attributes=["expansions"])

        Use `relative=True` to create relative scatter plots. ::

            exp.add_scatter_plot_step(relative=True)

        """
        if attributes is None:
            attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES

        if relative:
            scatter_plot_class = RelativeScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "relative-scatter")
        else:
            scatter_plot_class = ScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter")

        def is_portfolio(config_nick):
            return "fdss" in config_nick

        def make_scatter_plot(config_nick, rev1, rev2, attribute):
            name = "-".join([self.name, rev1, rev2, attribute, config_nick])
            print "Make scatter plot for", name
            algo1 = "%s-%s" % (rev1, config_nick)
            algo2 = "%s-%s" % (rev2, config_nick)
            report = scatter_plot_class(
                filter_config=[algo1, algo2],
                attributes=[attribute],
                get_category=lambda run1, run2: run1["domain"],
                legend_location=(1.3, 0.5))
            report(self.eval_dir,
                   os.path.join(scatter_dir, rev1 + "-" + rev2, name))

        def make_scatter_plots():
            for config_nick in self._config_nicks:
                if is_portfolio(config_nick):
                    valid_attributes = [
                        attr for attr in attributes
                        if attr in self.PORTFOLIO_ATTRIBUTES
                    ]
                else:
                    valid_attributes = attributes
                for rev1, rev2 in itertools.combinations(
                        self.revision_nicks, 2):
                    for attribute in valid_attributes:
                        make_scatter_plot(config_nick, rev1, rev2, attribute)

        self.add_step(Step("make-scatter-plots", make_scatter_plots))
def main():
    args = parse_custom_args()

    if not args.revision:
        rev = 'WORK'
        name = 'current'
    elif args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = checkouts.get_global_rev(REPO, args.revision)
        name = rev

    combo = [(Translator(REPO, rev=rev), Preprocessor(REPO, rev=rev), Planner(REPO, rev=rev))]

    exp = DownwardExperiment(path=get_exp_dir(name, args.test), repo=REPO, combinations=combo)
    exp.add_suite(SUITES[args.test])
    for nick, config in CONFIGS[args.test]:
        exp.add_config(nick, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [path for path in [exp.preprocess_exp_path, exp.path, exp.eval_dir]
                       if os.path.exists(path)]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_step(Step('fetch-baseline-results', Fetcher(),
                          get_exp_dir('baseline', args.test) + '-eval',
                          exp.eval_dir))
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES), name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step(Step('rm-preprocess-dir', shutil.rmtree, exp.preprocess_exp_path))
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-preprocessed-tasks', shutil.rmtree, exp.preprocessed_tasks_dir))
        exp.add_step(Step('rm-eval-dir', shutil.rmtree, exp.eval_dir))

    exp()
Example #7
    def add_comparison_table_step(self, **kwargs):
        """Add a step that makes pairwise revision comparisons.

        Create comparative reports for all pairs of Fast Downward
        revisions. Each report pairs up the runs of the same config and
        lists the two absolute attribute values and their difference
        for all attributes in kwargs["attributes"].

        All *kwargs* will be passed to the CompareConfigsReport class.
        If the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_comparison_table_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)

        def make_comparison_tables():
            for rev1, rev2 in itertools.combinations(self._revisions, 2):
                compared_configs = []
                for config in self._configs:
                    config_nick = config.nick
                    compared_configs.append(
                        ("%s-%s" % (rev1, config_nick),
                         "%s-%s" % (rev2, config_nick),
                         "Diff (%s)" % config_nick))
                report = CompareConfigsReport(compared_configs, **kwargs)
                outfile = os.path.join(
                    self.eval_dir,
                    "%s-%s-%s-compare.%s" % (
                        self.name, rev1, rev2, report.output_format))
                report(self.eval_dir, outfile)

        def publish_comparison_tables():
            for rev1, rev2 in itertools.combinations(self._revisions, 2):
                outfile = os.path.join(
                    self.eval_dir,
                    "%s-%s-%s-compare.html" % (self.name, rev1, rev2))
                subprocess.call(["publish", outfile])

        self.add_step(Step("make-comparison-tables", make_comparison_tables))
        self.add_step(Step(
            "publish-comparison-tables", publish_comparison_tables))
Example #8
    def __init__(self, path, environment=None, cache_dir=None):
        """
        Create a new experiment that will be built at *path* using the methods
        provided by :ref:`Environment <environments>` *environment*. If
        *environment* is None, ``LocalEnvironment`` is used (default).

        Lab will use the *cache_dir* for storing temporary files.
        In case you run :py:class:`Fast Downward experiments
        <downward.experiments.DownwardExperiment>` this directory can become
        very large (tens of GB) since it is used to cache revisions and
        preprocessed tasks. By default *cache_dir* points to ``~/lab``.

        An experiment consists of multiple steps. Every experiment will need at
        least the following steps:

        * Build the experiment.
        * Run it.
        * Fetch the results.
        * Make a report.

        In the "Run it" step all runs that have been added to the experiment
        will be executed. Each run consists of one or multiple commands.
        """
        _Buildable.__init__(self)
        self.path = os.path.abspath(path)
        if any(char in self.path for char in (':', ',')):
            logging.critical('Path contains commas or colons: %s' % self.path)
        self.environment = environment or LocalEnvironment()
        self.environment.exp = self
        self.cache_dir = cache_dir or tools.DEFAULT_USER_DIR
        tools.makedirs(self.cache_dir)
        self.shard_size = SHARD_SIZE

        self.runs = []

        self.set_property('experiment_file', self._script)

        self.steps = Sequence()
        self.add_step(Step('build', self.build))
        self.add_step(Step('start', self.run))
        self.add_fetcher(name='fetch')
def main():
    args = parse_custom_args()

    if args.revision.lower() == 'baseline':
        rev = BASELINE
        name = 'baseline'
    else:
        rev = args.revision
        name = rev

    exp = FastDownwardExperiment(path=get_exp_dir(name, args.test),
                                 cache_dir=CACHE_DIR)
    exp.add_suite(BENCHMARKS_DIR, SUITES[args.test])
    for config_nick, config in CONFIGS[args.test]:
        exp.add_algorithm(rev + "-" + config_nick, REPO, rev, config)
    exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                   name='report')

    # Only compare results if we are not running the baseline experiment.
    if rev != BASELINE:
        dirty_paths = [
            path for path in [exp.path, exp.eval_dir] if os.path.exists(path)
        ]
        if dirty_paths:
            logging.critical(
                'The last run found a regression. Please inspect what '
                'went wrong and then delete the following directories '
                'manually: %s' % dirty_paths)
        exp.add_fetcher(src=get_exp_dir('baseline', args.test) + '-eval',
                        dest=exp.eval_dir,
                        name='fetch-baseline-results')
        exp.add_report(AbsoluteReport(attributes=ABSOLUTE_ATTRIBUTES),
                       name='comparison')
        exp.add_report(RegressionCheckReport(BASELINE, RELATIVE_CHECKS),
                       name='regression-check')
        # We abort if there is a regression and keep the directories.
        exp.add_step(Step('rm-exp-dir', shutil.rmtree, exp.path))
        exp.add_step(Step('rm-eval-dir', shutil.rmtree, exp.eval_dir))

    exp()
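
A minimal sketch of the defaults set up by the ``Experiment`` constructor
above: a fresh experiment already carries the ``build``, ``start`` and
``fetch`` steps, so scripts only add runs, fetchers and reports (the path is
hypothetical). ::

    from lab.experiment import Experiment

    exp = Experiment('/tmp/demo-exp')
    print([step.name for step in exp.steps])  # ['build', 'start', 'fetch']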
Example #10
    def add_fetcher(self, src=None, dest=None, name=None, **kwargs):
        """
        Add a step that fetches results from experiment or
        evaluation directories into a new or existing evaluation
        directory. Use this method to combine results from multiple
        experiments.

        *src* can be an experiment or evaluation directory. It defaults to
        ``exp.path``.

        *dest* must be a new or existing evaluation directory. It
        defaults to ``exp.eval_dir``. If *dest* already contains
        data, the old and new data will be merged, not replaced.

        If no *name* is given, call this step "fetch-``basename(src)``".

        Valid keyword args:

        If *copy_all* is True (default: False), copy all files from the run
        dirs to a new directory tree at *dest*. Without this option only
        the combined properties file is written to disk.

        If *write_combined_props* is True (default), write the combined
        properties file.

        You can include only specific domains or configurations by using
        :py:class:`filters <.Report>`.

        *parsers* can be a list of paths to parser scripts. If given, each
        parser is called in each run directory and the results are added to
        the properties file which is fetched afterwards. This option is
        useful if you forgot to parse some attributes during the experiment.

        Examples:

        Merge the results from "other-exp" into this experiment's results::

            exp.add_fetcher(src='/path/to/other-exp-eval')

        Merge two evaluation directories at the location of the second one::

            exp.add_fetcher(src=eval_dir1, dest=combined_eval_dir, name='merge')

        Fetch only the runs for certain configurations from an older experiment::

            exp.add_fetcher(src='/path/to/eval-dir',
                            filter_config_nick=['config_1', 'config_5'])
        """
        src = src or self.path
        dest = dest or self.eval_dir
        name = name or 'fetch-%s' % os.path.basename(src)
        self.add_step(Step(name, Fetcher(), src, dest, **kwargs))
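
The *parsers* option from the docstring can be sketched the same way; the
parser path below is hypothetical. ::

    exp.add_fetcher(
        src=exp.path,
        name='refetch-with-extra-parser',
        parsers=['/path/to/extra-parser.py'])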
Example #11
    def add_scatter_plot_step(self, relative=False, attributes=None):
        """Add step creating (relative) scatter plots for all revision pairs.

        Create a scatter plot for each combination of attribute,
        configuration and revisions pair. If *attributes* is not
        specified, a list of common scatter plot attributes is used.
        For portfolios all attributes except "cost", "coverage" and
        "plan_length" will be ignored. ::

            exp.add_scatter_plot_step(attributes=["expansions"])

        """
        if relative:
            report_class = RelativeScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
            step_name = "make-relative-scatter-plots"
        else:
            report_class = ScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
            step_name = "make-absolute-scatter-plots"
        if attributes is None:
            attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES

        def make_scatter_plot(config_nick, rev1, rev2, attribute):
            name = "-".join([self.name, rev1, rev2, attribute, config_nick])
            print "Make scatter plot for", name
            algo1 = "{}-{}".format(rev1, config_nick)
            algo2 = "{}-{}".format(rev2, config_nick)
            report = report_class(
                filter_config=[algo1, algo2],
                attributes=[attribute],
                get_category=lambda run1, run2: run1["domain"],
                legend_location=(1.3, 0.5))
            report(
                self.eval_dir,
                os.path.join(scatter_dir, rev1 + "-" + rev2, name))

        def make_scatter_plots():
            for config in self._configs:
                for rev1, rev2 in itertools.combinations(self._revisions, 2):
                    for attribute in self.get_supported_attributes(
                            config.nick, attributes):
                        make_scatter_plot(config.nick, rev1, rev2, attribute)

        self.add_step(Step(step_name, make_scatter_plots))
Example #12
    def add_report(self, report, name='', eval_dir='', outfile=''):
        """Add *report* to the list of experiment steps.

        This method is a shortcut for
        ``add_step(Step(name, report, eval_dir, outfile))``.

        If no *name* is given, use *outfile* or the *report*'s class name.

        By default, use the experiment's standard *eval_dir*.

        If *outfile* is omitted, compose a filename from *name* and the
        *report*'s format. If *outfile* is a relative path, put it under
        *eval_dir*.
        """
        name = name or os.path.basename(
            outfile) or report.__class__.__name__.lower()
        eval_dir = eval_dir or self.eval_dir
        outfile = outfile or '%s.%s' % (name, report.output_format)
        if not os.path.isabs(outfile):
            outfile = os.path.join(eval_dir, outfile)
        self.add_step(Step(name, report, eval_dir, outfile))
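
A sketch of how the defaults resolve: with neither *name* nor *outfile*
given, an ``AbsoluteReport`` (attributes hypothetical) is registered under
its lower-cased class name and written into the evaluation directory. ::

    exp.add_report(AbsoluteReport(attributes=['coverage']))
    # name    -> 'absolutereport'
    # outfile -> <eval_dir>/absolutereport.html  (assuming HTML output)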
Example #13
    def add_custom_comparison_table_step(self, name, **kwargs):
        """Add a step that compares the configurations given in
        *compared_configs*.

        *compared_configs* must be specified. See CompareConfigsReport class.

        *name* is a custom name for the report.

        All *kwargs* will be passed to the CompareConfigsReport class.
        If the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_custom_comparison_table_step(
                "my-report", compared_configs=[...],
                attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = CompareConfigsReport(**kwargs)
        outfile = os.path.join(self.eval_dir,
                               name + "." + report.output_format)
        self.add_report(report, outfile=outfile)
        self.add_step(
            Step('publish-custom-comparison-report', subprocess.call,
                 ['publish', outfile]))
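
A hedged usage sketch: *compared_configs* follows the same triple format
that ``make_comparison_tables`` builds in Example #7; the algorithm names
are hypothetical. ::

    exp.add_custom_comparison_table_step(
        'lmcut-v1-vs-v2',
        compared_configs=[('v1-lmcut', 'v2-lmcut', 'Diff (lmcut)')],
        attributes=['coverage'])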
Example #14
    def add_absolute_report_step(self, **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't compare
        revisions.

        The report is written to the experiment evaluation directory.

        All *kwargs* will be passed to the AbsoluteReport class. If the
        keyword argument *attributes* is not specified, a default list
        of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = AbsoluteReport(**kwargs)
        outfile = os.path.join(
            self.eval_dir,
            get_experiment_name() + "." + report.output_format)
        self.add_report(report, outfile=outfile)
        self.add_step(Step(
            'publish-absolute-report', subprocess.call, ['publish', outfile]))
Example #15
ENV = LocalEnvironment(processes=6)
CONFIGS = [('lmcut', ['--search', 'astar(lmcut())'])]

ATTRIBUTES = [
    'coverage', 'expansions', 'initial_h_value', 'cost', 'hstar_to_h'
]

# All runs with a timeout:
#exp = DownwardExperiment(path=EXPPATH, repo=REPO, environment=ENV, limits={'search_time': 100})
#exp.add_suite(suites.suite_all())

# Only lmcut domains, without a timeout:
exp = DownwardExperiment(path=EXPPATH, repo=REPO, environment=ENV)
exp.add_suite(suites.suite_lmcut_domains())

for nick, config in CONFIGS:
    exp.add_config(nick, config)

# Make a report containing absolute numbers with h*/h values.
report = os.path.join(exp.eval_dir, 'report.html')
exp.add_report(HstarToHRatio(attributes=ATTRIBUTES), outfile=report)

# "Publish" the results with "cat" for demonstration purposes.
exp.add_step(Step('publish-report', subprocess.call, ['cat', report]))

# Compress the experiment directory.
exp.add_step(Step.zip_exp_dir(exp))

# Parse the commandline and show or run experiment steps.
exp()
Example #16
exp = DownwardExperiment(path=EXPPATH, repo=REPO,
                         environment=ENV,
                         limits={'search_time': 300})
exp.add_suite({'airport', 'blocks', 'freecell'})

for nick, config in CONFIGS:
    exp.add_config(nick, config)

# Make a report containing absolute numbers (this is the most common report).
file_name_for_report = 'report_' + nick + '.html'
report = os.path.join(exp.eval_dir, file_name_for_report)
file_name_for_preprocess = os.path.join(exp.eval_dir, 'preprocess')
exp.add_report(HstarToHRatioAndStatistics(nick,
                                          file_name_for_preprocess,
                                          attributes=ATTRIBUTES),
               outfile=report)

# Plot
sub_dir = 'plots_' + nick
exp.add_step(
    Step('report-plot-cat', ProblemPlotReport(), exp.eval_dir,
         os.path.join(exp.eval_dir, sub_dir)))

# "Publish" the results with "cat" for demonstration purposes.
exp.add_step(Step('publish-report', subprocess.call, ['cat', report]))

# Compress the experiment directory.
exp.add_step(Step.zip_exp_dir(exp))

# Parse the commandline and show or run experiment steps.
exp()
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other
        things, this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        #if not repo or not os.path.isdir(repo):
        #    logging.critical('The path "%s" is not a local Fast Downward '
        #                     'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key in limits:
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        # Copy the defaults so the module-level LIMITS dict is not mutated.
        self.limits = dict(LIMITS)
        self.limits.update(limits)

        # Record whether this is a compact experiment, i.e. whether
        # preprocessed tasks are only referenced instead of copied.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        # TODO: Make this path a parameter.
        self.add_PAC_fetcher(
            src='/home/sternron/gal-dreiman/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',
            dest=self.search_exp_path,
            # Use a distinct name: step names must be unique.
            name='fetch-PAC-preprocess-results',
            copy_all=True,
            # New fetcher that copies preprocess results for the PAC runs.
            write_combined_props=False)
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
    def __init__(self,
                 path=None,
                 repo=None,
                 environment=None,
                 combinations=None,
                 limits=None,
                 attributes=None,
                 derived_properties=None,
                 priority=0,
                 queue=None,
                 processes=2,
                 email=None,
                 cache_dir=CACHE_DIR,
                 **kwargs):
        if path is None:
            path = os.path.splitext(os.path.basename(sys.argv[0]))[0]

        expname = os.path.basename(path)

        remote_exppath = os.path.join(REMOTE_EXPS, path)
        local_exppath = os.path.join(LOCAL_EXPS, path)

        if REMOTE:
            exppath = remote_exppath
            repo = repo or REMOTE_REPO
            environment = environment or MaiaEnvironment(
                priority=priority, queue=queue, email=email)
        else:
            exppath = local_exppath
            repo = repo or LOCAL_REPO
            environment = environment or LocalEnvironment(processes=processes)

        DownwardExperiment.__init__(self,
                                    path=exppath,
                                    environment=environment,
                                    repo=repo,
                                    combinations=combinations,
                                    limits=limits,
                                    cache_dir=cache_dir,
                                    **kwargs)

        self.set_path_to_python(PYTHON)

        if attributes is None:
            attributes = ATTRIBUTES

        # Add report steps
        abs_report_file = os.path.join(self.eval_dir, '%s-abs.html' % expname)
        self.add_report(AbsoluteReport(attributes=attributes,
                                       colored=True,
                                       derived_properties=derived_properties),
                        name='report-abs',
                        outfile=abs_report_file)

        if REMOTE:
            # Compress the experiment directory
            self.add_step(Step.zip_exp_dir(self))
            self.add_step(
                Step('zip-eval-dir',
                     call, [
                         'tar', '-cjf', self.name + '-eval.tar.bz2',
                         self.name + '-eval'
                     ],
                     cwd=os.path.dirname(self.path)))

        self.add_step(Step.remove_exp_dir(self))
        self.add_step(
            Step('remove-eval-dir',
                 shutil.rmtree,
                 self.eval_dir,
                 ignore_errors=True))

        if not REMOTE:
            # Copy the results to local directory
            self.add_step(
                Step('scp-eval-dir', call, [
                    'scp', '-r',
                    '%s:%s-eval' % (SCP_LOGIN, remote_exppath),
                    '%s-eval' % local_exppath
                ]))

            # Copy the zipped evaluation directory to the local machine
            self.add_step(
                Step('scp-zipped-eval-dir', call, [
                    'scp', '-r',
                    '%s:%s-eval.tar.bz2' % (SCP_LOGIN, remote_exppath),
                    '%s-eval.tar.bz2' % local_exppath
                ]))

            # Copy the zipped experiment directory to local directory
            self.add_step(
                Step('scp-exp-dir', call, [
                    'scp', '-r',
                    '%s:%s.tar.bz2' % (SCP_LOGIN, remote_exppath),
                    '%s.tar.bz2' % local_exppath
                ]))

        # Unzip the experiment directory
        self.add_step(Step.unzip_exp_dir(self))
        self.add_step(
            Step('unzip-eval-dir',
                 call, ['tar', '-xjf', self.name + '-eval.tar.bz2'],
                 cwd=os.path.dirname(self.path)))
# exp.add_suite({'blocks:probBLOCKS-8-0.pddl','blocks:probBLOCKS-8-1.pddl','blocks:probBLOCKS-4-0.pddl','blocks:probBLOCKS-5-1.pddl'})
#exp.add_suite({'gripper:prob02.pddl'})
# exp.add_suite({'zenotravel','trucks-strips','sokoban-sat11-strips','philosophers'})

for nick, config in CONFIGS:
    exp.add_config(nick, config)

# Make a report containing absolute numbers (this is the most common report).
file_name_for_report = 'report_' + nick + '.html'
report = os.path.join(exp.eval_dir, file_name_for_report)
file_name_for_preprocess = os.path.join(exp.eval_dir, 'preprocess')
exp.add_report(HstarToHRatioAndStatistics(nick,
                                          file_name_for_preprocess,
                                          attributes=ATTRIBUTES),
               outfile=report)

# Plot
sub_dir = 'plots_' + nick
exp.add_step(
    Step('report-plot-cat', ProblemPlotReport(), exp.eval_dir,
         os.path.join(exp.eval_dir, sub_dir)))

# "Publish" the results with "cat" for demonstration purposes.
# exp.add_step(Step('publish-report', subprocess.call, ['cat', report]))

# Compress the experiment directory.
# exp.add_step(Step.zip_exp_dir(exp))

# Parse the commandline and show or run experiment steps.
exp()
Example #20
class PiReport(Report):
    def get_text(self):
        lines = []
        for run_id, run in self.props.items():
            lines.append('%s %s' % (run['time'], run['diff']))
        return '\n'.join(lines)

exp = Experiment(EXPPATH)
exp.add_resource('PARSER', 'pi-parser-ext.py', 'pi-parser.py')
exp.add_resource('CALC', 'calculate.py', 'calculate.py')

for rounds in [1, 5, 10, 50, 100, 500, 1000, 5000, 10000]:
    run = exp.add_run()
    run.require_resource('PARSER')
    run.require_resource('CALC')
    run.add_command('calc-pi', ['CALC', rounds], time_limit=10, mem_limit=1024)
    run.add_command('parse-pi', ['PARSER'])
    run.set_property('id', ['calc-%d' % rounds])

def good(run):
    return run['diff'] <= 0.01

exp.add_step(Step('report', Report(format='html', attributes=['pi', 'diff'],
                  filter=good), exp.eval_dir,
                  os.path.join(exp.eval_dir, 'report.html')))

exp.add_step(Step('plot', PiReport(),
                  exp.eval_dir, os.path.join(exp.eval_dir, 'plot.dat')))

exp()
Example #21
exp.add_config('iter-hadd', [
    '--heuristic', 'hadd=add()', '--search',
    'iterated([lazy_greedy([hadd]),lazy_wastar([hadd])],repeat_last=true)'
])
exp.add_config('ipdb', ["--search", "astar(ipdb())"], timeout=10)
# Use original LAMA 2011 configuration
exp.add_config('lama11',
               ['ipc', 'seq-sat-lama-2011', '--plan-file', 'sas_plan'])
exp.add_config('fdss-1', ['ipc', 'seq-sat-fdss-1', '--plan-file', 'sas_plan'])
exp.add_portfolio(
    os.path.join(REPO, 'src', 'search', 'downward-seq-opt-fdss-1.py'))

# Before we fetch the new results, delete the old ones
exp.steps.insert(
    5,
    Step('delete-old-results', shutil.rmtree, exp.eval_dir,
         ignore_errors=True))

# Before we build the experiment, delete the old experiment directory
# and the preprocess directory
exp.steps.insert(
    0, Step('delete-exp-dir', shutil.rmtree, exp.path, ignore_errors=True))
exp.steps.insert(
    0,
    Step('delete-preprocess-dir',
         shutil.rmtree,
         exp.preprocess_exp_path,
         ignore_errors=True))

# Define some filters

exp.add_suite(['gripper:prob01.pddl'])
exp.add_suite('zenotravel:pfile2')
exp.add_config('ff', ['--search', 'lazy(single(ff()))'])
exp.add_config('add', ['--search', 'lazy(single(add()))'])
exp.add_portfolio(
    os.path.join(REPO, 'src', 'search', 'downward-seq-sat-fdss-1.py'))

exp.add_report(AbsoluteReport('problem'),
               name='make-report',
               outfile='report-abs-p.html')


def solved(run):
    return run['coverage'] == 1


exp.add_step(
    Step('suite', SuiteReport(filter=solved), exp.eval_dir,
         os.path.join(exp.eval_dir, 'suite.py')))

exp.add_step(
    Step(
        'scatter',
        ScatterPlotReport(filter_config_nick=['ff', 'add'],
                          attributes='expansions',
                          format='png'), exp.eval_dir,
        os.path.join(exp.eval_dir, 'scatter')))

exp()
Example #23
    def add_PAC_fetcher(self, src=None, dest=None, name=None, **kwargs):

        src = src or self.path
        dest = dest or self.eval_dir
        name = name or 'fetch-%s' % os.path.basename(src)
        self.add_step(Step(name, PACFetcher(), src, dest, **kwargs))
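
A usage sketch mirroring ``add_fetcher`` from Example #10; the source
directory is hypothetical and stands in for the hard-coded path used in the
DownwardExperiment constructor above. ::

    exp.add_PAC_fetcher(
        src='/path/to/PAC_Preprocess_Output-eval/preprocess',
        dest=exp.search_exp_path,
        name='fetch-PAC-preprocess-results',
        copy_all=True,
        write_combined_props=False)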