Example #1
    def __init__(self, path, environment=None, cache_dir=None):
        """
        Create a new experiment that will be built at *path* using the methods
        provided by :ref:`Environment <environments>` *environment*. If
        *environment* is None, ``LocalEnvironment`` is used (default).

        Lab will use the *cache_dir* for storing temporary files.
        In case you run :py:class:`Fast Downward experiments
        <downward.experiments.DownwardExperiment>` this directory can become
        very large (tens of GB) since it is used to cache revisions and
        preprocessed tasks. By default *cache_dir* points to ``~/lab``.

        An experiment consists of multiple steps. Every experiment will need at
        least the following steps:

        * Build the experiment.
        * Run it.
        * Fetch the results.
        * Make a report.

        In the "Run it" step all runs that have been added to the experiment
        will be executed. Each run consists of one or multiple commands.
        """
        _Buildable.__init__(self)
        self.path = os.path.abspath(path)
        if any(char in self.path for char in (':', ',')):
            logging.critical('Path contains commas or colons: %s' % self.path)
        self.environment = environment or LocalEnvironment()
        self.environment.exp = self
        self.cache_dir = cache_dir or tools.DEFAULT_USER_DIR
        tools.makedirs(self.cache_dir)
        self.shard_size = SHARD_SIZE

        self.runs = []

        self.set_property('experiment_file', self._script)

        self.steps = Sequence()
        self.add_step(Step('build', self.build))
        self.add_step(Step('start', self.run))
        self.add_fetcher(name='fetch')
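The four steps listed in the docstring map directly onto a driver script. A minimal sketch, assuming the Lab API shown above (the path and the command are placeholders; some Lab versions execute the steps via exp() instead of exp.run_steps()):

from lab.experiment import Experiment

# No environment is passed, so LocalEnvironment is used.
exp = Experiment(path='/tmp/minimal-exp')

run = exp.add_run()
run.add_command('say-hello', ['echo', 'hello'])
# Every run needs an id (a list of strings).
run.set_property('id', ['hello'])

# Parse the command line and execute the selected steps
# ('build', 'start', 'fetch' and any report steps).
exp.run_steps()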
Example #2
def generate_experiments(configs):
    SUITE = [
        "gripper:prob01.pddl", "blocks:probBLOCKS-5-0.pddl",
        "visitall-sat11-strips:problem12.pddl", "airport:p01-airport1-p1.pddl"
    ]

    ENVIRONMENT = BaselSlurmEnvironment(email="*****@*****.**",
                                        export=EXPORTS)

    if common_setup.is_test_run():
        SUITE = IssueExperiment.DEFAULT_TEST_SUITE
        ENVIRONMENT = LocalEnvironment(processes=2)

    exp = IssueExperiment(
        revisions=REVISIONS,
        configs=configs,
        environment=ENVIRONMENT,
    )
    exp.add_suite(BENCHMARKS_DIR, SUITE)
    exp.add_absolute_report_step()
    exp.run_steps()
Example #3
import os
import platform

from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from downward.reports.taskwise import TaskwiseReport
from lab import cached_revision, reports
from lab.environments import BaselSlurmEnvironment, LocalEnvironment
from lab.reports import Attribute
from lab.reports.filter import FilterReport

DIR = os.path.dirname(os.path.abspath(__file__))
NODE = platform.node()
REMOTE = NODE.endswith(".scicore.unibas.ch") or NODE.endswith(
    ".cluster.bc2.ch")
if REMOTE:
    ENV = BaselSlurmEnvironment("*****@*****.**")
else:
    ENV = LocalEnvironment(processes=2)
REPO = os.environ["DOWNWARD_REPO"]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REV_CACHE = os.environ.get("DOWNWARD_REVISION_CACHE")
VCS = cached_revision.get_version_control_system(REPO)
REV = "default" if VCS == cached_revision.MERCURIAL else "master"


class QualityFilters(object):
    """Compute the IPC quality score.

    The IPC score is computed over the list of runs for each task. Since
    filters only work on individual runs, we can't compute the score
    with a single filter, but it is possible by using two filters:
    *store_costs* saves the list of costs per task in a dictionary
    whereas *add_quality* uses the stored costs to compute IPC quality
    scores.
    """
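    # NOTE: the example is cut off here. A sketch of how the two filters
    # described above might be implemented (assumes
    # "from collections import defaultdict" at module level):
    def __init__(self):
        self.tasks_to_costs = defaultdict(list)

    def _get_task(self, run):
        return (run["domain"], run["problem"])

    def _compute_quality(self, cost, all_costs):
        # Quality = min_cost / cost; unsolved tasks score 0.
        if cost is None:
            return 0.0
        min_cost = min(all_costs)
        if cost == 0:
            return 1.0
        return min_cost / cost

    def store_costs(self, run):
        # First pass over the runs: record the cost of each solved task.
        cost = run.get("cost")
        if cost is not None:
            self.tasks_to_costs[self._get_task(run)].append(cost)
        return True

    def add_quality(self, run):
        # Second pass: attach the IPC quality score to each run.
        run["quality"] = self._compute_quality(
            run.get("cost"), self.tasks_to_costs[self._get_task(run)])
        return run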
Example #4
CONFIGS = [
    IssueConfig(config_nick,
                config,
                build_options=[build],
                driver_options=["--build", build])
    for build in BUILDS
    for config_nick, config in CONFIG_NICKS
]

SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(partition="infai_2",
                                    email="*****@*****.**",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
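The comprehension at the top of Example #4 assumes that REVISIONS, BUILDS and CONFIG_NICKS are defined earlier in the script. A hypothetical sketch of what such definitions usually look like in Fast Downward issue experiments (the concrete values are placeholders, not from the original script):

REVISIONS = ["issue123-base", "issue123-v1"]
BUILDS = ["release64"]
CONFIG_NICKS = [
    ("lmcut", ["--search", "astar(lmcut())"]),
    ("blind", ["--search", "astar(blind())"]),
]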
Example #5
    def __init__(self,
                 configs,
                 revisions,
                 suite,
                 build_options=None,
                 driver_options=None,
                 grid_priority=None,
                 test_suite=None,
                 email=None,
                 processes=1,
                 **kwargs):
        """Create an FastDownwardExperiment with some convenience features.
        All configs will be run on all revisions. Inherited options
        *path*, *environment* and *cache_dir* from FastDownwardExperiment
        are not supported and will be automatically set.

        *configs* must be a non-empty dict of {nick: cmdline} pairs
        that sets the planner configurations to test. nick will
        automatically get the revision prepended, e.g.
        'issue123-base-<nick>'::

            IssueExperiment(configs={
                "lmcut": ["--search", "astar(lmcut())"],
                "ipdb":  ["--search", "astar(ipdb())"]})

        *revisions* must be a non-empty list of revisions, which
        specify which planner versions to use in the experiment.
        The same versions are used for translator, preprocessor
        and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(suite=suites.suite_all())
            IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(grid_priority=-500)

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])

        """

        if is_test_run():
            kwargs["environment"] = LocalEnvironment(processes=processes)
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
                                                    email=email)

        FastDownwardExperiment.__init__(self, **kwargs)

        # Automatically deduce the Fast Downward repository from the
        # main script's location.
        repo = get_repo_base()
        self.algorithm_nicks = []
        self.revisions = revisions
        for nick, cmdline in configs.items():
            for rev in revisions:
                algo_nick = '%s-%s' % (rev, nick)
                self.add_algorithm(algo_nick, repo, rev, cmdline,
                                   build_options, driver_options)
                self.algorithm_nicks.append(algo_nick)

        benchmarks_dir = os.path.join(repo, 'benchmarks')
        self.add_suite(benchmarks_dir, suite)
        self.search_parsers = []
Example #6
    def __init__(self,
                 configs,
                 suite,
                 grid_priority=None,
                 path=None,
                 repo=None,
                 revisions=None,
                 search_revisions=None,
                 test_suite=None,
                 **kwargs):
        """Create a DownwardExperiment with some convenience features.

        *configs* must be a non-empty dict of {nick: cmdline} pairs
        that sets the planner configurations to test. ::

            IssueExperiment(configs={
                "lmcut": ["--search", "astar(lmcut())"],
                "ipdb":  ["--search", "astar(ipdb())"]})

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(suite=suites.suite_all())
            IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        If *repo* is specified, it must be the path to the root of a
        local Fast Downward repository. If omitted, the repository
        is derived automatically from the main script's path. Example::

            script = /path/to/fd-repo/experiments/issue123/exp01.py -->
            repo = /path/to/fd-repo

        If *revisions* is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"])

        If *search_revisions* is specified, it should be a non-empty
        list of revisions, which specify which search component
        versions to use in the experiment. All runs use the
        translator and preprocessor component of the first
        revision. ::

            IssueExperiment(search_revisions=["default", "issue123"])

        If you really need to specify the (translator, preprocessor,
        planner) triples manually, use the *combinations* parameter
        from the base class (might be deprecated soon). The options
        *revisions*, *search_revisions* and *combinations* can be
        freely mixed, but at least one of them must be given.

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])

        """

        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        if path is None:
            path = get_data_dir()

        if repo is None:
            repo = get_repo_base()

        kwargs.setdefault("combinations", [])

        if not any([revisions, search_revisions, kwargs["combinations"]]):
            raise ValueError('At least one of "revisions", "search_revisions" '
                             'or "combinations" must be given')

        if revisions:
            kwargs["combinations"].extend([
                (Translator(repo, rev), Preprocessor(repo, rev),
                 Planner(repo, rev))
                for rev in revisions
            ])

        if search_revisions:
            base_rev = search_revisions[0]
            # Use the same nick for all parts to get a short revision nick.
            kwargs["combinations"].extend([
                (Translator(repo, base_rev, nick=rev),
                 Preprocessor(repo, base_rev, nick=rev),
                 Planner(repo, rev, nick=rev))
                for rev in search_revisions
            ])

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        self._config_nicks = []
        for nick, config in configs.items():
            self.add_config(nick, config)

        self.add_suite(suite)
Example #7
    def __init__(self,
                 revisions,
                 configs,
                 suite,
                 grid_priority=None,
                 path=None,
                 test_suite=None,
                 email=None,
                 **kwargs):
        """Create a DownwardExperiment with some convenience features.

        If *revisions* is specified, it should be a non-empty
        list of revisions, which specify which planner versions to use
        in the experiment. The same versions are used for translator,
        preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        *configs* must be a non-empty list of IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(..., suite=suites.suite_all())
            IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(..., suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(..., grid_priority=-500)

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])

        If *email* is specified, it must be an email address. A
        notification is sent to this address when the experiment
        finishes, if it is run on the cluster.
        """

        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
                                                    email=email)

        path = path or get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        repo = get_repo_base()
        for rev in revisions:
            for config in configs:
                self.add_algorithm(get_algo_nick(rev, config.nick),
                                   repo,
                                   rev,
                                   config.component_options,
                                   build_options=config.build_options,
                                   driver_options=config.driver_options)

        self.add_suite(os.path.join(repo, "benchmarks"), suite)

        self._revisions = revisions
        self._configs = configs
Example #8
    def __init__(self,
                 path=None,
                 repo=None,
                 environment=None,
                 combinations=None,
                 limits=None,
                 attributes=None,
                 derived_properties=None,
                 priority=0,
                 queue=None,
                 processes=2,
                 email=None,
                 cache_dir=CACHE_DIR,
                 **kwargs):
        if path is None:
            path = os.path.splitext(os.path.basename(sys.argv[0]))[0]

        expname = os.path.basename(path)

        remote_exppath = os.path.join(REMOTE_EXPS, path)
        local_exppath = os.path.join(LOCAL_EXPS, path)

        if REMOTE:
            exppath = remote_exppath
            repo = repo or REMOTE_REPO
            environment = environment or MaiaEnvironment(
                priority=priority, queue=queue, email=email)
        else:
            exppath = local_exppath
            repo = repo or LOCAL_REPO
            environment = environment or LocalEnvironment(processes=processes)

        DownwardExperiment.__init__(self,
                                    path=exppath,
                                    environment=environment,
                                    repo=repo,
                                    combinations=combinations,
                                    limits=limits,
                                    cache_dir=cache_dir,
                                    **kwargs)

        self.set_path_to_python(PYTHON)

        if attributes is None:
            attributes = ATTRIBUTES

        # Add report steps
        abs_report_file = os.path.join(self.eval_dir, '%s-abs.html' % expname)
        self.add_report(AbsoluteReport(attributes=attributes,
                                       colored=True,
                                       derived_properties=derived_properties),
                        name='report-abs',
                        outfile=abs_report_file)

        if REMOTE:
            # Compress the experiment directory
            self.add_step(Step.zip_exp_dir(self))
            self.add_step(
                Step('zip-eval-dir',
                     call, [
                         'tar', '-cjf', self.name + '-eval.tar.bz2',
                         self.name + '-eval'
                     ],
                     cwd=os.path.dirname(self.path)))

        self.add_step(Step.remove_exp_dir(self))
        self.add_step(
            Step('remove-eval-dir',
                 shutil.rmtree,
                 self.eval_dir,
                 ignore_errors=True))

        if not REMOTE:
            # Copy the evaluation directory to the local machine
            self.add_step(
                Step('scp-eval-dir', call, [
                    'scp', '-r',
                    '%s:%s-eval' % (SCP_LOGIN, remote_exppath),
                    '%s-eval' % local_exppath
                ]))

            # Copy the zipped evaluation directory to the local machine
            self.add_step(
                Step('scp-zipped-eval-dir', call, [
                    'scp', '-r',
                    '%s:%s-eval.tar.bz2' % (SCP_LOGIN, remote_exppath),
                    '%s-eval.tar.bz2' % local_exppath
                ]))

            # Copy the zipped experiment directory to local directory
            self.add_step(
                Step('scp-exp-dir', call, [
                    'scp', '-r',
                    '%s:%s.tar.bz2' % (SCP_LOGIN, remote_exppath),
                    '%s.tar.bz2' % local_exppath
                ]))

        # Unzip the experiment and evaluation directories
        self.add_step(Step.unzip_exp_dir(self))
        self.add_step(
            Step('unzip-eval-dir',
                 call, ['tar', '-xjf', self.name + '-eval.tar.bz2'],
                 cwd=os.path.dirname(self.path)))
Example #9
* add a custom result parser
* use the default report
* use additional standard steps
"""

import os

from lab.experiment import Experiment
from lab.environments import LocalEnvironment
from lab.experiment import Step
from lab.reports import Report


EXPNAME = 'simple-exp'
EXPPATH = os.path.join('/tmp', EXPNAME)
ENV = LocalEnvironment()

# Create a new experiment.
exp = Experiment(path=EXPPATH, environment=ENV)
exp.add_resource('SIMPLE_PARSER', 'simple-parser.py', 'simple-parser.py')
reportfile = os.path.join(exp.eval_dir, EXPNAME + '.html')

run = exp.add_run()
run.add_command('list-dir', ['ls', '-l'])
# Every run has to have an id in the form of a list.
run.set_property('id', ['current-dir'])
run.require_resource('SIMPLE_PARSER')
run.add_command('parse', ['SIMPLE_PARSER'])

# Make a default report.
exp.add_report(Report(attributes=['number_of_files', 'first_number']),
               outfile=reportfile)
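The report above expects the attributes number_of_files and first_number, which the custom parser has to produce. A hypothetical sketch of simple-parser.py, assuming Lab's parser API (Parser.add_pattern and Parser.add_function) and that the attribute names match the report:

#! /usr/bin/env python
"""Hypothetical sketch of simple-parser.py."""

from glob import glob

from lab.parser import Parser


def count_files(content, props):
    # Store the number of files in the run directory.
    props['number_of_files'] = len(glob('*'))


parser = Parser()
# Extract the first number that appears in the run log.
parser.add_pattern('first_number', r'(\d+)', type=int, file='run.log')
parser.add_function(count_files)
parser.parse()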
Example #10
def main(revisions=None):
    benchmarks_dir = os.environ["DOWNWARD_BENCHMARKS"]
    suite = [
        "agricola-sat18-strips", "airport", "barman-sat11-strips",
        "barman-sat14-strips", "blocks", "childsnack-sat14-strips",
        "data-network-sat18-strips", "depot", "driverlog",
        "elevators-sat08-strips", "elevators-sat11-strips",
        "floortile-sat11-strips", "floortile-sat14-strips", "freecell",
        "ged-sat14-strips", "grid", "gripper", "hiking-sat14-strips",
        "logistics00", "logistics98", "miconic", "movie", "mprime", "mystery",
        "nomystery-sat11-strips", "openstacks-sat08-strips",
        "openstacks-sat11-strips", "openstacks-sat14-strips",
        "openstacks-strips", "organic-synthesis-sat18-strips",
        "organic-synthesis-split-sat18-strips", "parcprinter-08-strips",
        "parcprinter-sat11-strips", "parking-sat11-strips",
        "parking-sat14-strips", "pathways", "pegsol-08-strips",
        "pegsol-sat11-strips", "pipesworld-notankage", "pipesworld-tankage",
        "psr-small", "rovers", "satellite", "scanalyzer-08-strips",
        "scanalyzer-sat11-strips", "snake-sat18-strips",
        "sokoban-sat08-strips", "sokoban-sat11-strips", "spider-sat18-strips",
        "storage", "termes-sat18-strips", "tetris-sat14-strips",
        "thoughtful-sat14-strips", "tidybot-sat11-strips", "tpp",
        "transport-sat08-strips", "transport-sat11-strips",
        "transport-sat14-strips", "trucks-strips", "visitall-sat11-strips",
        "visitall-sat14-strips", "woodworking-sat08-strips",
        "woodworking-sat11-strips", "zenotravel"
    ]
    # suite = ["elevators-sat08-strips:p01.pddl"]
    environment = LocalEnvironment(processes=48)

    BUILD_OPTIONS = ["--build", "release64"]
    DRIVER_OPTIONS = [
        "--transform-task", "builds/h2-mutexes/bin/preprocess",
        "--overall-time-limit", "30m", "--overall-memory-limit", "4096M",
        "--alias", "seq-sat-fdss-2018"
    ]

    configs = [
        IssueConfig("fdss", [],
                    build_options=BUILD_OPTIONS,
                    driver_options=DRIVER_OPTIONS)
    ]

    exp = IssueExperiment(
        revisions=revisions,
        configs=configs,
        environment=environment,
    )
    exp.add_suite(benchmarks_dir, suite)

    #exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
    #exp.add_parser(exp.LAB_DRIVER_PARSER)
    #exp.add_parser(exp.EXITCODE_PARSER)
    #exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    #exp.add_parser(exp.PLANNER_PARSER)

    attributes = exp.DEFAULT_TABLE_ATTRIBUTES

    exp.add_step('build', exp.build)
    exp.add_step('start', exp.start_runs)
    exp.add_fetcher(name='fetch')

    # exp.add_comparison_table_step(attributes=attributes)

    exp.add_absolute_report_step(attributes=attributes)

    exp.run_steps()
Example #11

# NOTE: this snippet begins mid-call; the argument name below is inferred
# from the later uses of args.EXP_DIR.
ARGPARSER.add_argument('--EXP_DIR',
                       default=config['EXP_NAME'],
                       help='path to directory to store results')
# Parse the arguments
args = ARGPARSER.parse_args()
args.TIME_LIMIT = config['TIME_LIMIT']  # seconds
args.MEMORY_LIMIT = config['MEMORY_LIMIT']

#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx#

#---- SETUP EXPERIMENT -------------------------------------------------------#

# Setup local/remote environment
if REMOTE:
    ENV = None  # To be used later for HPC infrastructure.
else:
    # NOTE: if "processes" is omitted, LocalEnvironment defaults to the
    # number of CPUs.
    ENV = LocalEnvironment(processes=args.NUM_PROC)

exp = Experiment(path=join(args.EXP_DIR, "results"), environment=ENV)
exp.add_parser(join(CWD, Path(LOG_PARSER)))

if '1' in args.steps or 'build' in args.steps or args.run_all_steps:
    # Don't overwrite existing results; create numbered backups instead.
    index = 0
    while isdir(args.EXP_DIR) and index != 1000:
        index += 1
        try:
            rename(args.EXP_DIR, args.EXP_DIR + '_' + str(index))
        except OSError:
            pass
    makedirs(args.EXP_DIR)
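The step-selection logic above relies on args.steps, args.run_all_steps and args.NUM_PROC. A hypothetical sketch of an ARGPARSER setup that would produce those attributes (flag names, dest values and defaults are assumptions, not from the original script):

import argparse

ARGPARSER = argparse.ArgumentParser()
# Positional step selection, e.g. "exp.py 1" or "exp.py build".
ARGPARSER.add_argument('steps', nargs='*', default=[],
                       help='steps to run, by number or name')
ARGPARSER.add_argument('--run-all-steps', dest='run_all_steps',
                       action='store_true',
                       help='run all steps in order')
ARGPARSER.add_argument('--num-proc', dest='NUM_PROC', type=int, default=2,
                       help='number of parallel local processes')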