Beispiel #1
0
    ]),
    IssueConfig('dfp-ginf-noprune', [
        '--search',
        'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))'
    ]),
    IssueConfig('rl-f50k-noprune', [
        '--search',
        'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))'
    ]),
    IssueConfig('dfp-f50k-noprune', [
        '--search',
        'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))'
    ]),
]
# Run every optimal-track benchmark by default; REVISIONS/CONFIGS are
# defined earlier in the file (only partially visible in this chunk).
SUITE = DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(priority=0, email='*****@*****.**')

# For local test runs, shrink the suite to a handful of tasks and run
# on the local machine instead of the grid.
if is_test_run():
    SUITE = [
        'depot:p01.pddl', 'depot:p02.pddl',
        'parcprinter-opt11-strips:p01.pddl',
        'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'
    ]
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
# Ship the merge-and-shrink parser alongside the experiment so each run
# can invoke it; 'ms_parser' is the resource name, 'ms-parser.py' the
# file copied into the experiment directory.
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
Beispiel #2
0
from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

# NOTE(review): `os` is used here but its import is not visible in this
# chunk — confirm it is imported at the top of the file.
DIR = os.path.dirname(os.path.abspath(__file__))
# Benchmarks live outside the repo; the path comes from the environment.
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# Compare the issue branch against its base revision.
REVISIONS = ["issue704-base", "issue704-v1"]
CONFIGS = [
    IssueConfig('astar-blind-ssec',
                ['--search', 'astar(blind(), pruning=stubborn_sets_ec())'])
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(priority=0,
                              email="*****@*****.**")

# Test runs use the tiny default test suite and a single local process.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attribute in ["total_time"]:
Beispiel #3
0
# Revisions under comparison for issue213, built both 32- and 64-bit.
REVISIONS = ["issue213-v1", "issue213-v3", "issue213-v4"]
BUILDS = ["release32", "release64"]
# (nick, search expression) pairs; crossed with BUILDS below to yield
# one configuration per (search, build) combination.
SEARCHES = [
    ("blind", "astar(blind())"),
]
CONFIGS = [
    IssueConfig(
        # Name each config explicitly instead of format(**locals()):
        # relying on the comprehension's locals() is fragile and hides
        # which names the template actually uses.
        "{nick}-{build}".format(nick=nick, build=build),
        ["--search", search],
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="*****@*****.**")

# Test runs use the tiny default test suite on the local machine.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()

attributes = [
    "coverage", "error", "expansions_until_last_jump", "memory",
Beispiel #4
0
    def __init__(self,
                 configs,
                 revisions,
                 suite,
                 build_options=None,
                 driver_options=None,
                 grid_priority=None,
                 test_suite=None,
                 email=None,
                 processes=1,
                 **kwargs):
        """Create an FastDownwardExperiment with some convenience features.
        All configs will be run on all revisions. Inherited options
        *path*, *environment* and *cache_dir* from FastDownwardExperiment
        are not supported and will be automatically set.

        *configs* must be a non-empty dict of {nick: cmdline} pairs
        that sets the planner configurations to test. nick will
        automatically get the revision prepended, e.g.
        'issue123-base-<nick>'::

            IssueExperiment(configs={
                "lmcut": ["--search", "astar(lmcut())"],
                "ipdb":  ["--search", "astar(ipdb())"]})

        *revisions* must be a non-empty list of revisions, which
        specify which planner versions to use in the experiment.
        The same versions are used for translator, preprocessor
        and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"])

        *suite* sets the benchmarks for the experiment. It must be a
        single string or a list of strings specifying domains or
        tasks. The downward.suites module has many predefined
        suites. ::

            IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

            from downward import suites
            IssueExperiment(suite=suites.suite_all())
            IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
            IssueExperiment(suite=suites.suite_optimal())

        Use *grid_priority* to set the job priority for cluster
        experiments. It must be in the range [-1023, 0] where 0 is the
        highest priority. By default the priority is 0. ::

            IssueExperiment(grid_priority=-500)

        Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::

            IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])

        """

        # Store the chosen environment in kwargs rather than a local
        # variable. The previous code bound a local "environment" on
        # only two of the three paths and then always passed
        # environment=environment to the base class, which raised
        # UnboundLocalError when the caller supplied environment=...
        # outside a test run, and a duplicate-keyword TypeError in
        # test runs. Since *environment* is documented as unsupported
        # and automatically set, test runs may simply overwrite it.
        if is_test_run():
            kwargs["environment"] = LocalEnvironment(processes=processes)
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
                                                    email=email)

        FastDownwardExperiment.__init__(self, **kwargs)

        # Automatically deduce the downward repository from the file.
        repo = get_repo_base()
        self.algorithm_nicks = []
        self.revisions = revisions
        # Register every (revision, config) pair as one algorithm,
        # nicknamed '<revision>-<config nick>'.
        for nick, cmdline in configs.items():
            for rev in revisions:
                algo_nick = '%s-%s' % (rev, nick)
                self.add_algorithm(algo_nick, repo, rev, cmdline,
                                   build_options, driver_options)
                self.algorithm_nicks.append(algo_nick)

        # Benchmarks are expected inside the repository checkout.
        benchmarks_dir = os.path.join(repo, 'benchmarks')
        self.add_suite(benchmarks_dir, suite)
        self.search_parsers = []
Beispiel #5
0
    'openstacks-sat08-strips', 'openstacks-sat11-strips',
    'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs',
    'parcprinter-08-strips', 'parcprinter-sat11-strips',
    'parking-sat11-strips', 'parking-sat14-strips', 'pathways',
    'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips',
    'philosophers', 'pipesworld-notankage', 'pipesworld-tankage',
    'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite',
    'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
    'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
    'tetris-sat14-strips', 'thoughtful-sat14-strips',
    'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
    'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
    'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
    'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel']

# SUITE, REVISIONS and CONFIGS are defined earlier in the file (only
# partially visible in this chunk).
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="*****@*****.**")

# Test runs use the tiny default test suite and a single local process.
if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time", "memory"])
Beispiel #6
0
    def __init__(self,
                 revisions,
                 configs,
                 suite,
                 grid_priority=None,
                 path=None,
                 test_suite=None,
                 email=None,
                 **kwargs):
        """Set up a FastDownwardExperiment with convenience defaults.

        *revisions* is a non-empty list of planner revisions; each one
        is used for translator, preprocessor and search alike. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        *configs* is a non-empty list of IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        *suite* selects the benchmarks: a single string or a list of
        strings naming domains or tasks. The downward.suites module
        ships many predefined suites. ::

            IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

        *grid_priority* sets the cluster job priority, in [-1023, 0]
        with 0 (the default) being the highest.

        *path*, if given, is the directory where the experiment is
        built (e.g. /home/john/experiments/issue123/exp01/); when
        omitted it is derived from the main script's filename
        (experiments/issue123/exp01.py ->
        experiments/issue123/data/issue123-exp01/).

        *test_suite* overrides the benchmarks used in test runs
        (default: the first gripper task). ::

            IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])

        *email*, if given, is notified when a cluster experiment
        finishes.
        """

        # Test runs execute locally on a reduced suite; otherwise fall
        # back to the Maia grid unless an environment was supplied.
        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
                                                    email=email)

        if not path:
            path = get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        repository = get_repo_base()
        # One algorithm per (revision, config) pair, in revision-major
        # order.
        for revision in revisions:
            for issue_config in configs:
                algo_nick = get_algo_nick(revision, issue_config.nick)
                self.add_algorithm(algo_nick,
                                   repository,
                                   revision,
                                   issue_config.component_options,
                                   build_options=issue_config.build_options,
                                   driver_options=issue_config.driver_options)

        # Benchmarks are expected inside the repository checkout.
        self.add_suite(os.path.join(repository, "benchmarks"), suite)

        self._revisions = revisions
        self._configs = configs
Beispiel #7
0
    def __init__(self,
                 configs,
                 suite,
                 grid_priority=None,
                 path=None,
                 repo=None,
                 revisions=None,
                 search_revisions=None,
                 test_suite=None,
                 **kwargs):
        """Set up a DownwardExperiment with convenience defaults.

        *configs* is a non-empty dict mapping config nicks to planner
        command lines, e.g.::

            IssueExperiment(configs={
                "lmcut": ["--search", "astar(lmcut())"],
                "ipdb":  ["--search", "astar(ipdb())"]})

        *suite* selects the benchmarks: one string or a list of
        strings naming domains or tasks (downward.suites ships many
        predefined suites). ::

            IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

        *grid_priority* sets the cluster job priority in [-1023, 0];
        0 (the default) is the highest.

        *path*, if given, is where the experiment is built; when
        omitted it is derived from the main script's filename
        (experiments/issue123/exp01.py ->
        experiments/issue123/data/issue123-exp01/).

        *repo*, if given, is the root of a local Fast Downward
        repository; when omitted it is derived from the main script's
        path.

        *revisions*, if given, is a non-empty list of revisions used
        for translator, preprocessor and search alike. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"])

        *search_revisions*, if given, is a non-empty list of revisions
        for the search component; every run uses the translator and
        preprocessor of its first entry. ::

            IssueExperiment(search_revisions=["default", "issue123"])

        Explicit (translator, preprocessor, planner) triples can still
        be passed via the base class's *combinations* parameter.
        *revisions*, *search_revisions* and *combinations* may be
        mixed freely, but at least one of them must be given.

        *test_suite* overrides the benchmarks for test runs (default:
        the first gripper task). ::

            IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
        """

        # Test runs execute locally on a reduced suite; otherwise fall
        # back to the Maia grid unless an environment was supplied.
        if is_test_run():
            kwargs["environment"] = LocalEnvironment()
            suite = test_suite or self.DEFAULT_TEST_SUITE
        elif "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        path = path if path is not None else get_data_dir()
        repo = repo if repo is not None else get_repo_base()

        kwargs.setdefault("combinations", [])

        if not (revisions or search_revisions or kwargs["combinations"]):
            raise ValueError('At least one of "revisions", "search_revisions" '
                             'or "combinations" must be given')

        if revisions:
            # Each revision supplies all three components.
            for rev in revisions:
                kwargs["combinations"].append(
                    (Translator(repo, rev),
                     Preprocessor(repo, rev),
                     Planner(repo, rev)))

        if search_revisions:
            # Translate and preprocess with the first revision only,
            # reusing each search revision's nick on all parts so the
            # revision nicks stay short.
            base_rev = search_revisions[0]
            for rev in search_revisions:
                kwargs["combinations"].append(
                    (Translator(repo, base_rev, nick=rev),
                     Preprocessor(repo, base_rev, nick=rev),
                     Planner(repo, rev, nick=rev)))

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        self._config_nicks = []
        for config_nick, config in configs.items():
            self.add_config(config_nick, config)

        self.add_suite(suite)
Beispiel #8
0
from common_setup import IssueConfig, IssueExperiment, is_test_run

# Compare the issue branch against its base revision.
REVS = ["issue551-base", "issue551-v4"]
# Benchmarks live in the user's home directory, not in the repo.
BENCHMARKS = os.path.expanduser('~/downward-benchmarks')
SUITE = suites.suite_optimal()

CONFIGS = [
    IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
    IssueConfig("seq-opt-bjolp-ocp", [
        "--landmarks", "lm=lm_merged([lm_rhw(),lm_hm(m=1)])",
        "--heuristic", "hlm=lmcount(lm,admissible=true,optimal=true)",
        "--search",    "astar(hlm,mpd=true)"]),
]

ENVIRONMENT = MaiaEnvironment(
    priority=0, email="*****@*****.**")

# Test runs use the tiny default test suite and a single local process.
if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVS,
    configs=CONFIGS,
    environment=ENVIRONMENT
)

exp.add_suite(BENCHMARKS, SUITE)

# NOTE(review): add_comparison_table_step is called twice (default
# attributes, then a custom attribute list) — verify both steps get
# distinct names and this duplication is intentional.
exp.add_comparison_table_step()
exp.add_comparison_table_step(attributes=["memory","total_time", "search_time", "landmarks_generation_time"])
Beispiel #9
0
    def __init__(self, configs=None, grid_priority=None, path=None,
                 repo=None, revisions=None, search_revisions=None,
                 combinations=None, suite=None, do_test_run="auto",
                 test_suite=DEFAULT_TEST_SUITE, **kwargs):
        """Set up a DownwardExperiment with convenience defaults.

        *configs*, if given, is a dict of {nick: cmdline} pairs naming
        the planner configurations to test.

        *grid_priority*, if given and no environment is requested via
        **kwargs, selects the Maia environment at that priority.

        *path* defaults to a directory derived from the main script's
        filename; *repo* defaults to the repository base derived from
        the main script's path.

        *combinations*, if given, is a non-empty list of
        (translator_rev, preprocessor_rev, search_rev) triples.

        *revisions*, if given, is a non-empty list of revisions used
        for translator, preprocessor and search alike.

        *search_revisions*, if given, is a non-empty list of search
        revisions; all runs use the translator and preprocessor of its
        first entry.

        *combinations*, *revisions* and *search_revisions* may be
        mixed freely.

        *suite*, if given, names a problem suite.

        When *do_test_run* is true, *grid_priority* and the inherited
        *environment* argument are ignored, a local experiment with
        default arguments runs instead, and *suite* is replaced by
        *test_suite*. When it is the string "auto" (the default), a
        test run happens everywhere except on a grid machine — one
        whose node name ends with ".cluster".
        """

        # "auto" resolves to: test locally unless we are on the grid.
        if do_test_run == "auto":
            do_test_run = not is_on_grid()

        if do_test_run:
            # Local test run: drop grid settings, swap in the test suite.
            grid_priority = None
            kwargs.pop("environment", None)
            suite = test_suite

        if grid_priority is not None and "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        path = get_data_dir() if path is None else path
        repo = get_repo_base() if repo is None else repo

        combos, combo_names = build_combos_with_names(
            repo=repo,
            combinations=combinations,
            revisions=revisions,
            search_revisions=search_revisions)
        self._combination_names = combo_names
        kwargs["combinations"] = combos

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        if configs is not None:
            for config_nick, config in configs.items():
                self.add_config(config_nick, config)

        if suite is not None:
            self.add_suite(suite)

        self._report_prefix = get_experiment_name()
Beispiel #10
0
    def __init__(self,
                 configs=None,
                 grid_priority=None,
                 path=None,
                 repo=None,
                 revisions=None,
                 search_revisions=None,
                 suite=None,
                 parsers=None,
                 **kwargs):
        """Set up a DownwardExperiment with convenience defaults.

        *configs*, if given, is a dict of {nick: cmdline} pairs naming
        the planner configurations to test.

        *grid_priority*, if given and no environment is requested via
        **kwargs, selects the Maia environment at that priority.

        *path* defaults to a directory derived from the main script's
        filename; *repo* defaults to the repository base derived from
        the main script's path.

        *revisions*, if given, is a non-empty list of revisions used
        for translator, preprocessor and search alike.

        *search_revisions*, if given, is a non-empty list of search
        revisions; all runs use the translator and preprocessor of its
        first entry.

        *suite*, if given, names a problem suite.

        *parsers*, if given, is a list of paths to parsers to run in
        addition to search_parser.py.

        The base-class option "combinations" and the options
        "revisions" and "search_revisions" are mutually exclusive."""

        if grid_priority is not None and "environment" not in kwargs:
            kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

        if path is None:
            path = get_data_dir()

        if repo is None:
            repo = get_repo_base()

        # Reject every combination of two or more revision options.
        rev_options = (revisions, search_revisions,
                       kwargs.get("combinations"))
        if sum(option is not None for option in rev_options) > 1:
            raise ValueError('must specify exactly one of "revisions", '
                             '"search_revisions" or "combinations"')

        # See add_comparison_table_step for more on this variable.
        self._HACK_revisions = revisions

        if revisions is not None:
            if not revisions:
                raise ValueError("revisions cannot be empty")
            # Each revision supplies all three components.
            kwargs["combinations"] = [
                (Translator(repo, rev), Preprocessor(repo, rev),
                 Planner(repo, rev))
                for rev in revisions]

        if search_revisions is not None:
            if not search_revisions:
                raise ValueError("search_revisions cannot be empty")
            # Every run shares the translator and preprocessor of the
            # first search revision.
            base_rev = search_revisions[0]
            shared_translator = Translator(repo, base_rev)
            shared_preprocessor = Preprocessor(repo, base_rev)
            kwargs["combinations"] = [
                (shared_translator, shared_preprocessor, Planner(repo, rev))
                for rev in search_revisions]

        self._additional_parsers = parsers or []

        DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

        if configs is not None:
            for config_nick, config in configs.items():
                self.add_config(config_nick, config)

        if suite is not None:
            self.add_suite(suite)

        self._report_prefix = get_experiment_name()
    def __init__(self,
                 path=None,
                 repo=None,
                 environment=None,
                 combinations=None,
                 limits=None,
                 attributes=None,
                 derived_properties=None,
                 priority=0,
                 queue=None,
                 processes=2,
                 email=None,
                 cache_dir=CACHE_DIR,
                 **kwargs):
        """Set up a DownwardExperiment preconfigured for remote or local use.

        Depending on the module-level REMOTE flag, the experiment runs
        on the grid (Maia environment, remote repo/paths) or locally
        (LocalEnvironment, local repo/paths). An absolute report step
        is always added; zip/unzip, cleanup and scp-copy steps are
        registered so results can be moved between the remote and
        local machines.

        *path* defaults to the main script's basename without its
        extension. *priority*, *queue* and *email* configure the Maia
        environment; *processes* configures the local one. *attributes*
        defaults to the module-level ATTRIBUTES list.
        """
        if path is None:
            path = os.path.splitext(os.path.basename(sys.argv[0]))[0]

        # Experiment name is the last path component; used for report
        # filenames below.
        expname = os.path.basename(path)

        remote_exppath = os.path.join(REMOTE_EXPS, path)
        local_exppath = os.path.join(LOCAL_EXPS, path)

        # Pick repo, experiment path and environment for the current
        # machine; explicit arguments always win over the defaults.
        if REMOTE:
            exppath = remote_exppath
            repo = repo or REMOTE_REPO
            environment = environment or MaiaEnvironment(
                priority=priority, queue=queue, email=email)
        else:
            exppath = local_exppath
            repo = repo or LOCAL_REPO
            environment = environment or LocalEnvironment(processes=processes)

        DownwardExperiment.__init__(self,
                                    path=exppath,
                                    environment=environment,
                                    repo=repo,
                                    combinations=combinations,
                                    limits=limits,
                                    cache_dir=cache_dir,
                                    **kwargs)

        self.set_path_to_python(PYTHON)

        if attributes is None:
            attributes = ATTRIBUTES

        # Add report steps
        abs_report_file = os.path.join(self.eval_dir, '%s-abs.html' % expname)
        self.add_report(AbsoluteReport(attributes=attributes,
                                       colored=True,
                                       derived_properties=derived_properties),
                        name='report-abs',
                        outfile=abs_report_file)

        if REMOTE:
            # Compress the experiment directory
            self.add_step(Step.zip_exp_dir(self))
            # Also compress the eval dir so it can be scp'd in one piece.
            self.add_step(
                Step('zip-eval-dir',
                     call, [
                         'tar', '-cjf', self.name + '-eval.tar.bz2',
                         self.name + '-eval'
                     ],
                     cwd=os.path.dirname(self.path)))

        # Cleanup steps (registered on both remote and local machines).
        self.add_step(Step.remove_exp_dir(self))
        self.add_step(
            Step('remove-eval-dir',
                 shutil.rmtree,
                 self.eval_dir,
                 ignore_errors=True))

        if not REMOTE:
            # Copy the results to local directory
            self.add_step(
                Step('scp-eval-dir', call, [
                    'scp', '-r',
                    '%s:%s-eval' % (SCP_LOGIN, remote_exppath),
                    '%s-eval' % local_exppath
                ]))

            # Copy the results to local directory
            self.add_step(
                Step('scp-zipped-eval-dir', call, [
                    'scp', '-r',
                    '%s:%s-eval.tar.bz2' % (SCP_LOGIN, remote_exppath),
                    '%s-eval.tar.bz2' % local_exppath
                ]))

            # Copy the zipped experiment directory to local directory
            self.add_step(
                Step('scp-exp-dir', call, [
                    'scp', '-r',
                    '%s:%s.tar.bz2' % (SCP_LOGIN, remote_exppath),
                    '%s.tar.bz2' % local_exppath
                ]))

        # Unzip the experiment directory
        self.add_step(Step.unzip_exp_dir(self))
        self.add_step(
            Step('unzip-eval-dir',
                 call, ['tar', '-xjf', self.name + '-eval.tar.bz2'],
                 cwd=os.path.dirname(self.path)))
Beispiel #12
0
        ['--search', 'astar(const(infinity))'],
        build_options=["debug32"],
        driver_options=["--build=debug32"],
    ),
    IssueConfig(
        'debug-astar-blind',
        ['--search', 'astar(blind())'],
        build_options=["debug32"],
        driver_options=["--build=debug32"],
    ),
]
# Union of both default suites, deduplicated and sorted for a stable
# run order. sorted() already returns a list, so no list() wrapper is
# needed.
SUITE = sorted(
    set(common_setup.DEFAULT_OPTIMAL_SUITE)
    | set(common_setup.DEFAULT_SATISFICING_SUITE))
ENVIRONMENT = MaiaEnvironment(priority=0, email="*****@*****.**")

# Test runs use the tiny default test suite and a single local process.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)
# Ship the successor-generator parser with the experiment and run it
# after each search run.
exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])