Example 1
    def __init__(self, path=None, environment=None, revision_cache=None):
        """
        See :class:`lab.experiment.Experiment` for an explanation of
        the *path* and *environment* parameters.

        *revision_cache* is the directory for caching Fast Downward
        revisions. It defaults to ``<scriptdir>/data/revision-cache``.
        This directory can become very large since each revision uses
        about 30 MB.

        >>> from lab.environments import BaselSlurmEnvironment
        >>> env = BaselSlurmEnvironment(email="*****@*****.**")
        >>> exp = FastDownwardExperiment(environment=env)

        You can add parsers with :meth:`.add_parser()`. See
        :ref:`parsing` for how to write custom parsers and
        :ref:`downward-parsers` for the list of built-in parsers. Which
        parsers you should use depends on the algorithms you're running.
        For single-search experiments, we recommend adding the following
        parsers in this order:

        >>> exp.add_parser(exp.EXITCODE_PARSER)
        >>> exp.add_parser(exp.TRANSLATOR_PARSER)
        >>> exp.add_parser(exp.SINGLE_SEARCH_PARSER)
        >>> exp.add_parser(exp.PLANNER_PARSER)

        """
        Experiment.__init__(self, path=path, environment=environment)

        self.revision_cache = revision_cache or os.path.join(
            get_default_data_dir(), 'revision-cache')

        self._suites = defaultdict(list)

        # Use OrderedDict to ensure that names are unique and ordered.
        self._algorithms = OrderedDict()

        self.add_command('remove-output-sas', ['rm', '-f', 'output.sas'])
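
The constructor only sets up the experiment skeleton; benchmarks, algorithms, and steps are added afterwards. A minimal driver sketch, assuming the ``add_suite``/``add_algorithm``/``add_step`` API of recent lab versions (the repository path, revision, and benchmark names are placeholders):

    import os

    from downward.experiment import FastDownwardExperiment

    REPO = os.path.expanduser("~/fast-downward")  # placeholder path
    BENCHMARKS = os.path.join(REPO, "misc", "tests", "benchmarks")

    exp = FastDownwardExperiment()

    # Recommended parsers for single-search experiments, in this order.
    exp.add_parser(exp.EXITCODE_PARSER)
    exp.add_parser(exp.TRANSLATOR_PARSER)
    exp.add_parser(exp.SINGLE_SEARCH_PARSER)
    exp.add_parser(exp.PLANNER_PARSER)

    exp.add_suite(BENCHMARKS, ["gripper:prob01.pddl"])
    exp.add_algorithm("blind", REPO, "main", ["--search", "astar(blind())"])

    exp.add_step("build", exp.build)
    exp.add_step("start", exp.start_runs)
    exp.run_steps()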

Example 2

    def __init__(self, path=None, environment=None, revision_cache=None):
        """
        See :class:`lab.experiment.Experiment` for an explanation of
        the *path* and *environment* parameters.

        *revision_cache* is the directory for caching Fast Downward
        revisions. It defaults to ``<scriptdir>/data/revision-cache``.
        This directory can become very large since each revision uses
        about 30 MB.

        >>> from lab.environments import MaiaEnvironment
        >>> env = MaiaEnvironment(priority=-2)
        >>> exp = FastDownwardExperiment(environment=env)

        """
        Experiment.__init__(self, path=path, environment=environment)

        self.revision_cache = revision_cache or os.path.join(
            get_default_data_dir(), 'revision-cache')

        self._suites = defaultdict(list)

        # Use OrderedDict to ensure that names are unique and ordered.
        self._algorithms = OrderedDict()
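
Since each cached revision takes about 30 MB, it usually pays off to share one revision cache between experiments instead of relying on the per-script default. A minimal sketch (the cache location is an arbitrary placeholder):

    import os

    # Hypothetical shared location; any writable directory works.
    REVISION_CACHE = os.path.expanduser("~/lab/revision-cache")

    exp1 = FastDownwardExperiment(path="exp1", revision_cache=REVISION_CACHE)
    exp2 = FastDownwardExperiment(path="exp2", revision_cache=REVISION_CACHE)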

Example 3
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other things
        this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        #if not repo or not os.path.isdir(repo):
        #    logging.critical('The path "%s" is not a local Fast Downward '
        #                     'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key in limits:
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        # Copy the defaults so the module-level LIMITS dict is not mutated.
        self.limits = dict(LIMITS)
        self.limits.update(limits)

        # Record whether this is a compact experiment, i.e., whether
        # preprocessed tasks are referenced rather than copied.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        self.add_PAC_fetcher(src='/home/sternron/gal-dreiman/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',  # TODO: make this a parameter.
                             dest=self.search_exp_path,
                             name='fetch-preprocess-results',
                             copy_all=True,
                             write_combined_props=False)  # New fetcher that copies preprocess results for PAC.
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
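
A driver script for this class typically configures suites and configurations and then triggers the steps registered above. A sketch, assuming the ``add_suite``/``add_config`` API of lab 1.x (the paths and the configuration are placeholders):

    repo = '/path/to/downward-repo'  # placeholder
    exp = DownwardExperiment('/tmp/pac-exp', repo,
                             limits={'search_time': 300})
    exp.add_suite('gripper:prob01.pddl')
    exp.add_config('lmcut', ['--search', 'astar(lmcut())'])
    # Run the preprocess/search steps that __init__ registered.
    exp()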

Example 4
    def __init__(
        self,
        suites,
        num_runs=30,
        time_per_step=1.0,
        initial_port=2000,
        rddlsim_seed=0,
        rddlsim_enforces_runtime=False,
        revision_cache=None,
        time_buffer=300,
        memory_limit=int(3.5 * 1024),
        soft_stdout_limit=10 * 1024,
        hard_stdout_limit=20 * 1024,
        soft_stderr_limit=64,
        hard_stderr_limit=10 * 1024,
        path=None,
        environment=None,
    ):
        """
        *suites* is a list of :class:`prostlab.suites.Problem` objects that describes
        the set of benchmarks used in the experiment.

        *num_runs* is the number of times each algorithm is executed on each instance.

        *time_per_step* is the time in seconds each algorithm has per step. A total time
        per instance is computed from this, the *num_runs*, the instance horizon and the 
        *time_buffer*.

        *initial_port* is the first port that is used for TCP/IP communication between
        an algorithm and rddlsim. 

        *rddlsim_seed* is the value with which rddlsim is seeded.

        If *rddlsim_enforces_runtime* is True, rddlsim terminates after the time that is
        computed as the product of *num_runs*, *time_per_step* and the instance horizon.

        *revision_cache* is the directory for caching Prost revisions. It defaults to 
        ``<scriptdir>/data/revision-cache``.

        *time_buffer* is the time that is allowed in addition to the product of *num_runs*,
        *time_per_step* and the instance horizon. This must include the time the parser
        requires.

        *memory_limit* is the hard memory limit in MiB. Additionally,
        *memory_limit* - 512 MiB is passed to Prost as a soft memory limit.

        *soft_stdout_limit*, *hard_stdout_limit*, *soft_stderr_limit* and
        *hard_stderr_limit* limit the amount of data each experiment may write to disk.

        See :class:`lab.experiment.Experiment` for an explanation of
        the *path* and *environment* parameters.

        >>> from lab.environments import BaselSlurmEnvironment
        >>> env = BaselSlurmEnvironment(email="*****@*****.**")
        >>> exp = ProstExperiment(suites=[], environment=env)

        You can add parsers with :meth:`.add_parser()`. See
        :ref:`parsing` for how to write custom parsers.

        """
        Experiment.__init__(self, path=path, environment=environment)

        self.suites = suites
        self.num_runs = num_runs
        self.time_per_step = time_per_step
        self.initial_port = initial_port
        self.rddlsim_seed = rddlsim_seed
        self.rddlsim_enforces_runtime = rddlsim_enforces_runtime

        self.revision_cache = revision_cache or os.path.join(
            get_default_data_dir(), "revision-cache")

        self.time_buffer = time_buffer
        self.memory_limit = memory_limit
        self.soft_stdout_limit = soft_stdout_limit
        self.hard_stdout_limit = hard_stdout_limit
        self.soft_stderr_limit = soft_stderr_limit
        self.hard_stderr_limit = hard_stderr_limit

        # Use OrderedDict to ensure that names are unique and ordered.
        self.configs = OrderedDict()
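
The docstring spells out how the per-instance limits are derived from *num_runs*, *time_per_step*, the instance horizon and *time_buffer*. A small sketch of that arithmetic (the helper name and the example horizon are made up for illustration):

    def total_time_per_instance(num_runs, time_per_step, horizon, time_buffer):
        # Hypothetical helper mirroring the rule described above: rddlsim gets
        # num_runs * time_per_step * horizon seconds, plus a buffer that also
        # covers parsing.
        return num_runs * time_per_step * horizon + time_buffer

    # E.g., 30 runs at 1.0 s/step on an instance with horizon 40:
    print(total_time_per_instance(30, 1.0, 40, 300))  # 1500.0 seconds

    # The soft memory limit passed to Prost is 512 MiB below the hard limit.
    memory_limit = int(3.5 * 1024)       # 3584 MiB
    prost_soft_limit = memory_limit - 512  # 3072 MiB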

Example 5
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other things
        this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        if not repo or not os.path.isdir(repo):
            logging.critical('The path "%s" is not a local Fast Downward '
                             'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key in limits:
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        # Copy the defaults so the module-level LIMITS dict is not mutated.
        self.limits = dict(LIMITS)
        self.limits.update(limits)

        # Record whether this is a compact experiment, i.e., whether
        # preprocessed tasks are referenced rather than copied.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        self.add_PAC_fetcher(src='/home/gal-d/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',  # TODO: make this a parameter.
                             dest=self.search_exp_path,
                             name='fetch-preprocess-results',
                             copy_all=True,
                             write_combined_props=False)  # New fetcher that copies preprocess results for PAC.
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
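
Both variants of this fork hard-code the source of the PAC preprocess results (see the TODO comments). A sketch of how that TODO could be resolved, assuming a new, hypothetical *pac_preprocess_dir* constructor parameter:

    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None,
                 pac_preprocess_dir=None):
        ...
        # Fall back to the previously hard-coded location if none is given.
        src = pac_preprocess_dir or '/home/gal-d/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess'
        self.add_PAC_fetcher(src=src,
                             dest=self.search_exp_path,
                             name='fetch-preprocess-results',
                             copy_all=True,
                             write_combined_props=False)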