Example #1
    def build(self, **kwargs):
        """Add Fast Downward code, runs and write everything to disk.

        This method is called by the second experiment step.

        """
        if not self._algorithms:
            logging.critical("You must add at least one algorithm.")

        # We convert the problems in suites to strings to avoid errors when converting
        # properties to JSON later. The clean but more complex solution would be to add
        # a method to the JSONEncoder that recognizes and correctly serializes the class
        # Problem.
        serialized_suites = {
            benchmarks_dir: [str(problem) for problem in benchmarks]
            for benchmarks_dir, benchmarks in self._suites.items()
        }
        self.set_property("suite", serialized_suites)
        self.set_property("algorithms", list(self._algorithms.keys()))

        self._cache_revisions()
        self._add_code()
        self._add_runs()

        Experiment.build(self, **kwargs)
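A side note on the comment above: the "clean but more complex solution" it mentions would be a custom JSONEncoder. A minimal sketch, assuming only that Problem instances have a useful string representation (the class name comes from the comment; nothing else here is from the original code):

import json

class ProblemEncoder(json.JSONEncoder):
    """Serialize benchmark Problem objects as strings."""
    def default(self, obj):
        if obj.__class__.__name__ == "Problem":
            return str(obj)
        return super().default(obj)

# json.dumps(properties, cls=ProblemEncoder) then handles suites directly.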
Example #2
    def build(self, stage, **kwargs):
        """Write the experiment to disk.

        Overriding methods cannot add resources or new files here, because we
        clear those lists in this method.
        """
        # Save the experiment stage in the properties
        self.set_property('stage', stage)
        self.set_property('suite', self.suites)
        self.set_property('algorithms', [algo.nick for algo in self._algorithms])
        self.set_property('repo', self.repo)
        self.set_property('default_limits', self.limits)

        self.runs = []
        self.new_files = []
        self.resources = []

        self._adapt_path(stage)
        self._setup_ignores(stage)
        self._checkout_and_compile(stage, **kwargs)

        if stage == 'preprocess':
            self._make_preprocess_runs()
        elif stage == 'search':
            self._make_search_runs()
        else:
            logging.critical('There is no stage "%s"' % stage)

        Experiment.build(self, **kwargs)
        self.path = self.orig_path
Example #3
    def build(self, stage, **kwargs):
        """Write the experiment to disk.

        Overriding methods cannot add resources or new files here, because we
        clear those lists in this method.
        """
        # Save the experiment stage in the properties
        self.set_property('stage', stage)
        self.set_property('suite', self.suites)
        self.set_property('algorithms', [algo.nick for algo in self._algorithms])
        self.set_property('repo', self.repo)
        self.set_property('default_limits', self.limits)

        self.runs = []
        self.new_files = []
        self.resources = []

        self._adapt_path(stage)
        self._setup_ignores(stage)
        self._checkout_and_compile(stage, **kwargs)

        if stage == 'preprocess':
            self._make_preprocess_runs()
        elif stage == 'search':
            self._make_search_runs()
        else:
            logging.critical('There is no stage "%s"' % stage)

        Experiment.build(self, **kwargs)
        self.path = self.orig_path
Example #4
    def run(self, stage):
        """Run the specified experiment stage.

        *stage* can be "preprocess" or "search".

        """
        self._adapt_path(stage)
        Experiment.run(self)
        self.path = self.orig_path
Example #5
    def run(self, stage):
        """Run the specified experiment stage.

        *stage* can be "preprocess" or "search".

        """
        self._adapt_path(stage)
        Experiment.run(self)
        self.path = self.orig_path
Example #6
    def build(self, **kwargs):
        """Add Fast Downward code, runs and write everything to disk.

        This method is called by the second experiment step.

        """
        if not self._algorithms:
            logging.critical('You must add at least one algorithm.')

        self.set_property('suite', self._suites)
        self.set_property('algorithms', list(self._algorithms.keys()))

        self._cache_revisions()
        self._add_code()
        self._add_runs()

        Experiment.build(self, **kwargs)
Example #7
    def build(self, **kwargs):
        """Add Prost code, runs and write everything to disk.

        This method is called by the second experiment step.

        """
        if not self.configs:
            logging.critical("You must add at least one config.")

        self.set_property("algorithms", list(self.configs.keys()))
        self.set_property("num_runs", self.num_runs)
        self.set_property("time_per_step", self.time_per_step)
        self.set_property("rddlsim_seed", self.rddlsim_seed)
        self.set_property("initial_port", self.initial_port)
        self.set_property("rddlsim_enforces_runtime",
                          self.rddlsim_enforces_runtime)

        self._cache_revisions()
        self._add_code()
        self._add_runs()

        Experiment.build(self, **kwargs)
Example #8
    def __init__(self, path=None, environment=None, revision_cache=None):
        """
        See :class:`lab.experiment.Experiment` for an explanation of
        the *path* and *environment* parameters.

        *revision_cache* is the directory for caching Fast Downward
        revisions. It defaults to ``<scriptdir>/data/revision-cache``.
        This directory can become very large since each revision uses
        about 30 MB.

        >>> from lab.environments import BaselSlurmEnvironment
        >>> env = BaselSlurmEnvironment(email="*****@*****.**")
        >>> exp = FastDownwardExperiment(environment=env)

        You can add parsers with :meth:`.add_parser()`. See
        :ref:`parsing` for how to write custom parsers and
        :ref:`downward-parsers` for the list of built-in parsers. Which
        parsers you should use depends on the algorithms you're running.
        For single-search experiments, we recommend adding the following
        parsers in this order:

        >>> exp.add_parser(exp.EXITCODE_PARSER)
        >>> exp.add_parser(exp.TRANSLATOR_PARSER)
        >>> exp.add_parser(exp.SINGLE_SEARCH_PARSER)
        >>> exp.add_parser(exp.PLANNER_PARSER)

        """
        Experiment.__init__(self, path=path, environment=environment)

        self.revision_cache = revision_cache or os.path.join(
            get_default_data_dir(), 'revision-cache')

        self._suites = defaultdict(list)

        # Use OrderedDict to ensure that names are unique and ordered.
        self._algorithms = OrderedDict()

        self.add_command('remove-output-sas', ['rm', '-f', 'output.sas'])
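For context, a minimal sketch of how this constructor is typically used; the repository path, revision and search options below are placeholders, not values from the original code:

from downward.experiment import FastDownwardExperiment

exp = FastDownwardExperiment(revision_cache="data/revision-cache")
# One benchmark directory plus a list of tasks.
exp.add_suite("/path/to/benchmarks", ["gripper:prob01.pddl"])
# Name, repository, revision, and component options.
exp.add_algorithm("blind", "/path/to/downward", "main", ["--search", "astar(blind())"])
exp.run_steps()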
Example #9
    def __init__(self, path=None, environment=None, revision_cache=None):
        """
        See :class:`lab.experiment.Experiment` for an explanation of
        the *path* and *environment* parameters.

        *revision_cache* is the directory for caching Fast Downward
        revisions. It defaults to ``<scriptdir>/data/revision-cache``.
        This directory can become very large since each revision uses
        about 30 MB.

        >>> from lab.environments import MaiaEnvironment
        >>> env = MaiaEnvironment(priority=-2)
        >>> exp = FastDownwardExperiment(environment=env)

        """
        Experiment.__init__(self, path=path, environment=environment)

        self.revision_cache = revision_cache or os.path.join(
            get_default_data_dir(), 'revision-cache')

        self._suites = defaultdict(list)

        # Use OrderedDict to ensure that names are unique and ordered.
        self._algorithms = OrderedDict()
Example #10
# That is the hacky part: To avoid changing Lab, we *delete* the experiment path
# from the arguments list!
del sys.argv[1]

if not os.path.isdir(EXP_PATH):
    print("Please define a valid experiment path.")
    exit(1)

suffix = ""
if EXP_PATH.startswith("results/prost_"):
    suffix = "_{}".format(EXP_PATH[14:])
    if suffix[-1] == "/":
        suffix = suffix[:-1]

# Create a new experiment.
exp = Experiment(path=EXP_PATH)

# Add Prost parser.
exp.add_parser("parser.py")
exp.add_parse_again_step()

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

# Make a basic table report with IPC scores.
ipc_scores = IPCScores()

exp.add_report(
    ProstBaseReport(
        attributes=ATTRIBUTES, filter=[ipc_scores.store_rewards, ipc_scores.add_score]
Example #11
def remove_timeouts(run):
    if 'total_time' in run:
        if run['total_time'] > 1780:
            run['ground'] = 0
            run['total_time'] = None
    return run


def create_same_attr(run):
    if 'translator_time_computing_model' in run:
        run['total_time'] = run['translator_time_computing_model']
    return run


def domain_as_category(run1, run2):
    # run2['domain'] has the same value, because we always
    # compare two runs of the same problem.
    return run1['domain']


exp = Experiment('data/combined-htg')
exp.add_fetcher('data/htg-eval', name='term-class')
exp.add_fetcher(
    '/home/blaas/work/projects/grounder/experiments/first-experiment/data/htg-eval',
    name='master')

exp.add_report(BaseReport(attributes=['total_time'],
                          filter=[remove_timeouts, create_same_attr]),
               outfile='htg.html')

exp.run_steps()
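Report filters such as remove_timeouts and create_same_attr receive one run (a dictionary of properties) at a time: returning a falsy value drops the run, while returning a (possibly modified) dictionary keeps it. Another filter in the same style, as a sketch (the coverage attribute is an assumption here):

def drop_unsolved(run):
    # Keep only runs that found a plan.
    return run if run.get('coverage') == 1 else False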
Example #12
PROPERTIES = {
    "ff-gripper-prob01.pddl": {
        "domain": "gripper",
        "problem": "prob01.pddl",
        "algorithm": "ff",
        "coverage": 1,
        "expansions": 1234,
    },
    "blind-gripper-prob01.pddl": {
        "domain": "gripper",
        "problem": "prob01.pddl",
        "algorithm": "blind",
        "coverage": 1,
        "expansions": 6543,
    },
}


def write_properties(eval_dir):
    tools.makedirs(eval_dir)
    with open(os.path.join(eval_dir, "properties"), "w") as f:
        json.dump(PROPERTIES, f)


# Create new experiment. The file <EXP_DIR>-eval/properties must exist.
exp = Experiment(EXP_DIR)
exp.add_report(AbsoluteReport(attributes=["coverage", "expansions"]))

write_properties(exp.eval_dir)
exp.run_steps()
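The snippet assumes a few names defined earlier in the script. A plausible preamble, with EXP_DIR chosen here purely as an illustration, would be:

import json
import os

from lab import tools
from lab.experiment import Experiment
from downward.reports.absolute import AbsoluteReport

EXP_DIR = "data/report-external-results"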
Example #13
import project

ATTRIBUTES = [
    "error",
    "run_dir",
    "planner_time",
    "initial_h_value",
    "coverage",
    "cost",
    "evaluations",
    "memory",
    project.EVALUATIONS_PER_TIME,
]

exp = Experiment()
exp.add_step("remove-combined-properties", project.remove_file,
             Path(exp.eval_dir) / "properties")

project.fetch_algorithm(exp,
                        "2020-09-11-A-cg-vs-ff",
                        "20.06:01-cg",
                        new_algo="cg")
project.fetch_algorithm(exp,
                        "2020-09-11-A-cg-vs-ff",
                        "20.06:02-ff",
                        new_algo="ff")

filters = [project.add_evaluations_per_time]

project.add_absolute_report(exp,
Example #14
#! /usr/bin/env python
"""
Example lab experiment that approximates the number pi.

This file contains the simplest version of the experiment where a basic
approximation is not calculated, but simply printed.

You can find a more advanced experiment in pi-ext.py.
"""

from lab.experiment import Experiment

EXPPATH = 'exp-pi'

exp = Experiment(EXPPATH)

run = exp.add_run()
run.add_command('calc-pi', ['echo', 'Pi:', '3.14'])
run.set_property('id', ['echo-1'])

exp()
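As a sketch of a first step toward the more advanced pi-ext.py mentioned in the docstring, the echo command could be replaced by an actual computation. This variant is illustrative and not part of the original file:

run = exp.add_run()
run.add_command('calc-pi', ['python', '-c', 'import math; print("Pi:", math.pi)'])
run.set_property('id', ['python-math-pi'])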
Example #15
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other things
        this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        #if not repo or not os.path.isdir(repo):
        #    logging.critical('The path "%s" is not a local Fast Downward '
        #                     'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key, value in limits.items():
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        self.limits = dict(LIMITS)  # Copy so we don't mutate the module-level defaults.
        self.limits.update(limits)

        # Save if this is a compact experiment i.e. preprocessed tasks are referenced.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        self.add_PAC_fetcher(src='/home/sternron/gal-dreiman/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',  # TODO: make this path a parameter.
                             dest=self.search_exp_path,
                             name='fetch-preprocess-results',
                             copy_all=True,
                             write_combined_props=False)  # New fetcher that copies preprocess results for PAC.
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
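The Step objects above bind their keyword arguments, so Step('build-preprocess-exp', self.build, stage='preprocess') later invokes self.build(stage='preprocess'). A stripped-down sketch of that mechanism (not Lab's actual implementation):

class Step:
    def __init__(self, name, func, *args, **kwargs):
        self.name = name
        self._func = func
        self._args = args
        self._kwargs = kwargs

    def __call__(self):
        # Executed when the experiment runs this step.
        return self._func(*self._args, **self._kwargs)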
Example #16
# That is the hacky part: To avoid changing Lab, we *delete* the experiment path
# from the arguments list!
del sys.argv[1]

if not os.path.isdir(EXP_PATH):
    print("Please define a valid experiment path.")
    exit(1)

suffix = ""
if EXP_PATH.startswith("results/prost_"):
    suffix = "_{}".format(EXP_PATH[14:])
    if suffix[-1] == "/":
        suffix = suffix[:-1]

# Create a new experiment.
exp = Experiment(path=EXP_PATH)

# Add Prost parser.
exp.add_parser("parser.py")
exp.add_parse_again_step()

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name="fetch")

# Make a basic table report with IPC scores.
ipc_scores = IPCScores()

exp.add_report(
    ProstBaseReport(attributes=ATTRIBUTES,
                    filter=[ipc_scores.store_rewards, ipc_scores.add_score]),
Example #17
SUITE = [
    "grid", "gripper:prob01.pddl", "miconic:s1-0.pddl", "mystery:prob07.pddl"
]
ATTRIBUTES = [
    "coverage",
    "error",
    "evaluations",
    "plan",
    "times",
    "trivially_unsolvable",
]
TIME_LIMIT = 1800
MEMORY_LIMIT = 2048

# Create a new experiment.
exp = Experiment(environment=ENV)
# Add custom parser for FF.
exp.add_parser("ff-parser.py")

for task in suites.build_suite(BENCHMARKS_DIR, SUITE):
    run = exp.add_run()
    # Create symbolic links and aliases. This is optional. We
    # could also use absolute paths in add_command().
    run.add_resource("domain", task.domain_file, symlink=True)
    run.add_resource("problem", task.problem_file, symlink=True)
    # 'ff' binary has to be on the PATH.
    # We could also use exp.add_resource().
    run.add_command(
        "run-planner",
        ["ff", "-o", "{domain}", "-f", "{problem}"],
        time_limit=TIME_LIMIT,
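The ff-parser.py file itself is not shown here. A minimal custom parser built on Lab's Parser class might look like the following; the regular expression and the log line it matches are illustrative assumptions, not FF's real output format:

#! /usr/bin/env python
from lab.parser import Parser

parser = Parser()
# Extract the number of evaluations from a line like "evaluating 12345 states".
parser.add_pattern("evaluations", r"evaluating (\d+) states", type=int)
parser.parse()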
Example #18
from lab.experiment import Experiment

parser = argparse.ArgumentParser('Test arguments')

parser.add_argument('--n_estimators', type=int, dest='n_estimators')
args = parser.parse_args()

n_estimators = args.n_estimators

if n_estimators is None:
    n_estimators = 100
    max_depth = 2

if __name__ == "__main__":
    e = Experiment(dataset='iris_75')

    @e.start_run
    def train():
        iris = datasets.load_iris()
        X = iris.data
        y = iris.target

        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.25,
                                                            random_state=42)

        e.log_features(
            ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'])
        clf = RandomForestClassifier(n_estimators=n_estimators)
"""

import os

from lab.experiment import Experiment
from lab.environments import LocalEnvironment
from lab.experiment import Step
from lab.reports import Report


EXPNAME = 'simple-exp'
EXPPATH = os.path.join('/tmp', EXPNAME)
ENV = LocalEnvironment()

# Create a new experiment.
exp = Experiment(path=EXPPATH, environment=ENV)
exp.add_resource('SIMPLE_PARSER', 'simple-parser.py', 'simple-parser.py')
reportfile = os.path.join(exp.eval_dir, EXPNAME + '.html')

run = exp.add_run()
run.add_command('list-dir', ['ls', '-l'])
# Every run has to have an id in the form of a list.
run.set_property('id', ['current-dir'])
run.require_resource('SIMPLE_PARSER')
run.add_command('parse', ['SIMPLE_PARSER'])

# Make a default report.
exp.add_report(Report(attributes=['number_of_files', 'first_number']),
               outfile=reportfile)

# Compress the experiment directory.
Example #20
    SUITE = BHOSLIB_GRAPHS + RANDOM_GRAPHS
else:
    ENV = LocalEnvironment(processes=2)
    # Use smaller suite for local tests.
    SUITE = BHOSLIB_GRAPHS[:1] + RANDOM_GRAPHS[:1]
ATTRIBUTES = [
    "cover",
    "cover_size",
    "error",
    "solve_time",
    "solver_exit_code",
    Attribute("solved", absolute=True),
]

# Create a new experiment.
exp = Experiment(environment=ENV)
# Add solver to experiment and make it available to all runs.
exp.add_resource("solver", os.path.join(SCRIPT_DIR, "solver.py"))
# Add custom parser.
exp.add_parser("parser.py")

for algo in ALGORITHMS:
    for task in SUITE:
        run = exp.add_run()
        # Create a symbolic link and an alias. This is optional. We
        # could also use absolute paths in add_command().
        run.add_resource("task", task, symlink=True)
        run.add_command(
            "solve",
            ["{solver}", "--seed",
             str(SEED), "{task}", algo],
Example #21
from lab.experiment import Experiment
from lab.steps import Step
from lab.reports import Report


EXPPATH = 'exp-pi'

class PiReport(Report):
    def get_text(self):
        lines = []
        for run_id, run in self.props.items():
            lines.append('%s %s' % (run['time'], run['diff']))
        return '\n'.join(lines)

exp = Experiment(EXPPATH)
exp.add_resource('PARSER', 'pi-parser-ext.py', 'pi-parser.py')
exp.add_resource('CALC', 'calculate.py', 'calculate.py')

for rounds in [1, 5, 10, 50, 100, 500, 1000, 5000, 10000]:
    run = exp.add_run()
    run.require_resource('PARSER')
    run.require_resource('CALC')
    run.add_command('calc-pi', ['CALC', str(rounds)], time_limit=10, mem_limit=1024)
    run.add_command('parse-pi', ['PARSER'])
    run.set_property('id', ['calc-%d' % rounds])

def good(run):
    return run['diff'] <= 0.01

exp.add_step(Step('report', Report(format='html', attributes=['pi', 'diff'],
Example #22
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from lab.experiment import Experiment
from lab.steps import Step
from downward.reports.compare import CompareConfigsReport
from common_setup import get_experiment_name, get_data_dir, get_repo_base

import os

DATADIR = os.path.join(os.path.dirname(__file__), 'data')

exp = Experiment(get_data_dir())

exp.add_fetcher(os.path.join(DATADIR, 'e2013101802-pho-seq-constraints-eval'),
                filter_config_nick="astar_pho_seq_no_onesafe")
exp.add_fetcher(os.path.join(DATADIR, 'issue527-v2-eval'),
                filter_config_nick="astar_occ_seq")

exp.add_report(
    CompareConfigsReport(
        [
            ('869fec6f843b-astar_pho_seq_no_onesafe',
             'issue527-v2-astar_occ_seq'),
        ],
        attributes=[
            'coverage',
            'total_time',
            'expansions',
            'evaluations',
            'generated',
Example #23
        extra_options='#SBATCH --cpus-per-task=3',
        setup="%s\n%s" %
        (BaselSlurmEnvironment.DEFAULT_SETUP,
         "source /infai/blaas/virtualenvs/grounder/bin/activate\n"),
        export=["PATH", "DOWNWARD_BENCHMARKS", "POWER_LIFTED_DIR"])
else:
    SUITE = ['organic-synthesis-alkene:p2.pddl']
    ENV = LocalEnvironment(processes=4)

TIME_LIMIT = 1800
MEMORY_LIMIT = 16384

ATTRIBUTES = ['atoms', 'grounding_time', 'parsing_time', 'total_time']

# Create a new experiment.
exp = Experiment(environment=ENV)

# Add custom parser for Power Lifted.
exp.add_parser('parser.py')

NEW_GROUNDER_CONFIGS = [Configuration('new-grounder', [])]
FD_CONFIGS = [Configuration('fd-grounder', [])]

# Create one run for each instance and each configuration
for config in NEW_GROUNDER_CONFIGS:
    for task in suites.build_suite(BENCHMARKS_DIR, SUITE):
        run = exp.add_run()
        run.add_resource('domain', task.domain_file, symlink=True)
        run.add_resource('problem', task.problem_file, symlink=True)
        run.add_command(
            'run-search',
Example #24
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784, )))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

e = Experiment()


@e.start_run
def train():

    # Create a temporary directory for tensorboard logs
    output_dir = tempfile.mkdtemp()
    print("Writing TensorBoard events locally to %s\n" % output_dir)
    tensorboard = TensorBoard(log_dir=output_dir)

    # During Experiment execution, tensorboard can be viewed through:
    # tensorboard --logdir=[output_dir]

    model.fit(x_train,
              y_train,
Example #25
from lab.experiment import Experiment

from downward.reports import PlanningReport
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport

import common_setup

DIR = os.path.dirname(os.path.abspath(__file__))
ENVIRONMENT = BaselSlurmEnvironment(partition="infai_2",
                                    email="*****@*****.**")

if common_setup.is_test_run():
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = Experiment()


class TranslatorDiffReport(PlanningReport):
    def get_cell(self, run):
        return ";".join(run.get(attr) for attr in self.attributes)

    def get_text(self):
        lines = []
        for runs in self.problem_runs.values():
            hashes = set([r.get("translator_output_sas_hash") for r in runs])
            if len(hashes) > 1 or None in hashes:
                lines.append(";".join([self.get_cell(r) for r in runs]))
        return "\n".join(lines)

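A custom report class like this is hooked into the experiment the same way as the built-in reports. A plausible usage, with the attribute list and output file name assumed for illustration:

exp.add_report(
    TranslatorDiffReport(attributes=["domain", "problem", "algorithm", "run_dir"]),
    outfile="different_output_sas.csv",
)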
Example #26

def get_valid(run):
    invalid_domains = ['ged-opt14-strips',
                       'ged-sat14-strips',
                       'storage',
                       'tidybot-opt11-strips',
                       'tidybot-opt14-strips',
                       'tidybot-sat11-strips']
    if run['domain'] in invalid_domains:
        return False
    else:
        return True


exp = Experiment('/home/blaas/work/projects/grounder/experiments/combine-with-clingo/data/ipc')
exp.add_fetcher('/home/blaas/work/projects/asp-grounding-planning/experiments/clingo-exp/data/ipc-eval',
                name='clingo')
exp.add_fetcher('/home/blaas/work/projects/grounder/experiments/first-experiment/data/ipc-eval',
                name='new-grounder-and-fd-grounder')
exp.add_report(BaseReport(attributes=['total_time'],
                           filter=[remove_timeouts, create_same_attr]),
               outfile='ipc.html')
exp.add_report(ScatterPlotReport(attributes=['total_time'],
                           filter_algorithm=['new-grounder', 'clingo'],
                           filter=[remove_timeouts, create_same_attr, get_valid],
                           scale='symlog',
                           format='tex'),
               outfile='ipc-new-grounder-vs-clingo.tex')
exp.add_report(ScatterPlotReport(attributes=['total_time'],
                           filter_algorithm=['new-grounder', 'fd-grounder'],
Example #27
    def __init__(
        self,
        suites,
        num_runs=30,
        time_per_step=1.0,
        initial_port=2000,
        rddlsim_seed=0,
        rddlsim_enforces_runtime=False,
        revision_cache=None,
        time_buffer=300,
        memory_limit=int(3.5 * 1024),
        soft_stdout_limit=10 * 1024,
        hard_stdout_limit=20 * 1024,
        soft_stderr_limit=64,
        hard_stderr_limit=10 * 1024,
        path=None,
        environment=None,
    ):
        """
        *suites* is a list of :class:`prostlab.suites.Problem` objects that describes
        the set of benchmarks used in the experiment.

        *num_runs* is the number of times each algorithm is executed on each instance.

        *time_per_step* is the time in seconds each algorithm has per step. A total time
        per instance is computed from this, the *num_runs*, the instance horizon and the 
        *time_buffer*.

        *initial_port* is the first port that is used for TCP/IP communication between
        an algorithm and rddlsim. 

        *rddlsim_seed* is the value with which rddlsim is seeded.

        If *rddlsim_enforces_runtime* is True, rddlsim terminates after the time that is
        computed as the product of *num_runs*, *time_per_step* and the instance horizon.

        *revision_cache* is the directory for caching Prost revisions. It defaults to 
        ``<scriptdir>/data/revision-cache``.

        *time_buffer* is the time allowed in addition to the product of *num_runs*,
        *time_per_step* and the instance horizon. This must include the time the parser
        requires.

        *memory_limit* is the hard memory limit in MiB. *memory_limit* - 512 MiB is
        furthermore passed as a (soft) memory limit to Prost.

        *soft_stdout_limit*, *hard_stdout_limit*, *soft_stderr_limit* and
        *hard_stderr_limit* limit the amount of data each experiment may write to disk.

        See :class:`lab.experiment.Experiment` for an explanation of
        the *path* and *environment* parameters.

        >>> from lab.environments import BaselSlurmEnvironment
        >>> env = BaselSlurmEnvironment(email="*****@*****.**")
        >>> exp = ProstExperiment(environment=env)

        You can add parsers with :meth:`.add_parser()`. See
        :ref:`parsing` for how to write custom parsers.

        """
        Experiment.__init__(self, path=path, environment=environment)

        self.suites = suites
        self.num_runs = num_runs
        self.time_per_step = time_per_step
        self.initial_port = initial_port
        self.rddlsim_seed = rddlsim_seed
        self.rddlsim_enforces_runtime = rddlsim_enforces_runtime

        self.revision_cache = revision_cache or os.path.join(
            get_default_data_dir(), "revision-cache")

        self.time_buffer = time_buffer
        self.memory_limit = memory_limit
        self.soft_stdout_limit = soft_stdout_limit
        self.hard_stdout_limit = hard_stdout_limit
        self.soft_stderr_limit = soft_stderr_limit
        self.hard_stderr_limit = hard_stderr_limit

        # Use OrderedDict to ensure that names are unique and ordered.
        self.configs = OrderedDict()
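The docstring's timeout formula is easy to check with concrete numbers. Under the defaults above, an instance with horizon 40 (the horizon value is assumed for this example) would get:

num_runs = 30
time_per_step = 1.0
horizon = 40  # Taken from the instance; the value is assumed here.
time_buffer = 300

total_time = num_runs * time_per_step * horizon + time_buffer
print(total_time)  # 1500.0 seconds, i.e. 25 minutes per instance.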
Example #28
#SUITE = ['zenotravel:p01.pddl', 'zenotravel:p06.pddl', 'logistics00:probLOGISTICS-4-0.pddl']

#SUITE = ['organic-synthesis-split-opt18-strips:p01.pddl']

ATTRIBUTES = ['coverage', 'found_plans', 'total_time', 'num_iterations', 'plan_files', 'all_plan_costs', 'min_plan_cost', 'max_plan_cost', 'actual_cost_bound']

config_name = 'features' 
config_date = '2019-11-04'
report_name = '%s-%s' % (config_name,config_date)

planner_name = os.path.join(get_base_dir(), 'extract_planning_features.py')
   
ENV = OracleGridEngineEnvironment(queue='all.q')

# Create a new experiment.
exp = Experiment(environment=ENV)
# Add built-in parsers.
#exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
#exp.add_parser(exp.LAB_DRIVER_PARSER)
#exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
#exp.add_parser(exp.SINGLE_SEARCH_PARSER)
#exp.add_parser(exp.PLANNER_PARSER)

# Add custom parser.
#exp.add_parser('topq-iterative-parser.py')

def add_exp():
    for task in suites.build_suite(BENCHMARKS_DIR, SUITE):
        run = exp.add_run()
        # Create symbolic links and aliases. This is optional. We
Example #29
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from lab.experiment import Experiment
from lab.steps import Step
from downward.reports.compare import CompareConfigsReport
from common_setup import get_experiment_name, get_data_dir, get_repo_base

import os

DATADIR = os.path.join(os.path.dirname(__file__), 'data')

exp = Experiment(get_data_dir())

exp.add_fetcher(os.path.join(DATADIR, 'e2013101802-pho-seq-constraints-eval'), filter_config_nick="astar_pho_seq_no_onesafe")
exp.add_fetcher(os.path.join(DATADIR, 'issue527-v2-eval'), filter_config_nick="astar_occ_seq")

exp.add_report(CompareConfigsReport(
    [
     ('869fec6f843b-astar_pho_seq_no_onesafe', 'issue527-v2-astar_occ_seq'),
    ],
    attributes=[
                'coverage',
                'total_time',
                'expansions',
                'evaluations',
                'generated',
                'expansions_until_last_jump',
                'error',
                ],
    )
Example #30
#! /usr/bin/env python
"""
Example lab experiment that approximates the number pi.

This file contains the simplest version of the experiment where a basic
approximation is not calculated, but simply printed.

You can find a more advanced experiment in pi-ext.py.
"""

from lab.experiment import Experiment


EXPPATH = 'exp-pi'

exp = Experiment(EXPPATH)

run = exp.add_run()
run.add_command('calc-pi', ['echo', 'Pi:', '3.14'])
run.set_property('id', ['echo-1'])

exp()
Example #31
MEMORY_LIMIT = 2048

if REMOTE:
    ENV = BaselSlurmEnvironment(email="*****@*****.**")
    SUITE = BHOSLIB_GRAPHS + RANDOM_GRAPHS
else:
    ENV = LocalEnvironment(processes=4)
    # Use smaller suite for local tests.
    SUITE = BHOSLIB_GRAPHS[:1] + RANDOM_GRAPHS[:1]
ATTRIBUTES = [
    'cover', 'cover_size', 'error', 'solve_time', 'solver_exit_code',
    Attribute('solved', absolute=True)
]

# Create a new experiment.
exp = Experiment(environment=ENV)
# Add solver to experiment and make it available to all runs.
exp.add_resource('solver', os.path.join(SCRIPT_DIR, 'solver.py'))
# Add custom parser.
exp.add_parser('parser.py')

for algo in ALGORITHMS:
    for task in SUITE:
        run = exp.add_run()
        # Create a symbolic link and an alias. This is optional. We
        # could also use absolute paths in add_command().
        run.add_resource('task', task, symlink=True)
        run.add_command('solve',
                        ['{solver}', '--seed',
                         str(SEED), '{task}', algo],
                        time_limit=TIME_LIMIT,
Example #32
PROPERTIES = {
    "ff-gripper-prob01.pddl": {
        "domain": "gripper",
        "problem": "prob01.pddl",
        "algorithm": "ff",
        "coverage": 1,
        "expansions": 1234,
    },
    "blind-gripper-prob01.pddl": {
        "domain": "gripper",
        "problem": "prob01.pddl",
        "algorithm": "blind",
        "coverage": 1,
        "expansions": 6543,
    },
}


def write_properties(eval_dir):
    tools.makedirs(eval_dir)
    with open(os.path.join(eval_dir, 'properties'), 'w') as f:
        json.dump(PROPERTIES, f)


# Create new experiment. The file <EXP_DIR>-eval/properties must exist.
exp = Experiment(EXP_DIR)
exp.add_report(AbsoluteReport(attributes=['coverage', 'expansions']))

write_properties(exp.eval_dir)
exp.run_steps()
Example #33
    TIME_LIMIT = 1800
else:
    SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "mystery:prob07.pddl"]
    ENVIRONMENT = LocalEnvironment(processes=2)
    TIME_LIMIT = 5

ATTRIBUTES = [
    "cost",
    "coverage",
    "error",
    "g_values_over_time",
    "run_dir",
    "runtime",
]

exp = Experiment(environment=ENVIRONMENT)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parser(os.path.join(DIR, "singularity-parser.py"))


def get_image(name):
    planner = name.replace("-", "_")
    image = os.path.join(IMAGES_DIR, name + ".img")
    assert os.path.exists(image), image
    return planner, image


IMAGES = [get_image("fd1906-lama-first")]
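The snippet stops right after defining IMAGES. A hypothetical continuation in the spirit of the other examples on this page (every name and argument below is assumed, not recovered from the original script):

for planner, image in IMAGES:
    for task in SUITE:
        run = exp.add_run()
        # Make the Singularity image available inside the run directory.
        run.add_resource("planner_image", image, symlink=True)
        run.add_command(
            "run-planner",
            ["{planner_image}", "domain.pddl", "problem.pddl"],
            time_limit=TIME_LIMIT,
        )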
Example #34
from lab.experiment import Experiment
from lab.environments import BaselSlurmEnvironment
import os
ENV = BaselSlurmEnvironment(partition="infai_1",
                            email="*****@*****.**",
                            memory_per_cpu="6354M")
DIR = os.path.dirname(os.path.abspath(__file__))
exp = Experiment(environment=ENV)
exp.add_resource("solver", os.path.join(DIR, "hardInstanceSearch"))
for i in [13, 16, 21, 48, 55, 59, 81, 98]:
    run = exp.add_run()
    run.add_resource("PDB_1",
                     os.path.join(DIR,
                                  "STP(4,4)-0-0;11;12;13;14;15-8bpe-lex.pdb"),
                     symlink=True)
    run.add_resource("PDB_2",
                     os.path.join(DIR,
                                  "STP(4,4)-0-0;1;2;3;4;5;6;7-8bpe-lex.pdb"),
                     symlink=True)
    run.add_resource("PDB_3",
                     os.path.join(DIR, "STP(4,4)-0-0;8;9;12;13-8bpe-lex.pdb"),
                     symlink=True)
    run.add_command("solve", ["{solver}", str(i)], 1800, 6354)
    run.set_property("id", [str(i)])
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.run_steps()
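In run.add_command("solve", ["{solver}", str(i)], 1800, 6354) the two trailing positional arguments are the time limit in seconds and the memory limit in MiB. With keywords the call reads more clearly; as far as I can tell this form is equivalent in current Lab:

run.add_command("solve", ["{solver}", str(i)], time_limit=1800, memory_limit=6354)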
Example #35
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other things
        this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        if not repo or not os.path.isdir(repo):
            logging.critical('The path "%s" is not a local Fast Downward '
                             'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key, value in limits.items():
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        self.limits = dict(LIMITS)  # Copy so we don't mutate the module-level defaults.
        self.limits.update(limits)

        # Save if this is a compact experiment i.e. preprocessed tasks are referenced.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        self.add_PAC_fetcher(src='/home/gal-d/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',  # TODO: make this path a parameter.
                             dest=self.search_exp_path,
                             name='fetch-preprocess-results',
                             copy_all=True,
                             write_combined_props=False)  # New fetcher that copies preprocess results for PAC.
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')