Example #1
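Builds one run per benchmark task: the PDDL files are symlinked into each run directory, a time- and memory-limited `run-search` command is registered, and the standard build/start/fetch/report steps are added. This is a fragment of a larger script: `exp`, `config`, `suites` (presumably `downward.suites`), and the upper-case constants are assumed to be defined earlier.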
    for task in suites.build_suite(BENCHMARKS_DIR, SUITE):
        run = exp.add_run()
        run.add_resource('domain', task.domain_file, symlink=True)
        run.add_resource('problem', task.problem_file, symlink=True)
        run.add_command(
            'run-search',
            [RUN_SCRIPT_DIR + '/src/translate/build_model.py',
             task.domain_file, task.problem_file],
            time_limit=TIME_LIMIT,
            memory_limit=MEMORY_LIMIT)
        run.set_property('domain', task.domain)
        run.set_property('problem', task.problem)
        run.set_property('algorithm', config.name)
        run.set_property('id', [config.name, task.domain, task.problem])

# Add step that writes experiment files to disk.
exp.add_step('build', exp.build)

# Add step that executes all runs.
exp.add_step('start', exp.start_runs)

# Add step that collects properties from run directories and
# writes them to *-eval/properties.
exp.add_fetcher(name='fetch')

# Make a report.
exp.add_report(BaseReport(attributes=ATTRIBUTES), outfile='report.html')

# Parse the commandline and run the specified steps.
exp.run_steps()
Example #2
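A complete, self-contained experiment that runs a solver binary on eight instances of what appears to be the 15-puzzle (STP(4,4)) on the Basel Slurm grid, with three pattern-database files symlinked into every run directory. The email address is redacted in the source.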
import os

from lab.experiment import Experiment
from lab.environments import BaselSlurmEnvironment

ENV = BaselSlurmEnvironment(partition="infai_1",
                            email="*****@*****.**",
                            memory_per_cpu="6354M")
DIR = os.path.dirname(os.path.abspath(__file__))
exp = Experiment(environment=ENV)
exp.add_resource("solver", os.path.join(DIR, "hardInstanceSearch"))
for i in [13, 16, 21, 48, 55, 59, 81, 98]:
    run = exp.add_run()
    run.add_resource("PDB_1",
                     os.path.join(DIR,
                                  "STP(4,4)-0-0;11;12;13;14;15-8bpe-lex.pdb"),
                     symlink=True)
    run.add_resource("PDB_2",
                     os.path.join(DIR,
                                  "STP(4,4)-0-0;1;2;3;4;5;6;7-8bpe-lex.pdb"),
                     symlink=True)
    run.add_resource("PDB_3",
                     os.path.join(DIR, "STP(4,4)-0-0;8;9;12;13-8bpe-lex.pdb"),
                     symlink=True)
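    # "{solver}" is replaced with the path of the "solver" resource added
    # above; the limits are in seconds and MiB.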
    run.add_command("solve", ["{solver}", str(i)], 1800, 6354)
    run.set_property("id", [str(i)])
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.run_steps()
Example #3
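A report-only script that fetches the results of two earlier experiments into a combined evaluation directory and generates an HTML overview plus scatter plots. The imports and the `remove_timeouts`/`create_same_attr` filters (defined in Example #4 below) are assumed from the surrounding project; the last `add_report` call is cut off in the source, and its closing arguments are reconstructed by analogy with the call above it.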
def get_valid(run):
    invalid_domains = ['ged-opt14-strips',
                       'ged-sat14-strips',
                       'storage',
                       'tidybot-opt11-strips',
                       'tidybot-opt14-strips',
                       'tidybot-sat11-strips']
    return run['domain'] not in invalid_domains


exp = Experiment('/home/blaas/work/projects/grounder/experiments/combine-with-clingo/data/ipc')
exp.add_fetcher('/home/blaas/work/projects/asp-grounding-planning/experiments/clingo-exp/data/ipc-eval',
                name='clingo')
exp.add_fetcher('/home/blaas/work/projects/grounder/experiments/first-experiment/data/ipc-eval',
                name='new-grounder-and-fd-grounder')
exp.add_report(BaseReport(attributes=['total_time'],
                          filter=[remove_timeouts, create_same_attr]),
               outfile='ipc.html')
exp.add_report(ScatterPlotReport(attributes=['total_time'],
                                 filter_algorithm=['new-grounder', 'clingo'],
                                 filter=[remove_timeouts, create_same_attr, get_valid],
                                 scale='symlog',
                                 format='tex'),
               outfile='ipc-new-grounder-vs-clingo.tex')
exp.add_report(ScatterPlotReport(attributes=['total_time'],
                                 filter_algorithm=['new-grounder', 'fd-grounder'],
                                 filter=[remove_timeouts, create_same_attr, get_valid],
                                 scale='symlog',
                                 # (assumed: the source is cut off here; the
                                 # closing arguments mirror the call above)
                                 format='tex'),
               outfile='ipc-new-grounder-vs-fd-grounder.tex')
Example #4
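Filter helpers and a combined report for the HTG benchmarks, from the same grounder project as Example #3. The snippet starts mid-function in the source; the `def` line of the first helper is reconstructed from its use as `remove_timeouts` in the report below. `BaseReport` is assumed to be imported elsewhere.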
def remove_timeouts(run):
    # (Reconstructed def line: the source starts mid-function, and the helper
    # is referenced as `remove_timeouts` in the report below.)
    # Discard the total_time of runs that (nearly) hit the 1800 s limit.
    if 'total_time' in run:
        if run['total_time'] > 1780:
            run['ground'] = 0
            run['total_time'] = None
    return run


def create_same_attr(run):
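    # Expose the model-computation time under the common name 'total_time' so
    # runs from both experiments can be compared on a single attribute.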
    if 'translator_time_computing_model' in run:
        run['total_time'] = run['translator_time_computing_model']
    return run


def domain_as_category(run1, run2):
    # run2['domain'] has the same value, because we always
    # compare two runs of the same problem.
    return run1['domain']


exp = Experiment('data/combined-htg')
exp.add_fetcher('data/htg-eval', name='term-class')
exp.add_fetcher(
    '/home/blaas/work/projects/grounder/experiments/first-experiment/data/htg-eval',
    name='master')

exp.add_report(BaseReport(attributes=['total_time'],
                          filter=[remove_timeouts, create_same_attr]),
               outfile='htg.html')

exp.run_steps()
Example #5
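Fetches two previous evaluations, filtering each down to one configuration, and compares the resulting pair with a `CompareConfigsReport`. The snippet ends inside the `add_report` call; only the closing parenthesis is supplied.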
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from lab.experiment import Experiment
from lab.steps import Step
from downward.reports.compare import CompareConfigsReport
from common_setup import get_experiment_name, get_data_dir, get_repo_base

import os

DATADIR = os.path.join(os.path.dirname(__file__), 'data')

exp = Experiment(get_data_dir())

exp.add_fetcher(os.path.join(DATADIR, 'e2013101802-pho-seq-constraints-eval'),
                filter_config_nick="astar_pho_seq_no_onesafe")
exp.add_fetcher(os.path.join(DATADIR, 'issue527-v2-eval'),
                filter_config_nick="astar_occ_seq")

exp.add_report(CompareConfigsReport(
    [
        ('869fec6f843b-astar_pho_seq_no_onesafe', 'issue527-v2-astar_occ_seq'),
    ],
    attributes=[
        'coverage',
        'total_time',
        'expansions',
        'evaluations',
        'generated',
        'expansions_until_last_jump',
        'error',
    ],
))  # (assumed closing parens; the source snippet is cut off here)
Example #6
File: fetch.py  Project: Eldeeqq/bi-zum
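Methods of a filter utility for finding tasks on which two translator revisions produce different output (different `translator_output_sas_hash`). The snippet starts mid-class, so a minimal class header and constructor are reconstructed below; `exp` and `TranslatorDiffReport` are defined elsewhere in the original fetch.py.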
from collections import defaultdict


# (Reconstructed context: the source starts mid-class. The class name comes
# from its use below; the constructor is a minimal assumed version, with a
# defaultdict implied by the append on a fresh key in store_values.)
class SameValueFilters:
    def __init__(self, attribute):
        self._attribute = attribute
        self._tasks_to_values = defaultdict(list)

    def _get_task(self, run):
        return (run['domain'], run['problem'])

    def store_values(self, run):
        value = run.get(self._attribute)
        self._tasks_to_values[self._get_task(run)].append(value)
        # Don't filter this run, yet.
        return True

    def filter_tasks_with_equal_values(self, run):
        values = self._tasks_to_values[self._get_task(run)]
        return len(set(values)) != 1


exp.add_fetcher(src='data/issue939-base-eval')
exp.add_fetcher(src='data/issue939-v1-eval', merge=True)

ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
#exp.add_comparison_table_step(attributes=ATTRIBUTES)

same_value_filters = SameValueFilters("translator_output_sas_hash")
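# The two methods are meant to be chained as report filters: store_values runs
# first and records every run's value, after which
# filter_tasks_with_equal_values keeps only the tasks whose values differ
# across revisions (see the commented-out comparison step below).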
# exp.add_comparison_table_step(
#     name="filtered",
#     attributes=ATTRIBUTES,
#     filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])

exp.add_report(TranslatorDiffReport(
    attributes=["domain", "problem", "algorithm", "run_dir"]),
               outfile="different_output_sas.csv")