]), IssueConfig('sbf-miasm-allrnd-b50k', [
    # Merge-and-shrink config: stateless merging driven by score-based
    # filtering (MIASM scoring, random tie-breaking), bisimulation
    # shrinking, exact label reduction, 50k-state limit.
    '--search',
    'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'
]), ]
# NOTE: the "])," above closes the last IssueConfig of the CONFIGS list,
# which is opened earlier in the file (outside this chunk).

# Benchmarks and execution environment for the full experiment.
SUITE = DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(priority=0, email='*****@*****.**')

if is_test_run():
    # Small smoke-test suite, run locally instead of on the grid.
    SUITE = [
        'depot:p01.pddl', 'depot:p02.pddl',
        'parcprinter-opt11-strips:p01.pddl',
        'parcprinter-opt11-strips:p02.pddl',
        'mystery:prob07.pddl'
    ]
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

# Custom merge-and-shrink output parser; registered as a resource and
# executed as an extra command in every run.
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['{ms_parser}'])

exp.add_suite(BENCHMARKS_DIR, SUITE)

# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
def __init__(self, configs, suite, grid_priority=None, path=None,
             repo=None, revisions=None, search_revisions=None,
             test_suite=None, **kwargs):
    """Create a DownwardExperiment with some convenience features.

    *configs* must be a non-empty dict of {nick: cmdline} pairs that
    sets the planner configurations to test. ::

        IssueExperiment(configs={
            "lmcut": ["--search", "astar(lmcut())"],
            "ipdb": ["--search", "astar(ipdb())"]})

    *suite* sets the benchmarks for the experiment. It must be a
    single string or a list of strings specifying domains or tasks;
    the downward.suites module has many predefined suites. ::

        IssueExperiment(suite=["grid", "gripper:prob01.pddl"])

    Use *grid_priority* to set the job priority for cluster
    experiments. It must be in the range [-1023, 0], where 0 (the
    default) is the highest priority. ::

        IssueExperiment(grid_priority=-500)

    If *path* is omitted, the experiment path is derived from the main
    script's filename (experiments/issue123/exp01.py ->
    experiments/issue123/data/issue123-exp01/). If *repo* is omitted,
    the Fast Downward repository root is derived from the main
    script's path.

    *revisions* is a non-empty list of revisions used for translator,
    preprocessor and search alike. *search_revisions* varies only the
    search component; all of its runs use the translator and
    preprocessor of the FIRST listed revision. These two options and
    the base class's *combinations* parameter can be freely mixed, but
    at least one of them must be given.

    *test_suite* sets the benchmarks used for test runs (default:
    first gripper task). ::

        IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
    """
    if is_test_run():
        # Test runs always execute locally on the reduced suite.
        kwargs["environment"] = LocalEnvironment()
        suite = test_suite or self.DEFAULT_TEST_SUITE
    elif "environment" not in kwargs:
        kwargs["environment"] = MaiaEnvironment(priority=grid_priority)

    if path is None:
        path = get_data_dir()
    if repo is None:
        repo = get_repo_base()

    # Build the (translator, preprocessor, planner) triples for the
    # base class from whichever revision options were supplied.
    kwargs.setdefault("combinations", [])
    if not any([revisions, search_revisions, kwargs["combinations"]]):
        raise ValueError('At least one of "revisions", "search_revisions" '
                         'or "combinations" must be given')
    if revisions:
        kwargs["combinations"].extend([
            (Translator(repo, rev),
             Preprocessor(repo, rev),
             Planner(repo, rev))
            for rev in revisions])
    if search_revisions:
        base_rev = search_revisions[0]
        # Use the same nick for all parts to get short revision nick.
        kwargs["combinations"].extend([
            (Translator(repo, base_rev, nick=rev),
             Preprocessor(repo, base_rev, nick=rev),
             Planner(repo, rev, nick=rev))
            for rev in search_revisions])
    DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)

    # Register every configuration; add_config appends to _config_nicks.
    self._config_nicks = []
    for nick, config in configs.items():
        self.add_config(nick, config)

    self.add_suite(suite)
import os
import subprocess
import sys

from lab.steps import Step
from lab.environments import LocalEnvironment
from downward.experiment import DownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.hstar_2_h_stat import HstarToHRatioAndStatistics
from downward import suites
from downward.reports.MyPlot import ProblemPlotReport

# Experiment output directory and planner repository location.
EXPPATH = 'run_from_args_test'
REPO = os.path.expanduser('~/downward')
ENV = LocalEnvironment(processes=6)

# The search configuration is parameterized via environment variables
# that the caller must set before running this script:
#   weight, delta, epsilon   (and exp_name, read elsewhere)
# e.g. os.environ["weight"] = '0'
#
# BUG FIX: the format string previously read
#   'lazy_anytime_wastar(lmcut(), w={0}, delta={1}, epsilon={2}'
# i.e. the closing parenthesis of lazy_anytime_wastar(...) was missing,
# so the planner's option parser would reject the search string.
tmp = 'lazy_anytime_wastar(lmcut(), w={0}, delta={1}, epsilon={2})'.format(
    os.environ["weight"], os.environ["delta"], os.environ["epsilon"])

# Single configuration: LM-cut heuristic inside lazy anytime WA*.
CONFIGS = [('lmcut', ['--search', tmp])]
def __init__(self, benchmarks_dir, suite, revisions=None, configs=None,
             grid_priority=None, path=None, test_suite=None,
             email=None, processes=None, **kwargs):
    """Create a FastDownwardExperiment over *revisions* x *configs*.

    *revisions* should be a non-empty list of revisions; the same
    version is used for translator, preprocessor and search. ::

        IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

    *configs* must be a non-empty list of IssueConfig objects. ::

        IssueExperiment(..., configs=[
            IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
            IssueConfig(
                "lama", [],
                driver_options=["--alias", "seq-sat-lama-2011"]),
        ])

    *suite* sets the benchmarks for the experiment: a single string or
    a list of strings specifying domains or tasks; the downward.suites
    module has many predefined suites. ::

        IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])

    Use *grid_priority* to set the job priority for cluster
    experiments; it must be in [-1023, 0], where 0 (the default) is
    the highest priority.

    If *path* is omitted, the experiment path is derived from the main
    script's filename.

    *test_suite* sets the benchmarks used for test runs (default:
    first gripper task).

    If *email* is specified, that address is notified upon completion
    when the experiment runs on the cluster.
    """
    # Avoid the mutable-default-argument trap: the previous signature
    # used revisions=[] and configs={}, objects shared across calls.
    revisions = [] if revisions is None else revisions
    configs = [] if configs is None else configs

    if is_test_run():
        kwargs["environment"] = LocalEnvironment(processes=processes)
        suite = test_suite or self.DEFAULT_TEST_SUITE
    elif "environment" not in kwargs:
        kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
                                                email=email)

    path = path or get_data_dir()
    FastDownwardExperiment.__init__(self, path=path, **kwargs)

    repo = get_repo_base()
    # Register one algorithm per (revision, config) pair.
    for rev in revisions:
        for config in configs:
            self.add_algorithm(
                get_algo_nick(rev, config.nick),
                repo, rev, config.component_options,
                build_options=config.build_options,
                driver_options=config.driver_options)

    self.add_suite(benchmarks_dir, suite)
    self._revisions = revisions
    self._configs = configs
def __init__(self, path=None, repo=None, environment=None,
             combinations=None, limits=None, attributes=None,
             derived_properties=None, priority=0, queue=None,
             processes=2, email=None, cache_dir=CACHE_DIR, **kwargs):
    """Build a DownwardExperiment that runs either remotely (grid) or
    locally, selected by the module-level REMOTE flag, and register the
    report / compress / copy steps appropriate for that mode.

    If *path* is omitted it defaults to the main script's basename
    without extension. The experiment directory lives under REMOTE_EXPS
    or LOCAL_EXPS accordingly.
    """
    if path is None:
        path = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    expname = os.path.basename(path)
    # Both paths are computed regardless of mode: the local branch
    # below needs the remote path to scp results back.
    remote_exppath = os.path.join(REMOTE_EXPS, path)
    local_exppath = os.path.join(LOCAL_EXPS, path)

    if REMOTE:
        exppath = remote_exppath
        repo = repo or REMOTE_REPO
        environment = environment or MaiaEnvironment(
            priority=priority, queue=queue, email=email)
    else:
        exppath = local_exppath
        repo = repo or LOCAL_REPO
        environment = environment or LocalEnvironment(processes=processes)

    DownwardExperiment.__init__(self, path=exppath, environment=environment,
                                repo=repo, combinations=combinations,
                                limits=limits, cache_dir=cache_dir, **kwargs)
    self.set_path_to_python(PYTHON)

    if attributes is None:
        attributes = ATTRIBUTES

    # Add report steps
    abs_report_file = os.path.join(self.eval_dir, '%s-abs.html' % expname)
    self.add_report(AbsoluteReport(attributes=attributes, colored=True,
                                   derived_properties=derived_properties),
                    name='report-abs', outfile=abs_report_file)

    if REMOTE:
        # Compress the experiment directory, then the eval dir, then
        # remove both uncompressed copies. Step order matters: the
        # tarballs must exist before the originals are deleted.
        self.add_step(Step.zip_exp_dir(self))
        self.add_step(
            Step('zip-eval-dir', call, [
                'tar', '-cjf',
                self.name + '-eval.tar.bz2',
                self.name + '-eval'
            ], cwd=os.path.dirname(self.path)))
        self.add_step(Step.remove_exp_dir(self))
        self.add_step(
            Step('remove-eval-dir', shutil.rmtree, self.eval_dir,
                 ignore_errors=True))

    if not REMOTE:
        # Copy the results to local directory
        self.add_step(
            Step('scp-eval-dir', call, [
                'scp', '-r',
                '%s:%s-eval' % (SCP_LOGIN, remote_exppath),
                '%s-eval' % local_exppath
            ]))
        # Copy the results to local directory
        self.add_step(
            Step('scp-zipped-eval-dir', call, [
                'scp', '-r',
                '%s:%s-eval.tar.bz2' % (SCP_LOGIN, remote_exppath),
                '%s-eval.tar.bz2' % local_exppath
            ]))
        # Copy the zipped experiment directory to local directory
        self.add_step(
            Step('scp-exp-dir', call, [
                'scp', '-r',
                '%s:%s.tar.bz2' % (SCP_LOGIN, remote_exppath),
                '%s.tar.bz2' % local_exppath
            ]))
        # Unzip the experiment directory
        self.add_step(Step.unzip_exp_dir(self))
        self.add_step(
            Step('unzip-eval-dir', call,
                 ['tar', '-xjf', self.name + '-eval.tar.bz2'],
                 cwd=os.path.dirname(self.path)))
* add a custom result parser
* use the default report
* use additional standard steps
"""
# (The text above is the tail of the module docstring, which opens
# earlier in the file.)

import os

from lab.experiment import Experiment
from lab.environments import LocalEnvironment
from lab.experiment import Step
from lab.reports import Report

EXPNAME = 'simple-exp'
EXPPATH = os.path.join('/tmp', EXPNAME)
ENV = LocalEnvironment()

# Create a new experiment.
exp = Experiment(path=EXPPATH, environment=ENV)
# Register the parser script as a resource so runs can reference it.
exp.add_resource('SIMPLE_PARSER', 'simple-parser.py', 'simple-parser.py')

reportfile = os.path.join(exp.eval_dir, EXPNAME + '.html')

# Single run: list the current directory, then parse the output.
run = exp.add_run()
run.add_command('list-dir', ['ls', '-l'])
# Every run has to have an id in the form of a list.
run.set_property('id', ['current-dir'])
run.require_resource('SIMPLE_PARSER')
run.add_command('parse', ['SIMPLE_PARSER'])

# Make a default report.
# NOTE: this call continues past the end of this chunk.
exp.add_report(Report(attributes=['number_of_files', 'first_number']),
def main(revisions=None):
    """Build and run the FDSS-2018 experiment for the given revisions.

    Reads the benchmark location from $DOWNWARD_BENCHMARKS and runs
    everything locally on 48 processes.
    """
    benchmark_root = os.environ["DOWNWARD_BENCHMARKS"]

    # Satisficing benchmark suite.
    satisficing_suite = [
        "agricola-sat18-strips", "airport",
        "barman-sat11-strips", "barman-sat14-strips",
        "blocks", "childsnack-sat14-strips",
        "data-network-sat18-strips", "depot", "driverlog",
        "elevators-sat08-strips", "elevators-sat11-strips",
        "floortile-sat11-strips", "floortile-sat14-strips",
        "freecell", "ged-sat14-strips", "grid", "gripper",
        "hiking-sat14-strips", "logistics00", "logistics98",
        "miconic", "movie", "mprime", "mystery",
        "nomystery-sat11-strips",
        "openstacks-sat08-strips", "openstacks-sat11-strips",
        "openstacks-sat14-strips", "openstacks-strips",
        "organic-synthesis-sat18-strips",
        "organic-synthesis-split-sat18-strips",
        "parcprinter-08-strips", "parcprinter-sat11-strips",
        "parking-sat11-strips", "parking-sat14-strips",
        "pathways", "pegsol-08-strips", "pegsol-sat11-strips",
        "pipesworld-notankage", "pipesworld-tankage",
        "psr-small", "rovers", "satellite",
        "scanalyzer-08-strips", "scanalyzer-sat11-strips",
        "snake-sat18-strips",
        "sokoban-sat08-strips", "sokoban-sat11-strips",
        "spider-sat18-strips", "storage", "termes-sat18-strips",
        "tetris-sat14-strips", "thoughtful-sat14-strips",
        "tidybot-sat11-strips", "tpp",
        "transport-sat08-strips", "transport-sat11-strips",
        "transport-sat14-strips", "trucks-strips",
        "visitall-sat11-strips", "visitall-sat14-strips",
        "woodworking-sat08-strips", "woodworking-sat11-strips",
        "zenotravel",
    ]

    run_env = LocalEnvironment(processes=48)

    # 64-bit release build; tasks are preprocessed with the h2-mutexes
    # transformer before search, under the FDSS-2018 portfolio alias.
    build_options = ["--build", "release64"]
    driver_options = [
        "--transform-task", "builds/h2-mutexes/bin/preprocess",
        "--overall-time-limit", "30m",
        "--overall-memory-limit", "4096M",
        "--alias", "seq-sat-fdss-2018",
    ]
    algorithm_configs = {
        IssueConfig("fdss", [],
                    build_options=build_options,
                    driver_options=driver_options)
    }

    experiment = IssueExperiment(
        revisions=revisions,
        configs=algorithm_configs,
        environment=run_env,
    )
    experiment.add_suite(benchmark_root, satisficing_suite)

    # Only the single-search parser is enabled; the other standard lab
    # parsers are deliberately left out.
    experiment.add_parser(experiment.SINGLE_SEARCH_PARSER)

    table_attributes = experiment.DEFAULT_TABLE_ATTRIBUTES

    experiment.add_step('build', experiment.build)
    experiment.add_step('start', experiment.start_runs)
    experiment.add_fetcher(name='fetch')
    experiment.add_absolute_report_step(attributes=table_attributes)
    experiment.run_steps()
default=config['EXP_NAME'], help='path to directory to store results')
# (The line above closes an ARGPARSER.add_argument(...) call that is
# opened earlier in the file.)

# Parse the arguments
args = ARGPARSER.parse_args()
args.TIME_LIMIT = config['TIME_LIMIT']  # seconds
# NOTE(review): the original comment here also said "seconds", which is
# almost certainly a copy-paste slip -- confirm the real unit of
# MEMORY_LIMIT (probably MiB or bytes).
args.MEMORY_LIMIT = config['MEMORY_LIMIT']

#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx#
#---- SETUP EXPERIMENT -------------------------------------------------------#

# Setup local/remote environment
if REMOTE:
    ENV = None  # To be used later for HPC infrastructure
else:
    # NOTE: if "processes = NUM_PROCESSES" is omitted, then default is #CPUs
    ENV = LocalEnvironment(processes=args.NUM_PROC)

exp = Experiment(path=join(args.EXP_DIR, "results"), environment=ENV)
exp.add_parser(join(CWD, Path(LOG_PARSER)))

# Only rebuild when step '1'/'build' was requested (or all steps run).
if (len(args.steps)>0 and ('1' in args.steps or 'build' in args.steps)) or \
    args.run_all_steps :
    # Don't over-write instead create backups: rename the existing
    # experiment directory to <EXP_DIR>_<n>, trying up to 1000 suffixes.
    index = 0
    while( isdir(args.EXP_DIR) and index!=1000):
        index += 1
        try :
            rename(args.EXP_DIR, args.EXP_DIR+'_'+str(index))
        # NOTE(review): this bare except silently swallows every rename
        # failure (permissions, races); consider catching OSError.
        except :
            pass
    makedirs(args.EXP_DIR)