def doit(filename): skip_size = 0 skip_dep = 0 if skip_size == 0 and skip_dep == 0: write_file(filename, 'agents,depth,fluents,inf-size,closed-size,reduced-size,inf-query,closed-query,reduced-query,inf-update,closed-update,reduced-update') for size in range(SIZE[0], SIZE[1] + 10, 10): for dep in range(DEPTH[0], DEPTH[1]+1): if size < skip_size: continue elif size == skip_size and dep < skip_dep: continue print print "--------------" print " %d x %d" % (size, dep) print "--------------" (times, sizes) = get_size_and_time(size, dep, FLUENTS) print print "-------------------------" append_file(filename, "\n%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f" % (size, dep, len(FLUENTS), sizes[0], sizes[1], sizes[2], times[0], times[1], times[2], times[3], times[4], times[5]))
def test_settings(domain, CMD):
    """Run CMD over all on/off combinations of the solver toggles and dump a CSV.

    Each toggle passed as '' means "feature enabled"; '-noXX' disables it.
    Results land in <domain>-results.csv with one 0/1 column per toggle.
    """
    problems = get_file_list("exp-input/%s" % domain)
    toggles = ['PP', 'CA', 'NCB', 'CC', 'IBCP']

    results = exp.run_experiment(
        base_command="%s -q" % CMD,
        single_arguments={
            'problem': problems,
            'PP': ['-noPP', ''],
            'CA': ['-noCA', ''],
            'CC': ['-noCC', ''],
            'NCB': ['-noNCB', ''],
            'IBCP': ['-noIBCP', '']
        },
        time_limit=TIMEOUT,
        memory_limit=MEMORY,
        results_dir="results-%s-settings" % domain,
        processors=CORES,
        progress_file=None)

    #-- Compile
    rows = ["%s,problem,size,runtime\n" % ','.join(toggles)]
    for res_id in results.get_ids():
        res = results[res_id]
        # '' for a toggle means the corresponding feature was left on.
        bits = ['1' if '' == res.single_args[tog] else '0' for tog in toggles]
        rows.append(','.join(bits) + ',')
        rows.append("%s,%d,%f\n" % _get_file_size_time_bdg(res))

    write_file(domain + '-results.csv', ''.join(rows))
def doit(): skip_ag = 0 skip_dep = 0 if skip_ag == 0 and skip_dep == 0: write_file( 'aamas.csv', 'agents,depth,fluents,inf-size,closed-size,reduced-size,inf-query,closed-query,reduced-query,inf-update,closed-update,reduced-update' ) for ag in range(AGENTS[0], AGENTS[1] + 1): for dep in range(DEPTH[0], DEPTH[1] + 1): if ag < skip_ag: continue elif ag == skip_ag and dep < skip_dep: continue print print "--------------" print " %d x %d" % (ag, dep) print "--------------" (times, sizes) = get_size_and_time(ag, dep, FLUENTS) print print "-------------------------" append_file( 'aamas.csv', "\n%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f" % (ag, dep, len(FLUENTS), sizes[0], sizes[1], sizes[2], times[0], times[1], times[2], times[3], times[4], times[5]))
def writeCNF(self, sourceFile):
    """Dump this weighted theory to sourceFile in DIMACS wcnf format.

    A clause weight of -1 marks a hard clause and is emitted with the
    theory's top weight.
    """
    parts = [
        'c\n',
        'c SAT instance in DIMACS CNF input format.\n',
        'c\n',
        "p wcnf %d %d %d" % (self.num_vars, self.num_clauses, self.top_weight),
        '\n',
    ]

    for (cls, weight) in self.clauses:
        if -1 == weight:
            parts.append("%d " % self.top_weight)
        else:
            parts.append("%d " % weight)
        for lit in cls:
            # Negated literals map through their wrapped object.
            if isinstance(lit, Not):
                parts.append("-%d" % self.mapping[lit.obj])
            else:
                parts.append("%d" % self.mapping[lit])
            parts.append(' ')
        parts.append('0\n')

    write_file(sourceFile, ''.join(parts))
def writeCNF(self, sourceFile, hard=False):
    """Write the theory to sourceFile.

    With hard=True, emit only the hard clauses in plain DIMACS cnf;
    otherwise emit a wcnf with hard clauses at top weight followed by
    the weighted soft clauses.
    """
    header = 'c\nc SAT instance in DIMACS CNF input format.\nc\n'

    if hard:
        lines = [header,
                 "p cnf %d %d\n" % (self.num_vars, len(self.hard_clauses))]
        for cls in self.hard_clauses:
            lines.append("%s 0\n" % ' '.join(map(str, cls)))
        write_file(sourceFile, ''.join(lines))
        return

    lines = [header,
             "p wcnf %d %d %d\n" % (self.num_vars, self.num_clauses, self.top_weight)]

    topw = self.top_weight
    for cls in self.hard_clauses:
        lines.append("%d %s 0\n" % (topw, ' '.join(map(str, cls))))
    for (cls, weight) in self.clauses:
        lines.append("%d %s 0\n" % (weight, ' '.join(map(str, cls))))

    write_file(sourceFile, ''.join(lines))
def writeCNF(self, sourceFile):
    """Dump this level-weighted theory to sourceFile in DIMACS wcnf format.

    Each level's clause weights are offset by the accumulated weight of all
    lower levels; a stored weight of -1 marks a hard clause (top weight).
    """
    pieces = [
        'c\n',
        'c SAT instance in DIMACS CNF input format.\n',
        'c\n',
        "p wcnf %d %d %d\n" % (self.num_vars, self.num_clauses, self.top_weight),
    ]

    # Compute the per-level weight offsets in ascending level order.
    level_weights = {}
    running = 0
    for level in sorted(self.clauses.keys()):
        level_weights[level] = running
        running += self.level_weight(level, running)

    for level in self.clauses.keys():
        for (cls, weight) in self.clauses[level]:
            if -1 == weight:
                pieces.append("%d " % self.top_weight)
            else:
                pieces.append("%d " % (weight + level_weights[level]))
            for lit in cls:
                if isinstance(lit, Not):
                    pieces.append("-%d" % self.mapping[lit.obj])
                else:
                    pieces.append("%d" % self.mapping[lit])
                pieces.append(' ')
            pieces.append('0\n')

    write_file(sourceFile, ''.join(pieces))
def solve(self):
    """Simulate the stored plan from the initial state and record the outcome.

    Sets self.executable, and either self.final_state / self.goal_holds /
    self.goal_violation on success, or self.first_fail / self.failed_state
    at the first inapplicable action.
    """
    # Write the domain pddl file in case we want to debug
    write_file('pdkb-domain.pddl', self.domain.pddl())

    # Index the domain's actions by name so plan steps can be resolved.
    self.act_map = {}
    for act in self.domain.actions:
        self.act_map[act.name] = act

    for step in self.actions:
        assert step in self.act_map, "%s not in %s" % (
            str(step), str(sorted(set(self.act_map.keys()))))

    # Simulate the plan, stopping at the first inapplicable action.
    state = self.init
    self.executable = True
    for step in self.actions:
        if not self.act_map[step].applicable(state):
            self.first_fail = step
            self.failed_state = state
            self.executable = False
            break
        state = self.act_map[step].apply(state)[0]

    # Check that the goal holds / store the result
    if self.executable:
        self.final_state = state
        self.goal_holds = (self.goal.rmls <= self.final_state.rmls)
        self.goal_violation = (self.goal.rmls - self.final_state.rmls)
    else:
        self.final_state = None
        self.goal_holds = False
def writeCNF(self, sourceFile, hard=False):
    """Write the theory to sourceFile.

    With hard=True, emit only the hard clauses as plain DIMACS cnf.
    Otherwise emit a wcnf: hard clauses at top weight, then each level's
    soft clauses with weights offset by the total weight of lower levels.
    """
    chunks = ['c\n', 'c SAT instance in DIMACS CNF input format.\n', 'c\n']

    if hard:
        chunks.append("p cnf %d %d\n" % (self.num_vars, len(self.hard_clauses)))
        for cls in self.hard_clauses:
            chunks.append("%s 0\n" % ' '.join(map(str, cls)))
        write_file(sourceFile, ''.join(chunks))
        return

    chunks.append("p wcnf %d %d %d\n" %
                  (self.num_vars, self.num_clauses, self.top_weight))

    # Per-level weight offsets, accumulated in ascending level order.
    level_weights = {}
    running = 0
    for level in sorted(self.clauses.keys()):
        level_weights[level] = running
        running += self.level_weight(level, running)

    for cls in self.hard_clauses:
        chunks.append("%d %s 0\n" % (self.top_weight, ' '.join(map(str, cls))))

    for level in self.clauses.keys():
        for (cls, weight) in self.clauses[level]:
            chunks.append("%d %s 0\n" % (weight + level_weights[level],
                                         ' '.join(map(str, cls))))

    write_file(sourceFile, ''.join(chunks))
def convert_PRE(input, output):
    """Convert a pre-processed planning task file back to normal operator form.

    Consumes `input` line by line: drops the leading MPT flag, copies
    everything up to (and including) 'end_goal', rewrites each operator's
    effect lines so the effect-condition pairs sit on a single line, and
    stops copying at the successor-generator ('begin_SG') section, which is
    discarded along with everything after it.  Writes the result to `output`.

    NOTE(review): assumes the output-file format expected by the downstream
    consumer matches this layout — confirm against the planner's parser.
    """

    def cpyrtn(old, new):
        # Copy the head line of `old` onto `new` and return it (consumes one line).
        new.append(old[0])
        return old.pop(0)

    old_lines = read_file(input)
    new_lines = []

    #-- Pop the top MPT flag
    old_lines.pop(0)

    #-- Convert the operators back to normal form
    #- Find the operator section
    while 'end_goal' != old_lines[0]:
        new_lines.append(old_lines.pop(0))
    new_lines.append(old_lines.pop(0))

    #- Get the number of operators
    num_ops = int(old_lines.pop(0))
    new_lines.append(str(num_ops))

    for op_num in range(num_ops):
        assert('begin_operator' == cpyrtn(old_lines, new_lines))
        op_name = cpyrtn(old_lines, new_lines)
        num_prevail = int(cpyrtn(old_lines, new_lines))
        # Prevail conditions are copied through unchanged.
        for prev_num in range(num_prevail):
            cpyrtn(old_lines, new_lines)
        num_effects = int(cpyrtn(old_lines, new_lines))
        for eff_num in range(num_effects):
            # Fold the "<#conds> <var val>... <effect>" lines into one line.
            preconds = []
            num_precond = old_lines.pop(0)
            for precond_num in range(int(num_precond)):
                var, val = old_lines.pop(0).split()
                preconds.append(var + " " + val)
            effect = old_lines.pop(0)
            new_lines.append(num_precond + " " + " ".join(preconds) + " " + effect)
        op_cost = int(cpyrtn(old_lines, new_lines))
        assert('end_operator' == cpyrtn(old_lines, new_lines))

    #-- Get rid of SG and everything after it.
    while 'begin_SG' != old_lines[0]:
        new_lines.append(old_lines.pop(0))

    write_file(output, new_lines)
def test_size_time(domain):
    """Compare ddnnf and c2d compilation size/runtime over a domain's problems.

    Runs both compilers on every file in exp-input/<domain>, collects
    per-problem (size, time) pairs, writes <domain>-results.csv, and removes
    the generated .nnf files.
    """
    print "\nTesting domain '%s'." % domain
    problems = get_file_list("exp-input/%s" % domain)

    # Per-problem lists: index 0 = ddnnf, index 1 = c2d.
    results_size = {}
    results_time = {}

    print "Doing ddnnf..."
    results_ddnnf = exp.run_experiment(base_command="./sharpSAT-ddnnf -q",
                                       single_arguments={'problem': problems},
                                       time_limit=TIMEOUT,
                                       results_dir="results-%s-ddnnf" % domain,
                                       processors=CORES,
                                       memory_limit=MEMORY,
                                       progress_file=None)
    for result_id in results_ddnnf.get_ids():
        file, size, time = _get_file_size_time_bdg(results_ddnnf[result_id])
        results_size[file] = [size]
        results_time[file] = [time]
    print "\t...done!"

    print "Doing c2d..."
    results_c2d = exp.run_experiment(base_command="c2d",
                                     parameters={'-in': problems},
                                     time_limit=TIMEOUT,
                                     results_dir="results-%s-c2d" % domain,
                                     processors=CORES,
                                     memory_limit=MEMORY,
                                     progress_file=None)
    for result_id in results_c2d.get_ids():
        # Appends to the lists seeded by the ddnnf pass above; assumes both
        # runs report the same problem files — TODO confirm.
        file, size, time = _get_file_size_time_c2d(results_c2d[result_id])
        results_size[file].append(size)
        results_time[file].append(time)
    print "\t...done!"

    #-- Compile
    file_output = "problem,ddnnf runtime,c2d runtime,ddnnf size,c2d size\n"
    for prob in results_size.keys():
        file_output += "%s,%f,%f,%d,%d\n" % (
            prob, results_time[prob][0], results_time[prob][1],
            results_size[prob][0], results_size[prob][1])

    write_file(domain + '-results.csv', file_output)

    #-- Cleanup
    os.system("rm exp-input/%s/*.nnf" % domain)
def solve(self): print "\n\nCond effs (orig): %d (%d)" % (self.comp_cond_count, self.orig_cond_count) # Write the pddl files write_file('pdkb-domain.pddl', self.domain.pddl()) write_file('pdkb-problem.pddl', self.pddl()) # Solve the problem planner_path = os.path.dirname(os.path.abspath(__file__)) chosen_planner = 'siw-then-bfsf' # Can use bfs_f, siw, or siw-then-bfsf run_command( "%s/planners/%s --domain pdkb-domain.pddl --problem pdkb-problem.pddl --output pdkb-plan.txt" % (planner_path, chosen_planner), output_file='pdkb-plan.out', MEMLIMIT=2000, TIMELIMIT=1800) self.plan = parse_output_ipc('pdkb-plan.txt')
def test_kripke(kb, rmls): print print kb print "\nClosing...\n" kb.logically_close() print kb print "Consistent: %s" % str(kb.is_consistent()) print "\nGenerating compressed kripke..." M = kb.generate_kripke(True) print print "Generating dot export..." M.generate_dot("graph.dot", True) lines = read_file("graph.dot") lines = [lines[0], " rankdir=LR;"] + lines[1:] write_file("graph.dot", lines) os.system("dot -Tpng graph.dot > graph.png") print "\nGenerating full kripke..." M = kb.generate_kripke(False) print print M.get_stats() print for rml in rmls: print "Assessing %s: %s" % (str(rml), M.assess_rml(Belief(0, rml))) print all_match = all([M.assess_rml(rml) for rml in kb.perspectival_view()]) print "All rmls match: %s" % str(all_match) if not all_match: print "Non-matching:" for rml in kb.construct_closed_world(): if not M.assess_rml(rml): print " %s" % str(rml) print
def writeCNF(self, sourceFile):
    """Dump this unweighted theory to sourceFile in plain DIMACS cnf format.

    Instance comments are included in the header's 'c' lines.
    """
    buf = ['c\n', 'c SAT instance in DIMACS CNF input format.\n']
    for comment in self.comments:
        buf.append('c ' + comment + '\n')
    buf.append('c\n')
    buf.append("p cnf %d %d" % (self.num_vars, self.num_clauses))
    buf.append('\n')

    for cls in self.clauses:
        for lit in cls:
            # Negated literals map through their wrapped object.
            if isinstance(lit, Not):
                buf.append("-%d" % self.mapping[lit.obj])
            else:
                buf.append("%d" % self.mapping[lit])
            buf.append(' ')
        buf.append('0\n')

    write_file(sourceFile, ''.join(buf))
def writeMapping(self, sourceFile):
    """Write the variable numbering to sourceFile, one "<num> <name>" per line,
    sorted by variable number."""
    numbered = sorted([(self.mapping[var], str(var)) for var in self.variables])
    write_file(sourceFile, ["%d %s" % (num, name) for (num, name) in numbered])
def handle_single(dom):
    """Generate <dom>/api.py for a domain that shares a single domain.pddl.

    Builds (domain-file, problem-file) pairs and pretty-prints them as a
    'domains' list.  freecell and satellite appear in two IPCs with different
    problem-file naming, so each emits a second entry for IPC-2002.
    """
    towrite = 'domains = [\n'
    extra_domain = False  # set when a second (IPC-2002) entry is needed

    domdata = {}
    domdata['name'] = get_name(dom)
    domdata['description'] = domain_description[get_name(dom)]

    # Special Cases:
    #   IPC-2000: freecell (non-pfiles)
    #   IPC-2002: satellite (p#-pfile#.pddl)
    #   IPC-2002: freecell (pfile#)
    if './freecell' == dom:
        extra_domain = True
        # IPC-2000 entry: everything except the pfile-style problems.
        domdata['problems'] = [
            ((dom + '/domain.pddl')[2:], prob[2:]) for prob in sorted(
                get_file_list(dom,
                              forbidden_list=forbidden_files +
                              ['pfile', '/domain.pddl']))
        ]
        domdata['ipc'] = '2000'

        # IPC-2002 entry: only the pfile-style problems.
        domdata2 = {}
        domdata2['name'] = domdata['name']
        domdata2['description'] = domain_description[get_name(dom)]
        domdata2['problems'] = [
            ((dom + '/domain.pddl')[2:], prob[2:]) for prob in sorted(
                get_file_list(dom,
                              forbidden_list=forbidden_files + ['/domain.pddl'],
                              match_list=['pfile']))
        ]
        domdata2['ipc'] = '2002'

    elif './satellite' == dom:
        extra_domain = True
        # First entry: every problem file.
        domdata['problems'] = [
            ((dom + '/domain.pddl')[2:], prob[2:]) for prob in sorted(
                get_file_list(
                    dom, forbidden_list=forbidden_files + ['/domain.pddl']))
        ]
        domdata['ipc'] = ipc_map.get(dom[2:])

        # IPC-2002 entry: same, minus the '-HC-' variants.
        domdata2 = {}
        domdata2['name'] = domdata['name']
        domdata2['description'] = domain_description[get_name(dom)]
        domdata2['problems'] = [
            ((dom + '/domain.pddl')[2:], prob[2:]) for prob in sorted(
                get_file_list(dom,
                              forbidden_list=forbidden_files +
                              ['/domain.pddl', '-HC-']))
        ]
        domdata2['ipc'] = '2002'

    else:
        # Default: one entry pairing domain.pddl with every problem file,
        # skipping alternate domain encodings.
        domdata['problems'] = [((
            dom + '/domain.pddl'
        )[2:], prob[2:]) for prob in sorted(
            get_file_list(
                dom,
                forbidden_list=forbidden_files +
                ['/domain.pddl', '/domain-nosplit.pddl', '/orig-domain.pddl']))
        ]
        domdata['ipc'] = ipc_map.get(dom[2:])

    towrite += pprint.pformat(domdata)

    if extra_domain:
        towrite += ',\n'
        towrite += pprint.pformat(domdata2)

    towrite += '\n]'

    #print "To be printed:\n-------"
    #print towrite
    #print "-------\n"

    print "Handling single domain: %s" % dom
    write_file(dom + '/api.py', towrite)
def handle_double(dom):
    """Generate <dom>/api.py for a domain with one domain file per problem.

    Matches each '...domain...' file to its problem file by stripping the
    domain markers from the filename; psr-small and airport use a custom
    p##-domain / p##- pairing scheme.  Unhandled layouts are reported and
    (for the balanced case) skipped.
    """
    towrite = 'domains = [\n'

    domdata = {}
    domdata['name'] = get_name(dom)
    domdata['description'] = domain_description[get_name(dom)]

    domfiles = get_file_list(dom, match_list=['domain'],
                             forbidden_list=forbidden_files)
    prbfiles = get_file_list(dom, forbidden_list=forbidden_files + ['domain'])

    if len(domfiles) == len(prbfiles):

        def remdom(dom):
            # Strip the domain-marker substrings to recover the problem name.
            toret = dom
            for s in ['-domain', 'domain_']:
                toret = ''.join(toret.split(s))
            return toret

        dmap = {remdom(d): d for d in domfiles}

        if all([k in prbfiles for k in dmap]):
            print "Handling multi-domain: %s" % dom
            assert len(set(dmap.keys())) == len(set(prbfiles))
            domdata['problems'] = [(dmap[prob][2:], prob[2:])
                                   for prob in sorted(prbfiles)]
            domdata['ipc'] = ipc_map.get(dom[2:])

        elif dom in ['./psr-small', './airport']:
            print "Handling custom 50-problem domain: %s" % dom
            # 50 domain files + 50 problem files expected.
            assert 100 == len(
                get_file_list(dom, match_list=['pddl'],
                              forbidden_list=forbidden_files))
            probs = []
            for i in range(1, 51):
                d = get_file_list(dom, match_list=["p%02d-domain" % i],
                                  forbidden_list=forbidden_files)
                p = get_file_list(dom, match_list=["p%02d-" % i],
                                  forbidden_list=forbidden_files + ['domain'])
                assert 1 == len(d), str(d)
                assert 1 == len(p), str(p)
                probs.append((d[0][2:], p[0][2:]))
            domdata['problems'] = sorted(probs)
            domdata['ipc'] = ipc_map.get(dom[2:])

        else:
            print "Unhandled balanced multi-domain: %s" % dom
            return
    else:
        # NOTE(review): this branch falls through and still writes api.py with
        # no 'problems' key — confirm whether that is intended.
        print "Unhandled lopsided multi-domain: %s" % dom

    towrite += pprint.pformat(domdata)
    towrite += '\n]'

    #print "To be printed:\n-------"
    #print towrite
    #print "-------\n"

    write_file(dom + '/api.py', towrite)
# Script: patch each matching domain file with a :requirements line and build a
# determinized ".fip" companion via the translate/determinizer tool.

import os

from krrt.utils import get_file_list, read_file, write_file, get_lines

# Assumes the 2nd arg is forbidden substrings and the 3rd is match substrings
# ('d_' files, skipping fixed/fip variants) — confirm against krrt.utils.
domains = get_file_list('.', ['fixed', 'fip'], ['d_'])

print domains

for dom in domains:
    # Fix the original domain: insert the requirements line after the header.
    lines = read_file(dom)
    fixed_dom = '.'+''.join(dom.split('.')[:-1])+'-fixed.pddl'
    write_file(fixed_dom, [lines[0]] + ["(:requirements :typing :strips :non-deterministic)"] + lines[1:])

    # Fix the fip version: keep the preface (up to the first :action), then
    # append the determinized actions and a closing paren.
    preface = get_lines(dom, upper_bound = ':action')
    fixed_dom_fip = fixed_dom + '.fip'
    write_file(fixed_dom_fip, preface)
    print "python ../../src/translate/determinizer.py %s p_1_1.pddl >> %s" % (fixed_dom, fixed_dom_fip)
    os.system("python ../../src/translate/determinizer.py %s p_1_1.pddl >> %s" % (fixed_dom, fixed_dom_fip))
    os.system('echo ")" >> ' + fixed_dom_fip)
redundant: Run the comparison for domains that have redundancy ablation: Run with various features disabled to see the impact they have. test: Run a complete test of all parameter settings (make sure to limit the domains) test-planlocal: Test the impact of the various planlocal settings test-deadend: Test the impact of the various deadend settings test-optscd: Test the impact of optimized-scd """ if not os.path.exists('settings.py'): print "\nNo settings detected. Creating settings.py...\n" s = "TRIALS = 10\n" s += "CORES = 1\n" s += "MEM_LIMIT = 2000\n" s += "TIME_LIMIT = 1800\n" s += "SHOW_DATA = True\n" write_file('settings.py', s) from settings import * BASEDIR = os.path.abspath(os.path.curdir) PARAMETERS = [ 'jic-limit', 'trials', 'forgetpolicy', 'fullstate', 'planlocal', 'partial-planlocal', 'plan-with-policy', 'limit-planlocal', 'detect-deadends', 'generalize-deadends', 'online-deadends', 'optimized-scd' ] PRP_PARAMS = { 'best': { '--jic-limit': [18000],
def encode_POP(dom, prob, pop, output, flags):
    """Encode a partial-order plan as a weighted MaxSAT instance.

    Creates boolean variables for action inclusion, pairwise ordering, and
    causal support; hard clauses enforce an irreflexive, transitive,
    threat-free ordering anchored at init/goal, and soft clauses (level 1:
    orderings, level 2: action inclusion) drive minimization.  Flags:
    'SERIAL' totally orders included actions, 'ALLACT' forces every action
    in, 'DEORDER' forbids reversing the input plan's links.

    Writes <output>.wcnf, <output>.cnf (hard clauses only), and
    <output>.map (human-readable variable legend).
    """
    # For sanitization, make sure we close the pop
    pop.transativly_close()

    allF, allA, I, G = parse_problem(dom, prob)

    F = pop.F
    A = pop.A
    I = pop.I
    G = pop.G
    init = pop.init
    goal = pop.goal

    # Index which actions add / delete each fluent.
    adders = {}
    deleters = {}
    for f in F:
        adders[f] = set([])
        deleters[f] = set([])
    for a in A:
        for f in a.adds:
            adders[f].add(a)
        for f in a.dels:
            deleters[f].add(a)

    VARNUM = 1

    # Create the vars for each action
    v2a = {}
    a2v = {}
    for a in A:
        v2a[VARNUM] = a
        a2v[a] = VARNUM
        VARNUM += 1

    # Create the vars for each action ordering
    v2o = {}
    o2v = {}
    for a1 in A:
        for a2 in A:
            v2o[VARNUM] = (a1, a2)
            o2v[(a1, a2)] = VARNUM
            VARNUM += 1

    # Create the vars for each possible action support
    v2s = {}
    s2v = {}
    for a2 in A:
        for p in a2.precond:
            for a1 in adders[p]:
                v2s[VARNUM] = (a1, p, a2)
                s2v[(a1, p, a2)] = VARNUM
                VARNUM += 1

    formula = OptimizedLevelWeightedFormula()

    # Add the antisymmetric ordering constraints
    for a in A:
        formula.addClause([-o2v[(a, a)]])

    # Add the transitivity constraints
    for a1 in A:
        for a2 in A:
            for a3 in A:
                formula.addClause([-o2v[(a1, a2)], -o2v[(a2, a3)], o2v[(a1, a3)]])

    # Add the ordering -> actions constraints
    for a1 in A:
        for a2 in A:
            formula.addClause([-o2v[(a1, a2)], a2v[a1]])
            formula.addClause([-o2v[(a1, a2)], a2v[a2]])

    # Make sure everything comes after the init, and before the goal
    for a in A:
        if a is not init:
            formula.addClause([-a2v[a], o2v[(init, a)]])
        if a is not goal:
            formula.addClause([-a2v[a], o2v[(a, goal)]])

    # Ensure that we have a goal and init action.
    formula.addClause([a2v[init]])
    formula.addClause([a2v[goal]])

    # Satisfy all the preconditions: an included action needs some supporter.
    for a2 in A:
        for p in a2.precond:
            formula.addClause([-a2v[a2]] +
                              [s2v[(a1, p, a2)]
                               for a1 in filter(lambda x: x is not a2, adders[p])])

    # Create unthreatened support
    for a2 in A:
        for p in a2.precond:
            for a1 in filter(lambda x: x is not a2, adders[p]):
                # Support implies ordering
                formula.addClause([-s2v[(a1, p, a2)], o2v[(a1, a2)]])
                # Forbid threats: any deleter is pushed before the supporter
                # or after the consumer.
                # print "%s--%s-->%s: %s" % (str(a1), str(p), str(a2), str(deleters[p]))
                for ad in filter(lambda x: x not in set([a1, a2]), deleters[p]):
                    # print "...%s--%s-->%s: %s" % (str(a1), str(p), str(a2), str(ad))
                    formula.addClause([-s2v[(a1, p, a2)], -a2v[ad],
                                       o2v[(ad, a1)], o2v[(a2, ad)]])

    # Add the main constraints that satisfy preconditions without threats
    # (older combined encoding, kept for reference)
    # for a in A:
    #     total = []
    #     for p in a.precond:
    #         subtheories = []
    #         for achiever in adders[p]:
    #             if achiever is not a:
    #                 subtheory = [set([o2v[(achiever, a)]])]
    #                 for deleter in deleters[p]:
    #                     if deleter is not a:
    #                         subtheory.append(set([-a2v[deleter], o2v[(deleter, achiever)], o2v[(a, deleter)]]))
    #                 subtheories.append(subtheory)
    #         mastertheory = subtheories.pop()
    #         while subtheories:
    #             currenttheory = subtheories.pop()
    #             mastertheory = [i | j for i in mastertheory for j in currenttheory]
    #         total.extend(mastertheory)
    #     total = [item | set([-a2v[a]]) for item in total]
    #     for cls in total:
    #         assert cls.__class__ == set
    #         hard_clauses.append(cls)

    if 'SERIAL' in flags:
        # Any two included actions must be ordered one way or the other.
        for a1 in A:
            for a2 in A:
                if a1 is not a2:
                    formula.addClause([-a2v[a1], -a2v[a2],
                                       o2v[(a1, a2)], o2v[(a2, a1)]])

    if 'ALLACT' in flags:
        for a in A:
            formula.addClause([a2v[a]])

    if 'DEORDER' in flags:
        # Keep the input plan's causal links from being reversed.
        for (ai, aj) in pop.get_links():
            formula.addClause([-o2v[(aj, ai)]])

    # Now add the soft clauses.
    for a1 in A:
        for a2 in A:
            formula.addClause([-o2v[(a1, a2)]], 1, 1)
        # formula.addClause([Not(a1)], COST, 2)
        formula.addClause([-a2v[a1]], 1, 2)

    formula.writeCNF(output + '.wcnf')
    formula.writeCNF(output + '.cnf', hard=True)

    # Human-readable legend for every variable number.
    mapping_lines = []
    for v in v2a:
        mapping_lines.append("%d %s in plan" % (v, str(v2a[v])))
    for v in v2o:
        mapping_lines.append("%d %s is ordered before %s" %
                             (v, str(v2o[v][0]), str(v2o[v][1])))
    for v in v2s:
        mapping_lines.append("%d %s supports %s with %s" %
                             (v, str(v2s[v][0]), str(v2s[v][2]), str(v2s[v][1])))
    write_file(output + '.map', mapping_lines)

    print ''
    print "Vars: %d" % formula.num_vars
    print "Clauses: %d" % formula.num_clauses
    print "Soft: %d" % len(formula.getSoftClauses())
    print "Hard: %d" % len(formula.getHardClauses())
    print "Max Weight: %d" % formula.top_weight
    print ''
def encode_POP_v2(dom, prob, pop, flags, popfile):
    """Encode POP minimal-reordering as a Gurobi MIP and solve it.

    Binary variables cover action inclusion, pairwise ordering, and causal
    support; linear constraints mirror the SAT encoding (irreflexive,
    transitive, threat-free ordering anchored at init/goal) and the
    objective minimizes the number of ordering constraints.  If `popfile`
    is given, the resulting POP is reconstructed, transitively reduced, and
    written as a dot file.

    Returns the reconstructed POP.
    NOTE(review): when `popfile` is falsy, `p` is never bound and the final
    `return p` raises NameError — confirm whether callers always pass a file.
    """
    # For sanitization, make sure we close the pop
    pop.transativly_close()

    allF, allA, I, G = parse_problem(dom, prob)

    F = pop.F
    A = pop.A
    I = pop.I
    G = pop.G
    init = pop.init
    goal = pop.goal

    # Index which actions add / delete / require each fluent.
    adders = {}
    deleters = {}
    needers = {}
    for f in F:
        adders[f] = set([])
        deleters[f] = set([])
        needers[f] = set([])
    for a in A:
        for f in a.adds:
            adders[f].add(a)
        for f in a.dels:
            deleters[f].add(a)
        for f in a.precond:
            needers[f].add(a)

    times = [time.time()]

    # Create a new model
    m = Model("min_reorder")
    m.Params.Threads = 1

    # Create the vars for each action
    v2a = {}
    a2v = {}
    for a in A:
        a2v[a] = m.addVar(vtype=GRB.BINARY, name="act_%s" % str(a))
        m.update()
        v2a[a2v[a]] = a

    # Create the vars for each action ordering
    v2o = {}
    o2v = {}
    for a1 in A:
        for a2 in A:
            o2v[(a1, a2)] = m.addVar(vtype=GRB.BINARY,
                                     name="ord_%s_%s" % (str(a1), str(a2)))
            m.update()
            v2o[o2v[(a1, a2)]] = (a1, a2)

    # Create the vars for each possible action support
    v2s = {}
    s2v = {}
    for a2 in A:
        for p in a2.precond:
            for a1 in adders[p]:
                s2v[(a1, p, a2)] = m.addVar(
                    vtype=GRB.BINARY,
                    name="sup_%s_%s_%s" % (str(a1), str(p), str(a2)))
                m.update()
                v2s[s2v[(a1, p, a2)]] = (a1, p, a2)

    # Integrate new variables
    m.update()

    order_count = 1 + len(o2v.keys())

    # Set objective
    # Use the first if only optimizing for the number of ordering constraints
    m.setObjective(quicksum(v2o.keys()), GRB.MINIMIZE)
    # m.setObjective(quicksum(v2o.keys() + [order_count * var for var in v2a.keys()]), GRB.MINIMIZE)

    #################
    ## Constraints ##
    #################

    # Uncomment the following if every action should be included
    for a in A:
        m.addConstr(a2v[a] == 1)

    # Add the antisymmetric ordering constraints
    for a in A:
        m.addConstr(o2v[(a, a)] == 0)

    # Add the transitivity constraints
    for a1 in A:
        for a2 in A:
            for a3 in A:
                x = o2v[(a1, a2)]
                y = o2v[(a2, a3)]
                z = o2v[(a1, a3)]
                m.addConstr((1 - x) + (1 - y) + z >= 1)

    # Add the ordering -> actions constraints
    for a1 in A:
        for a2 in A:
            m.addConstr(o2v[(a1, a2)] <= a2v[a1])
            m.addConstr(o2v[(a1, a2)] <= a2v[a2])

    # Init and goal
    m.addConstr(o2v[(init, goal)] == 1)
    for a in A - {init, goal}:
        m.addConstr((1 - a2v[a]) + o2v[(init, a)] == 1)
        m.addConstr((1 - a2v[a]) + o2v[(a, goal)] == 1)

    # Orderings exclude one another
    for a1 in A:
        for a2 in A:
            m.addConstr(o2v[(a1, a2)] + o2v[(a2, a1)] <= 1)

    # Ensure that we have a goal and init action.
    m.addConstr(a2v[init] == 1)
    m.addConstr(a2v[goal] == 1)

    # Satisfy all the preconditions
    for a2 in A:
        for p in a2.precond:
            m.addConstr((1 - a2v[a2]) + quicksum([
                s2v[(a1, p, a2)]
                for a1 in filter(lambda x: x is not a2, adders[p])
            ]) >= 1)

    # Create unthreatened support
    for a2 in A:
        for p in a2.precond:
            # Can't support yourself (not strictly neccessary, but useful for visualizing output)
            if (a2, p, a2) in s2v:
                m.addConstr(s2v[(a2, p, a2)] == 0)
            for a1 in filter(lambda x: x is not a2, adders[p]):
                # Support implies ordering
                m.addConstr((1 - s2v[(a1, p, a2)]) + o2v[(a1, a2)] >= 1)
                # Forbid threats
                # print "\n%s--%s-->%s: %s" % (str(a1), str(p), str(a2), str(deleters[p]))
                for ad in filter(lambda x: x not in set([a1, a2]), deleters[p]):
                    # print "...%s--%s-->%s: %s" % (str(a1), str(p), str(a2), str(ad))
                    m.addConstr(
                        (1 - s2v[(a1, p, a2)]) + (1 - a2v[ad]) +
                        o2v[(ad, a1)] + o2v[(a2, ad)] >= 1)

    #############################

    times.append(time.time())

    m.optimize()

    # for v in m.getVars():
    #     print v.varName, v.x

    print '\nObj:', m.objVal
    print "Actions: %d / %d" % (sum([int(v.x) for v in v2a.keys()]), len(A))
    print 'Orderings:', sum([int(v.x) for v in v2o.keys()])

    times.append(time.time())
    print "Encoding Time: %f" % (times[1] - times[0])
    print "Solving Time: %f\n" % (times[2] - times[1])

    if popfile:
        # Rebuild a POP from the chosen support / ordering variables.
        p = POP()
        for act in A:
            p.add_action(act)
        for v in v2s.keys():
            if 1 == int(v.x):
                p.link_actions(v2s[v][0], v2s[v][2], str(v2s[v][1]))
        for v in v2o.keys():
            if 1 == int(v.x):
                p.link_actions(v2o[v][0], v2o[v][1], '')

        ######################
        ##  OUTPUT SETTINGS ##
        ######################

        # Comment out if you want to see all of the edges in the closure
        p.transitivly_reduce()

        # Comment out if you want the initial state dummy action to be included
        # p.remove_action(p.init)

        # Change to True if you want just the nodes / edges and not the labels
        write_file(popfile, p.dot(False))

    print "POP ENCODING DONE!\n"
    return p
] results = run_experiment( base_directory=".", base_command='./../../src/plan-prp', single_arguments={'domprob': domprobs}, time_limit=1800, # 15minute time limit (900 seconds) memory_limit=1000, # 1gig memory limit (1000 megs) results_dir="%s/results" % domain, progress_file=None, # Print the progress to stdout processors=6, # You've got 8 cores, right? sandbox='fd_out', clean_sandbox=False) good_results = results.filter(lambda result: not result.timed_out) good_results = [good_results[i] for i in good_results.get_ids()] print "Coverage: %d / %d" % (len(good_results), len(domprobs)) try: data = ['runtime(s),size(nodes)'] + [ "%f,%d" % (res.runtime, get_value(res.output_file, '.*State-Action Pairs: (\d+)\n.*', int)) for res in good_results ] except: pass write_file("%s.csv" % domain, data)
def benchmark_domain(planner, bound, dom):
    """Run `planner` (with `bound`) over benchmark[dom] and write a CSV of results.

    Builds per-problem command arguments (format depends on the global TYPE),
    runs the experiment, classifies each run (timeout / memory / segfault),
    parses the planner's textual output for stats, and writes
    <results_directory>/<dom>.csv.
    """
    from krrt.utils import get_value, match_value, run_experiment, write_file

    print
    print "Benchmarking %s..." % dom

    if TYPE == OLD:
        domprob_args = [" --bound %s --domain %s/%s/%s --problem %s/%s/%s" %
                        (bound, ipc, dom, domain, ipc, dom, problem)
                        for (domain, problem) in benchmark[dom]]
    elif TYPE == NEW:
        domprob_args = ["%s/%s/%s %s/%s/%s o/dev/null" %
                        (ipc, dom, domain, ipc, dom, problem)
                        for (domain, problem) in benchmark[dom]]
    else:
        assert False, "What the deuce?"

    if os.path.exists(results_directory) is False:
        os.mkdir(results_directory)

    results = run_experiment(base_directory=".",
                             base_command=planner,
                             single_arguments={'domprob': domprob_args},
                             time_limit=timelimit,
                             memory_limit=memorylimit,
                             results_dir=results_directory,
                             progress_file=None,
                             processors=cores,
                             sandbox=None)

    data = []
    for resid in results.get_ids():
        res = results[resid]

        # Recover the problem name from the argument string (position varies
        # with the command format).
        if TYPE == OLD:
            prob = res.single_args['domprob'].split(' ')[-1].split('/')[-1]
        elif TYPE == NEW:
            prob = res.single_args['domprob'].split(' ')[-2].split('/')[-1]
        else:
            assert False, "What the deuce?"

        # Legacy planners are only parsed from the tail of their output.
        if 'old-' in planner:
            cmd("tail -26 %s > TMP_OUTPUT" % res.output_file)
            outfile = "TMP_OUTPUT"
        else:
            outfile = res.output_file

        # Keep a per-problem copy of the raw log.
        path, filename = os.path.split(outfile)
        os.system("cp %s %s" % (outfile, path + "/" + dom + "_" + prob + ".log"))

        if res.timed_out:
            data.append("%s,time,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*std::bad_alloc.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*MemoryError.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*cannot allocate memory.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*Segmentation fault.*'):
            data.append("%s,seg,-1,-1,-1,-1" % prob)

        # NOTE(review): the parse below runs even when a failure row was just
        # appended (there is no `else:`) — confirm whether that is intended;
        # also the `open(outfile)` handle is never closed.
        lines = open(outfile)
        quality = 0
        generated = 0
        expanded = 0
        pruned = 0
        width = 0
        h2 = 0
        h1 = 0
        time = 0.0
        plan = ""
        goal = ""
        for line in lines:
            if 'Plan found with cost:' in line:
                quality = int(line.split(':')[-1])
            if 'Nodes generated during search:' in line:
                generated = int(line.split(':')[-1])
            if 'Nodes expanded during search:' in line:
                expanded = int(line.split(':')[-1])
            if 'Nodes pruned by bound:' in line:
                pruned = int(line.split(':')[-1])
            if 'Effective width:' in line:
                width = int(line.split(':')[-1])
            if 'Total time:' in line:
                time = float(line.split(':')[-1])
            if 'h1:' in line:
                # "inf" heuristic values are recorded as a large sentinel.
                if "inf" in line:
                    h1 = 99999
                else:
                    h1 = int(line.split(':')[-1])
            if 'h2:' in line:
                if "inf" in line:
                    h2 = 99999
                else:
                    h2 = int(line.split(':')[-1])
            if 'plan:' in line:
                plan = (line.split(':')[-1]).strip()
            if 'Goal:' in line:
                goal = (line.split(':')[-1]).strip()
            if '****' in line:
                # End-of-record marker: flush one CSV row and reset the stats.
                data.append("%s,%s,ok,%f,%d,%d,%d,%d,%d,%d,%d,%s" %
                            (prob, goal, time, generated, pruned, expanded,
                             width, quality, h1, h2, plan))
                quality = 0
                generated = 0
                expanded = 0
                pruned = 0
                width = 0
                h2 = 0
                h1 = 0
                time = 0.0
                plan = ""
                goal = ""

        os.system("rm " + res.output_file)
        os.system("rm %s.err" % res.output_file)

    data.sort()
    data = ['problem,status,goal,runtime,generated,pruned,expanded,width,quality,h1,h2,plan'] + data

    write_file("%s/%s.csv" % (results_directory, dom), data)
def benchmark_domain(planner, bound, dom):
    """Run `planner` (with `bound`) over benchmark[dom] and write a CSV of results.

    Builds per-problem command arguments (format depends on the global TYPE),
    runs the experiment, classifies each run (timeout / memory / segfault),
    parses the planner's textual output for stats, and writes
    <results_directory>/<dom>.csv.
    """
    from krrt.utils import get_value, match_value, run_experiment, write_file

    print
    print "Benchmarking %s..." % dom

    if TYPE == OLD:
        domprob_args = [
            " --bound %s --domain %s/%s/%s --problem %s/%s/%s" %
            (bound, ipc, dom, domain, ipc, dom, problem)
            for (domain, problem) in benchmark[dom]
        ]
    elif TYPE == NEW:
        domprob_args = [
            "%s/%s/%s %s/%s/%s o/dev/null" %
            (ipc, dom, domain, ipc, dom, problem)
            for (domain, problem) in benchmark[dom]
        ]
    else:
        assert False, "What the deuce?"

    if os.path.exists(results_directory) is False:
        os.mkdir(results_directory)

    results = run_experiment(base_directory=".",
                             base_command=planner,
                             single_arguments={'domprob': domprob_args},
                             time_limit=timelimit,
                             memory_limit=memorylimit,
                             results_dir=results_directory,
                             progress_file=None,
                             processors=cores,
                             sandbox=None)

    data = []
    for resid in results.get_ids():
        res = results[resid]

        # Recover the problem name from the argument string (position varies
        # with the command format).
        if TYPE == OLD:
            prob = res.single_args['domprob'].split(' ')[-1].split('/')[-1]
        elif TYPE == NEW:
            prob = res.single_args['domprob'].split(' ')[-2].split('/')[-1]
        else:
            assert False, "What the deuce?"

        # Legacy planners are only parsed from the tail of their output.
        if 'old-' in planner:
            cmd("tail -26 %s > TMP_OUTPUT" % res.output_file)
            outfile = "TMP_OUTPUT"
        else:
            outfile = res.output_file

        # Keep a per-problem copy of the raw log.
        path, filename = os.path.split(outfile)
        os.system("cp %s %s" % (outfile, path + "/" + dom + "_" + prob + ".log"))

        if res.timed_out:
            data.append("%s,time,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*std::bad_alloc.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*MemoryError.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*cannot allocate memory.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*Segmentation fault.*'):
            data.append("%s,seg,-1,-1,-1,-1" % prob)

        # NOTE(review): the parse below runs even when a failure row was just
        # appended (there is no `else:`) — confirm whether that is intended;
        # also the `open(outfile)` handle is never closed.
        lines = open(outfile)
        quality = 0
        generated = 0
        expanded = 0
        pruned = 0
        width = 0
        h2 = 0
        h1 = 0
        time = 0.0
        plan = ""
        goal = ""
        for line in lines:
            if 'Plan found with cost:' in line:
                quality = int(line.split(':')[-1])
            if 'Nodes generated during search:' in line:
                generated = int(line.split(':')[-1])
            if 'Nodes expanded during search:' in line:
                expanded = int(line.split(':')[-1])
            if 'Nodes pruned by bound:' in line:
                pruned = int(line.split(':')[-1])
            if 'Effective width:' in line:
                width = int(line.split(':')[-1])
            if 'Total time:' in line:
                time = float(line.split(':')[-1])
            if 'h1:' in line:
                # "inf" heuristic values are recorded as a large sentinel.
                if "inf" in line:
                    h1 = 99999
                else:
                    h1 = int(line.split(':')[-1])
            if 'h2:' in line:
                if "inf" in line:
                    h2 = 99999
                else:
                    h2 = int(line.split(':')[-1])
            if 'plan:' in line:
                plan = (line.split(':')[-1]).strip()
            if 'Goal:' in line:
                goal = (line.split(':')[-1]).strip()
            if '****' in line:
                # End-of-record marker: flush one CSV row and reset the stats.
                data.append("%s,%s,ok,%f,%d,%d,%d,%d,%d,%d,%d,%s" %
                            (prob, goal, time, generated, pruned, expanded,
                             width, quality, h1, h2, plan))
                quality = 0
                generated = 0
                expanded = 0
                pruned = 0
                width = 0
                h2 = 0
                h1 = 0
                time = 0.0
                plan = ""
                goal = ""

        os.system("rm " + res.output_file)
        os.system("rm %s.err" % res.output_file)

    data.sort()
    data = [
        'problem,status,goal,runtime,generated,pruned,expanded,width,quality,h1,h2,plan'
    ] + data

    write_file("%s/%s.csv" % (results_directory, dom), data)
def benchmark_domain(planner, dom):
    """Run `planner` on every (domain, problem) pair in benchmark[dom] and
    write one CSV of results to <results_directory>/<dom>.csv.

    Relies on module-level globals: TYPE/OLD/NEW (argument-style switch),
    ipc (benchmark root dir), benchmark (dom -> [(domain, problem)]),
    results_directory, timelimit, memorylimit, cores, and the krrt `cmd`
    helper.  Python 2 code (print statements).
    """
    from krrt.utils import get_value, match_value, run_experiment, write_file
    print
    print "Benchmarking %s..." % dom
    # Build one command-line tail per benchmark instance; the flag style
    # depends on which planner generation (TYPE) is being driven.
    if TYPE == OLD:
        domprob_args = ["--domain %s/%s/%s --problem %s/%s/%s" % (ipc, dom, domain, ipc, dom, problem)
                        for (domain, problem) in benchmark[dom]]
    elif TYPE == NEW:
        # NOTE(review): "o/dev/null" looks like a typo for "/dev/null" (or
        # "-o /dev/null") — confirm against the planner's CLI before changing.
        domprob_args = ["%s/%s/%s %s/%s/%s o/dev/null" % (ipc, dom, domain, ipc, dom, problem)
                        for (domain, problem) in benchmark[dom]]
    else:
        assert False, "What the deuce?"
    if os.path.exists(results_directory) is False:
        os.mkdir(results_directory)
    # Fan the runs out over `cores` processors with krrt's experiment runner.
    results = run_experiment(base_directory=".",
                             base_command=planner,
                             single_arguments={'domprob': domprob_args},
                             time_limit=timelimit,
                             memory_limit=memorylimit,
                             results_dir=results_directory,
                             progress_file=None,
                             processors=cores,
                             sandbox=None)
    data = []
    for resid in results.get_ids():
        res = results[resid]
        # Recover the problem file name from the argument string; its position
        # differs between the OLD and NEW argument styles.
        if TYPE == OLD:
            prob = res.single_args['domprob'].split(' ')[-1].split('/')[-1]
        elif TYPE == NEW:
            prob = res.single_args['domprob'].split(' ')[-2].split('/')[-1]
        else:
            assert False, "What the deuce?"
        # Legacy planners dump huge logs; only the last 26 lines carry stats.
        if 'old-' in planner:
            cmd("tail -26 %s > TMP_OUTPUT" % res.output_file)
            outfile = "TMP_OUTPUT"
        else:
            outfile = res.output_file
        # Keep a per-domain copy of the (possibly truncated) log for later.
        os.system("cp %s %s" % (outfile, outfile + dom))
        # Triage failures first (timeout, OOM, segfault) by scanning stderr;
        # order matters — the first matching category wins.
        if res.timed_out:
            data.append("%s,time,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*std::bad_alloc.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*MemoryError.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*cannot allocate memory.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*Segmentation fault.*'):
            data.append("%s,seg,-1,-1,-1,-1" % prob)
        else:
            # Successful run: each branch below recognises the output format
            # of one planner family and extracts its stats.
            if match_value(outfile, '.*Effective Width during search*'):
                # Width-reporting planner: scan line-by-line for each stat.
                lines = open(outfile)
                quality = 0
                generated = 0
                expanded = 0
                width = 0
                for line in lines:
                    if 'Plan found with cost:' in line:
                        quality = int(line.split(':')[-1])
                    if 'Nodes generated during search:' in line:
                        generated = int(line.split(':')[-1])
                    if 'Nodes expanded during search:' in line:
                        expanded = int(line.split(':')[-1])
                    if 'Effective Width during search:' in line:
                        width = int(line.split(':')[-1])
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, width))
            elif match_value(outfile, '([0-9]+).* .*actions in the plan.'):
                # "... actions in the plan." style: only plan length (and
                # possibly backtracks) is available.
                quality = get_value(outfile, '([0-9]+).* .*actions in the plan.', int)
                generated = 0
                expanded = 0
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*Plan found with cost: ([0-9]+).*'):
                quality = get_value(outfile, '.*Plan found with cost: ([0-9]+).*', int)
                generated = get_value(outfile, '.*Nodes generated during search: ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Nodes expanded during search: ([0-9]+).*', int)
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*Length : ([0-9]+).*'):
                # NOTE(review): here `generated` is read from "Expanded nodes"
                # and `expanded` from "Evaluated nodes" — looks swapped;
                # confirm against that planner's output before relying on it.
                quality = get_value(outfile, '.*Length : ([0-9]+).*', int)
                generated = get_value(outfile, '.*Expanded nodes : ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Evaluated nodes : ([0-9]+).*', int)
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*Plan cost: ([0-9]+\.[0-9]+), steps.*'):
                quality = get_value(outfile, '.*Plan cost: ([0-9]+\.[0-9]+), steps.*', float)
                generated = get_value(outfile, '.*Generated: ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Expanded: ([0-9]+).*', int)
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*NOT I-REACHABLE.*'):
                # Planner proved the goal unreachable from the initial state.
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,not-i,%f,-1,-1,-1,%d" % (prob, res.runtime, backtracks))
            else:
                print "Error with %s" % prob
                data.append("%s,err,%f,-1,-1,-1" % (prob, res.runtime))
    if 'old-' in planner:
        cmd("rm TMP_OUTPUT")
    data.sort()
    # NOTE(review): the header names 6 columns but "ok"/"not-i" rows carry 7
    # fields (trailing width/backtracks column) — confirm with whatever
    # consumes these CSVs before changing either side.
    data = ['problem,status,runtime,quality,generated,expanded'] + data
    write_file("%s/%s.csv" % (results_directory, dom), data)
) (:action start-zooming :parameters (?a - aircraft ?c1 ?c2 - city ?l1 ?l2 ?l3 - flevel) :precondition (and (at-aircraft ?a ?c1) (fuel-level ?a ?l1) (next ?l2 ?l1) (next ?l3 ?l2) (not-refueling ?a)\n""" out += " " out += ' '.join( ["(not-boarding p%d) (not-debarking p%d)" % (i, i) for i in range(peeps)]) out += ")\n" out += """ :effect (and (not (at-aircraft ?a ?c1)) (zooming ?a ?c2)) ) (:action complete-zooming :parameters (?a - aircraft ?c2 - city ?l1 ?l2 ?l3 - flevel) :precondition (and (zooming ?a ?c2) (fuel-level ?a ?l1) (next ?l2 ?l1) (next ?l3 ?l2)) :effect (oneof (and) (and (not (zooming ?a ?c2)) (at-aircraft ?a ?c2) (not (fuel-level ?a ?l1)) (fuel-level ?a ?l3))) ) (:action start-refueling :parameters (?a - aircraft ?c - city ?l ?l1 - flevel) :precondition (and (at-aircraft ?a ?c) (not-refueling ?a) (fuel-level ?a ?l) (next ?l ?l1)) :effect (and (refueling ?a) (not (not-refueling ?a))) ) (:action complete-refuling :parameters (?a - aircraft ?l ?l1 - flevel) :precondition (and (refueling ?a) (fuel-level ?a ?l) (next ?l ?l1)) :effect (oneof (and) (and (not (refueling ?a)) (not-refueling ?a) (fuel-level ?a ?l1) (not (fuel-level ?a ?l)))) ) )""" write_file("d%s" % prob[1:], out)
def benchmark_domain(planner, dom, extra_args, lmcut=False):
    """Run `planner` (with `extra_args` appended to every instance's
    arguments) on all of benchmark[dom] and write <results_directory>/<dom>.csv.

    When `lmcut` is True the runs are sandboxed in a per-domain directory and
    `ipc` is rewritten to a '../'-relative path (mutates the global).

    Relies on module-level globals: TYPE/OLD/NEW, ipc, benchmark,
    results_directory, timelimit, memorylimit, cores, and the krrt `cmd`
    helper.  Python 2 code (print statements).
    """
    from krrt.utils import get_value, match_value, run_experiment, write_file, get_lines
    print "Benchmarking %s..." % dom
    global ipc
    # Sandboxed (lmcut) runs execute one directory deeper, so the benchmark
    # root must be addressed relative to the sandbox.  Idempotent: the prefix
    # is only added once.
    if lmcut and '../' != ipc[:3]:
        ipc = '../' + ipc
    # Build one command-line tail per benchmark instance; flag style depends
    # on the planner generation (TYPE).
    if TYPE == OLD:
        domprob_args = ["--domain %s/%s/%s --problem %s/%s/%s %s" % (ipc, dom, domain, ipc, dom, problem, extra_args)
                        for (domain, problem) in benchmark[dom]]
    elif TYPE == NEW:
        #domprob_args = ["%s/%s/%s %s/%s/%s o/dev/null" % (ipc,dom,domain,ipc,dom,problem) for (domain, problem) in benchmark[dom]]
        domprob_args = ["%s/%s/%s %s/%s/%s %s" % (ipc, dom, domain, ipc, dom, problem, extra_args)
                        for (domain, problem) in benchmark[dom]]
    else:
        assert False, "What the deuce?"
    if os.path.exists(results_directory) is False:
        os.mkdir(results_directory)
    sand = None
    if lmcut:
        sand = 'lmcut-' + dom
    # Fan the runs out over `cores` processors; sandbox is cleaned afterwards.
    results = run_experiment(base_directory=".",
                             base_command=planner,
                             single_arguments={'domprob': domprob_args},
                             time_limit=timelimit,
                             memory_limit=memorylimit,
                             results_dir=results_directory,
                             progress_file=None,
                             processors=cores,
                             clean_sandbox=True,
                             sandbox=sand)
    data = []
    for resid in results.get_ids():
        res = results[resid]
        # Recover the problem file name; its position in the argument string
        # differs per mode.
        # NOTE(review): in the lmcut+OLD combination split()[1] would pick the
        # *domain* file, not the problem — presumably lmcut is only used with
        # TYPE == NEW; confirm with callers.
        if lmcut:
            prob = res.single_args['domprob'].split()[1].split('/')[-1]
        elif TYPE == OLD:
            prob = res.single_args['domprob'].split(' ')[-1].split('/')[-1]
        elif TYPE == NEW:
            prob = res.single_args['domprob'].split(' ')[-4].split('/')[-1]
        else:
            assert False, "What the deuce?"
        # Legacy planners dump huge logs; only the last 26 lines carry stats.
        if 'old-' in planner:
            cmd("tail -26 %s > TMP_OUTPUT" % res.output_file)
            outfile = "TMP_OUTPUT"
        else:
            outfile = res.output_file
        # Keep a per-domain copy of the (possibly truncated) log for later.
        os.system("cp %s %s" % (outfile, outfile + dom))
        # Triage failures by scanning stderr; first matching category wins.
        if res.timed_out:
            data.append("%s,time,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*std::bad_alloc.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*MemoryError.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*cannot allocate memory.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*Segmentation fault.*'):
            data.append("%s,seg,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*This configuration does not support axioms.*'):
            data.append("%s,ax,-1,-1,-1,-1" % prob)
        else:
            # Successful run: this variant only understands the
            # "Plan found with cost:" output format.
            if match_value(outfile, '.*Plan found with cost: ([0-9]+).*'):
                quality = get_value(outfile, '.*Plan found with cost: ([0-9]+).*', int)
                generated = get_value(outfile, '.*Nodes generated during search: ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Nodes expanded during search: ([0-9]+).*', int)
                data.append("%s,ok,%.2f,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded))
            else:
                print "Error with %s" % prob
                data.append("%s,err,%.2f,-1,-1,-1" % (prob, res.runtime))
    if 'old-' in planner:
        cmd("rm TMP_OUTPUT")
    data.sort()
    header = ['problem,status,runtime,quality,generated,expanded']
    data = header + data
    write_file("%s/%s.csv" % (results_directory, dom), data)
'./ged-opt14-strips', './trucks-strips', './grid', './transport-opt14-strips', './satellite', './woodworking-opt11-strips' ]) domains = get_file_list('.', forbidden_list=['.py']) def gen_js(domain): toret = "\t\tDomain.forge({dom_name:'%s', description:'%s'}).save().then(function(model) {\n" % ( domain['name'], domain['description']) for (d, p) in domain['problems']: pname = p.split('/')[-1] domurl = "classical/%s" % d proburl = "classical/%s" % p toret += "\t\t\tProblem.forge({prob_name:'%s', domain:model.id, dom_url:'%s', prob_url:'%s'}).save();\n" % ( pname, domurl, proburl) toret += "\t\t});\n" return toret dbcode = '' for dom in domains: if dom not in done_domains: done_domains.add(dom) mod = importlib.import_module(dom[2:] + '.api') for d in mod.domains: dbcode += '\n' dbcode += gen_js(d) print "\ndone_domains = %s\n" % str(done_domains) write_file('out.js.py', dbcode)
def benchmark_domain(planner, dom):
    """Run `planner` on every (domain, problem) pair in benchmark[dom] and
    write one CSV of results to <results_directory>/<dom>.csv.

    NOTE(review): near-verbatim duplicate of the other two-argument
    benchmark_domain in this file — candidate for consolidation.

    Relies on module-level globals: TYPE/OLD/NEW, ipc, benchmark,
    results_directory, timelimit, memorylimit, cores, and the krrt `cmd`
    helper.  Python 2 code (print statements).
    """
    from krrt.utils import get_value, match_value, run_experiment, write_file
    print
    print "Benchmarking %s..." % dom
    # Build one command-line tail per benchmark instance; the flag style
    # depends on the planner generation (TYPE).
    if TYPE == OLD:
        domprob_args = ["--domain %s/%s/%s --problem %s/%s/%s" % (ipc, dom, domain, ipc, dom, problem)
                        for (domain, problem) in benchmark[dom]]
    elif TYPE == NEW:
        # NOTE(review): "o/dev/null" looks like a typo for "/dev/null" (or
        # "-o /dev/null") — confirm against the planner's CLI before changing.
        domprob_args = ["%s/%s/%s %s/%s/%s o/dev/null" % (ipc, dom, domain, ipc, dom, problem)
                        for (domain, problem) in benchmark[dom]]
    else:
        assert False, "What the deuce?"
    if os.path.exists(results_directory) is False:
        os.mkdir(results_directory)
    # Fan the runs out over `cores` processors with krrt's experiment runner.
    results = run_experiment(base_directory=".",
                             base_command=planner,
                             single_arguments={'domprob': domprob_args},
                             time_limit=timelimit,
                             memory_limit=memorylimit,
                             results_dir=results_directory,
                             progress_file=None,
                             processors=cores,
                             sandbox=None)
    data = []
    for resid in results.get_ids():
        res = results[resid]
        # Recover the problem file name from the argument string; its position
        # differs between the OLD and NEW argument styles.
        if TYPE == OLD:
            prob = res.single_args['domprob'].split(' ')[-1].split('/')[-1]
        elif TYPE == NEW:
            prob = res.single_args['domprob'].split(' ')[-2].split('/')[-1]
        else:
            assert False, "What the deuce?"
        # Legacy planners dump huge logs; only the last 26 lines carry stats.
        if 'old-' in planner:
            cmd("tail -26 %s > TMP_OUTPUT" % res.output_file)
            outfile = "TMP_OUTPUT"
        else:
            outfile = res.output_file
        # Keep a per-domain copy of the (possibly truncated) log for later.
        os.system("cp %s %s" % (outfile, outfile + dom))
        # Triage failures first (timeout, OOM, segfault) by scanning stderr;
        # order matters — the first matching category wins.
        if res.timed_out:
            data.append("%s,time,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*std::bad_alloc.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*MemoryError.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*cannot allocate memory.*'):
            data.append("%s,mem,-1,-1,-1,-1" % prob)
        elif match_value("%s.err" % res.output_file, '.*Segmentation fault.*'):
            data.append("%s,seg,-1,-1,-1,-1" % prob)
        else:
            # Successful run: each branch below recognises the output format
            # of one planner family and extracts its stats.
            if match_value(outfile, '.*Effective Width during search*'):
                # Width-reporting planner: scan line-by-line for each stat.
                lines = open(outfile)
                quality = 0
                generated = 0
                expanded = 0
                width = 0
                for line in lines:
                    if 'Plan found with cost:' in line:
                        quality = int(line.split(':')[-1])
                    if 'Nodes generated during search:' in line:
                        generated = int(line.split(':')[-1])
                    if 'Nodes expanded during search:' in line:
                        expanded = int(line.split(':')[-1])
                    if 'Effective Width during search:' in line:
                        width = int(line.split(':')[-1])
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, width))
            elif match_value(outfile, '([0-9]+).* .*actions in the plan.'):
                # "... actions in the plan." style: only plan length (and
                # possibly backtracks) is available.
                quality = get_value(outfile, '([0-9]+).* .*actions in the plan.', int)
                generated = 0
                expanded = 0
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*Plan found with cost: ([0-9]+).*'):
                quality = get_value(outfile, '.*Plan found with cost: ([0-9]+).*', int)
                generated = get_value(outfile, '.*Nodes generated during search: ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Nodes expanded during search: ([0-9]+).*', int)
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*Length : ([0-9]+).*'):
                # NOTE(review): here `generated` is read from "Expanded nodes"
                # and `expanded` from "Evaluated nodes" — looks swapped;
                # confirm against that planner's output before relying on it.
                quality = get_value(outfile, '.*Length : ([0-9]+).*', int)
                generated = get_value(outfile, '.*Expanded nodes : ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Evaluated nodes : ([0-9]+).*', int)
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*Plan cost: ([0-9]+\.[0-9]+), steps.*'):
                quality = get_value(outfile, '.*Plan cost: ([0-9]+\.[0-9]+), steps.*', float)
                generated = get_value(outfile, '.*Generated: ([0-9]+).*', int)
                expanded = get_value(outfile, '.*Expanded: ([0-9]+).*', int)
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,ok,%f,%d,%d,%d,%d" % (prob, res.runtime, quality, generated, expanded, backtracks))
            elif match_value(outfile, '.*NOT I-REACHABLE.*'):
                # Planner proved the goal unreachable from the initial state.
                backtracks = 0
                if match_value(outfile, '.*Backtracks during search: ([0-9]+).*'):
                    backtracks = get_value(outfile, '.*Backtracks during search: ([0-9]+).*', int)
                data.append("%s,not-i,%f,-1,-1,-1,%d" % (prob, res.runtime, backtracks))
            else:
                print "Error with %s" % prob
                data.append("%s,err,%f,-1,-1,-1" % (prob, res.runtime))
    if 'old-' in planner:
        cmd("rm TMP_OUTPUT")
    data.sort()
    # NOTE(review): the header names 6 columns but "ok"/"not-i" rows carry 7
    # fields (trailing width/backtracks column) — confirm with whatever
    # consumes these CSVs before changing either side.
    data = ['problem,status,runtime,quality,generated,expanded'] + data
    write_file("%s/%s.csv" % (results_directory, dom), data)