def __progress_model(model):
    """ Expand the model into the set of successor models obtained by performing
        every possible ground action. """
    universe, assignment, default_value = model
    functions_sorts = context_operator.get_functions_sorts()
    actions = context_operator.get_actions()
    # For each action symbol, keep only its parameter sorts (drop the result sort).
    actions_sorts = [(fun, sorts[:-1]) for fun, sorts in functions_sorts.iteritems()
                     if fun in actions]
    new_model_list = list()
    for action_name, sort_list in actions_sorts:
        # All ground parameter tuples drawn from the universe of each sort.
        paras_list = list(itertools.product(*[universe[sort] for sort in sort_list]))
        ground_actions = ["%s(%s)" % (action_name, ','.join(list(paras)))
                          for paras in paras_list]
        # Progress the model by every ground action whose precondition holds.
        model_list = [model_progress.progress(action, model)
                      for action in ground_actions
                      if model_progress.poss(action, model)]
        new_model_list.extend(model_list)
    return new_model_list
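
# A minimal sketch of how __progress_model can drive a bounded forward expansion.
# `initial_model` and the two-ply loop are illustrative only; the
# (universe, assignment, default_value) layout is the one unpacked above.
#
#     frontier = [initial_model]
#     for _ in range(2):
#         frontier = [succ for m in frontier for succ in __progress_model(m)]
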
def __get_actions(universe, player):
    """ Return all (action, parameters) tuples available to the given player,
        instantiating every non-player parameter from the universe. """
    actions = context_operator.get_actions()
    action_sorts_list = [(f, sort_list)
                         for f, sort_list in context_operator.get_functions_sorts().iteritems()
                         if f in actions]
    # The sort to which the player constant belongs.
    p_sort = [sort for sort, consts in context_operator.get_sort_symbols_dict().iteritems()
              if player in consts].pop()
    action_tuple_list = list()
    for a, sort_list in action_sorts_list:
        # Instantiate every parameter sort except the player's, which is fixed to
        # `player`; the last element of sort_list is the result sort and is skipped.
        paras_list = itertools.product(*[universe[s] if s != p_sort else [player]
                                         for s in sort_list[:-1]])
        action_tuple_list.extend([(a, paras) for paras in paras_list])
    return action_tuple_list
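
# Illustrative only: with a hypothetical action mark(Player, Cell) and
# universe = {'Cell': ['c1', 'c2'], 'Player': ['p1', 'p2']}, the call
# __get_actions(universe, 'p1') would yield ('mark', ('p1', 'c1')) and
# ('mark', ('p1', 'c2')), with the player slot pinned to 'p1'.
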
def __generate_pi_action(player):
    """ Generate a program denoting doing any possible action of the given player. """
    functions_sorts = context_operator.get_functions_sorts()
    sort_consts_dict = context_operator.get_sort_symbols_dict()
    # The sort to which the player constant belongs.
    p_sort = [sort for sort, consts in sort_consts_dict.iteritems() if player in consts].pop()
    # For each action: its parameter sorts and a matching argument list, where the
    # player's own parameter is fixed to `player` instead of a fresh variable.
    actions_sorts_vars = [(fun, sorts[:-1],
                           [context_operator.get_new_var() if s != p_sort else player
                            for s in sorts[:-1]])
                          for fun, sorts in functions_sorts.iteritems()
                          if fun in context_operator.get_actions()]
    # Wrap each action in a pi(...) construct that binds only its non-player variables.
    action_list = ["pi(%s)[%s(%s)]" % (','.join(["%s:%s" % (v, s)
                                                 for (v, s) in zip(v_list, s_list)
                                                 if s != p_sort]),
                                       action, ','.join(v_list))
                   for (action, s_list, v_list) in actions_sorts_vars]
    return '#'.join(action_list)
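
# Illustrative only: for a hypothetical action mark(Player, Cell), a call such as
# __generate_pi_action('p1') would contribute a fragment like "pi(v1:Cell)[mark(p1,v1)]"
# (the fresh variable name depends on context_operator.get_new_var()); the fragments
# for the remaining actions are joined with '#'.
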
def generate_pi_action():
    """ Generate a program denoting doing any possible action, with every action
        parameter bound under a single pi(...) construct. """
    functions_sorts = context_operator.get_functions_sorts()
    actions_sorts = [(fun, sorts) for fun, sorts in functions_sorts.iteritems()
                     if fun in context_operator.get_actions()]
    # Pair each action with fresh (variable, sort) tuples for its parameters
    # (the last sort is the result sort and is dropped).
    action_vars_sorts = [(action, __get_vars(sorts[:-1]))
                         for action, sorts in actions_sorts]
    actions = "#".join(["%s(%s)" % (action, ",".join(zip(*elem)[0]))
                        for action, elem in action_vars_sorts])
    vars_sorts = ','.join([elem[0] + ":" + elem[1]
                           for action, elem_list in action_vars_sorts
                           for elem in elem_list])
    return "pi(" + vars_sorts + ")[" + actions + "]"
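
# Illustrative only: with hypothetical actions mark(Player, Cell) and noop(Player),
# generate_pi_action() would return a string shaped like
# "pi(v1:Player,v2:Cell,v3:Player)[mark(v1,v2)#noop(v3)]"
# (variable names come from __get_vars and are illustrative; action order depends
# on dictionary iteration order).
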
def parser(filename):
    """ Parse the game description in ./input/<filename> together with the default
        axioms, populate context_operator with the extracted sorts, actions, fluents
        and regression lambdas, and dump the intermediate results to
        ./temp/game_rule_info. """
    with open('./input/%s' % filename, "r") as sc_file, \
         open('./input/default_axioms.sc', "r") as basic_file, \
         open('./temp/game_rule_info', "w") as sc_temp:
        # Flatten the two source files into a single one-line string.
        full_txt = " ".join(sc_file.readlines()).replace("\n", " ").replace("\t", " ")
        full_txt += " ".join(basic_file.readlines()).replace("\n", " ").replace("\t", " ") + ";"
        full_txt = full_txt.replace(' and ', '&').replace(' or ', '|').replace(' ', "")
        #logger.debug(full_txt)
        # Extract the rules matched by the three rule patterns.
        rule_list = pattern.rule3.findall(full_txt) + pattern.rule4.findall(full_txt) \
            + pattern.rule5.findall(full_txt)
        sc_temp.writelines('\n'.join([str(elem) for elem in rule_list]))
        rule_list = __pre_parse(rule_list)
        sc_temp.writelines('\n')
        sc_temp.writelines('\n\n')
        sc_temp.writelines('\n'.join([str(elem) for elem in rule_list]))
        # Group the rules by kind and dispatch each group to the matching
        # module-level parser __parse_<kind>.
        for k, g in groupby(sorted(rule_list, key=itemgetter(0)), key=itemgetter(0)):
            m_group = list(g)
            globals()["__parse_" + k](*m_group)
        predicates = __generate_predicates(context_operator.get_fluents(),
                                           context_operator.get_symbol_sorts_dict().keys())
        fun_fluents = [fluent for fluent in context_operator.get_fluents()
                       if fluent not in predicates]
        sc_temp.writelines('\n\n')
        sc_temp.writelines('feature pattern for regression:\n')
        sc_temp.writelines('\n'.join([str(elem) for elem in context_operator.get_feature()]))
        sc_temp.writelines('\n')
        sc_temp.writelines('\n actions:' + str(context_operator.get_actions()))
        sc_temp.writelines('\n fluents:' + str(context_operator.get_fluents()))
        sc_temp.writelines('\n 0arity-fluents:' + str(context_operator.get_zero_fluents()))
        sc_temp.writelines('\n predicates:' + str(predicates))
        sc_temp.writelines('\n functional fluents:' + str(fun_fluents))
        #logger.debug("\n actions :%s \n fluents %s" % (context_operator.get_actions(), context_operator.get_fluents()))
        # Collect sort information for constants, functions and predicates.
        sort_const = __get_sort_const_with_fluents(context_operator.get_sort_symbols_dict(),
                                                   context_operator.get_fluents())
        sort_const["Bool"] = ['True', 'False']
        sort_funs = __get_funs_sorts(context_operator.get_fluents() + context_operator.get_actions())
        sort_preds = __generate_predicate_sorts(predicates, sort_funs)
        sc_temp.writelines('\n')
        sc_temp.writelines('\n sort for constants:' + str(sort_const))
        sc_temp.writelines('\n sort for functions:' + str(sort_funs))
        sc_temp.writelines('\n sort for predicates:' + str(sort_preds))
        # Register everything with the context and build the regression lambdas.
        context_operator.set_sort_symbols_dict(sort_const)
        context_operator.set_functions_sorts(sort_funs)
        context_operator.add_predicates(predicates)
        context_operator.add_predicate_sorts(sort_preds)
        context_operator.add_nregx_function_patterns(
            __generate_nregx_function_patterns(fun_fluents))
        context_operator.set_function_regress_lambda(
            __generate_fun_regress_lambda(fun_fluents, sort_funs))
        context_operator.set_predicate_regress_lambda(
            __generate_pred_regress_lambda(predicates, sort_preds))
        domain_name = filename.split('.')[0]
        __load_state_constaints(domain_name)
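
# Illustrative usage (the domain file name is hypothetical): parser("tictactoe.sc")
# reads ./input/tictactoe.sc together with ./input/default_axioms.sc, populates
# context_operator with the extracted domain information, and writes a human-readable
# summary to ./temp/game_rule_info.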