def compute_optimal_matching(fact_stores, partners, guards, match_steps):
    # Greedily pick the partner constraint with the highest matching score,
    # emit a lookup step (plus any guards that can already be scheduled),
    # then recurse on the remaining partners.
    if len(partners) == 0:
        return match_steps
    curr_max_score = -100
    curr_guard_steps = []
    curr_guards_rest = []
    curr_best_partner = None
    curr_propagated = None
    curr_index = -1
    curr_free_terms = None
    for index in xrange(0, len(partners)):
        propagated, partner = partners[index]
        num_of_joins = 0
        num_of_free = 0
        num_of_const = 0
        free_terms = []
        for term in partner.get_terms():
            if term.is_var():
                if term.is_binded():
                    num_of_joins += 1
                else:
                    free_terms.append(term)
                    num_of_free += 1
            elif term.is_const():
                num_of_const += 1
        partner.exist_bind_terms()
        guard_steps, guards_rest = schedule_guards(guards)
        guard_count = len(guard_steps)
        # Score: already-bound (join) variables dominate, then schedulable
        # guards and constants; unbound variables penalize the candidate.
        new_max_score = num_of_joins * 10 + guard_count + num_of_const - num_of_free
        if curr_max_score <= new_max_score:
            curr_max_score = new_max_score
            curr_guard_steps = guard_steps
            curr_guards_rest = guards_rest
            curr_best_partner = partner
            curr_propagated = propagated
            curr_index = index
            curr_free_terms = free_terms
        # Undo the trial bindings before scoring the next candidate.
        for term in free_terms:
            term.unbind()
    lookup_info = fact_stores[curr_best_partner.sym_id].generate_lookup(curr_best_partner)
    best_match_step = {
        'is_lookup': True,
        'propagated': curr_propagated,
        'lookup_index': lookup_info['lookup_index'],
        'fact_pat': make_fact_pat(curr_best_partner),
        'filter': lookup_info['filter'],
        'free_terms': curr_free_terms,
    }
    match_steps.append(best_match_step)
    for curr_guard_step in curr_guard_steps:
        match_steps.append(curr_guard_step)
    # Keep the chosen partner's terms bound for the recursive call.
    curr_best_partner.exist_bind_terms()
    return compute_optimal_matching(
        fact_stores,
        partners[:curr_index] + partners[(curr_index + 1):],
        curr_guards_rest,
        match_steps)
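# Illustrative sketch (not part of the interpreter): how the scoring heuristic
# above ranks two candidate partners. The counts below are assumed values,
# used only to show the arithmetic of
#   num_of_joins * 10 + guard_count + num_of_const - num_of_free.
def _score(num_of_joins, guard_count, num_of_const, num_of_free):
    return num_of_joins * 10 + guard_count + num_of_const - num_of_free

# A partner sharing two already-bound variables outranks one that carries two
# constants but would introduce three new free variables:
assert _score(2, 1, 0, 1) == 20   # 2 joins, 1 schedulable guard, 1 free term
assert _score(0, 0, 2, 3) == -1   # no joins, 2 constants, 3 free terms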
def interpret_rule(rule, fact_stores):
    # Compile one rule into a list of occurrences, one per head constraint.
    # Simplified heads are tagged (False, head); propagated heads (True, head).
    rule_entries = map(lambda s: (False, s), rule.simplify()) + \
                   map(lambda p: (True, p), rule.propagate())
    rule_lhs = map(lambda t: t[1], rule_entries)
    guards = rule.guards()
    variables = rule.get_vars()
    has_no_simplify = len(rule.simplify()) == 0
    rule_name = get_all_rule_classes()[rule.rule_id].__name__
    interp_rule = []
    for i in xrange(0, len(rule_entries)):
        propagated, rule_entry = rule_entries[i]
        # All other heads become partners to be matched against the stores.
        partners = rule_entries[:i] + rule_entries[(i + 1):]
        rule_entry.exist_bind_terms()
        early_guard_steps, guards_rest = schedule_guards(guards)
        match_steps = compute_optimal_matching(fact_stores, partners,
                                               guards_rest, early_guard_steps)
        # Reset variable bindings before compiling the next occurrence.
        for var in variables:
            var.unbind()
        interp_rule.append({
            'rule_id': rule.rule_id,
            'occ_id': i,
            'propagated': propagated,
            'entry': make_fact_pat(rule_entry),
            'match_steps': match_steps,
            'has_no_simplify': has_no_simplify,
            'rhs': rule.consequents,
            'exist_locs': rule.get_exist_locs,
            'has_exist_locs': len(rule.exist_locations) > 0,
        })
    return interp_rule
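# Illustrative sketch (not part of the interpreter): one way the two functions
# could be wired together to compile every registered rule into a single
# occurrence table. The shape of fact_stores (a dict from sym_id to a store
# exposing generate_lookup) follows compute_optimal_matching above; the
# assumption that rule classes are nullary constructors is hypothetical.
def compile_program(fact_stores):
    occurrence_table = []
    for rule_class in get_all_rule_classes():
        rule = rule_class()  # assumed: each rule class can be instantiated with no arguments
        occurrence_table += interpret_rule(rule, fact_stores)
    return occurrence_table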