def test_hill_climbing():
    """Exercise hill_climbing on easy, goal-directed, and plateau problems."""
    initial = 0
    goal = 10
    limits = (-goal, goal)

    # Tree search on a problem whose cost decreases toward the lower
    # limit; the climber should finish near limits[0].
    p = AnnotatedProblem(
        EasyProblem(initial, initial_cost=initial, extra=limits))
    sol = next(hill_climbing(p, graph=False))
    assert abs(sol.state_node.state - limits[0]) <= 0.1

    # Goal-directed problem: the search should converge on the goal value.
    p2 = HillProblem(initial, goal=goal, initial_cost=initial, extra=limits)
    sol = next(hill_climbing(p2))
    assert abs(sol.state_node.state - goal) <= 0.1

    # Starting at the goal: nothing is expanded, exactly one goal test runs.
    p3 = AnnotatedProblem(
        HillProblem(initial, goal=initial, initial_cost=initial,
                    extra=limits))
    sol = next(hill_climbing(p3))
    assert p3.nodes_expanded == 0
    assert p3.goal_tests == 1
    assert abs(sol.state_node.state - initial) <= 0.1

    # Plateau problems: random restarts should still produce solutions.
    p4 = AnnotatedProblem(PlateauProblem(initial))
    sol = next(hill_climbing(p4, random_restarts=3))

    p5 = AnnotatedProblem(PlateauProblemWithGoal(initial))
    sols = list(hill_climbing(p5, random_restarts=3))
def test_hill_climbing():
    """Hill climbing on EasyProblem should descend to the lower limit."""
    start = 0
    goal = 10
    bounds = (-goal, goal)

    annotated = AnnotatedProblem(
        EasyProblem(start, initial_cost=start, extra=bounds))
    solution = next(hill_climbing(annotated, graph_search=False))
    assert abs(solution.state - bounds[0]) <= 0.1
def flat_match(target, base, initial_mapping=None):
    """
    Given a base (usually concept) and target (instance or concept av table)
    this function returns a mapping that can be used to rename components in
    the target. Search is used to find a mapping that maximizes the expected
    number of correct guesses in the concept after incorporating the
    instance.

    The current approach is to refine the initially provided mapping using a
    local hill-climbing search. If no initial mapping is provided then one is
    generated using the Munkres / Hungarian matching on object-to-object
    assignment (no relations). This initialization approach is polynomial in
    the size of the base.

    :param target: An instance or concept.av_counts object to be mapped to
        the base concept.
    :type target: :ref:`Instance<instance-rep>` or av_counts obj from concept
    :param base: A concept to map the target to
    :type base: TrestleNode
    :param initial_mapping: An initial mapping to seed the local search
    :type initial_mapping: A mapping dict
    :return: a mapping for renaming components in the instance.
    :rtype: dict
    """
    inames = frozenset(get_component_names(target))
    cnames = frozenset(get_component_names(base.av_counts))

    # Nothing to match when either side has no components.
    if not inames or not cnames:
        return {}
    if inames & cnames:
        raise Exception("Objects in target and base must not collide. "
                        "Consider running NameStandardizer first.")

    # TODO consider flipping target and base when one is larger than the
    # other.

    if initial_mapping is None:
        # Seed with the polynomial-time Hungarian assignment.
        initial_mapping = hungarian_mapping(inames, cnames, target, base)
    else:
        # Keep only pairs that are still valid names on both sides.
        initial_mapping = frozenset((a, v) for a, v in initial_mapping
                                    if a in inames and v in cnames)

    unmapped = cnames - frozenset(dict(initial_mapping).values())

    initial_cost = mapping_cost(initial_mapping, target, base)
    op_problem = StructureMappingOptimizationProblem(
        (initial_mapping, unmapped),
        initial_cost=initial_cost,
        extra=(target, base))

    solution = next(hill_climbing(op_problem))
    return dict(solution.state_node.state[0])
def flat_match(target, base, initial_mapping=None):
    """
    Given a base (usually concept) and target (instance or concept av table)
    this function returns a mapping that can be used to rename components in
    the target. Search is used to find a mapping that maximizes the expected
    number of correct guesses in the concept after incorporating the
    instance.

    The current approach is to refine the initially provided mapping using a
    local hill-climbing search. If no initial mapping is provided then one is
    generated using the Munkres / Hungarian matching on object-to-object
    assignment (no relations). This initialization approach is polynomial in
    the size of the base.

    :param target: An instance or concept.av_counts object to be mapped to
        the base concept.
    :type target: :ref:`Instance<instance-rep>` or av_counts obj from concept
    :param base: A concept to map the target to
    :type base: TrestleNode
    :param initial_mapping: An initial mapping to seed the local search
    :type initial_mapping: A mapping dict
    :return: a mapping for renaming components in the instance.
    :rtype: dict
    """
    inames = frozenset(get_component_names(target))
    cnames = frozenset(get_component_names(base.av_counts))

    # With no components on either side there is nothing to rename.
    if not inames or not cnames:
        return {}
    if inames & cnames:
        raise Exception("Objects in target and base must not collide. "
                        "Consider running NameStandardizer first.")

    # TODO consider flipping target and base when one is larger than the
    # other.

    if initial_mapping is None:
        initial_mapping = hungarian_mapping(inames, cnames, target, base)
    else:
        # Drop stale pairs whose names no longer appear on both sides.
        initial_mapping = frozenset((a, v) for a, v in initial_mapping
                                    if a in inames and v in cnames)

    unmapped = cnames - frozenset(dict(initial_mapping).values())

    initial_cost = mapping_cost(initial_mapping, target, base)
    op_problem = StructureMappingOptProblem(
        (initial_mapping, unmapped),
        initial_cost=initial_cost,
        extra=(target, base))

    solution = next(hill_climbing(op_problem))
    return dict(solution.state_node.state[0])
def steepest_hill(problem):
    """Run hill climbing on *problem* with its default settings."""
    result = hill_climbing(problem)
    return result
def hill(problem):
    """Convenience alias: hill climbing on *problem* with defaults."""
    climb = hill_climbing(problem)
    return climb
def steepest_hill(problem):
    """Hill climbing on *problem* that stops once cost reaches zero."""
    result = hill_climbing(problem, cost_limit=0)
    return result
def local_generalization_search(h, x):
    """
    Try to find a maximal partial match between h and x.

    Each pair of terms in h and x is antiunified, and the Hungarian
    algorithm computes the best bipartite match between the literals of h
    and x; this constitutes an initial match. Hill climbing is then
    performed over the space of all possible flips of the bipartite
    matches. The Hungarian algorithm does not consider the benefit of
    creating antiunifications that share variables, and the local search
    is an effort to overcome this limitation.

    Note: this is not guaranteed to return a maximal partial match, but it
    is probably good enough.
    """
    # NOTE: an ad hoc scheme for handling implicit negations (augmenting x
    # with ('not', ...) literals derived from h) was sketched here and
    # removed; see version history if it needs to be revived.

    au_table = build_antiunify_table(h, x)

    # Below is a guided search for a single specialization. It doesn't
    # produce the true antiunification, just a single possible
    # specialization.

    # Ensure h is the smaller side so every literal of h can be matched.
    if len(x) < len(h):
        h, x = x, h

    h = tuple(h)
    x = tuple(x)

    # Initial bipartite assignment and its reward.
    m = hungarian_mapping(h, x, au_table)
    r = evaluate_reward(m, h, x, au_table)
    const_count, var_counts = get_counts(m, h, x, au_table)

    problem = LocalAntiUnifyProblem(
        m,
        initial_cost=-r,
        extra=(h, x,
               [len(h) + i for i in range(len(x) - len(h))],
               au_table, const_count, var_counts))
    sol = next(hill_climbing(problem))

    # Rebuild the hypothesis from the final assignment, skipping literal
    # pairs whose antiunification collapsed to a frozenset (no single
    # generalization).
    new_h = []
    for idx, lit in enumerate(h):
        generalized = au_table[frozenset([lit, x[sol.state[idx]]])]
        if not isinstance(generalized, frozenset):
            new_h.append(generalized)

    return variablize_hypothesis(new_h)