Example #1
0
 def successors(self, node):
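     """
     Yields a successor node for each operator binding that matches the
     current state, applying the operator's delete and add effects.
     """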
     index = build_index(node.state)
     for o in self.operators:
         # TODO check that operators cannot have unbound variables in
         # effects.
         for m in pattern_match(o.conditions, index):
             dels = frozenset(
                 execute_functions(e, m)
                 if is_functional_term(e) else subst(m, e)
                 for e in o.del_effects)
             adds = frozenset(
                 execute_functions(e, m)
                 if is_functional_term(e) else subst(m, e)
                 for e in o.add_effects)
             new_state = node.state.difference(dels).union(adds)
             yield Node(new_state, node, (o, m), node.cost() + o.cost)
Example #2
0
    def swap_unnamed(self, o1, o2, mapping, unmapped_cnames, target, base,
                     node):
        """
        Returns the child node generated from assigning an unmapped component
        object to one of the instance objects.
        """
        new_mapping = {a: mapping[a] for a in mapping}
        new_unmapped_cnames = set(unmapped_cnames)
        new_unmapped_cnames.remove(o2)
        if mapping[o1] != o1:
            new_unmapped_cnames.add(new_mapping[o1])
        new_mapping[o1] = o2
        new_mapping = frozenset(new_mapping.items())

        return Node((new_mapping, frozenset(new_unmapped_cnames)),
                    extra=node.extra)
Example #3
0
    def successors(self, node):
        """
        Successor nodes are possible next pattern elements that can be unified.
        """
        sub = dict(node.state)
        terms, f_terms, index = node.extra

        # Figure out best term to match (only need to choose 1 and don't need
        # to backtrack over choice).
        p_terms = [(len(index[subst(sub, t)])
                    if subst(sub, t) in index else 0,
                    len(necessary), random(), t)
                   for necessary in terms
                   if meets_requirements(necessary, sub)
                   for t in terms[necessary] if not is_negated_term(t)]
        if len(p_terms) == 0:
            return

        p_terms.sort()
        term = p_terms[0][3]

        # TODO need to figure out how to handle positive terms with
        # functionals.

        # Substituting first appears to be correct and is faster.
        key = index_key(subst(sub, term))
        # key = index_key(term)
        if key not in index:
            return

        facts = list(index[key])

        # We could prefer the fact that yields the least constraining
        # substitutions here, but it is unclear whether that would be worth
        # the extra time.
        shuffle(facts)

        for fact in facts:
            # TODO what do we do when the term contains a functional?
            new_sub = unify(term, fact, sub)
            if new_sub is None:
                continue

            new_terms = update_terms(terms, f_terms, new_sub, index)
            if new_terms is None:
                continue

            yield Node(frozenset(new_sub.items()), node, None, 0,
                       (new_terms, f_terms, index))
Example #4
0
    def successors(self, node):
        """
        Generates a successor for each pairwise swap of row assignments.
        """
        for r1 in range(len(node.state.state)):
            c1 = node.state.state[r1]

            for r2 in range(r1 + 1, len(node.state.state)):
                c2 = node.state.state[r2]

                new_state = node.state.copy()
                ns = [i for i in node.state.state]
                ns[r1] = c2
                ns[r2] = c1
                new_state.state = tuple(ns)
                cost = new_state.num_conflicts()
                yield Node(new_state, node, ('swap', (r1, c1), (r2, c2)), cost)
Example #5
0
 def random_successor(self, node):
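     """
     Returns a random successor generated by flipping one weighted-random
     position of the clause vector to a different literal.
     """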
     clause_vector = node.state
     possible_literals, flip_weights, constraints, pset, nset = node.extra
     index = weighted_choice(flip_weights)
     new_j = choice([
         j for j in range(len(possible_literals[index]))
         if j != clause_vector[index]
     ])
     new_clause_vector = tuple(new_j if i == index else j
                               for i, j in enumerate(clause_vector))
     score = clause_vector_score(new_clause_vector, possible_literals,
                                 constraints, pset, nset)
     return Node(new_clause_vector,
                 None,
                 None,
                 -1 * score,
                 extra=node.extra)
Example #6
0
    def swap_two(self, o1, o2, mapping, unmapped_cnames, target, base, node):
        """
        Returns the child node generated from swapping two mappings.
        """
        new_mapping = {a: mapping[a] for a in mapping}

        if mapping[o2] == o2:
            new_mapping[o1] = o1
        else:
            new_mapping[o1] = mapping[o2]

        if mapping[o1] == o1:
            new_mapping[o2] = o2
        else:
            new_mapping[o2] = mapping[o1]

        new_mapping = frozenset(new_mapping.items())
        return Node((new_mapping, unmapped_cnames), extra=node.extra)
Example #7
0
    def successors(self, node):
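        """
        Yields successors generated by anti-unifying a hypothesis literal with
        an example literal and adding the result to the hypothesis.
        """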
        h = node.state
        old_h_literals, e_literals, au_table, prev_vars = node.extra

        for ohl in old_h_literals:
            for el in e_literals:
                new_l = au_table[frozenset([ohl, el])]
                if isinstance(new_l, frozenset):
                    continue

                ele_count = self.count_elements(new_l, prev_vars)
                # au_cost = self.antiunify_cost(new_l, prev_vars)
                new_vars = self.get_vars(new_l)

                new_extra = (old_h_literals.difference([ohl]),
                             e_literals.difference([el]), au_table,
                             prev_vars.union(new_vars))
                yield Node(h.union([new_l]), node, ('antiunify', ohl, el),
                           node.cost() - ele_count, new_extra)
Example #8
0
    def successors(self, node):
        """
        An iterator that yields the successors of the provided node.
        """
        state = node.state
        costs, unassigned = node.extra

        for i, v in enumerate(state):
            if v is None:
                for u in unassigned:
                    new_state = tuple(
                        [u if i == j else k for j, k in enumerate(state)])
                    new_unassigned = tuple([k for k in unassigned if k != u])

                    c = node.cost() + costs[i][u]
                    yield Node(new_state,
                               node, (i, u),
                               c,
                               extra=(costs, new_unassigned))
Example #9
0
    def random_successor(self, node):
        """
        Returns a random successor generated by swapping the assignments of
        two randomly chosen rows.
        """
        rows = [i for i in range(node.state.n)]
        shuffle(rows)

        r1 = rows[0]
        c1 = node.state.state[r1]
        r2 = rows[1]
        c2 = node.state.state[r2]

        new_state = node.state.copy()
        ns = [i for i in node.state.state]
        ns[r1] = c2
        ns[r2] = c1
        new_state.state = tuple(ns)
        cost = new_state.num_conflicts()
        return Node(new_state, node, ('swap', (r1, c1), (r2, c2)), cost)
Example #10
0
    def successors(self, node):
        """
        Generates successor states by swapping each vertex in the partition
        with each vertex outside of it.
        """
        p = node.state
        V, E = node.extra
        not_p = V - p

        for pV in p:
            for not_pV in not_p:
                new_p = set(p)
                new_p.remove(pV)
                new_p.add(not_pV)

                yield Node(frozenset(new_p),
                           node,
                           node_cost=cutsize(E, new_p),
                           extra=node.extra)
Example #11
0
    def successors(self, node):
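        """
        Yields a successor for every single-position change to the clause
        vector, scored against the positive and negative example sets.
        """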
        clause_vector = node.state
        possible_literals, flip_weights, constraints, pset, nset = node.extra

        for index in possible_literals:
            for new_j in range(len(possible_literals[index])):
                if new_j == clause_vector[index]:
                    continue

                new_clause_vector = tuple(new_j if i == index else j
                                          for i, j in enumerate(clause_vector))
                score = clause_vector_score(new_clause_vector,
                                            possible_literals, constraints,
                                            pset, nset)
                yield Node(new_clause_vector,
                           None,
                           None,
                           -1 * score,
                           extra=node.extra)
Example #12
0
    def successors(self, node):
        """
        Generates successor states by swapping each pair of row-to-column
        assignments.
        """
        costs = node.extra[0]

        for p in combinations(node.state, 2):
            new_cost = node.cost()
            new_cost -= costs[p[0]][node.state[p[0]]]
            new_cost -= costs[p[1]][node.state[p[1]]]
            new_cost += costs[p[0]][node.state[p[1]]]
            new_cost += costs[p[1]][node.state[p[0]]]

            state = list(node.state)
            state[p[0]], state[p[1]] = state[p[1]], state[p[0]]

            yield Node(tuple(state), node, p, new_cost, extra=node.extra)
Example #13
0
    def successors(self, node):
        """
        Successor nodes are possible next pattern elements that can be unified.
        """
        sub = dict(node.state)
        terms, f_terms, index = node.extra

        # Figure out best term to match (only need to choose 1 and don't need
        # to backtrack over choice).

        for term in [
                t for necessary in terms if meets_requirements(necessary, sub)
                for t in terms[necessary] if not is_negated_term(t)
        ]:

            # Substituting first appears to be correct and is faster.
            key = index_key(subst(sub, term))
            # key = index_key(term)
            if key not in index:
                continue

            facts = [f for f in index[key]]
            # shuffle(facts)

            for fact in facts:
                new_sub = unify(term, fact, sub)
                if new_sub is None:
                    continue

                new_terms = update_terms(terms,
                                         f_terms,
                                         new_sub,
                                         index,
                                         partial=True)
                if new_terms is None:
                    continue

                yield Node(frozenset(new_sub.items()), node, None, 0,
                           (new_terms, f_terms, index))
Example #14
0
    def random_successor(self, node):
        """
        A function that returns a random successor of the current node. This is
        used by the simulated annealing function, so it doesn't have to expand
        all successors.

        A successor is generated by swapping a randomly chosen vertex in the
        partition with a randomly chosen vertex outside of it.
        """
        p = set(node.state)
        V, E = node.extra
        not_p = V - p

        pV = choice(list(p))
        not_pV = choice(list(not_p))

        p.remove(pV)
        p.add(not_pV)

        return Node(frozenset(p),
                    node,
                    node_cost=cutsize(E, p),
                    extra=node.extra)
Example #15
0
    def gen_generalizations(self, node):
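        """
        Yields generalizations of the current hypothesis, either by removing a
        literal or by replacing one of its constants with a variable.
        """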
        h = node.state
        (constraints, c_length, p_covered, p_uncovered, n_covered, n_uncovered,
         gensym) = node.extra

        # remove literals
        for literal in h:
            removable = True
            for ele in literal[1:]:
                if not is_variable(ele):
                    removable = False
                    break
                if (count_occurances(ele, h) > 1):
                    removable = False
                    break

            if removable:
                new_h = frozenset(x for x in h if x != literal)
                new_pc_subset, new_nc_subset = test_coverage(
                    new_h, constraints, p_uncovered, n_uncovered)
                new_p_covered = p_covered + new_pc_subset
                new_n_covered = n_covered + new_nc_subset
                new_p_uncovered = [
                    p for p in p_uncovered if p not in new_pc_subset
                ]
                new_n_uncovered = [
                    n for n in n_uncovered if n not in new_nc_subset
                ]
                new_c_length = c_length - 1
                score = self.score(len(new_p_covered), len(new_p_uncovered),
                                   len(new_n_covered), len(new_n_uncovered),
                                   new_c_length)

                yield Node(new_h,
                           None,
                           None,
                           -1 * score,
                           extra=(constraints, new_c_length, new_p_covered,
                                  new_p_uncovered, new_n_covered,
                                  new_n_uncovered, gensym))

        # replace constants with variables.
        for literal in h:
            for new_l in get_variablizations(literal, gensym):
                new_h = frozenset([x if x != literal else new_l for x in h])
                new_pc_subset, new_nc_subset = test_coverage(
                    new_h, constraints, p_uncovered, n_uncovered)
                new_p_covered = p_covered + new_pc_subset
                new_n_covered = n_covered + new_nc_subset
                new_p_uncovered = [
                    p for p in p_uncovered if p not in new_pc_subset
                ]
                new_n_uncovered = [
                    n for n in n_uncovered if n not in new_nc_subset
                ]
                new_c_length = c_length - 1
                score = self.score(len(new_p_covered), len(new_p_uncovered),
                                   len(new_n_covered), len(new_n_uncovered),
                                   new_c_length)

                yield Node(new_h,
                           None,
                           None,
                           -1 * score,
                           extra=(constraints, new_c_length, new_p_covered,
                                  new_p_uncovered, new_n_covered,
                                  new_n_uncovered, gensym))
Example #16
0
    def random_successor(self, node):
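        """
        Returns a random successor generated by flipping one weighted-random
        position of the clause vector to a different literal, rescored against
        the positive and negative example sets.
        """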
        clause_vector = node.state
        # print("EXPANDING", clause_vector, node.cost())

        (possible_literals, flip_weights, constraints, pset, nset, omissions,
         comissions) = node.extra

        # omissions += 1.5
        # comissions += 1.5

        # print()
        # print("OMISSIONS", omissions)
        # print("COMISSIONS", comissions)

        # if omissions > comissions:
        #     print("LEN 1", 1 / ((comissions / omissions) *
        #                   (max_literal_length
        #                                                     - 1 + 0.1)))
        #     print("LEN 3", 1 / ((comissions / omissions) *
        #                   (max_literal_length               - 3 + 0.1)))

        # if comissions > omissions:
        #     print("LEN 1", 1 / ((comissions / omissions) * (1 + 0.1)))
        #     print("LEN 3", 1 / ((comissions / omissions) * (3 + 0.1)))

        # gen_bias = (omissions) / (omissions + comissions)
        # gen_bias = (omissions - comissions) / max(omissions, comissions)
        # print("Gen Bias", omissions / (omissions + comissions))

        flip_weights = [
            (-1 * (2 + comissions) *
             count_elements(possible_literals[i][clause_vector[i]], {}), i)
            if comissions > 0 else
            ((2 + omissions) *
             count_elements(possible_literals[i][clause_vector[i]], {}), i)
            for i in range(len(clause_vector))
        ]

        smallest = min([w for w, i in flip_weights])
        flip_weights = [(0.001 + (w - smallest), i) for w, i in flip_weights]

        index = weighted_choice(flip_weights)

        # Choose a new literal for the selected position, different from the
        # current one.
        new_j = choice([
            j for j in range(len(possible_literals[index]))
            if j != clause_vector[index]
        ])

        new_clause_vector = tuple(new_j if i == index else j
                                  for i, j in enumerate(clause_vector))
        # print("SCORING")
        score, om, cm = clause_vector_score(new_clause_vector,
                                            possible_literals, constraints,
                                            pset, nset)
        # print("Done - Score =", score)
        print("Score = %0.4f, Omissions = %i, Comissions = %i" %
              (score, om, cm))
        return Node(new_clause_vector,
                    None,
                    None,
                    -1 * score,
                    extra=(possible_literals, flip_weights, constraints, pset,
                           nset, om, cm))
Example #17
0
 def successors(self, node):
     yield Node(node.state, node, 'expand', node.cost() + 1)
     yield Node(node.state, node, 'expand', node.cost() + 1)
Example #18
0
 def random_successor(self, node):
     v = -11
     while v <= node.extra[0] or v >= node.extra[1]:
         v = node.cost() + normalvariate(0, 1)
     return Node(v, node, 'expand', v, extra=node.extra)
Example #19
0
    def successors(self, node):
        if node.state < 15:
            yield Node(node.state + 1, node, 'expand', node.cost() + 1)

        if node.state > -15:
            yield Node(node.state - 1, node, 'expand', node.cost() + 1)
Example #20
0
    def gen_specializations(self, node):
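        """
        Yields specializations of the current hypothesis, either by binding a
        variable to its value in a covered positive example or by adding a new
        literal derived from that example.
        """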
        h = node.state
        (constraints, c_length, p_covered, p_uncovered, n_covered, n_uncovered,
         gensym) = node.extra

        if len(p_covered) == 0:
            return

        p, pm = choice(p_covered)
        p_index = build_index(p)

        operator = Operator(tuple(('Rule', )), h.union(constraints), [])

        found = False
        for m in operator.match(p_index, initial_mapping=pm):
            reverse_m = {m[a]: a for a in m}
            pos_partial = set([rename(reverse_m, x) for x in p])
            found = True
            break

        if not found:
            return

        # specialize current variables using pset?
        for var in m:
            limited_m = {var: m[var]}
            new_h = frozenset([subst(limited_m, l) for l in h])

            new_p_subset, new_n_subset = test_coverage(new_h, constraints,
                                                       p_covered, n_covered)
            new_p_covered = new_p_subset
            new_p_uncovered = p_uncovered + [
                p for p in p_covered if p not in new_p_subset
            ]
            new_n_covered = new_n_subset
            new_n_uncovered = n_uncovered + [
                n for n in n_covered if n not in new_n_subset
            ]
            new_c_length = c_length + 1
            score = self.score(len(new_p_covered), len(new_p_uncovered),
                               len(new_n_covered), len(new_n_uncovered),
                               new_c_length)

            yield Node(new_h,
                       None,
                       None,
                       -1 * score,
                       extra=(constraints, new_c_length, new_p_covered,
                              new_p_uncovered, new_n_covered, new_n_uncovered,
                              gensym))

        # add new literals from pset
        for l in pos_partial:
            if l not in h:
                l = generate_literal(l[0], len(l) - 1, gensym)
                # l = generalize_literal(l, gensym)

                new_h = h.union([l])

                new_p_subset, new_n_subset = test_coverage(
                    new_h, constraints, p_covered, n_covered)
                new_p_covered = new_p_subset
                new_p_uncovered = p_uncovered + [
                    p for p in p_covered if p not in new_p_subset
                ]
                new_n_covered = new_n_subset
                new_n_uncovered = n_uncovered + [
                    n for n in n_covered if n not in new_n_subset
                ]
                new_c_length = c_length + 1
                score = self.score(len(new_p_covered), len(new_p_uncovered),
                                   len(new_n_covered), len(new_n_uncovered),
                                   new_c_length)

                yield Node(new_h,
                           None,
                           None,
                           -1 * score,
                           extra=(constraints, new_c_length, new_p_covered,
                                  new_p_uncovered, new_n_covered,
                                  new_n_uncovered, gensym))
Example #21
0
    def successors(self, node):
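        """
        Yields specializations of the current hypothesis, either by binding a
        variable to its value in a matched positive example or by adding a
        generalized literal that appears in the positive match but not in the
        negative one.
        """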
        h = node.state
        # print("EXPANDING H", h)
        args, constraints, pset, neg, neg_mapping, gensym = node.extra

        all_args = set(s for x in h.union(constraints)
                       for s in extract_strings(x) if is_variable(s))

        if len(pset) == 0:
            return

        p, pm = choice(pset)
        p_index = build_index(p)

        operator = Operator(tuple(('Rule', ) + tuple(all_args)),
                            h.union(constraints), [])

        # operator = Operator(tuple(('Rule',) + args), h, [])

        found = False
        for m in operator.match(p_index, initial_mapping=pm):
            reverse_m = {m[a]: a for a in m}
            pos_partial = set([rename(reverse_m, x) for x in p])
            found = True
            break

        if not found:
            return

        n_index = build_index(neg)
        found = False
        for nm in operator.match(n_index, initial_mapping=neg_mapping):
            # print(nm)
            reverse_nm = {nm[a]: a for a in nm}
            neg_partial = set([rename(reverse_nm, x) for x in neg])
            found = True
            break

        if not found:
            return

        unique_pos = pos_partial - neg_partial
        unique_neg = neg_partial - pos_partial

        # print("UNIQUE POS", unique_pos)
        # print("UNIQUE NEG", unique_neg)

        # Yield all minimum specializations of current vars
        for a in m:
            # TODO make sure m[a] is a minimum specialization
            sub_m = {a: m[a]}
            new_h = frozenset([subst(sub_m, ele) for ele in h])
            # print("SPECIALIZATION", new_h, sub_m)
            # print()
            yield Node(new_h, node, ('specializing', (a, m[a])),
                       node.cost() + 1, node.extra)

        # Add Negations for all neg specializations
        # for a in nm:
        #     sub_nm = {a: nm[a]}
        #     new_nh = set()
        #     for ele in h:
        #         new = subst(sub_nm, ele)
        #         if new != ele and new not in h:
        #             new_nh.add(('not', new))
        #     new_h = h.union(new_nh)
        #     print("NEGATION SPECIALIZATION", new_nh)
        #     yield Node(new_h, node, ('negation specialization', (a, nm[a])),
        #                node.cost()+1, node.extra)

        # if current vars then add all relations that include current vars
        if len(all_args) > 0:
            added = set()
            for literal in unique_pos:
                if literal in h or literal in constraints:
                    continue
                args = set(s for s in extract_strings(literal)
                           if is_variable(s))
                if len(args.intersection(all_args)) > 0:
                    key = (literal[0], ) + tuple(
                        ele if is_variable(ele) else '?'
                        for ele in literal[1:])
                    if key in added:
                        continue
                    added.add(key)

                    literal = generalize_literal(literal, gensym)
                    new_h = h.union(frozenset([literal]))
                    # print("ADD CURRENT", new_h)
                    # print()
                    yield Node(new_h, node, ('adding current', literal),
                               node.cost() + 1, node.extra)

        else:
            added = set()
            for literal in unique_pos:
                if literal in h or literal in constraints:
                    continue
                if literal[0] in added:
                    continue
                added.add(literal[0])
                literal = generalize_literal(literal, gensym)
                new_h = h.union(frozenset([literal]))
                # print("ADD NEW", new_h)
                # print()
                yield Node(new_h, node, ('adding', literal),
                           node.cost() + 1, node.extra)
Example #22
0
 def random_node(self):
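     """
     Returns a node built from a randomized copy of the initial state, costed
     by its number of conflicts.
     """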
     nq_state = self.initial.state.copy()
     nq_state.randomize()
     cost = nq_state.num_conflicts()
     return Node(nq_state, None, None, cost)
Example #23
0
 def random_successor(self, node):
     v = node.state + normalvariate(0, 1)
     return Node(v, node, 'expand', extra=node.extra)
Example #24
0
 def random_node(self):
     v = choice(list(i for i in range(11)))
     v = choice([v, 12])
     print("RANDOM NODE %s" % v)
     return Node(v)
Example #25
0
 def successors(self, node):
     for i in range(100):
         v = node.cost() + normalvariate(0, 1)
         if v < node.extra[1] and v > node.extra[0]:
             yield Node(v, node, 'expand', v, extra=node.extra)
Example #26
0
 def successors(self, node):
     for i in range(100):
         v = node.state + normalvariate(0, 1)
         yield Node(v, node, 'expand', extra=node.extra)
Example #27
0
    def successors(self, node):
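        """
        Yields successors of the current mapping by swapping the assignments
        of two positions, or by replacing a position's assignment with an
        unassigned element, updating the cost incrementally.
        """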
        m = node.state
        h, example, unassigned, au_table, const_count, var_counts = node.extra

        for a, b in combinations(range(len(h)), 2):
            new_var_counts = dict(var_counts)
            new_m = tuple(m[b] if i == a else m[a] if i == b else v
                          for i, v in enumerate(m))
            old_a = au_table[frozenset([h[a], example[m[a]]])]
            old_b = au_table[frozenset([h[b], example[m[b]]])]
            new_a = au_table[frozenset([h[a], example[new_m[a]]])]
            new_b = au_table[frozenset([h[b], example[new_m[b]]])]

            var_old_a = {}
            if isinstance(old_a, frozenset):
                c_old_a = 0
            else:
                c_old_a = count_term(old_a, var_old_a)
            var_new_a = {}
            if isinstance(new_a, frozenset):
                c_new_a = 0
            else:
                c_new_a = count_term(new_a, var_new_a)

            var_old_b = {}
            if isinstance(old_b, frozenset):
                c_old_b = 0
            else:
                c_old_b = count_term(old_b, var_old_b)
            var_new_b = {}
            if isinstance(new_b, frozenset):
                c_new_b = 0
            else:
                c_new_b = count_term(new_b, var_new_b)

            new_reward = -1 * node.cost()
            new_reward -= c_old_a
            new_reward -= c_old_b
            new_reward += c_new_a
            new_reward += c_new_b

            for var in var_old_a:
                if var in new_var_counts and new_var_counts[var] > 1:
                    new_reward -= 1
                    new_var_counts[var] -= 1
                if var in new_var_counts and new_var_counts[var] == 1:
                    del new_var_counts[var]

            for var in var_old_b:
                if var in new_var_counts and new_var_counts[var] > 1:
                    new_reward -= 1
                    new_var_counts[var] -= 1
                if var in new_var_counts and new_var_counts[var] == 1:
                    del new_var_counts[var]

            for var in var_new_a:
                if var in new_var_counts:
                    new_reward += 1
                    new_var_counts[var] += 1
                else:
                    new_var_counts[var] = 1

            for var in var_new_b:
                if var in new_var_counts:
                    new_reward += 1
                    new_var_counts[var] += 1
                else:
                    new_var_counts[var] = 1

            # new_cost = -1 * evaluate_reward(new_m, h, example, au_table)
            new_cost = -1 * new_reward
            yield Node(new_m, node, ('swap', a, b), new_cost,
                       (h, example, unassigned, au_table, const_count,
                        new_var_counts))

        for a in range(len(h)):
            for ua in unassigned:
                new_var_counts = dict(var_counts)
                new_m = tuple(ua if i == a else v for i, v in enumerate(m))
                new_unassigned = [m[a] if x == ua else x for x in unassigned]

                old_a = au_table[frozenset([h[a], example[m[a]]])]
                new_a = au_table[frozenset([h[a], example[new_m[a]]])]

                var_old_a = {}
                if isinstance(old_a, frozenset):
                    c_old_a = 0
                else:
                    c_old_a = count_term(old_a, var_old_a)
                var_new_a = {}
                if isinstance(new_a, frozenset):
                    c_new_a = 0
                else:
                    c_new_a = count_term(new_a, var_new_a)

                new_reward = -1 * node.cost()
                new_reward -= c_old_a
                new_reward += c_new_a

                for var in var_old_a:
                    if var in new_var_counts and new_var_counts[var] > 1:
                        new_reward -= 1
                        new_var_counts[var] -= 1
                    if var in new_var_counts and new_var_counts[var] == 1:
                        del new_var_counts[var]

                for var in var_new_a:
                    if var in new_var_counts:
                        new_reward += 1
                        new_var_counts[var] += 1
                    else:
                        new_var_counts[var] = 1

                new_cost = -1 * new_reward

                # new_cost = -1 * evaluate_reward(new_m, h, example, au_table)

                yield Node(new_m, node, ('swap unassigned', a, ua), new_cost,
                           (h, example, new_unassigned, au_table, const_count,
                            new_var_counts))