def __kahn_topological_sort(constraint_problem: ConstraintProblem) -> List[Variable]:
    variables = constraint_problem.get_unassigned_variables()
    directed_graph = defaultdict(set)
    for var in variables:
        for neighbor in constraint_problem.get_unassigned_neighbors(var):
            if var not in directed_graph[neighbor]:
                directed_graph[var].add(neighbor)

    in_degree = {var: 0 for var in variables}
    for var in variables:
        for neighbor in directed_graph[var]:
            in_degree[neighbor] += 1

    zero_in_degree_variables = set(filter(lambda variable: in_degree[variable] == 0, in_degree.keys()))
    topologically_sorted_unassigned_variables = list()
    while zero_in_degree_variables:
        var = zero_in_degree_variables.pop()
        topologically_sorted_unassigned_variables.append(var)
        for neighbor in directed_graph[var]:
            in_degree[neighbor] -= 1
            if in_degree[neighbor] == 0:
                zero_in_degree_variables.add(neighbor)

    if len(topologically_sorted_unassigned_variables) != len(variables):
        return list()
    return topologically_sorted_unassigned_variables
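
For reference, here is a minimal self-contained sketch of the same Kahn's algorithm on a plain adjacency dict; the function name and the toy graph are illustrative only, not part of the library.

from collections import deque
from typing import Dict, List, Set

def kahn_sort(graph: Dict[str, Set[str]]) -> List[str]:
    # Count incoming edges for every node.
    in_degree = {node: 0 for node in graph}
    for node in graph:
        for successor in graph[node]:
            in_degree[successor] += 1
    # Repeatedly emit nodes that have no remaining predecessors.
    ready = deque(node for node, degree in in_degree.items() if degree == 0)
    order = []
    while ready:
        node = ready.popleft()
        order.append(node)
        for successor in graph[node]:
            in_degree[successor] -= 1
            if in_degree[successor] == 0:
                ready.append(successor)
    # A cycle leaves some nodes unemitted; mirror the helper above by returning an empty list.
    return order if len(order) == len(graph) else []

# Toy DAG: a -> b -> d, a -> c -> d
print(kahn_sort({"a": {"b", "c"}, "b": {"d"}, "c": {"d"}, "d": set()}))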
Code example #2
def simulated_annealing(constraint_problem: ConstraintProblem, max_steps: int, temperature: float, cooling_rate: float,
                        generate_start_state: StartStateGenerator = generate_start_state_randomly,
                        generate_successor: SuccessorGenerator = alter_random_variable_value_pair,
                        calculate_score: ScoreCalculator = consistent_constraints_amount) -> ConstraintProblem:
    generate_start_state(constraint_problem)
    if constraint_problem.is_completely_consistently_assigned():
        return constraint_problem
    max_steps -= 1

    best_score = calculate_score(constraint_problem)
    best_score_problem = deepcopy(constraint_problem)
    for i in range(max_steps):
        if constraint_problem.is_completely_consistently_assigned():
            return constraint_problem

        curr_score = calculate_score(constraint_problem)
        if best_score < curr_score:
            best_score = curr_score
            best_score_problem = deepcopy(constraint_problem)

        successor = generate_successor(constraint_problem)
        successor_score = calculate_score(successor)
        delta = successor_score - curr_score
        if delta > 0 or uniform(0, 1) < exp(delta / temperature):
            constraint_problem = successor
        temperature *= cooling_rate

    return best_score_problem
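
To isolate the Metropolis acceptance rule used above (accept any improvement, otherwise accept with probability exp(delta / temperature)), here is a minimal self-contained sketch on a toy one-dimensional objective; the function and objective below are hypothetical and independent of the ConstraintProblem API.

from math import exp
from random import randint, uniform

def toy_simulated_annealing(start: int, max_steps: int = 1000,
                            temperature: float = 10.0, cooling_rate: float = 0.99) -> int:
    def score(x: int) -> int:  # toy objective: peaks at x == 42
        return -abs(x - 42)

    current = start
    for _ in range(max_steps):
        successor = current + randint(-3, 3)
        delta = score(successor) - score(current)
        # Always take improvements; sometimes take worsening moves, with a
        # probability that shrinks as the temperature cools.
        if delta > 0 or uniform(0, 1) < exp(delta / temperature):
            current = successor
        temperature *= cooling_rate
    return current

print(toy_simulated_annealing(start=0))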
def random_restart_first_choice_hill_climbing(
    constraint_problem: ConstraintProblem,
    max_restarts: int,
    max_steps: int,
    max_successors: int,
    generate_start_state: StartStateGenerator = generate_start_state_randomly,
    generate_successor: SuccessorGenerator = alter_random_variable_value_pair,
    calculate_score: ScoreCalculator = consistent_constraints_amount
) -> ConstraintProblem:
    generate_start_state(constraint_problem)
    if constraint_problem.is_completely_consistently_assigned():
        return constraint_problem
    max_restarts -= 1

    best_score = calculate_score(constraint_problem)
    best_score_problem = deepcopy(constraint_problem)
    for i in range(max_restarts):
        generate_start_state(constraint_problem)
        for j in range(max_steps):
            if constraint_problem.is_completely_consistently_assigned():
                return constraint_problem

            current_score = calculate_score(constraint_problem)
            if best_score < current_score:
                best_score = current_score
                best_score_problem = deepcopy(constraint_problem)

            for k in range(max_successors):
                successor = generate_successor(constraint_problem)
                successor_score = calculate_score(successor)
                if current_score < successor_score:
                    constraint_problem = successor
                    break

    return best_score_problem
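
The same restart / step / first-choice-successor structure can be seen in a minimal self-contained sketch on a toy one-dimensional objective; all names below are illustrative, not the library's API.

from random import randint

def toy_random_restart_hill_climbing(max_restarts: int = 10, max_steps: int = 200,
                                     max_successors: int = 5) -> int:
    def score(x: int) -> int:  # toy objective: peaks at x == 42
        return -abs(x - 42)

    best = randint(-100, 100)
    for _ in range(max_restarts):                 # random restart
        current = randint(-100, 100)
        for _ in range(max_steps):
            for _ in range(max_successors):       # first choice: take the first improving successor
                successor = current + randint(-3, 3)
                if score(successor) > score(current):
                    current = successor
                    break
            if score(current) > score(best):
                best = current
    return best

print(toy_random_restart_hill_climbing())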
def tree_csp_solver(constraint_problem: ConstraintProblem, with_history: bool = False) \
        -> Optional[Deque[Tuple[Variable, Any]]]:
    actions_history = None
    if with_history:
        actions_history = deque()
    topological_sorted_unassigned_variables = __kahn_topological_sort(constraint_problem)
    if not topological_sorted_unassigned_variables:
        return actions_history

    for i in range(len(topological_sorted_unassigned_variables) - 1, 0, -1):
        for value in topological_sorted_unassigned_variables[i].domain:
            topological_sorted_unassigned_variables[i].assign(value)
            if not constraint_problem.get_consistent_domain(topological_sorted_unassigned_variables[i-1]):
                topological_sorted_unassigned_variables[i].remove_from_domain(value)
            topological_sorted_unassigned_variables[i].unassign()
        if not topological_sorted_unassigned_variables[i].domain:
            return actions_history

    for variable in topological_sorted_unassigned_variables:
        consistent_domain = constraint_problem.get_consistent_domain(variable)
        if not consistent_domain:
            return actions_history
        value, *_ = consistent_domain
        variable.assign(value)
        if with_history:
            actions_history.append((variable, value))

    return actions_history
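
tree_csp_solver orders the variables topologically, prunes domains from the leaves toward the root, and then assigns greedily from the root outward. A minimal self-contained sketch of that two-pass idea on a toy chain-shaped CSP follows; the variable names and the "parent < child" constraint are illustrative only.

def toy_tree_csp(domains, parent_of, order, consistent):
    # Pass 1 (leaves toward root): drop parent values with no consistent child value.
    for child in reversed(order[1:]):
        parent = parent_of[child]
        domains[parent] = [pv for pv in domains[parent]
                           if any(consistent(pv, cv) for cv in domains[child])]
        if not domains[parent]:
            return None  # no solution
    # Pass 2 (root toward leaves): any consistent value will do.
    assignment = {order[0]: domains[order[0]][0]}
    for child in order[1:]:
        parent_value = assignment[parent_of[child]]
        assignment[child] = next(cv for cv in domains[child]
                                 if consistent(parent_value, cv))
    return assignment

domains = {"x1": [1, 2, 3], "x2": [1, 2, 3], "x3": [1, 2, 3]}
parent_of = {"x2": "x1", "x3": "x2"}
print(toy_tree_csp(domains, parent_of, ["x1", "x2", "x3"], lambda p, c: p < c))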
Code example #5
def __classic_backtrack(constraint_problem: ConstraintProblem,
                        inference: Optional[Inference] = None,
                        with_history: bool = False) -> bool:
    if constraint_problem.is_completely_assigned():
        if constraint_problem.is_consistently_assigned():
            return True
        return False

    selected_variable, *_ = constraint_problem.get_unassigned_variables()

    for value in selected_variable.domain:
        selected_variable.assign(value)
        if with_history:
            __actions_history.append((selected_variable, value))

        if inference is not None and not inference(constraint_problem,
                                                   selected_variable):
            selected_variable.unassign()
            if with_history:
                __actions_history.append((selected_variable, None))
            continue  # this value failed inference; try the next value instead of abandoning the rest of the domain

        if __classic_backtrack(constraint_problem, inference, with_history):
            return True

        selected_variable.unassign()
        if with_history:
            __actions_history.append((selected_variable, None))

    return False
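
A minimal self-contained version of the same chronological backtracking, here on a toy map-coloring problem with dict-based domains; everything below is illustrative and independent of the library's ConstraintProblem API.

def toy_backtrack(assignment, variables, domains, neighbors):
    if len(assignment) == len(variables):
        return assignment
    variable = next(v for v in variables if v not in assignment)
    for value in domains[variable]:
        # Consistent if no already-assigned neighbor holds the same color.
        if all(assignment.get(n) != value for n in neighbors[variable]):
            assignment[variable] = value
            result = toy_backtrack(assignment, variables, domains, neighbors)
            if result is not None:
                return result
            del assignment[variable]  # undo and try the next value
    return None

variables = ["WA", "NT", "SA"]
domains = {v: ["red", "green", "blue"] for v in variables}
neighbors = {"WA": ["NT", "SA"], "NT": ["WA", "SA"], "SA": ["WA", "NT"]}
print(toy_backtrack({}, variables, domains, neighbors))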
def __calculate_weight(constraint_problem: ConstraintProblem,
                       constraints_weights: Dict[Constraint, int]) -> int:
    weight = 0
    for variable in constraint_problem.get_variables():
        unsatisfied_constraints = filterfalse(
            None,
            constraint_problem.get_constraints_containing_variable(variable))
        for unsatisfied_const in unsatisfied_constraints:
            weight += constraints_weights[unsatisfied_const]
    return weight
Code example #7
def __heuristic_backtrack(
        constraint_problem: ConstraintProblem,
        primary_select_unassigned_vars: SelectUnassignedVariables = minimum_remaining_values,
        secondary_select_unassigned_vars: SelectUnassignedVariables = degree_heuristic,
        sort_domain: SortDomain = least_constraining_value,
        inference: Optional[Inference] = None,
        find_all_solutions: bool = False,
        with_history: bool = False) -> Optional[Dict[Variable, Any]]:
    selected_unassigned_vars = primary_select_unassigned_vars(
        constraint_problem, None)
    if secondary_select_unassigned_vars is not None and len(
            selected_unassigned_vars) > 1:
        selected_unassigned_vars = secondary_select_unassigned_vars(
            constraint_problem, selected_unassigned_vars)
    selected_variable, *_ = selected_unassigned_vars

    sorted_domain = sort_domain(constraint_problem, selected_variable)
    for value in sorted_domain:
        selected_variable.assign(value)
        if with_history:
            __actions_history.append((selected_variable, value))

        if inference is not None and not inference(constraint_problem,
                                                   selected_variable):
            selected_variable.unassign()
            if with_history:
                __actions_history.append((selected_variable, None))
            continue

        if constraint_problem.is_completely_assigned():
            if constraint_problem.is_consistently_assigned():
                if find_all_solutions:
                    yield constraint_problem.get_current_assignment()
                else:
                    yield None

            selected_variable.unassign()
            if with_history:
                __actions_history.append((selected_variable, None))
            continue

        if constraint_problem.is_consistently_assigned():
            for solution_assignment in __heuristic_backtrack(
                    constraint_problem, primary_select_unassigned_vars,
                    secondary_select_unassigned_vars, sort_domain, inference,
                    find_all_solutions, with_history):
                yield solution_assignment

        selected_variable.unassign()
        if with_history:
            __actions_history.append((selected_variable, None))
Code example #8
def __reduce_assignment_constraints_domains(constraint_problem: ConstraintProblem,
                                            i_subsets_consistent_assignments:
                                            Dict[Tuple[Tuple[Variable, ...], Variable], set]) -> None:
    constraints = constraint_problem.get_constraints()
    for subset, ith_variable in i_subsets_consistent_assignments:
        i_variables = frozenset(subset + (ith_variable,))
        found = False
        for constraint in constraints:
            if i_variables.issubset(constraint.variables):
                found = True
                constraint.update_i_consistent_assignments(i_subsets_consistent_assignments[(subset, ith_variable)])
        if not found:
            i_constraint = OnlyiConsistentAssignment((i_subsets_consistent_assignments[(subset, ith_variable)]))
            new_constraint = Constraint(i_variables, i_constraint)
            constraint_problem.add_constraint(new_constraint)
Code example #9
def __revise_i(constraint_problem: ConstraintProblem, subset: Tuple[Variable, ...], ith_variable: Variable,
               i_subsets_consistent_assignments:  Dict[Tuple[Tuple[Variable, ...], Variable], set]) -> bool:
    revised = False
    i_subsets_constraints = map(constraint_problem.get_constraints_containing_variable, subset)
    i_subsets_constraints = set(chain.from_iterable(i_subsets_constraints))
    i_subsets_constraints.update(constraint_problem.get_constraints_containing_variable(ith_variable))
    subset_domain = [variable.domain for variable in subset]
    for assignment in product(*subset_domain):
        var_were_assigned = dict()
        for variable, value in zip(subset, assignment):
            variable_was_assigned = variable.value is not None
            var_were_assigned[variable] = variable_was_assigned
            if not variable_was_assigned:
                variable.assign(value)
        ith_variable_was_assigned = ith_variable.value is not None
        for value in ith_variable.domain:
            if not ith_variable_was_assigned:
                ith_variable.assign(value)
            for constraint in i_subsets_constraints:
                i_assignment = assignment + (value,)
                if not constraint.is_consistent() and i_assignment in i_subsets_consistent_assignments[(subset,
                                                                                                        ith_variable)]:
                    revised = True
                    i_subsets_consistent_assignments[(subset, ith_variable)].remove(i_assignment)
            if not ith_variable_was_assigned:
                ith_variable.unassign()
        for variable in subset:
            if not var_were_assigned[variable]:
                variable.unassign()

    __reduce_assignment_constraints_domains(constraint_problem, i_subsets_consistent_assignments)
    return revised
Code example #10
def __revise(constraints_problem: ConstraintProblem, variable: Variable,
             neighbor: Variable) -> bool:
    if variable.value is not None:
        return False
    variable_constraints = constraints_problem.get_constraints_containing_variable(
        variable)
    neighbor_constraints = constraints_problem.get_constraints_containing_variable(
        neighbor)
    shared_constraint, *_ = (variable_constraints & neighbor_constraints)
    revised = False
    for value in variable.domain:
        variable.assign(value)
        if not shared_constraint.get_consistent_domain_values(neighbor):
            variable.remove_from_domain(value)
            revised = True
        variable.unassign()
    return revised
Code example #11
def __classic_heuristic_backtrack(
        constraint_problem: ConstraintProblem,
        primary_select_unassigned_vars: SelectUnassignedVariables = minimum_remaining_values,
        secondary_select_unassigned_vars: SelectUnassignedVariables = degree_heuristic,
        sort_domain: SortDomain = least_constraining_value,
        inference: Optional[Inference] = None,
        with_history: bool = False) -> bool:
    if constraint_problem.is_completely_assigned():
        if constraint_problem.is_consistently_assigned():
            return True
        return False

    selected_unassigned_vars = primary_select_unassigned_vars(
        constraint_problem, None)
    if secondary_select_unassigned_vars is not None and len(
            selected_unassigned_vars) > 1:
        selected_unassigned_vars = secondary_select_unassigned_vars(
            constraint_problem, selected_unassigned_vars)
    selected_variable, *_ = selected_unassigned_vars

    sorted_domain = sort_domain(constraint_problem, selected_variable)
    for value in sorted_domain:
        selected_variable.assign(value)
        if with_history:
            __actions_history.append((selected_variable, value))

        if inference is not None and not inference(constraint_problem,
                                                   selected_variable):
            selected_variable.unassign()
            if with_history:
                __actions_history.append((selected_variable, None))
            continue  # this value failed inference; try the next value instead of abandoning the rest of the domain

        if __classic_heuristic_backtrack(constraint_problem,
                                         primary_select_unassigned_vars,
                                         secondary_select_unassigned_vars,
                                         sort_domain, inference, with_history):
            return True

        selected_variable.unassign()
        if with_history:
            __actions_history.append((selected_variable, None))

    return False
Code example #12
def least_constraining_value(constraint_problem: ConstraintProblem,
                             variable: Variable) -> list:
    unassigned_neighbors = constraint_problem.get_unassigned_neighbors(
        variable)

    def neighbors_consistent_domain_lengths(value) -> int:
        variable.assign(value)
        consistent_domain_lengths = map(
            lambda neighbor: len(
                (constraint_problem.get_consistent_domain(neighbor))),
            unassigned_neighbors)
        variable.unassign()
        return sum(consistent_domain_lengths)

    return sorted(constraint_problem.get_consistent_domain(variable),
                  key=neighbors_consistent_domain_lengths,
                  reverse=True)
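
A tiny self-contained illustration of the least-constraining-value ordering: candidate values that leave the most options open across the neighbors' remaining domains come first (toy data, hypothetical names).

def toy_least_constraining_order(values, neighbor_domains, conflicts):
    def options_left(value):
        # Total number of neighbor values still usable if we pick `value`.
        return sum(sum(1 for nv in domain if not conflicts(value, nv))
                   for domain in neighbor_domains)
    return sorted(values, key=options_left, reverse=True)

# One variable with two uncolored neighbors; same-color pairs conflict.
print(toy_least_constraining_order(
    ["red", "green"],
    neighbor_domains=[["red", "blue"], ["red"]],
    conflicts=lambda a, b: a == b))  # -> ['green', 'red']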
Code example #13
def ac3(constraint_problem: ConstraintProblem,
        assigned_variable: Variable = None) -> bool:
    if assigned_variable is not None:  # usage of ac3 as part of Maintaining Arc Consistency (MAC) algorithm
        unassigned_neighbors = constraint_problem.get_unassigned_neighbors(
            assigned_variable)
        arcs = {(unassigned_neighbor, assigned_variable)
                for unassigned_neighbor in unassigned_neighbors}
    else:
        arcs = {(variable, neighbor)
                for variable in constraint_problem.get_unassigned_variables()
                for neighbor in constraint_problem.get_neighbors(variable)}

    while arcs:
        variable, neighbor = arcs.pop()
        if __revise(constraint_problem, variable, neighbor):
            if not constraint_problem.get_consistent_domain(variable):
                return False
            rest_of_neighbors = constraint_problem.get_neighbors(variable) - {neighbor}
            for other_neighbor in rest_of_neighbors:
                arcs.add((other_neighbor, variable))

    for var in constraint_problem.get_variables():
        if not var.domain or not constraint_problem.get_consistent_domain(var):
            return False
    return True
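
A minimal self-contained AC-3 sketch over plain dicts, with the revise step inlined; the toy domains and constraint predicates are illustrative only.

from collections import deque

def toy_ac3(domains, constraints):
    # `constraints` maps an ordered pair (x, y) to a predicate over (x_value, y_value).
    queue = deque(constraints)
    while queue:
        x, y = queue.popleft()
        # Revise: drop x-values that no y-value supports.
        revised_domain = [vx for vx in domains[x]
                          if any(constraints[(x, y)](vx, vy) for vy in domains[y])]
        if len(revised_domain) < len(domains[x]):
            domains[x] = revised_domain
            if not revised_domain:
                return False
            # Re-examine every other arc that points at x.
            queue.extend(arc for arc in constraints if arc[1] == x and arc[0] != y)
    return True

domains = {"a": [1, 2, 3], "b": [1, 2, 3]}
constraints = {("a", "b"): lambda va, vb: va < vb,
               ("b", "a"): lambda vb, va: va < vb}
print(toy_ac3(domains, constraints), domains)  # True {'a': [1, 2], 'b': [2, 3]}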
Code example #14
def forward_check(constraint_problem: ConstraintProblem,
                  assigned_variable: Variable) -> bool:
    unassigned_neighbors_frozenset = constraint_problem.get_unassigned_neighbors(
        assigned_variable)
    unsatisfiable_neighbors = filter(
        lambda unassigned_neighbor: not constraint_problem.
        get_consistent_domain(unassigned_neighbor),
        unassigned_neighbors_frozenset)
    return not any(unsatisfiable_neighbors)
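
The same test in a tiny self-contained form: forward checking fails as soon as some unassigned neighbor is left with no usable value (toy data, hypothetical names).

def toy_forward_check(value, neighbor_domains, conflicts):
    # After tentatively picking `value`, every unassigned neighbor must still
    # have at least one non-conflicting value left in its domain.
    return all(any(not conflicts(value, nv) for nv in domain)
               for domain in neighbor_domains)

print(toy_forward_check("red",
                        neighbor_domains=[["red", "green"], ["red"]],
                        conflicts=lambda a, b: a == b))  # False: the second neighbor is wiped out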
Code example #15
def i_consistency(constraint_problem: ConstraintProblem, i: int) -> bool:
    variables = constraint_problem.get_variables()

    assert 0 < i <= len(variables), "for i = {0}: i <= 0 or (number of variables in constraint_problem) < i.".format(i)

    i_minus_one_sized_subsets = combinations(variables, i - 1)
    i_subsets_consistent_assignments = __initialize_i_consistency(variables, i_minus_one_sized_subsets)

    reducing_domains = True
    while reducing_domains:
        reducing_domains = False
        for subset, ith_variable in i_subsets_consistent_assignments:
            if __revise_i(constraint_problem, subset, ith_variable, i_subsets_consistent_assignments):
                reducing_domains = True

    for var in constraint_problem.get_variables():
        if not var.domain or not constraint_problem.get_consistent_domain(var):
            return False
    return True
def degree_heuristic(
        constraint_problem: ConstraintProblem,
        variables: Optional[FrozenSet[Variable]] = None
) -> FrozenSet[Variable]:
    if variables is not None:  # then we're using degree_heuristic as secondary key
        max_variable = max(
            variables,
            key=lambda var: len(
                constraint_problem.get_unassigned_neighbors(var)))
        return frozenset({max_variable})

    unassigned_variables = constraint_problem.get_unassigned_variables()
    max_variable = max(
        unassigned_variables,
        key=lambda var: len(constraint_problem.get_unassigned_neighbors(var)))
    max_degree = len(constraint_problem.get_unassigned_neighbors(max_variable))
    max_variables = filter(
        lambda var: len(constraint_problem.get_unassigned_neighbors(var)) ==
        max_degree, unassigned_variables)
    return frozenset(max_variables)
Code example #17
def ac4(constraint_problem: ConstraintProblem) -> bool:
    support_counter = collections.Counter()
    variable_value_pairs_supported_by = collections.defaultdict(set)
    unsupported_variable_value_pairs = collections.deque()
    __initialize_ac4(constraint_problem.get_constraints(), support_counter, variable_value_pairs_supported_by,
                     unsupported_variable_value_pairs)

    while unsupported_variable_value_pairs:
        second_variable, second_value = unsupported_variable_value_pairs.popleft()
        for first_variable, first_value in variable_value_pairs_supported_by[(second_variable, second_value)]:
            if first_value in first_variable.domain:
                support_counter[(first_variable, first_value, second_variable)] -= 1
                if support_counter[(first_variable, first_value, second_variable)] == 0:
                    first_variable.remove_from_domain(first_value)
                    unsupported_variable_value_pairs.append((first_variable, first_value))

    for var in constraint_problem.get_variables():
        if not var.domain or not constraint_problem.get_consistent_domain(var):
            return False
    return True
def __get_random_conflicted_variable(constraint_problem: ConstraintProblem,
                                     read_only_variables: FrozenSet[Variable],
                                     tabu_size: int) -> Variable:
    conflicted_variables = set()
    for constraint in constraint_problem.get_unsatisfied_constraints():
        conflicted_variables.update(constraint.variables)
    conflicted_variables -= read_only_variables
    if tabu_size != -1:
        untabued_conflicted_variables = conflicted_variables - set(
            __tabu_queue)
        return choice(tuple(untabued_conflicted_variables))
    return choice(tuple(conflicted_variables))
def minimum_remaining_values(
        constraint_problem: ConstraintProblem,
        variables: Optional[FrozenSet[Variable]] = None
) -> FrozenSet[Variable]:
    if variables is not None:  # then we're using minimum_remaining_values as secondary key
        min_variable = min(
            variables,
            key=lambda variable: len(
                constraint_problem.get_consistent_domain(variable)))
        return frozenset({min_variable})

    unassigned_variables = constraint_problem.get_unassigned_variables()
    min_variable = min(
        unassigned_variables,
        key=lambda var: len(constraint_problem.get_consistent_domain(var)))
    min_remaining_values = len(
        constraint_problem.get_consistent_domain(min_variable))
    min_variables = filter(
        lambda var: len(constraint_problem.get_consistent_domain(var)) ==
        min_remaining_values, unassigned_variables)
    return frozenset(min_variables)
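
A tiny self-contained illustration of minimum-remaining-values: return the set of unassigned variables whose remaining domain is smallest (toy data, hypothetical names).

def toy_minimum_remaining_values(remaining_domains):
    fewest = min(len(domain) for domain in remaining_domains.values())
    return {var for var, domain in remaining_domains.items() if len(domain) == fewest}

print(toy_minimum_remaining_values({"x": [1, 2, 3], "y": [2], "z": [1, 3]}))  # {'y'}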
Code example #20
def __backtrack(constraint_problem: ConstraintProblem,
                inference: Optional[Inference] = None,
                find_all_solutions: bool = False,
                with_history: bool = False) -> Optional[Dict[Variable, Any]]:
    variable, *_ = constraint_problem.get_unassigned_variables()
    for value in variable.domain:
        variable.assign(value)
        if with_history:
            __actions_history.append((variable, value))

        if inference is not None and not inference(constraint_problem,
                                                   variable):
            variable.unassign()
            if with_history:
                __actions_history.append((variable, None))
            continue

        if constraint_problem.is_completely_assigned():
            if constraint_problem.is_consistently_assigned():
                if find_all_solutions:
                    yield constraint_problem.get_current_assignment()
                else:
                    yield None

            variable.unassign()
            if with_history:
                __actions_history.append((variable, None))
            continue

        if constraint_problem.is_consistently_assigned():
            for solution_assignment in __backtrack(constraint_problem,
                                                   inference,
                                                   find_all_solutions,
                                                   with_history):
                yield solution_assignment

        variable.unassign()
        if with_history:
            __actions_history.append((variable, None))
def __get_min_conflicts_value(constraint_problem: ConstraintProblem,
                              conflicted_variable: Variable) -> Any:
    min_conflicts_count = float("inf")
    min_conflicting_values = list()
    for value in conflicted_variable.domain:
        conflicted_variable.assign(value)
        conflicts_count = len(constraint_problem.get_unsatisfied_constraints())
        if conflicts_count < min_conflicts_count:
            min_conflicts_count = conflicts_count
            min_conflicting_values.clear()
            min_conflicting_values.append(value)
        elif conflicts_count == min_conflicts_count:
            min_conflicting_values.append(value)
        conflicted_variable.unassign()

    return choice(min_conflicting_values)
Code example #22
def naive_cycle_cutset(constraint_problem: ConstraintProblem, with_history: bool = False) \
        -> Optional[Deque[Tuple[Variable, Any]]]:
    actions_history = None
    if with_history:
        actions_history = deque()
    variables = constraint_problem.get_variables()
    read_only_variables = constraint_problem.get_assigned_variables()
    constraints = list(constraint_problem.get_constraints())
    constraints.sort(key=lambda constraint: len(constraint.variables), reverse=True)
    constraint_graph = constraint_problem.get_constraint_graph_as_adjacency_list()

    for i in range(1, len(constraints)):
        cutset_constraints = constraints[:i]
        cutset_variables = set()
        for cutset_const in cutset_constraints:
            cutset_variables.update(cutset_const.variables)
        reduced_graph = {var: neighbors for var, neighbors in constraint_graph.items() if var not in cutset_variables}
        for var in reduced_graph:
            reduced_graph[var] -= cutset_variables

        if __is_tree(reduced_graph):
            consistent_assignments_list = __get_consistent_assignments(cutset_variables, cutset_constraints,
                                                                       read_only_variables)
            non_cutset_variables = variables - cutset_variables
            non_cutset_vars_to_original_domains_map = {var: var.domain for var in non_cutset_variables}

            for consist_assignment in consistent_assignments_list:
                for var, value in zip(cutset_variables, consist_assignment):
                    if var not in read_only_variables:
                        var.assign(value)
                        if with_history:
                            actions_history.append((var, value))
                for non_cutset_var in non_cutset_variables:
                    if non_cutset_var not in read_only_variables:
                        non_cutset_var.domain = list(constraint_problem.get_consistent_domain(non_cutset_var))

                tree_csp_action_history = tree_csp_solver(constraint_problem, with_history)
                if with_history:
                    actions_history.extend(tree_csp_action_history)
                if constraint_problem.is_completely_consistently_assigned():
                    return actions_history

                for var in variables:
                    if var not in read_only_variables:
                        var.unassign()
                        if with_history:
                            actions_history.append((var, None))
                for var in non_cutset_vars_to_original_domains_map:
                    if var not in read_only_variables:
                        var.domain = non_cutset_vars_to_original_domains_map[var]

    return actions_history
def __get_best_reduction_variable_value(
        constraint_problem: ConstraintProblem,
        constraints_weights: Dict[Constraint, int],
        read_only_variables: FrozenSet[Variable]) -> Tuple[int, Variable, Any]:
    pairs_to_weight_reduction = dict()
    weight = __calculate_weight(constraint_problem, constraints_weights)
    original_assignment = constraint_problem.get_current_assignment()
    constraint_problem.unassign_all_variables()
    for variable in constraint_problem.get_variables() - read_only_variables:
        for value in variable.domain:
            variable.assign(value)
            curr_weight = __calculate_weight(constraint_problem,
                                             constraints_weights)
            pairs_to_weight_reduction[(variable, value)] = weight - curr_weight
            variable.unassign()

    constraint_problem.unassign_all_variables()
    constraint_problem.assign_variables_from_assignment(original_assignment)
    max_variable, max_value = max(pairs_to_weight_reduction,
                                  key=pairs_to_weight_reduction.get)
    return pairs_to_weight_reduction[(max_variable,
                                      max_value)], max_variable, max_value
def constraints_weighting(constraint_problem: ConstraintProblem, max_tries: int, with_history: bool = False) \
        -> Optional[Deque[Tuple[Variable, Any]]]:
    actions_history = None
    if with_history:
        actions_history = deque()
    constraints_weights = {
        constraint: 1
        for constraint in constraint_problem.get_constraints()
    }
    read_only_variables = constraint_problem.get_assigned_variables()

    for i in range(max_tries):
        constraint_problem.assign_variables_with_random_values(
            read_only_variables)
        last_reduction = float("inf")
        while 0 < last_reduction:
            if constraint_problem.is_completely_consistently_assigned():
                return actions_history

            reduction, variable, value = __get_best_reduction_variable_value(
                constraint_problem, constraints_weights, read_only_variables)
            variable.unassign()
            if with_history:
                actions_history.append((variable, None))
            variable.assign(value)
            if with_history:
                actions_history.append((variable, value))
            last_reduction = reduction

            for unsatisfied_constraint in constraint_problem.get_unsatisfied_constraints(
            ):
                constraints_weights[unsatisfied_constraint] += 1

        if i != max_tries - 1:
            constraint_problem.unassign_all_variables(read_only_variables)

    return actions_history
Code example #25
def __forward_checking_backtrack(
        constraint_problem: ConstraintProblem,
        find_all_solutions: bool = False,
        with_history: bool = False) -> Optional[Dict[Variable, Any]]:
    variable, *_ = constraint_problem.get_unassigned_variables()
    for value in variable.domain:
        variable.assign(value)
        if with_history:
            __actions_history.append((variable, value))

        unassigned_neighbors_frozenset = constraint_problem.get_unassigned_neighbors(
            variable)
        unsatisfiable_neighbors = filter(
            lambda unassigned_neighbor: not constraint_problem.
            get_consistent_domain(unassigned_neighbor),
            unassigned_neighbors_frozenset)
        if any(unsatisfiable_neighbors):
            variable.unassign()
            if with_history:
                __actions_history.append((variable, None))
            continue

        if constraint_problem.is_completely_assigned():
            if constraint_problem.is_consistently_assigned():
                if find_all_solutions:
                    yield constraint_problem.get_current_assignment()
                else:
                    yield None

            variable.unassign()
            if with_history:
                __actions_history.append((variable, None))
            continue

        if constraint_problem.is_consistently_assigned():
            for solution_assignment in __forward_checking_backtrack(
                    constraint_problem, find_all_solutions, with_history):
                yield solution_assignment

        variable.unassign()
        if with_history:
            __actions_history.append((variable, None))
Code example #26
def do_not_sort(constraint_problem: ConstraintProblem,
                variable: Variable) -> list:
    return list(constraint_problem.get_consistent_domain(variable))
Code example #27
def __optimized_heuristic_backtrack(constraint_problem: ConstraintProblem,
                                    find_all_solutions: bool = False,
                                    with_history: bool = False):
    unassigned_variables = constraint_problem.get_unassigned_variables()
    min_variable = min(
        unassigned_variables,
        key=lambda var: len(constraint_problem.get_consistent_domain(var)))
    min_remaining_values = len(
        constraint_problem.get_consistent_domain(min_variable))
    min_variables = filter(
        lambda var: len(constraint_problem.get_consistent_domain(var)) ==
        min_remaining_values, unassigned_variables)
    selected_unassigned_vars = frozenset(min_variables)
    if len(selected_unassigned_vars) > 1:
        selected_variable = max(
            selected_unassigned_vars,
            key=lambda var: len(
                constraint_problem.get_unassigned_neighbors(var)))
    else:
        selected_variable, *_ = selected_unassigned_vars

    unassigned_neighbors = constraint_problem.get_unassigned_neighbors(
        selected_variable)

    def neighbors_consistent_domain_lengths(val) -> int:
        selected_variable.assign(val)
        consistent_domain_lengths = map(
            lambda neighbor: len(
                (constraint_problem.get_consistent_domain(neighbor))),
            unassigned_neighbors)
        selected_variable.unassign()
        return sum(consistent_domain_lengths)

    sorted_domain = sorted(
        constraint_problem.get_consistent_domain(selected_variable),
        key=neighbors_consistent_domain_lengths,
        reverse=True)

    for value in sorted_domain:
        selected_variable.assign(value)
        if with_history:
            __actions_history.append((selected_variable, value))

        if constraint_problem.is_completely_assigned():
            if constraint_problem.is_consistently_assigned():
                if find_all_solutions:
                    yield constraint_problem.get_current_assignment()
                else:
                    yield None

            selected_variable.unassign()
            if with_history:
                __actions_history.append((selected_variable, None))
            continue

        if constraint_problem.is_consistently_assigned():
            for solution_assignment in __optimized_heuristic_backtrack(
                    constraint_problem, find_all_solutions, with_history):
                yield solution_assignment

        selected_variable.unassign()
        if with_history:
            __actions_history.append((selected_variable, None))
def min_conflicts(
        constraint_problem: ConstraintProblem,
        max_steps: int,
        tabu_size: int = -1,
        with_history: bool = False) -> Optional[Deque[Tuple[Variable, Any]]]:
    __tabu_queue.clear()
    read_only_variables = constraint_problem.get_assigned_variables()

    if tabu_size == -1:
        tabu_size = 0
    assert tabu_size + len(read_only_variables) < len(constraint_problem.get_variables()), \
        "tabu_size + len(read_only_variables) is equal or bigger than constraint_problem's variables amount."
    if tabu_size == 0:
        tabu_size = -1

    actions_history = None
    if with_history:
        actions_history = deque()
    rand_assignmt_history = constraint_problem.assign_variables_with_random_values(
        read_only_variables, actions_history)
    if with_history:
        actions_history.extend(rand_assignmt_history)

    best_min_conflicts = len(constraint_problem.get_unsatisfied_constraints())
    best_min_conflicts_assignment = constraint_problem.get_current_assignment()
    for i in range(max_steps):
        if constraint_problem.is_completely_consistently_assigned():
            return actions_history

        conflicted_variable = __get_random_conflicted_variable(
            constraint_problem, read_only_variables, tabu_size)
        conflicted_variable.unassign()
        if with_history:
            actions_history.append((conflicted_variable, None))
        min_conflicts_value = __get_min_conflicts_value(
            constraint_problem, conflicted_variable)
        conflicted_variable.assign(min_conflicts_value)
        if with_history:
            actions_history.append((conflicted_variable, min_conflicts_value))

        if len(__tabu_queue) == tabu_size:
            __tabu_queue.popleft()
        if tabu_size != -1:  # record the move only when a tabu list is in use
            __tabu_queue.append(conflicted_variable)

        curr_conflicts_count = len(
            constraint_problem.get_unsatisfied_constraints())
        if curr_conflicts_count < best_min_conflicts:
            best_min_conflicts = curr_conflicts_count
            best_min_conflicts_assignment = constraint_problem.get_current_assignment(
            )

    constraint_problem.unassign_all_variables()
    constraint_problem.assign_variables_from_assignment(
        best_min_conflicts_assignment)
    return actions_history
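
A minimal self-contained min-conflicts sketch on 8-queens, showing the same loop structure as above (pick a random conflicted variable, reassign it to a value with the fewest conflicts, break ties randomly); the code below is a toy, not the library's API.

from random import choice, randrange

def toy_min_conflicts(n=8, max_steps=10000):
    queens = [randrange(n) for _ in range(n)]  # queens[c] is the row of the queen in column c

    def conflicts(col, row):
        # Number of other queens sharing this row or a diagonal.
        return sum(1 for other in range(n) if other != col and
                   (queens[other] == row or abs(queens[other] - row) == abs(other - col)))

    for _ in range(max_steps):
        conflicted = [c for c in range(n) if conflicts(c, queens[c]) > 0]
        if not conflicted:
            return queens                       # solution found
        col = choice(conflicted)                # a random conflicted variable
        scores = [conflicts(col, row) for row in range(n)]
        best = min(scores)
        queens[col] = choice([r for r, s in enumerate(scores) if s == best])
    return None                                 # no solution within max_steps

print(toy_min_conflicts())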
def generate_start_state_randomly(
        constraint_problem: ConstraintProblem) -> None:
    constraint_problem.unassign_all_variables()
    for var in constraint_problem.get_variables():
        var.assign(choice(var.domain))
def consistent_constraints_amount(
        constraint_problem: ConstraintProblem) -> int:
    return len(constraint_problem.get_consistent_constraints())