Example #1
def generate_solvable_problem(m, n, alphabet, target_solution):
    # Generate the problem backwards, so that
    # max(h(s, si)) = target_solution, which we will treat as the expected optimum

    if target_solution < 0 or target_solution > m:
        raise ValueError(f'Invalid target solution value: {target_solution} (must be in [0, {m}]).')

    solution_string = random_string(alphabet, m)
    # Now we need to construct n strings such that
    # they differ from solution_string at up to target_solution characters.
    # At least one string must differ at exactly target_solution characters,
    # otherwise solution_string would not actually achieve target_solution.
    # So we build one such string first, and let the remaining n-1 strings
    # differ at a random number of positions in [0, target_solution].
    orig_fake_string = truly_modify_at_random_positions(
        solution_string, alphabet, target_solution)
    strings = [orig_fake_string]

    for i in range(1, n):
        new_str = truly_modify_at_random_positions(
            solution_string, alphabet, random.randint(0, target_solution))
        strings.append(new_str)

    random.shuffle(strings)

    # Assertion step: the planted string must achieve exactly the target value
    wsm = utils.problem_metric(solution_string, strings)
    assert wsm == target_solution, f'Generated instance has metric {wsm}, expected {target_solution}'

    return CSProblem(m, n, strings, alphabet, expect=target_solution)
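
The generator relies on two helpers, random_string and truly_modify_at_random_positions, that are not shown in this example. A minimal sketch of what they are assumed to do (a uniformly random string, and changes at exactly k distinct positions) could look like this:

import random

def random_string(alphabet, length):
    # Assumed behaviour: a uniformly random string of the given length.
    return ''.join(random.choice(alphabet) for _ in range(length))

def truly_modify_at_random_positions(s, alphabet, k):
    # Assumed behaviour: change exactly k distinct positions of s, each to a
    # character different from the original one, so that h(result, s) == k.
    # (Assumes the alphabet has at least two characters.)
    positions = random.sample(range(len(s)), k)
    chars = list(s)
    for p in positions:
        chars[p] = random.choice([c for c in alphabet if c != s[p]])
    return ''.join(chars)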

Example #2
    def solve_(self, problem: CSProblem) -> Tuple[str, dict]:
        m = problem.m
        alphabet = problem.alphabet
        strings = problem.strings

        # Depth-first search over partial strings: q is used as a stack, and a
        # branch is pruned as soon as its partial score already reaches the
        # best complete score found so far.
        q = ['']
        min_hamming = float('inf')
        min_string = None

        iterations = 0
        pruned = 0
        while q:
            iterations += 1
            curr_string = q.pop()
            curr_string_length = len(curr_string)
            curr_string_score = problem_metric(curr_string, strings)

            if curr_string_score >= min_hamming:
                pruned += 1
                continue

            if curr_string_length == m:
                if curr_string_score < min_hamming:
                    min_hamming = curr_string_score
                    min_string = curr_string
                continue
            q += [curr_string + next_letter for next_letter in alphabet]

        return min_string, {'iterations': iterations, 'pruned': pruned}
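
All of the solvers call utils.problem_metric, which is not part of these snippets. It is assumed to compute the largest Hamming distance between the candidate and any of the input strings, compared position by position over the candidate's length (so it is also meaningful for the partial strings the solver above builds); a minimal sketch under that assumption:

from typing import List

def problem_metric(candidate: str, strings: List[str]) -> int:
    # Assumed behaviour: max Hamming distance from candidate to any input string,
    # measured over the first len(candidate) positions.
    return max(
        sum(1 for a, b in zip(candidate, s) if a != b)
        for s in strings
    )
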
Example #3
    def solve_(self, problem: CSProblem) -> Tuple[str, dict]:
        # Exhaustive search: score every string of length m over the alphabet
        best_score = problem.m
        best_string = None
        for s in StringGenerator(problem.alphabet, problem.m):
            sm = utils.problem_metric(s, problem.strings)
            if sm <= best_score:
                best_score = sm
                best_string = s
        return best_string, {}
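
StringGenerator is not shown either; presumably it enumerates every string of length m over the alphabet, which is why this solver is exhaustive and only practical for small instances. A sketch under that assumption, built on itertools.product:

from itertools import product

def StringGenerator(alphabet, length):
    # Assumed behaviour: yield all len(alphabet) ** length strings of the given length.
    for chars in product(alphabet, repeat=length):
        yield ''.join(chars)
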
Example #4
    def solve_(self, problem: CSProblem) -> Tuple[str, dict]:
        original_string_set = problem.strings
        alphabet = problem.alphabet
        m = problem.m
        n = problem.n

        logm = math.log2(m)

        r = self.config['r']

        total_iters = math.factorial(n) // math.factorial(r) // math.factorial(
            n - r)
        best_non_trivial_string, best_non_trivial_score = None, m

        times_lp = 0
        times_force = 0

        for subset_index_list in combinations(range(n), r):
            subset_strings = [
                original_string_set[idx] for idx in subset_index_list
            ]
            Q = utils.Q_all(subset_strings)
            P = [j for j in range(m) if j not in Q]
            k = len(P)

            if k <= logm:
                solve_func = solve_by_force
                times_force += 1
            else:
                solve_func = solve_by_lp_relaxation
                times_lp += 1
            ss = solve_func(P, Q, alphabet, m, n, original_string_set,
                            subset_strings[0])
            if ss is None:
                continue
            s_p, s_p_metric = ss
            if s_p_metric <= best_non_trivial_score:
                best_non_trivial_score = s_p_metric
                best_non_trivial_string = s_p

        best_trivial_string, best_trivial_score = None, m
        for s in problem.strings:
            met = utils.problem_metric(s, problem.strings)
            if met <= best_trivial_score:
                best_trivial_score = met
                best_trivial_string = s

        if best_non_trivial_string is None:
            return best_trivial_string, {'trivial': True}

        if best_non_trivial_score < best_trivial_score:
            return best_non_trivial_string, {
                'times_lp': times_lp,
                'times_force': times_force
            }

        return best_trivial_string, {'trivial': True}
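
utils.Q_all is not shown; it is assumed to return the positions at which all of the chosen subset strings agree, so that P (its complement) is exactly the set of positions the brute-force or LP step still has to decide. A sketch under that assumption:

def Q_all(strings):
    # Assumed behaviour: indices where every string carries the same character.
    length = len(strings[0])
    return [j for j in range(length) if len({s[j] for s in strings}) == 1]
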
Example #5
    def __init__(self,
                 solution: str,
                 elapsed,
                 problem: CSProblem,
                 extra: Any = None) -> None:
        self.solution = solution
        self.measure = utils.problem_metric(solution, problem.strings)
        self.extra = extra
        self.problem = problem
        self.quality = (problem.m - self.measure) / problem.m
        self.objective_quality = None
        self.elapsed = elapsed

        if problem.expect is not None:
            # Guard against division by zero when the solution already has distance 0
            self.objective_quality = 1.0 if self.measure == 0 else problem.expect / self.measure
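
A small worked example of the two quality fields (hypothetical numbers, not taken from the original code):

# With m = 10, a solution whose farthest input string is at Hamming distance 3,
# and a known optimum of 2:
#   quality           = (10 - 3) / 10 = 0.7
#   objective_quality = 2 / 3         (about 0.67)
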
Example #6
    def solve_(self, problem: CSProblem) -> Tuple[str, dict]:
        m, n, alphabet, strings = problem.m, problem.n, problem.alphabet, problem.strings
        A = len(alphabet)
        rho = self.config['RHO']
        colony_size = self.config['COLONY_SIZE']
        miters = self.config['MAX_ITERS']

        global_best_ant = None
        global_best_metric = m
        init_pher = 1.0 / A
        world_trails = [[init_pher for _ in range(A)] for _ in range(m)]
        trail_row_wise_sums = [1.0 for _ in range(m)]

        for iteration in range(miters):

            local_best_ant = None
            local_best_metric = m
            for _ in range(colony_size):
                ant = ''.join(
                    fast_pick(alphabet, world_trails[pos], trail_row_wise_sums[pos])
                    for pos in range(m)
                )
                ant_metric = utils.problem_metric(ant, strings)

                if ant_metric <= local_best_metric:
                    local_best_metric = ant_metric
                    local_best_ant = ant

            # First we perform pheromone evaporation
            for i in range(m):
                for j in range(A):
                    world_trails[i][j] = world_trails[i][j] * (1 - rho)

            # Now, using the elitist strategy, only the best ant of this iteration is allowed to update its pheromone trails
            best_ant_ys = (alphabet.index(a) for a in local_best_ant)
            best_ant_xs = range(m)

            for x, y in zip(best_ant_xs, best_ant_ys):
                world_trails[x][y] = world_trails[x][y] + (1 - 1.0*local_best_metric / m)

            if local_best_metric <= global_best_metric:
                global_best_metric = local_best_metric
                global_best_ant = local_best_ant

            trail_row_wise_sums = [sum(world_trails[i]) for i in range(m)]
        return global_best_ant, {}
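
fast_pick is not shown; it is assumed to perform roulette-wheel selection, i.e. pick a character with probability proportional to its pheromone value in the given row. A sketch under that assumption:

import random

def fast_pick(alphabet, pheromone_row, row_sum):
    # Assumed behaviour: sample a character with probability
    # pheromone_row[i] / row_sum (roulette-wheel selection).
    threshold = random.random() * row_sum
    acc = 0.0
    for char, pheromone in zip(alphabet, pheromone_row):
        acc += pheromone
        if acc >= threshold:
            return char
    return alphabet[-1]  # guard against floating-point drift
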
Example #7
def solve_by_lp_relaxation(P, Q, alphabet, m, n, original_strings, primary_reference_string):
    LP_T_ = 'float64'

    A = len(alphabet)
    nP = len(P)

    # There are nP*A + 1 variables: nP*A for the x_{j,a} variables, plus 1 for d
    num_variables = nP*A + 1

    # ======================= CONSTRUCTING THE EQUALITY CONSTRAINTS ========================
    # There are nP constraints with equality
    lp_eq_matrix = np.zeros((nP, num_variables), dtype=LP_T_)
    # For each j we need an "exactly one" constraint
    # The constraint will have ones only within one j-group, and zeros on other positions
    # Therefore we iterate over j-groups
    for i in range(nP):
        left_bound = i*A
        right_bound = (i+1)*A
        lp_eq_matrix[i][left_bound:right_bound] = np.ones(A, dtype=LP_T_)

    # The d column is not touched in this part and stays zero, since the matrix was constructed with np.zeros
    # RHS are all ones
    lp_eq_b = np.ones(nP, dtype=LP_T_)

    # ======================= CONSTRUCTING THE INEQUALITY CONSTRAINTS ======================
    # There are n inequality constraints
    # For x_j_a, its absolute position as a variable
    # is j*A + a
    lp_leq_matrix = np.zeros((n, num_variables), dtype=LP_T_)
    # Each of them has -1 coefficient with d, and those correspond to the very last column
    lp_leq_matrix[:, -1] = -np.ones(n, dtype=LP_T_)
    # Other coefficients are chi(i, j, a)
    for i in range(n):
        for a in range(A):
            for j in range(nP):
                actual_var_idx = j*A + a
                lp_leq_matrix[i][actual_var_idx] = chi(original_strings, i, j, alphabet[a])
    # RHS are -d(si|Q, s'|Q)
    lp_leq_b = -np.array([utils.hamming_distance(utils.sat(original_strings[i], Q), utils.sat(primary_reference_string, Q)) for i in range(n)], dtype='int32')

    # Target function is just 'd'
    c = np.zeros(num_variables, dtype=LP_T_)
    c[-1] = 1

    # Every variable must be positive
    lower_bounds = [0.0 for _ in range(num_variables)]
    # The xs are constrained to be at most 1; d is unbounded above
    upper_bounds: List[Any] = [1.0 for _ in range(num_variables-1)] + [None]


    # Now we just plug it into SciPy's LP solver
    lp_solution = linprog(c,
                          A_ub=lp_leq_matrix,
                          b_ub=lp_leq_b,
                          A_eq=lp_eq_matrix,
                          b_eq=lp_eq_b,
                          bounds=list(zip(lower_bounds, upper_bounds)))


    if not lp_solution.success:
        print('LP relaxation failed for this subset')
        return None

    s_prime = reconstruct_solution(m, Q, primary_reference_string, lp_solution.x, alphabet)
    return s_prime, utils.problem_metric(s_prime, original_strings)
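
reconstruct_solution (and the indicator chi) are also not shown. A plausible sketch of the reconstruction step, assuming it keeps the reference string's characters on the agreed positions Q and, on every remaining position, picks the letter whose relaxed variable x_{j,a} received the largest weight (a real implementation might use randomized rounding here instead):

def reconstruct_solution(m, Q, reference_string, x, alphabet):
    # Sketch only: deterministic rounding of the relaxed LP solution.
    A = len(alphabet)
    P = [j for j in range(m) if j not in Q]
    result = list(reference_string)
    for group, pos in enumerate(P):
        weights = x[group * A:(group + 1) * A]
        result[pos] = alphabet[max(range(A), key=lambda a: weights[a])]
    return ''.join(result)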