Beispiel #1
0
def iterated_ls1(distances):
    """Iterated local search on half the vertices of the instance.

    Runs steepest local search from a random path, then, until the
    AVG_MSLS_TIME budget is spent, perturbs the incumbent, re-optimises
    it, and keeps the result whenever it improves the best score.

    Returns:
        (best_path, ls_count): the best path found and how many
        local-search runs were performed.
    """
    t0 = time.perf_counter()
    runs = 1
    n_all = distances.shape[0]
    n = int(np.ceil(n_all / 2))

    path0, outside0 = ls.random_path(n, n_all)

    swap_actions = ls.swap_edges_actions(n)
    exchange_actions = ls.exchange_vertices_actions(path0.shape[0],
                                                    outside0.shape[0])

    best_path, best_outside = steepest(distances, path0, outside0,
                                       swap_actions, exchange_actions)
    best_score = ut.evaluate(best_path, distances)

    while time.perf_counter() - t0 <= AVG_MSLS_TIME:
        # Perturb the incumbent (strength 2) and re-optimise it.
        candidate_path, candidate_outside = steepest(
            distances, perturbation(best_path, 2), best_outside,
            swap_actions, exchange_actions)
        candidate_score = ut.evaluate(candidate_path, distances)
        runs += 1

        if candidate_score < best_score:
            best_score = candidate_score
            best_path = candidate_path
            best_outside = candidate_outside

    return best_path, runs
Beispiel #2
0
def evolutionary(distances):
    """Steady-state evolutionary algorithm with local search on offspring.

    Keeps a population of 20 unique solutions. Until the AVG_MSLS_TIME
    budget runs out: pick two random parents, recombine them, improve
    the child with steepest local search, and replace the current worst
    member when the child scores better and is not already present.

    Returns:
        (best_path, ls_count): the best path in the final population and
        the number of local-search runs performed.
    """
    deadline = time.perf_counter() + AVG_MSLS_TIME
    ls_runs = 1

    n_all = distances.shape[0]
    n = int(np.ceil(n_all / 2))

    swap_actions = ls.swap_edges_actions(n)
    exchange_actions = ls.exchange_vertices_actions(n, n_all - n)

    population, scores = generate_population(20, n, n_all, distances,
                                             swap_actions, exchange_actions)
    worst = np.argmax(scores)

    while time.perf_counter() < deadline:
        parent_a, parent_b = pick_two_random(population)
        child = recombination(parent_a, parent_b, n)

        improved, _ = lsb.steepest(distances, child,
                                   get_outside(child, n_all),
                                   swap_actions, exchange_actions)
        improved_score = ut.evaluate(improved, distances)
        ls_runs += 1

        # Replace the worst member only with a strictly better, unique child.
        if improved_score < scores[worst] and is_unique_in_population(
                population, scores, improved, improved_score):
            population[worst] = improved
            scores[worst] = improved_score
            worst = np.argmax(scores)

    return population[np.argmin(scores)], ls_runs
Beispiel #3
0
def greedy_cycle_v2(start_point,
                    distances):  # grow a Hamiltonian cycle by cheapest insertion
    """Greedily build a cycle over half the vertices by cheapest insertion.

    Starting from ``start_point``, repeatedly inserts the unused vertex at
    the position that gives the lowest total cycle cost (as scored by
    ``ut.evaluate``) until the cycle contains ceil(n_all / 2) vertices.

    Args:
        start_point: index of the vertex the cycle is grown from.
        distances: square matrix of pairwise distances.

    Returns:
        List of vertex indices describing the constructed cycle.

    Raises:
        ValueError: if no insertion candidate is found (should not happen
            for a valid distance matrix with enough vertices).
    """
    n_all = distances.shape[0]
    n = int(np.ceil(n_all / 2))

    # Vertices not yet placed in the cycle.
    points_to_check = list(range(n_all))
    points_to_check.remove(start_point)

    global_solution = [start_point]

    for _ in range(n - 1):
        global_result = np.inf
        local_solution = global_solution.copy()
        last_point = None
        # Try every remaining vertex at every insertion position and keep
        # the cheapest resulting cycle. Position len(path) is equivalent to
        # position 0 in a cycle, so iterating existing positions suffices.
        for position in range(len(global_solution)):
            for candidate in points_to_check:
                temp_solution = global_solution.copy()
                temp_solution.insert(position, candidate)
                local_result = ut.evaluate(temp_solution, distances)
                if local_result < global_result:
                    last_point = candidate
                    global_result = local_result
                    local_solution = temp_solution
        if last_point is None:
            # Guard against an UnboundLocalError on the remove() below.
            raise ValueError("no insertion candidate improved the cycle")
        points_to_check.remove(last_point)
        global_solution = local_solution.copy()

    return global_solution
Beispiel #4
0
def test(algorithm, instance, distances):
    """Run ``algorithm`` ten times and collect summary statistics.

    ``instance`` is accepted for interface compatibility; it is not read
    inside this function.

    Returns:
        Tuple of (best_solution, index_of_best_run, min_score, max_score,
        mean_score, mean_time, min_ls_count, max_ls_count, mean_ls_count).
    """
    runs = 10
    solutions = []
    times = []
    ls_counts = []
    results = np.zeros(runs, dtype="int64")

    for run in range(runs):
        t0 = time.perf_counter()
        solution, count = algorithm(distances)
        solutions.append(solution)
        ls_counts.append(count)
        times.append(time.perf_counter() - t0)
        results[run] = ut.evaluate(solutions[run], distances)
        print(results[run], times[run])

    best_idx = np.argmin(results)
    return (solutions[best_idx], best_idx, np.min(results), np.max(results),
            np.mean(results), np.mean(times), np.min(ls_counts),
            np.max(ls_counts), np.mean(ls_counts))
Beispiel #5
0
def multiple_start_ls(distances):
    """Multiple-start local search: best of 100 independent steepest runs.

    Runs ``ls.steepest_edges`` 100 times from scratch and returns the path
    with the lowest ``ut.evaluate`` score, printing progress as it goes.
    """
    restarts = 100

    best_path = ls.steepest_edges(distances)
    best_score = ut.evaluate(best_path, distances)
    print("best: ", best_score)

    for attempt in range(restarts - 1):
        candidate = ls.steepest_edges(distances)
        candidate_score = ut.evaluate(candidate, distances)

        print(attempt, "best: ", best_score, "result: ", candidate_score)
        if candidate_score < best_score:
            best_score = candidate_score
            best_path = candidate

    return best_path
Beispiel #6
0
def global_convex(filename, distances, f):
    """Global-convexity analysis of a set of local optima.

    Loads optimum paths from ``filename``, scores each with
    ``ut.evaluate``, computes each path's vertex/edge similarity both to
    the best path and on average to all optima, prints Pearson
    correlations between score and each similarity measure, and saves a
    score-vs-edge-similarity scatter plot to ``plots/``.

    Args:
        filename: text file with one optimum path per row.
        distances: square distance matrix used for scoring.
        f: instance label used in the printed output and plot title.

    Returns:
        0 (kept for interface compatibility).
    """
    optimums = np.loadtxt(filename, dtype='int')
    n_opt = optimums.shape[0]

    score = np.empty(n_opt)
    vertices_similarities = np.empty(n_opt)
    edges_similarities = np.empty(n_opt)
    mean_vertices_similarities = np.empty(n_opt)
    mean_edges_similarities = np.empty(n_opt)

    for i, path in enumerate(optimums):
        score[i] = ut.evaluate(path, distances)

    best_path_index = np.argmin(score)
    best_path = optimums[best_path_index]

    # Similarity of each optimum to the best one, and its mean similarity
    # to the whole set (including itself, matching the original behaviour).
    for i, path in enumerate(optimums):
        vertices_similarities[i] = vertices_similarity(path, best_path)
        edges_similarities[i] = edges_similarity(path, best_path)
        mean_vertices = 0
        mean_edges = 0
        for another_path in optimums:
            mean_vertices += vertices_similarity(path, another_path)
            mean_edges += edges_similarity(path, another_path)
        mean_vertices_similarities[i] = mean_vertices / n_opt
        mean_edges_similarities[i] = mean_edges / n_opt

    print(f, stats.pearsonr(score, vertices_similarities)[0])
    print(f, stats.pearsonr(score, edges_similarities)[0])
    print(f, stats.pearsonr(score, mean_vertices_similarities)[0])
    print(f, stats.pearsonr(score, mean_edges_similarities)[0])
    print("etap")

    plt.figure()  # fresh figure so repeated calls do not overlay points
    plt.plot(score,
             edges_similarities,
             "o:",
             color="green",
             linewidth=0,
             alpha=0.2)
    plt.plot(score[best_path_index],
             edges_similarities[best_path_index],
             "o:",
             linewidth=0,
             alpha=0.5)
    plt.xlabel("lx")
    plt.ylabel("ly")
    title = "edges_similarities - " + f
    plt.title(title)
    # BUG FIX: grid must be enabled BEFORE savefig, otherwise the saved
    # image has no grid (the original called grid() after savefig()).
    plt.grid(True)
    plt.savefig(f"plots/{title}.png")
    plt.close()

    return 0
Beispiel #7
0
def generate_population(population_size, n, n_all, distances, swap_actions,
                        exchange_actions):
    """Create ``population_size`` unique greedy-cycle solutions.

    Repeatedly builds a greedy cycle from a random start vertex and keeps
    it only when ``is_unique_in_population`` reports no duplicate.

    Returns:
        (population, scores): an int array of shape
        (population_size, n) and the matching score vector.
    """
    population = np.zeros((population_size, n), int)
    scores = np.zeros(population_size, int)
    filled = 0

    while filled < population_size:
        candidate = lsb.greedy_cycle([np.random.randint(n_all)], n, n_all,
                                     distances)
        candidate_score = ut.evaluate(candidate, distances)

        # Reject duplicates; only unique solutions enter the population.
        if not is_unique_in_population(population, scores, candidate,
                                       candidate_score):
            continue

        population[filled] = candidate
        scores[filled] = candidate_score
        filled += 1

    return population, scores
Beispiel #8
0
def evaluate(path, point, position, distances):
    """Score ``path`` with ``point`` inserted at ``position``.

    BUG FIX: the original inserted into the caller's list in place, so
    every call silently mutated the caller's solution. The insertion is
    now performed on a copy; the returned score is unchanged.

    Args:
        path: current sequence of vertex indices (left untouched).
        point: vertex index to insert.
        position: list index at which to insert ``point``.
        distances: square distance matrix passed through to ``ut.evaluate``.

    Returns:
        ``ut.evaluate`` score of the extended path.
    """
    candidate = list(path)
    candidate.insert(position, point)
    return ut.evaluate(candidate, distances)