Example #1
File: lab6.py Project: TSienki/AEM
def msls(dist_matrix,
         neighbourhood_radius,
         data,
         candidates=False,
         cache=False,
         option="random"):
    local_optimums = []
    local_optimums_costs = []
    current_step = 0

    while current_step < 5:
        clusters = np.ones(len(data), dtype=np.int32) * (-1)

        if option == "prim":
            result = create_n_trees_prim(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "kruskal":
            result = create_n_trees_kruskal(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "random":
            clusters = random_groups(data.shape[0])

        clusters_before = np.copy(clusters)

        run_algorithm_steepest(clusters, dist_matrix, neighbourhood_radius,
                               candidates, cache)
        cost = cost_function(dist_matrix, clusters)[0]

        if cost not in local_optimums_costs:
            local_optimums.append(clusters)
            local_optimums_costs.append(cost)
            current_step += 1
    return local_optimums, local_optimums_costs
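A minimal invocation sketch (not part of the repository): it assumes msls and its helpers can be imported from lab6.py, substitutes randomly generated 2-D points for the project's instance data, and uses an illustrative neighbourhood_radius of 50.

import numpy as np
from lab6 import msls  # assumes the project's module is on the import path

# Illustrative data: 200 random 2-D points instead of the project's instances.
data = np.random.rand(200, 2) * 100
# Pairwise Euclidean distance matrix.
dist_matrix = np.linalg.norm(data[:, None, :] - data[None, :, :], axis=-1)

# neighbourhood_radius=50 is a placeholder value, not taken from the project.
local_optimums, costs = msls(dist_matrix, 50, data, option="random")
print(f"Best of {len(costs)} local optima: {min(costs)}")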
Example #2
def ils(dist_matrix, neighbourhood_radius, data, time_limit, candidates=False, cache=False, option="prim", perturbation="small"):
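    """Iterated local search (ILS).

    Builds an initial clustering, then, until the time limit expires, perturbs
    the base solution, improves the perturbed copy with steepest local search,
    and keeps the best clustering found.  Returns the best cost, the best
    clustering, and the initial clustering it started from.
    """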
    timeout = time_limit
    timeout_start = time.time()

    best_clusters = None
    best_cost = np.inf
    cluster_before_best = None
    clusters = np.ones(len(data), dtype=np.int32) * (-1)
    if option == "prim":
        result = create_n_trees_prim(data, dist_matrix, 20, 1)
        clusters = create_clusters_from_tree(data, result)
    elif option == "kruskal":
        result = create_n_trees_kruskal(data, dist_matrix, 20, 1)
        clusters = create_clusters_from_tree(data, result)
    elif option == "random":
        clusters = random_groups(data.shape[0])
    clusters_before = np.copy(clusters)
    clusters_to_process = np.copy(clusters)
    while time.time() < timeout_start + timeout:
        # Perturb the current base solution, then improve the perturbed copy.
        if perturbation == "small":
            clusters = small_perturbations(clusters_to_process, 20, 50, dist_matrix)
        elif perturbation == "big":
            clusters = big_random_perturbation(clusters_to_process, 30, dist_matrix)

        # Run local search on the perturbed solution, not on the untouched base.
        run_algorithm_steepest(clusters, dist_matrix, neighbourhood_radius, candidates, cache)
        cost = cost_function(dist_matrix, clusters)[0]
        if cost < best_cost:
            best_cost = cost
            best_clusters = np.copy(clusters)
            cluster_before_best = clusters_before
    return best_cost, best_clusters, cluster_before_best
Example #3
def msls(dist_matrix, neighbourhood_radius, data, candidates=False, cache=False, option="random"):
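    """Multiple-start local search (MSLS) with 100 restarts.

    Builds an initial clustering 100 times, improves each copy in place with
    steepest local search, and returns the best cost, the best clustering,
    and the initial clustering that led to it.
    """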

    best_clusters = None
    best_cost = np.inf
    cluster_before_best = None

    for i in range(100):
        print(f"Wywołanie MSLS numer {i}")
        clusters = np.ones(len(data), dtype=np.int32) * (-1)

        if option == "prim":
            result = create_n_trees_prim(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "kruskal":
            result = create_n_trees_kruskal(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "random":
            clusters = random_groups(data.shape[0])

        clusters_before = np.copy(clusters)

        run_algorithm_steepest(clusters, dist_matrix, neighbourhood_radius, candidates, cache)
        cost = cost_function(dist_matrix, clusters)[0]

        if cost < best_cost:
            best_cost = cost
            best_clusters = clusters
            cluster_before_best = clusters_before
    return best_cost, best_clusters, cluster_before_best
Example #4
def run_measurements(data, dist_matrix, neighbourhood_radius, steps_for_time_measurements=50, option="prim"):
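    """Compares greedy and steepest local search over repeated runs.

    For each run, builds an initial clustering, applies both variants to
    separate copies, records costs and running times, prints summary
    statistics, and plots the best and initial clusterings.
    """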
    greedy_times_measurements = []
    steepest_times_measurements = []
    greedy_measurements = []
    steepest_measurements = []
    dist = np.copy(dist_matrix)
    best_greedy = np.inf
    best_clusters_greedy = []
    best_steepest = np.inf
    best_clusters_steepest = []
    costs_greedy = []
    costs_steepest = []
    clusters_before_greedy = []
    clusters_before_steepest = []

    for i in range(steps_for_time_measurements):
        clusters = np.ones(len(data), dtype=np.int32) * (-1)

        if option == "prim":
            result = create_n_trees_prim(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "kruskal":
            result = create_n_trees_kruskal(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "random":
            clusters = random_groups(data.shape[0])

        greedy_clusters = np.copy(clusters)
        steepest_clusters = np.copy(clusters)

        measurement = time_measure(run_algorithm_greedy, (greedy_clusters, dist, neighbourhood_radius))
        greedy_times_measurements.append(measurement)
        greedy_cost = cost_function(dist_matrix, greedy_clusters)[0]
        greedy_measurements.append(greedy_cost)
        costs_greedy.append(greedy_cost)
        if greedy_cost < best_greedy:
            best_greedy = greedy_cost
            best_clusters_greedy = greedy_clusters
            clusters_before_greedy = clusters

        measurement = time_measure(run_algorithm_steepest, (steepest_clusters, dist, neighbourhood_radius))
        steepest_times_measurements.append(measurement)
        steepest_cost = cost_function(dist_matrix, steepest_clusters)[0]
        steepest_measurements.append(steepest_cost)
        costs_steepest.append(steepest_cost)
        if steepest_cost < best_steepest:
            best_steepest = steepest_cost
            best_clusters_steepest = steepest_clusters
            clusters_before_steepest = clusters

    print(f"Greedy cost min:{min(greedy_measurements)}, max:{max(greedy_measurements)}, avg: {sum(greedy_measurements) / len(greedy_measurements)}")
    print(f"Greedy Time min:{min(greedy_times_measurements)}, max:{max(greedy_times_measurements)}, avg: {sum(greedy_times_measurements) / len(greedy_times_measurements)}")

    print(f"Steepest cost min:{min(steepest_measurements)}, max:{max(steepest_measurements)}, avg: {sum(steepest_measurements) / len(steepest_measurements)}")
    print(f"Steepest Time min:{min(steepest_times_measurements)}, max:{max(steepest_times_measurements)}, avg: {sum(steepest_times_measurements) / len(steepest_times_measurements)}")

    draw_scatter(data, best_clusters_greedy, True)
    draw_scatter(data, best_clusters_steepest, True)
    draw_scatter(data, clusters_before_greedy, False)
    draw_scatter(data, clusters_before_steepest, False)
Example #5
def run_measurements(data,
                     dist_matrix,
                     neighbourhood_radius,
                     steps_for_time_measurements=50,
                     option="prim"):
    steepest_times_measurements = []
    steepest_measurements = []
    dist = np.copy(dist_matrix)

    cache_times_measurements = []
    cache_measurements = []

    candidates_times_measurements = []
    candidates_measurements = []

    candidates_cache_times_measurements = []
    candidates_cache_measurements = []

    best_steepest = np.inf
    best_clusters_steepest = None

    best_cache = np.inf
    best_clusters_cache = None

    best_candidates = np.inf
    best_candidates_clusters = None

    best_candidates_cache = np.inf
    best_candidates_cache_clusters = None

    clusters_before_steepest = None
    clusters_before_cache = None
    clusters_before_candidates = None
    clusters_before_candidates_cache = None

    for i in range(steps_for_time_measurements):
        clusters = np.ones(len(data), dtype=np.int32) * (-1)

        if option == "prim":
            result = create_n_trees_prim(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "kruskal":
            result = create_n_trees_kruskal(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "random":
            clusters = random_groups(data.shape[0])

        steepest_clusters = np.copy(clusters)
        cache_cluster = np.copy(clusters)
        candidates_cluster = np.copy(clusters)
        candidates_cache_cluster = np.copy(clusters)

        measurement = time_measure(
            run_algorithm_steepest,
            (steepest_clusters, dist, neighbourhood_radius, False, False))
        steepest_times_measurements.append(measurement)
        steepest_cost = cost_function(dist_matrix, steepest_clusters)[0]
        steepest_measurements.append(steepest_cost)

        measurement = time_measure(
            run_algorithm_steepest,
            (cache_cluster, dist, neighbourhood_radius, False, True))
        cache_times_measurements.append(measurement)
        cache_cost = cost_function(dist_matrix, cache_cluster)[0]
        cache_measurements.append(cache_cost)

        measurement = time_measure(
            run_algorithm_steepest,
            (candidates_cluster, dist, neighbourhood_radius, True, False))
        candidates_times_measurements.append(measurement)
        candidates_cost = cost_function(dist_matrix, candidates_cluster)[0]
        candidates_measurements.append(candidates_cost)

        measurement = time_measure(run_algorithm_steepest,
                                   (candidates_cache_cluster, dist,
                                    neighbourhood_radius, True, True))
        candidates_cache_times_measurements.append(measurement)
        candidates_cache_cost = cost_function(dist_matrix,
                                              candidates_cache_cluster)[0]
        candidates_cache_measurements.append(candidates_cache_cost)

        if steepest_cost < best_steepest:
            best_steepest = steepest_cost
            best_clusters_steepest = steepest_clusters
            clusters_before_steepest = clusters

        if cache_cost < best_cache:
            best_cache = cache_cost
            best_clusters_cache = cache_cluster
            clusters_before_cache = clusters

        if candidates_cost < best_candidates:
            best_candidates = candidates_cost
            best_candidates_clusters = candidates_cluster
            clusters_before_candidates = clusters

        if candidates_cache_cost < best_candidates_cache:
            best_candidates_cache = candidates_cache_cost
            best_candidates_cache_clusters = candidates_cache_cluster
            clusters_before_candidates_cache = clusters

    print(
        f"Steepest cost min:{min(steepest_measurements)}, max:{max(steepest_measurements)}, avg: {sum(steepest_measurements) / len(steepest_measurements)}"
    )
    print(
        f"Steepest time min:{min(steepest_times_measurements)}, max:{max(steepest_times_measurements)}, avg: {sum(steepest_times_measurements) / len(steepest_times_measurements)}"
    )

    print(
        f"Cache steepest cost min:{min(cache_measurements)}, max:{max(cache_measurements)}, avg: {sum(cache_measurements) / len(cache_measurements)}"
    )
    print(
        f"Cache steepest time min:{min(cache_times_measurements)}, max:{max(cache_times_measurements)}, avg: {sum(cache_times_measurements) / len(cache_times_measurements)}"
    )

    print(
        f"Candidates steepest cost min:{min(candidates_measurements)}, max:{max(candidates_measurements)}, avg: {sum(candidates_measurements) / len(candidates_measurements)}"
    )
    print(
        f"Candidates steepest time min:{min(candidates_times_measurements)}, max:{max(candidates_times_measurements)}, avg: {sum(candidates_times_measurements) / len(candidates_times_measurements)}"
    )

    print(
        f"Candidates and cache steepest cost min:{min(candidates_cache_measurements)}, max:{max(candidates_cache_measurements)}, "
        f"avg: {sum(candidates_cache_measurements) / len(candidates_cache_measurements)}"
    )
    print(
        f"Candidates and cache steepest time min:{min(candidates_cache_times_measurements)}, "
        f"max:{max(candidates_cache_times_measurements)}, "
        f"avg: {sum(candidates_cache_times_measurements) / len(candidates_cache_times_measurements)}"
    )

    draw_scatter(data, best_clusters_steepest, True)
    draw_scatter(data, clusters_before_steepest, False)
    draw_scatter(data, best_clusters_cache, True)
    draw_scatter(data, clusters_before_cache, False)
    draw_scatter(data, best_candidates_clusters, True)
    draw_scatter(data, clusters_before_candidates, False)
    draw_scatter(data, best_candidates_cache_clusters, True)
    draw_scatter(data, clusters_before_candidates_cache, False)
Example #6
File: Lab2.py Project: TSienki/AEM
def run_measurements(data,
                     dist_matrix,
                     neighbourhood,
                     steps_for_time_measurements=1,
                     option="prim"):
    times_measurements = []
    times_measurements_2 = []
    dist_1 = np.copy(dist_matrix)
    dist_2 = np.copy(dist_matrix)
    best_greedy = np.inf
    best_clusters_greedy = []
    best_steepest = np.inf
    best_clusters_steepest = []
    costs_greedy = []
    costs_steepest = []
    clusters_before_greedy = []
    clusters_before_steepest = []

    for i in range(steps_for_time_measurements):
        clusters = np.ones(len(data), dtype=np.int32) * (-1)
        if option == "prim":
            result = create_n_trees_prim(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "kruskal":
            result = create_n_trees_kruskal(data, dist_matrix, 20, 1)
            clusters = create_clusters_from_tree(data, result)
        elif option == "random":
            clusters = random_groups(data.shape[0])
        clusters_2 = np.copy(clusters)
        clusters_before = np.copy(clusters)
        measurement = time_measure(run_algorithm,
                                   (clusters, dist_1, neighbourhood, "greedy"))
        times_measurements.append(measurement)
        measurement2 = time_measure(
            run_algorithm, (clusters_2, dist_2, neighbourhood, "steepest"))
        times_measurements_2.append(measurement2)
        cost = sum(count_costs(clusters, dist_1, 20)) / 20
        cost2 = sum(count_costs(clusters_2, dist_2, 20)) / 20
        costs_greedy.append(cost)
        costs_steepest.append(cost2)
        if cost < best_greedy:
            best_greedy = cost
            best_clusters_greedy = clusters
            clusters_before_greedy = clusters_before
        if cost2 < best_steepest:
            best_steepest = cost2
            best_clusters_steepest = clusters_2
            clusters_before_steepest = clusters_before
    print(option)
    print(np.max(best_clusters_steepest))
    print(np.max(best_clusters_greedy))
    print(
        f"The lowest cost for greedy local search with {option} initial data is {min(costs_greedy)}, "
        f"the highest is {max(costs_greedy)}, the average is {sum(costs_greedy)/len(costs_greedy)}.\n"
        f"The lowest cost for steepest local search is {min(costs_steepest)}, "
        f"the highest is {max(costs_steepest)}, the average is {sum(costs_steepest)/len(costs_steepest)}."
    )
    print(
        f"Time measurements over {steps_for_time_measurements} runs of the greedy algorithm: "
        f"min: {min(times_measurements)} seconds, max: {max(times_measurements)} seconds, "
        f"avg: {sum(times_measurements) / len(times_measurements)} seconds")
    print(
        f"Time measurements over {steps_for_time_measurements} runs of the steepest algorithm: "
        f"min: {min(times_measurements_2)} seconds, max: {max(times_measurements_2)} seconds, "
        f"avg: {sum(times_measurements_2) / len(times_measurements_2)} seconds")
    draw_scatter(data, best_clusters_greedy, True)
    draw_scatter(data, best_clusters_steepest, True)
    draw_scatter(data, clusters_before_greedy, False)
    draw_scatter(data, clusters_before_steepest, False)