# Exemplo n.º 1
# 0
def main():
    """Cluster the fixed data set 'datamakers.data_set_1' with k=3 using
    CategoryPoint2D points and print the best result found.

    Fixes vs. original: removed the unused RANDOM_POINT_COUNT constant
    (this example loads a static data set, nothing is generated),
    normalized `def main() :` / `print (...)` spacing to PEP 8.
    """
    CLUSTER_COUNT = 3
    REPEAT_COUNT = 50

    # Resolve the point class by dotted name, then load the static data
    # set for that class.
    cls = datareader.get_method('categorypoint2d.CategoryPoint2D')
    data = datareader.get_data('datamakers.data_set_1', cls)

    solver1 = ksolver.KSolver(CLUSTER_COUNT, cls)
    # Final argument selects the cluster-initialization mode; 0 is the
    # default mode (1 appears to select max-distance init elsewhere in
    # this file — TODO confirm against ksolver.run).
    results1 = ksolver.run(solver1, data, REPEAT_COUNT, 0)

    print(results1['distances'])
    print(results1)
# Exemplo n.º 2
# 0
def main():
    """Generate a random real-valued 2-D data set, cluster it with both
    initialization modes of the k-solver, and print the resulting
    distance summaries.
    """
    RANDOM_POINT_COUNT = 1000
    CLUSTER_COUNT = 3
    REPEAT_COUNT = 50

    # Resolve the point class and generate the random data set for it.
    point_cls = datareader.get_method('realpoint2d.RealPoint2D')
    points = datareader.get_data('datamakers.gen_data_1', point_cls,
                                 RANDOM_POINT_COUNT, CLUSTER_COUNT)

    solver = ksolver.KSolver(CLUSTER_COUNT, point_cls)

    # Same solver and data, two initialization modes (0 and 1).
    default_init = ksolver.run(solver, points, REPEAT_COUNT, 0)
    max_dist_init = ksolver.run(solver, points, REPEAT_COUNT, 1)

    print(default_init['distances'])
    print(max_dist_init['distances'])
# Exemplo n.º 3
# 0
def main():
    """Command-line entry point: parse clustering options, load/generate
    the data set named on the command line, run the k-solver and print
    the best result.

    Fixes vs. original:
    * ``--init_max_distance`` used ``type=bool``, which is a classic
      argparse bug — ``bool("False")`` is ``True``, so the flag could
      never be turned off from the command line.  It is now a plain
      ``store_true`` switch.
    * ``--clusters`` used ``nargs=1``, so the parsed value was a
      one-element list while the (unwrapped) default stayed a bare int;
      the list leaked into ``KSolver``/``get_data`` below.  ``nargs='?'``
      (as already used by ``--points`` and ``--repeat``) always yields
      an int.
    * ``sys`` is imported locally, matching the local ``argparse``
      import, so ``sys.exit`` cannot raise ``NameError``.
    """
    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument("--points",
                        help="how many points to generate",
                        default=1000,
                        nargs='?',
                        type=int)
    parser.add_argument("--clusters",
                        help="how many clusters",
                        default=3,
                        nargs='?',
                        type=int)
    parser.add_argument(
        "--repeat",
        help="how many times to run algorithm, keeping best result",
        default=50,
        nargs='?',
        type=int)
    parser.add_argument("dataclass", help="name of data class", nargs=1)
    parser.add_argument("datamaker",
                        help="name of data generating function",
                        nargs=1)
    parser.add_argument("--init_max_distance",
                        help="use maximum distance cluster initialization",
                        action='store_true')

    try:
        args = parser.parse_args()
        print(args)
    except IOError as e:
        print(e)
        sys.exit(1)

    # Positional args keep nargs=1, so they arrive as one-element lists.
    cls = datareader.get_method(args.dataclass[0])
    data = datareader.get_data(args.datamaker[0], cls, args.points,
                               args.clusters)
    solver = KSolver(args.clusters, cls)
    # store_true gives a real bool; int() replaces the `and 1 or 0` trick.
    results = run(solver, data, args.repeat, int(args.init_max_distance))

    print(results)
# Exemplo n.º 4
# 0
from __future__ import print_function
from ortools.linear_solver import pywraplp
from ortools.sat.python import cp_model
from datareader import get_data
import numpy as np

# RPQ benchmark instances ("data.000" .. "data.008") and the reference
# (Carlier) makespans they are compared against below.
directory = "rpq"
task_list = ["data.{:03d}".format(n) for n in range(9)]
result_list = [228, 3026, 3665, 3309, 3191, 3618, 3446, 3821, 3634]

for i in range(len(task_list)):
    task_name = task_list[i]
    tasks = get_data(directory, task_name)

    print("----- File: ", task_name, "-----")
    print("Carlier: ", result_list[i])

    # create a model
    model = cp_model.CpModel()

    # max variables value, overcounted
    variables_max_value = 0
    for task in tasks:
        variables_max_value += task.times[0] + task.times[1] + task.times[2]

    # alphas (needed to find the order)
    alphas = np.zeros((len(tasks), len(tasks))).tolist()
    for i in range(len(tasks)):
# Exemplo n.º 5
# 0
import copy
from datareader import get_data
from makespan import makespan, to_natural_order, get_order
from schrage import schrage_n2, schrage_n2_pmtn, schrage_nlogn, schrage_nlogn_pmtn
from random_search import random_search

# Load the task set from the input file (RPQ-style tasks, judging by the
# Schrage solvers used below — TODO confirm against datareader.get_data).
tasks = get_data("in50.txt")

# INITIAL ORDER
# Baseline: makespan of the tasks in their natural (input) order.
init_order = get_order(tasks)
init_makespan = makespan(init_order, tasks)
print("[INIT] makespan: ", init_makespan)

# SCHRAGE ORDER
# O(n^2) Schrage heuristic; returns the schedule order and its runtime.
schrage_n2_order, schrage_n2_time = schrage_n2(tasks)
shrage_n2_makespan = makespan(schrage_n2_order, tasks)  # NOTE(review): "shrage" typo in the name
#print("[SHRAGE N^2] order: ", schrage_n2_order)
print("[SHRAGE N^2] makespan: {}, time: {}" .format(shrage_n2_makespan, schrage_n2_time))

# SCHRAGE ORDER NLOGN
# Heap-based O(n log n) variant of the same heuristic.
schrage_nlogn_order, schrage_nlogn_time = schrage_nlogn(tasks)
schrage_nlogn_makespan = makespan(schrage_nlogn_order, tasks)
#print("[SHRAGE NLOGN] order: ", schrage_nlogn_order)
print("[SHRAGE NLOGN] makespan: {}, time: {}" .format(schrage_nlogn_makespan, schrage_nlogn_time))

#SCHRAGE ORDER N2 PMTN
# Preemptive (PMTN) variants return the makespan directly as well.
schrage_n2_ptmn_makespan, schrage_n2_ptmn_order, schrage_n2_ptmn_time = schrage_n2_pmtn(tasks)
print("[SHRAGE N^2 PMTN] makespan: {}, time: {}" .format(schrage_n2_ptmn_makespan, schrage_n2_ptmn_time))

#SCHRAGE ORDER NLOGN PMTN
# NOTE(review): the result below is never printed in this excerpt —
# presumably used further down in the original file.
schrage_nlogn_pmtn_makespan, schrage_nlogn_pmtn_order, schrage_nlogn_pmtn_time = schrage_nlogn_pmtn(tasks)
# Simulated-annealing parameters (exact semantics defined by
# simulated_annealing, which is not imported in this excerpt).
init_temp = 1000      # starting temperature
final_temp = 0.1      # stop once the temperature falls to this value — TODO confirm
u = 0.98              # cooling factor, presumably geometric — TODO confirm
cooling_fcn_type = 0  # selects which cooling schedule to use
insert = 0            # move-generation parameter passed straight through

# Per-instance results collected for each cooling function.
fcn0_times = []
fcn0_cmax = []
fcn1_times = []
fcn1_cmax = []

move_type = 0

# for FCN0
# NOTE(review): `sets` and `simulated_annealing` are not defined in this
# excerpt — they must come from surrounding context.  The loop variable
# `set` also shadows the builtin of the same name.
for set in sets:
    tasks, numb_of_machines = get_data(set)

    cooling_fcn_type = 0

    # deepcopy keeps the loaded task list intact for the makespan check below.
    simulated_annealing_order, iterations, sa_time = simulated_annealing(copy.deepcopy(tasks), numb_of_machines,
                                                                         init_temp, final_temp, u, cooling_fcn_type,
                                                                         move_type, insert)
    simulated_annealing_makespan = makespan(simulated_annealing_order, tasks, numb_of_machines)
    fcn0_times.append(sa_time)
    fcn0_cmax.append(simulated_annealing_makespan)

# for FCN1
for set in sets:
    tasks, numb_of_machines = get_data(set)

    cooling_fcn_type = 1
# Exemplo n.º 7
# 0
import copy
from datareader import get_data
from makespan import makespan, to_natural_order, get_order
from simulated_annealing import simulated_annealing
from improved_simulated_annealing import improved_simulated_annealing
from neh import neh


# Load one flow-shop instance: the task list plus the machine count.
tasks, numb_of_machines = get_data("data.001")

# INITIAL ORDER
# Baseline: makespan of the input order (reported with time 0).
init_order = get_order(tasks)
init_makespan = makespan(init_order, tasks, numb_of_machines)
print("[INIT] makespan: {}, time: {}" .format(init_makespan, 0))

# NEH ORDER
# NEH constructive heuristic; deepcopy preserves the original task list.
neh_order, neh_time = neh(copy.deepcopy(tasks), numb_of_machines)
neh_makespan = makespan(neh_order, tasks, numb_of_machines)
print("[NEH ] makespan: {}, time: {}" .format(neh_makespan, neh_time))

# SIMULATED ANNEALING ORDER
# Annealing parameters; semantics defined by simulated_annealing (imported above).
init_temp = 5000      # starting temperature
final_temp = 0.1      # stop threshold
u = 0.98              # cooling factor, presumably geometric — TODO confirm
cooling_fcn_type = 0
move_type = 0
insert = 0

simulated_annealing_order, iterations_sa, sa_time = simulated_annealing(copy.deepcopy(tasks), numb_of_machines, init_temp,
                                                                     final_temp, u, cooling_fcn_type, move_type, insert)
# NOTE(review): computed but never printed in this excerpt — presumably
# used further down in the original file.
simulated_annealing_makespan = makespan(simulated_annealing_order, tasks, numb_of_machines)

# Benchmark instances ("data.000" .. "data.008") and their reference
# (Carlier) makespans for the threads test below.
task_list = ["data.{:03d}".format(n) for n in range(9)]
result_list = [228, 3026, 3665, 3309, 3191, 3618, 3446, 3821, 3634]

# Per-instance measurements, filled in by the benchmark loop.
deep_left_sequence_makespans = []
deep_left_parallel_makespans = []

deep_left_sequence_times = []
deep_left_parallel_times = []

for i in range(0, len(task_list)):
    tasks = get_data(task_list[i])
    result = result_list[i]

    print("THREADS TEST: ", task_list[i])
    print("-")

    # ------------------------------------------------ WIDE LEFT
    u, pi = schrage(copy.deepcopy(tasks))
    ub = 999999999
    lb = 0

    pi_list = [pi]

    # ALGORITHM
    start = timer()
    carlier_wl_parallel(copy.deepcopy(tasks))