def neh(tasks, numb_of_machines):
    start = timer()
    # step 1: find omegas(j)
    omegas = []
    for task in tasks:
        omegas.append(sum(task.times))
    # step 2: sort in descending order (get sorted order)
    omegas_order = np.argsort(-np.array(omegas)).tolist()
    # steps 3, 4: repeat n times (n = number of tasks)
    solution_order = []
    for i in omegas_order:
        # (3) take the task with the highest omega value
        # (4) try every insertion position and keep the one with the lowest makespan
        lowest_makespan = float("inf")
        lowest_makespan_sequence = []
        sequences = get_sequences(i, solution_order)
        for sequence in sequences:
            sequence_makespan = makespan(sequence, tasks, numb_of_machines)
            if sequence_makespan < lowest_makespan:
                lowest_makespan = sequence_makespan
                lowest_makespan_sequence = sequence
        solution_order = lowest_makespan_sequence
    stop = timer()
    return solution_order, (stop - start) * 1000
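# A minimal sketch of the get_sequences() helper that neh() above relies on
# (assumed behaviour, not shown in this snippet): it returns every sequence
# obtained by inserting the new task at each possible position of the current
# partial order.
def get_sequences(task, order):
    return [order[:i] + [task] + order[i:] for i in range(len(order) + 1)]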
def get_probability(prev_order, new_order, tasks, temperature):
    prev_order_cmax = makespan(prev_order, tasks)
    new_order_cmax = makespan(new_order, tasks)
    if new_order_cmax < prev_order_cmax:
        return 1
    return exp((prev_order_cmax - new_order_cmax) / temperature)
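# A minimal usage sketch (an assumption, not part of the original code): the
# Metropolis-style acceptance test a simulated-annealing loop would typically
# build around get_probability(). Improving moves get probability 1, worsening
# moves exp(-delta / temperature).
import random

def accept_move(prev_order, new_order, tasks, temperature):
    return random.random() < get_probability(prev_order, new_order, tasks, temperature)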
def generating(iter):
    number_of_machines = int(input("How many machines?: "))
    number_of_tasks = int(input("How many tasks?: "))
    times = []
    durationBruteforce = []
    durationJohnson = []
    i = 0
    while i < iter:
        generatedTasks = []
        for cnt_tasks in range(0, number_of_tasks):
            rows = []
            for cnt_machines in range(0, number_of_machines):
                rows.append(int(random.uniform(1, 10)))
            print("{}".format(rows))
            generatedTasks.append(Task(cnt_tasks, rows))
        bruteforceOrder, timeBruteforce = bruteforce(
            copy.deepcopy(generatedTasks), number_of_machines)
        johnsonOrder, timeJohnson = johnson(copy.deepcopy(generatedTasks),
                                            number_of_machines)
        durationBruteforce.append(timeBruteforce)
        durationJohnson.append(timeJohnson)
        bruteforceMakespan = makespan(bruteforceOrder, generatedTasks,
                                      number_of_machines)
        johnsonMakespan = makespan(johnsonOrder, generatedTasks,
                                   number_of_machines)
        i += 1
        if johnsonMakespan == bruteforceMakespan:
            times.append(johnsonMakespan)
        else:
            times.append(-1)
    x = PrettyTable()
    print("")
    print("----------------------------------------------------------")
    x.field_names = [
        "No.", "Johnson Makespan", "Correct", "Bruteforce time [ms]",
        "Johnson time [ms]"
    ]
    for k in range(0, iter):
        if times[k] == -1:
            x.add_row([
                k + 1, "{}".format(times[k]), "No",
                "{}".format(durationBruteforce[k]),
                "{}".format(durationJohnson[k])
            ])
        else:
            x.add_row([
                k + 1, "{}".format(times[k]), "Yes",
                "{}".format(durationBruteforce[k]),
                "{}".format(durationJohnson[k])
            ])
    print(x)
def IR4_mod(tasks, solution_order, numb_of_machines):
    maxDiff = 0
    maxTask = solution_order[0]  # fallback if no removal changes the makespan
    baseTime = makespan(solution_order, tasks, numb_of_machines)
    for i in solution_order:
        idx = solution_order.index(i)
        solution_order.remove(i)
        tempTime = makespan(solution_order, tasks, numb_of_machines)
        if baseTime - tempTime > maxDiff:
            maxDiff = baseTime - tempTime
            maxTask = i
        solution_order.insert(idx, i)
    return maxTask
def amelioration_locale(individu, data):
    # Local improvement: evaluate 5 random swap neighbours and keep the best
    # one if it beats the current individual.
    qualite_initiale = makespan(individu, data)
    voisins = []
    for _ in range(5):
        voisin = swap(individu)
        voisins.append((voisin, makespan(voisin, data)))
    meilleurVoisin = choisir_meilleur(voisins)
    if meilleurVoisin[1] < qualite_initiale:
        individu = meilleurVoisin[0]
    return individu
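# A minimal sketch of the neighbourhood helpers assumed above (swap() and
# choisir_meilleur() are not defined in this snippet); shown only to make the
# local-improvement step self-contained.
import random

def swap(individu):
    # Return a copy with two randomly chosen positions exchanged.
    voisin = list(individu)
    i, j = random.sample(range(len(voisin)), 2)
    voisin[i], voisin[j] = voisin[j], voisin[i]
    return voisin

def choisir_meilleur(voisins):
    # Pick the (sequence, makespan) pair with the lowest makespan.
    return min(voisins, key=lambda v: v[1])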
def neh(jobMatrix):
    jobs_with_total_times = [(job_id, sum(job))
                             for job_id, job in enumerate(jobMatrix.T)]
    order = []
    for job in sorted(jobs_with_total_times, key=lambda x: x[1], reverse=True):
        candidates = []
        for i in range(0, len(order) + 1):
            candidate = order[:i] + [job[0]] + order[i:]
            candidates.append((candidate, makespan(candidate, jobMatrix)))
        order = min(candidates, key=lambda x: x[1])[0]
    return (order, makespan(order, jobMatrix))
def bruteforce(tasks, numb_of_machines):
    start = timer()
    best_order = get_order(tasks)
    best_makespan = makespan(best_order, tasks, numb_of_machines)
    for p in permute(get_order(tasks)):
        p_makespan = makespan(p, tasks, numb_of_machines)
        print("order: {}".format(p))
        print("makespan: {}".format(p_makespan))
        print("---")
        if p_makespan < best_makespan:
            best_order = list(p)
            best_makespan = p_makespan
    stop = timer()
    return best_order, (stop - start) * 1000
def plotGantt(jobMatrix, jobOrder, nom, nb=7):
    fig, ax = plt.subplots()
    nb_machine, nb_jobs = jobMatrix.shape
    ganttTable = _calc_makespan(jobMatrix, jobOrder, True)
    colors = [
        '#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4',
        '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff',
        '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1',
        '#000075', '#808080', '#ffffff', '#000000', '#ff0000', '#7f0000',
        '#400000', '#ff8080', '#7f4040', '#403030', '#bf8f00', '#7f7040',
        '#7fff00', '#508020', '#dfffbf', '#10401c', '#7fff9f', '#208080',
        '#80ffff', '#204040', '#002080', '#101c40', '#809fff', '#bfcfff',
        '#9f40ff', '#604080', '#dfbfff', '#383040', '#400030', '#bf309b',
        '#806078'
    ]
    for i in range(nb_jobs):
        ax.broken_barh(
            [(ganttTable[i, 2 * j],
              ganttTable[i, 2 * j + 1] - ganttTable[i, 2 * j])
             for j in range(nb_machine)],
            (10 * i, 10),
            facecolors=colors[:nb_machine])
    ax.set_xlabel('Time')
    ax.set_yticks([i * 10 for i in range(nb)])
    tasklist = ["Task" + str(x) for x in jobOrder]
    ax.set_yticklabels(tasklist)
    ax.grid(True)
    plt.title('Makespan = {}'.format(makespan(jobOrder, jobMatrix)))
    plt.show()
def test(file, testcase):
    start = time()
    result = makespan(file(testcase), testcase)
    end = time()
    print("Best Makespan =", result)
    return (result, end - start)
def random_search(tasks, max_time):
    numb_of_experiments = 100  # experiments performed per loop
    best_solution = ""
    best_cmax = 10000000
    t0 = time.time()
    total_experiments = 0
    rs = get_random_schedule(tasks)
    while True:
        start = time.time()
        for i in range(0, numb_of_experiments):
            random.shuffle(rs)
            cmax = makespan(rs, tasks)
            if cmax < best_cmax:
                best_cmax = cmax
                best_solution = copy.deepcopy(rs)
        total_experiments += numb_of_experiments
        if max_time and time.time() - t0 > max_time:
            break
        t = time.time() - start
        # if t > 0:
        #     print("Best: ", best_cmax, ", time: ", time.time() - t0)
    return best_solution
def neh(times):
    jobs_with_total_times = [(job_id, sum(job))
                             for job_id, job in enumerate(times)]
    order = []
    for job in sorted(jobs_with_total_times, key=lambda x: x[1], reverse=True):
        candidates = []
        for i in range(0, len(order) + 1):
            candidate = order[:i] + [job[0]] + order[i:]
            candidates.append((candidate, makespan(candidate, times)))
        order = min(candidates, key=lambda x: x[1])[0]
    return order
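# A minimal sketch (assumed, not shown in this snippet) of the two-argument
# makespan() these heuristics call for the jobs-x-machines list layout used
# above: times[job][machine] is a processing time and order is a permutation
# of job indices.
def makespan(order, times):
    machine_count = len(times[0])
    # completion[m] = completion time of the last scheduled job on machine m
    completion = [0] * machine_count
    for job in order:
        completion[0] += times[job][0]
        for m in range(1, machine_count):
            completion[m] = max(completion[m], completion[m - 1]) + times[job][m]
    return completion[-1]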
def get_probability_sa(prev_order, new_order, tasks, numb_of_machines,
                       temperature, move_type):
    prev_order_cmax = makespan(prev_order, tasks, numb_of_machines)
    new_order_cmax = makespan(new_order, tasks, numb_of_machines)
    if move_type == 0:
        if new_order_cmax < prev_order_cmax:
            return 1
        return 1 / (1 + 2 * exp((prev_order_cmax - new_order_cmax) / temperature))
    elif move_type == 1:
        if new_order_cmax < prev_order_cmax:
            return 0
        return 1 / (1 + 2 * exp((prev_order_cmax - new_order_cmax) / temperature))
    elif move_type == 2:
        if prev_order_cmax != new_order_cmax:
            if new_order_cmax < prev_order_cmax:
                return 1
            return 1 / (1 + 2 * exp((prev_order_cmax - new_order_cmax) / temperature))
        else:
            return 0
    return -1
def cds(times):
    jobs_count = len(times)
    machine_count = len(times[0])
    perms = []
    times_merged = [[0, sum(job_times)] for job_times in times]
    for i in range(0, machine_count - 1):
        for k in range(0, jobs_count):
            times_merged[k][0] += times[k][i]
            times_merged[k][1] -= times[k][i]
        perms.append(johnson(times_merged))
    return min(perms, key=lambda p: makespan(p, times))
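# A minimal sketch of the two-machine johnson() rule that cds() above assumes
# (assumed signature: a list of [machine1_time, machine2_time] pairs, returning
# a job permutation). Jobs with machine1_time < machine2_time are scheduled
# first in ascending order of machine1_time; the rest go last in descending
# order of machine2_time.
def johnson(two_machine_times):
    first, last = [], []
    for job_id, (m1, m2) in enumerate(two_machine_times):
        if m1 < m2:
            first.append((job_id, m1))
        else:
            last.append((job_id, m2))
    first.sort(key=lambda x: x[1])
    last.sort(key=lambda x: x[1], reverse=True)
    return [job_id for job_id, _ in first + last]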
def CDS(jobMatrix):
    times = jobMatrix.T
    jobs_count = len(times)
    machine_count = len(times[0])
    merged_times = [[0, sum(j)] for j in times]
    perms = []
    for i in range(0, machine_count - 1):
        for j in range(0, jobs_count):
            merged_times[j][0] += times[j][i]
            merged_times[j][1] -= times[j][i]
        perms.append(Johnson(np.array(merged_times).T))
    result = min(perms, key=lambda p: _calc_makespan(jobMatrix, np.array(p)))
    return (result, makespan(result, jobMatrix))
def do_schrage(tasks):
    ready_tasks = []
    final_list = []
    unready_tasks = copy.deepcopy(tasks)
    t = min(extract_column(unready_tasks, 0))
    while ready_tasks or unready_tasks:
        while unready_tasks and min(extract_column(unready_tasks, 0)) <= t:
            j = numpy.argmin(extract_column(unready_tasks, 0))
            ready_tasks.append(unready_tasks.pop(j))
        if not ready_tasks:
            t = min(extract_column(unready_tasks, 0))
        else:
            j = numpy.argmax(extract_column(ready_tasks, 2))
            final_list.append(ready_tasks.pop(j))
            t += final_list[-1].times[1]
    return makespan.makespan(final_list), final_list
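# A minimal sketch of the extract_column() helper assumed above: each task is
# expected to carry a times tuple (r, p, q), and the helper collects the k-th
# entry across a list of tasks.
def extract_column(tasks, k):
    return [task.times[k] for task in tasks]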
def do_carlier(carlier_object):
    carlier_object.U, carlier_object.pi = schrage.do_schrage(
        carlier_object.tasks)
    if carlier_object.U < carlier_object.UB:
        carlier_object.UB = copy.deepcopy(carlier_object.U)
        carlier_object.opt_pi = copy.deepcopy(carlier_object.pi)

    # b index
    potential_b = []
    for j in range(0, len(carlier_object.tasks)):
        if makespan.makespan(carlier_object.pi) == (
                carlier_object.pi[j].times[2] +
                makespan.makespan_carlier(carlier_object.pi)[j]):
            potential_b.append(carlier_object.pi[j].id)
    carlier_object.b = max(potential_b)

    # a index
    for j in range(0, len(carlier_object.tasks)):
        sum_of = 0
        for i in range(j, find_by_id(carlier_object, carlier_object.b) + 1):
            sum_of += carlier_object.pi[i].times[1]
        if carlier_object.U == (
                sum_of + carlier_object.pi[j].times[0] +
                carlier_object.tasks[carlier_object.b].times[2]):
            carlier_object.a = carlier_object.pi[j].id
            break

    carlier_object.a = find_by_id(carlier_object, carlier_object.a)
    carlier_object.b = find_by_id(carlier_object, carlier_object.b)

    # c index
    potential_c = []
    for j in range(carlier_object.a, carlier_object.b + 1):
        if carlier_object.pi[j].times[2] < carlier_object.pi[
                carlier_object.b].times[2]:
            potential_c.append(j)
    if potential_c:
        carlier_object.c = max(potential_c)
    else:
        return carlier_object.U

    # K block: the set of positions (indices into pi)
    K = []
    for j in range(carlier_object.c + 1, carlier_object.b + 1):
        K.append(j)

    # K u {c} block: the set of positions (indices into pi)
    K_ = []
    for j in range(carlier_object.c, carlier_object.b + 1):
        K_.append(j)

    # r(K), q(K), p(K), h(K)
    h_k, r_k, p_k, q_k = calc_k(carlier_object, K)

    # remember tasks[c].times[0]
    old_r_pi = copy.deepcopy(
        carlier_object.tasks[carlier_object.pi[carlier_object.c].id].times[0])
    # new tasks[c].times[0]
    carlier_object.tasks[carlier_object.pi[
        carlier_object.c].id].times[0] = max(
            carlier_object.tasks[carlier_object.pi[
                carlier_object.c].id].times[0], r_k + p_k)

    carlier_object.LB = schrage.do_schrage_pmtn(carlier_object.tasks)[0]

    # step 16: new LB
    h_k_c, r_k_c, p_k_c, q_k_c = calc_k(carlier_object, K_)
    carlier_object.LB = max(h_k, h_k_c, carlier_object.LB)

    # left branch
    if carlier_object.LB < carlier_object.UB:
        do_carlier(carlier_object)

    # restore tasks[c].times[0]
    carlier_object.tasks[carlier_object.pi[
        carlier_object.c].id].times[0] = old_r_pi

    # remember tasks[c].times[2]
    old_q_pi = copy.deepcopy(carlier_object.pi[carlier_object.c].times[2])
    # new q time for tasks[c].times[2]
    carlier_object.tasks[carlier_object.pi[
        carlier_object.c].id].times[2] = max(
            carlier_object.tasks[carlier_object.pi[
                carlier_object.c].id].times[2], q_k + p_k)

    # step 22: new LB
    carlier_object.LB = schrage.do_schrage_pmtn(carlier_object.tasks)[0]
    # step 23: max LB
    h_k_c, r_k_c, p_k_c, q_k_c = calc_k(carlier_object, K_)
    carlier_object.LB = max(h_k, h_k_c, carlier_object.LB)

    # step 24: right branch
    if carlier_object.LB < carlier_object.UB:
        do_carlier(carlier_object)

    # step 27: restore tasks[c].times[2]
    carlier_object.tasks[carlier_object.pi[
        carlier_object.c].id].times[2] = old_q_pi

    return carlier_object.U
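# A minimal sketch of the calc_k() helper assumed by do_carlier() above: for a
# block K of positions in pi it returns the classic Carlier quantities, with
# r(K) the smallest release time, p(K) the total processing time, q(K) the
# smallest delivery time and h(K) = r(K) + p(K) + q(K) (task times = (r, p, q)).
def calc_k(carlier_object, K):
    r_k = min(carlier_object.pi[j].times[0] for j in K)
    p_k = sum(carlier_object.pi[j].times[1] for j in K)
    q_k = min(carlier_object.pi[j].times[2] for j in K)
    h_k = r_k + p_k + q_k
    return h_k, r_k, p_k, q_k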
import copy
from datareader import get_data
from makespan import makespan, to_natural_order, get_order
from schrage import schrage_n2, schrage_n2_pmtn, schrage_nlogn, schrage_nlogn_pmtn
from random_search import random_search

tasks = get_data("in50.txt")

# INITIAL ORDER
init_order = get_order(tasks)
init_makespan = makespan(init_order, tasks)
print("[INIT] makespan: ", init_makespan)

# SCHRAGE ORDER
schrage_n2_order, schrage_n2_time = schrage_n2(tasks)
schrage_n2_makespan = makespan(schrage_n2_order, tasks)
# print("[SCHRAGE N^2] order: ", schrage_n2_order)
print("[SCHRAGE N^2] makespan: {}, time: {}"
      .format(schrage_n2_makespan, schrage_n2_time))

# SCHRAGE ORDER NLOGN
schrage_nlogn_order, schrage_nlogn_time = schrage_nlogn(tasks)
schrage_nlogn_makespan = makespan(schrage_nlogn_order, tasks)
# print("[SCHRAGE NLOGN] order: ", schrage_nlogn_order)
print("[SCHRAGE NLOGN] makespan: {}, time: {}"
      .format(schrage_nlogn_makespan, schrage_nlogn_time))

# SCHRAGE ORDER N2 PMTN
schrage_n2_pmtn_makespan, schrage_n2_pmtn_order, schrage_n2_pmtn_time = schrage_n2_pmtn(tasks)
print("[SCHRAGE N^2 PMTN] makespan: {}, time: {}"
      .format(schrage_n2_pmtn_makespan, schrage_n2_pmtn_time))

# SCHRAGE ORDER NLOGN PMTN
schrage_nlogn_pmtn_makespan, schrage_nlogn_pmtn_order, schrage_nlogn_pmtn_time = schrage_nlogn_pmtn(tasks)
fcn0_times = []
fcn0_cmax = []
fcn1_times = []
fcn1_cmax = []
move_type = 0

# for FCN0
for set in sets:
    tasks, numb_of_machines = get_data(set)
    cooling_fcn_type = 0
    simulated_annealing_order, iterations, sa_time = simulated_annealing(
        copy.deepcopy(tasks), numb_of_machines, init_temp, final_temp, u,
        cooling_fcn_type, move_type, insert)
    simulated_annealing_makespan = makespan(simulated_annealing_order, tasks,
                                            numb_of_machines)
    fcn0_times.append(sa_time)
    fcn0_cmax.append(simulated_annealing_makespan)

# for FCN1
for set in sets:
    tasks, numb_of_machines = get_data(set)
    cooling_fcn_type = 1
    simulated_annealing_order, iterations, sa_time = simulated_annealing(
        copy.deepcopy(tasks), numb_of_machines, init_temp, final_temp, u,
        cooling_fcn_type, move_type, insert)
    simulated_annealing_makespan = makespan(simulated_annealing_order, tasks,
                                            numb_of_machines)
    fcn1_times.append(sa_time)
    fcn1_cmax.append(simulated_annealing_makespan)
def test(f, testcase):
    start = time()
    result = makespan(f(testcase), testcase)
    end = time()
    return (result, end - start)
from prettytable import PrettyTable as tabelki

import makespan
import read
import schrage

x = read.Reader()
x.read("data//in50.txt")
y = schrage.Schrage(x.my_data)
y_1 = schrage.Schrage(x.my_data)

t = tabelki(['Algorithm', 'in50', 'in100', 'in200', 'Sum'])

sigma = y.do_schrage()
c_max = makespan.makespan(sigma)
c_max_pmtn = y_1.do_schrage_pmtn()

x.read("data//in100.txt")
y_2 = schrage.Schrage(x.my_data)
y_3 = schrage.Schrage(x.my_data)
sigma = y_2.do_schrage()
c_max_1 = makespan.makespan(sigma)
c_max_pmtn_1 = y_3.do_schrage_pmtn()

x.read("data//in200.txt")
y_4 = schrage.Schrage(x.my_data)
y_5 = schrage.Schrage(x.my_data)
sigma = y_4.do_schrage()
c_max_2 = makespan.makespan(sigma)
def evaluer_qualite(population, data):
    return [(individual, makespan(individual, data)) for individual in population]
taskset = []
x = []
for i in range(5, 300):
    taskset.append(create_instances(i))
    x.append(i)

iter = 0
for i in range(0, len(taskset)):
    tasks = copy.deepcopy(taskset[i])

    # ------------------------------------------------ SCHRAGE
    # SCHRAGE ORDER
    schrage_n2_order, schrage_n2_time = schrage_n2(tasks)
    schrage_n2_makespan = makespan(schrage_n2_order, tasks)
    # print("[SCHRAGE N^2] order: ", schrage_n2_order)
    schrage_makespans.append(schrage_n2_makespan)
    schrage_times.append(schrage_n2_time)

    # ------------------------------------------------ SCHRAGE PMTN
    # SCHRAGE ORDER N2 PMTN
    schrage_n2_pmtn_makespan, schrage_n2_pmtn_order, schrage_n2_pmtn_time = schrage_n2_pmtn(
        tasks)
    schrage_pmtn_makespans.append(schrage_n2_pmtn_makespan)
    schrage_pmtn_times.append(schrage_n2_pmtn_time)

    # ------------------------------------------------ DEEP LEFT
    ub = 999999999
    u, pi = schrage(copy.deepcopy(tasks))
def optimal(times):
    job_count = len(times)
    return min(permutations(range(job_count)), key=lambda x: makespan(x, times))
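# A small usage sketch with made-up data: exhaustive search over permutations
# is only practical for a handful of jobs, since the search space grows as n!.
# times_example[job][machine] is a processing time; makespan() comes from the
# surrounding module.
times_example = [
    [3, 2, 4],  # job 0 on machines 0..2
    [1, 4, 2],  # job 1
    [5, 1, 3],  # job 2
]
best_order = optimal(times_example)
print(best_order, makespan(best_order, times_example))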
def evaluate_fitness(population, times):
    return [(individual, makespan(individual, times)) for individual in population]
def simulated_annealing(matrice, Ti=790, Tf=3, alpha=0.93):
    # matrice is a machines-x-jobs matrix of processing times
    nb_machines, job_count = matrice.shape
    n = job_count

    s = time.time()

    # Initialize the primary sequence with NEH
    old_seq = neh(matrice)
    old_seq = old_seq[0]
    old_makeSpan = makespan(old_seq, matrice)

    new_seq = []
    delta_mk1 = 0

    # Initialize the temperature
    T = Ti
    temp_cycle = 0  # number of cooling iterations

    while T >= Tf:
        # insertion move: remove a random job and reinsert it at a random position
        new_seq = old_seq.copy()
        job = new_seq.pop(randint(0, n - 1))
        new_seq.insert(randint(0, n - 1), job)
        new_make_span = makespan(new_seq, matrice)
        delta_mk1 = new_make_span - old_makeSpan
        if delta_mk1 <= 0:
            old_seq = new_seq
            old_makeSpan = new_make_span
        else:
            Aprob = np.exp(-(delta_mk1 / T))
            if Aprob > np.random.uniform(0.5, 0.9):
                old_seq = new_seq
                old_makeSpan = new_make_span
            else:
                # the solution is discarded
                pass
        T = T * alpha
        temp_cycle += 1

    e = time.time()

    # Result sequence
    seq = old_seq
    schedules = np.zeros((nb_machines, job_count), dtype=dict)

    # schedule the first job of the sequence on every machine
    task = {"name": "job_{}".format(seq[0] + 1),
            "start_time": 0,
            "end_time": matrice[0][seq[0]]}
    schedules[0][0] = task
    for m_id in range(1, nb_machines):
        start_t = schedules[m_id - 1][0]["end_time"]
        end_t = start_t + matrice[m_id][seq[0]]
        task = {"name": "job_{}".format(seq[0] + 1),
                "start_time": start_t,
                "end_time": end_t}
        schedules[m_id][0] = task

    # schedule the remaining jobs of the sequence
    for index, job_id in enumerate(seq[1:]):
        start_t = schedules[0][index]["end_time"]
        end_t = start_t + matrice[0][job_id]
        task = {"name": "job_{}".format(job_id + 1),
                "start_time": start_t,
                "end_time": end_t}
        schedules[0][index + 1] = task
        for m_id in range(1, nb_machines):
            start_t = max(schedules[m_id][index]["end_time"],
                          schedules[m_id - 1][index + 1]["end_time"])
            end_t = start_t + matrice[m_id][job_id]
            task = {"name": "job_{}".format(job_id + 1),
                    "start_time": start_t,
                    "end_time": end_t}
            schedules[m_id][index + 1] = task

    t_t = e - s
    return seq, old_makeSpan
import copy
from datareader import get_data
from makespan import makespan, to_natural_order, get_order
from simulated_annealing import simulated_annealing
from improved_simulated_annealing import improved_simulated_annealing
from neh import neh

tasks, numb_of_machines = get_data("data.001")

# INITIAL ORDER
init_order = get_order(tasks)
init_makespan = makespan(init_order, tasks, numb_of_machines)
print("[INIT] makespan: {}, time: {}".format(init_makespan, 0))

# NEH ORDER
neh_order, neh_time = neh(copy.deepcopy(tasks), numb_of_machines)
neh_makespan = makespan(neh_order, tasks, numb_of_machines)
print("[NEH ] makespan: {}, time: {}".format(neh_makespan, neh_time))

# SIMULATED ANNEALING ORDER
init_temp = 5000
final_temp = 0.1
u = 0.98
cooling_fcn_type = 0
move_type = 0
insert = 0
simulated_annealing_order, iterations_sa, sa_time = simulated_annealing(
    copy.deepcopy(tasks), numb_of_machines, init_temp, final_temp, u,
    cooling_fcn_type, move_type, insert)
simulated_annealing_makespan = makespan(simulated_annealing_order, tasks,
                                        numb_of_machines)
def opt(times):
    job_count = len(times)
    print("Machine times: \n", times, '\n')
    z = min(permutations(range(job_count)), key=lambda x: makespan(x, times))
    return z