def test_difference():
    frames = read_flow_shop_instances(FLOW_SHOP_INSTANCE_DIR
                                      + '/20jobs_5machines.txt')
    frame = frames[0]

    palmer_solution = palmer_heuristics(frame)
    # `local_search` does not modify its argument, so rebind the result
    palmer_solution, _ = local_search(frame, palmer_solution)
    print("\npalmer solution: ", palmer_solution,
          "\npalmer end time: %s\n\n"
          % str(compute_end_time(frame, palmer_solution)))

    cds_solution = cds_heuristics(frame)
    cds_solution, _ = local_search(frame, cds_solution)
    print("cds solution: ", cds_solution,
          "\ncds end time: %s\n\n"
          % str(compute_end_time(frame, cds_solution)))

    neh_solution = neh_heuristics(frame)
    neh_solution, _ = local_search(frame, neh_solution)
    print("neh solution: ", neh_solution,
          "\nneh end time: %s\n\n"
          % str(compute_end_time(frame, neh_solution)))

    liu_solution = liu_reeves_heuristics(frame, 20)
    liu_solution, _ = local_search(frame, liu_solution)
    print("liu solution: ", liu_solution,
          "\nliu end time: %s\n\n"
          % str(compute_end_time(frame, liu_solution)))
def percentage_deviation_using_upper_bound(fst_heuristic: object,
                                           fst_args: dict,
                                           frames: list) -> float:
    """
    Average percentage deviation of the heuristic's makespans from the
    reference values stored in the `upper_bound` property of each frame.

    Parameters
    ----------
    fst_heuristic: object
        function callback
    fst_args: dict
        named arguments for `fst_heuristic`
    frames: list
        list of `JobSchedulingFrame` objects

    Returns
    -------
    average_deviation: float

    Notes
    -----
    Averaging occurs by the count of frames.

    """
    solutions_ratio = 0.
    for frame in frames:
        fst_solution = fst_heuristic(frame, **fst_args)
        fst_end_time = compute_end_time(frame, fst_solution)

        end_time_diff = fst_end_time - frame.upper_bound
        solutions_ratio += end_time_diff / frame.upper_bound

    return solutions_ratio / len(frames) * 100
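# Illustrative usage sketch (not part of the library API). It assumes that
# `read_flow_shop_instances`, `FLOW_SHOP_INSTANCE_DIR` and `palmer_heuristics`
# are importable in the current module, as they are in the tests of this
# repository; adjust the names if they live in other modules.
def example_percentage_deviation_using_upper_bound():
    frames = read_flow_shop_instances(FLOW_SHOP_INSTANCE_DIR
                                      + '/20jobs_5machines.txt')
    # average relative gap (in percent) between Palmer's makespans and the
    # upper bounds shipped with the benchmark file
    deviation = percentage_deviation_using_upper_bound(palmer_heuristics,
                                                       {}, frames)
    print("average deviation from upper bounds: %.2f%%" % deviation)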
def fgh_heuristic(frame: JobSchedulingFrame, count_alpha: int = 1) -> list:
    init_jobs = [idx_job for idx_job in range(frame.count_jobs)]

    sum_times = [
        frame.get_sum_processing_time(idx_job) for idx_job in init_jobs
    ]

    tetta = sum(sum_times) / frame.count_jobs
    time_min = min(sum_times)
    time_max = max(sum_times)

    alpha_min = time_min / (time_min + tetta)
    alpha_max = time_max / (time_max + tetta)
    period = (alpha_max - alpha_min) / count_alpha

    solutions = []
    for i in range(count_alpha):
        alpha = alpha_max - period * i
        init_jobs.sort(
            key=lambda idx_job: fgh_index(sum_times[idx_job], alpha, tetta),
            reverse=True)
        solutions.append(copy(init_jobs))

    for i in range(count_alpha):
        solutions[i], _ = local_search_partitial_sequence(frame, solutions[i])

    solutions.sort(key=lambda solution: compute_end_time(frame, solution))
    return solutions[0]
def local_search_partitial_sequence(frame: JobSchedulingFrame,
                                    init_jobs: list) -> list:
    """
    For each job in `init_jobs`: take the next job, insert it in all
    possible positions of the partial sequence and keep the best position
    (i.e. the one with minimum makespan) as the current partial sequence.

    Parameters
    ----------
    frame: JobSchedulingFrame
    init_jobs: list

    Returns
    -------
    result of local search: list, bool
        the boolean flag is True if the found solution is better
        than `init_jobs`

    Notes
    -----
    `init_jobs` is not modified

    """
    # start from the first job of `init_jobs` (for NEH-like callers,
    # the job with max total processing time)
    solution = [init_jobs[0]]
    better_than_init_jobs = False

    # makespan of the initial sequence, used as the reference value
    time_compare = compute_end_time(frame, init_jobs)

    # local search by insertion
    for position_job, idx_job in enumerate(init_jobs[1:], 1):
        min_end_time = time_compare
        best_insert_place = position_job
        for insert_place in range(position_job + 1):
            solution.insert(insert_place, idx_job)
            end_time = compute_end_time(frame, solution)
            if min_end_time > end_time:
                min_end_time = end_time
                best_insert_place = insert_place

            solution.pop(insert_place)

        solution.insert(best_insert_place, idx_job)

    solution_time = compute_end_time(frame, solution)
    better_than_init_jobs = solution_time < time_compare
    return solution, better_than_init_jobs
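# Minimal sketch of how the insertion-based local search is meant to be
# called (illustrative only; the 6x2 processing-time matrix is the same toy
# instance used in `test_johnson_algorithm`).
def example_local_search_partitial_sequence():
    frame = JobSchedulingFrame([[2, 3], [8, 3], [4, 6],
                                [9, 5], [6, 8], [9, 7]])
    init_jobs = [0, 1, 2, 3, 4, 5]

    solution, improved = local_search_partitial_sequence(frame, init_jobs)

    # `init_jobs` is left untouched; `improved` tells whether the rebuilt
    # sequence beats the makespan of the initial one
    print("makespan before:", compute_end_time(frame, init_jobs),
          "after:", compute_end_time(frame, solution),
          "improved:", improved)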
def test_johnson_algorithm(self):
    frame = JobSchedulingFrame([[2, 3], [8, 3], [4, 6],
                                [9, 5], [6, 8], [9, 7]])

    solution = johnson_algorithm(frame)
    assert solution == [0, 2, 4, 5, 3, 1]

    solution_end_time = compute_end_time(frame, solution)
    assert solution_end_time == 41
def local_search(frame: JobSchedulingFrame, init_jobs: list) -> list:
    """
    Local search by pairwise exchange of adjacent jobs with evaluation
    of the schedule end time after each exchange.

    Parameters
    ----------
    frame: JobSchedulingFrame
    init_jobs: list

    Returns
    -------
    result of local search: list, bool
        the boolean flag is True if the found solution is better
        than `init_jobs`

    Notes
    -----
    `init_jobs` is not modified

    """
    solution = copy.copy(init_jobs)
    different_from_init_jobs = False
    while True:
        improvement = False
        for idx in range(len(solution) - 1):
            best_flowshop_time = compute_end_time(frame, solution)

            swap(solution, idx, idx + 1)
            new_flowshop_time = compute_end_time(frame, solution)

            if best_flowshop_time > new_flowshop_time:
                best_flowshop_time = new_flowshop_time
                improvement = True
                different_from_init_jobs = True
            else:
                # undo the swap
                swap(solution, idx, idx + 1)

        if not improvement:
            break

    return solution, different_from_init_jobs
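# Illustrative sketch of the pairwise-exchange local search: the function
# returns a new sequence plus a flag, so the caller has to rebind the result
# (the toy 6x2 instance below is the one from `test_johnson_algorithm`).
def example_local_search():
    frame = JobSchedulingFrame([[2, 3], [8, 3], [4, 6],
                                [9, 5], [6, 8], [9, 7]])
    solution = [5, 4, 3, 2, 1, 0]

    solution, improved = local_search(frame, solution)
    print("improved:", improved,
          "makespan:", compute_end_time(frame, solution))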
def test_create_schedule_with_count_machine(self):
    sch = create_schedule(self.frame1, self.frame1_solution1,
                          count_machine=self.frame1.count_machines)
    assert sch.end_time() == self.end_time_f1_s1
    assert self.str_f1_s1.startswith(str(sch))

    sch2_end_time = compute_end_time(self.frame1, self.frame1_solution1,
                                     count_machine=1)
    assert sch2_end_time == 82
def percentage_deviation(fst_heuristic: object, fst_args: dict,
                         scnd_heuristic: object, scnd_args: dict,
                         frames: list) -> float:
    """
    Average percentage deviation of the first heuristic's makespans
    from those of the second heuristic.

    Parameters
    ----------
    fst_heuristic: object
        function callback
    fst_args: dict
        named arguments for `fst_heuristic`
    scnd_heuristic: object
        function callback
    scnd_args: dict
        named arguments for `scnd_heuristic`
    frames: list
        list of `JobSchedulingFrame` objects

    Returns
    -------
    average_deviation: float

    Notes
    -----
    Averaging occurs by the count of frames.

    """
    solutions_ratio = 0.
    for frame in frames:
        fst_solution = fst_heuristic(frame, **fst_args)
        fst_end_time = compute_end_time(frame, fst_solution)

        scnd_solution = scnd_heuristic(frame, **scnd_args)
        scnd_end_time = compute_end_time(frame, scnd_solution)

        end_time_diff = fst_end_time - scnd_end_time
        solutions_ratio += end_time_diff / scnd_end_time

    return solutions_ratio / len(frames) * 100
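# Usage sketch (illustrative): average percentage gap of Palmer's heuristic
# relative to NEH on the same benchmark file. It assumes `palmer_heuristics`,
# `neh_heuristics`, `read_flow_shop_instances` and `FLOW_SHOP_INSTANCE_DIR`
# are importable here, as in the tests of this repository.
def example_percentage_deviation():
    frames = read_flow_shop_instances(FLOW_SHOP_INSTANCE_DIR
                                      + '/20jobs_5machines.txt')
    deviation = percentage_deviation(palmer_heuristics, {},
                                     neh_heuristics, {}, frames)
    print("Palmer vs NEH average deviation: %.2f%%" % deviation)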
def _artificial_time(frame: JobSchedulingFrame,
                     jobs: list, unscheduled_jobs: list) -> int:
    # copy processing time matrix (jobs x machines) from frame
    processing_times = frame.copy_proc_time

    # create processing times for the artificial job as the average of
    # the processing times of the jobs from `unscheduled_jobs`
    artificial_prc_times = []
    for idx_machine in range(frame.count_machines):
        average_time = 0.
        for idx_job in unscheduled_jobs:
            average_time += processing_times[idx_job][idx_machine]
        average_time /= len(unscheduled_jobs)
        artificial_prc_times.append(round(average_time))

    processing_times.append(artificial_prc_times)
    assert frame.count_jobs + 1 == len(processing_times)
    assert frame.count_machines == len(processing_times[0])

    frame_with_artificial_job = JobSchedulingFrame(processing_times)

    # append the index of the artificial job (the last row of the
    # extended processing-times matrix)
    jobs.append(frame.count_jobs)

    if len(jobs) != 1:
        end_time_sec_last_job = compute_end_time(frame_with_artificial_job,
                                                 jobs, len(jobs) - 1,
                                                 frame.count_machines - 1)
    else:
        end_time_sec_last_job = 0

    end_time_last_job = compute_end_time(frame_with_artificial_job, jobs,
                                         len(jobs),
                                         frame.count_machines - 1)

    result_time = end_time_sec_last_job + end_time_last_job
    jobs.pop()

    return result_time
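# Small worked illustration of the artificial-job construction used above
# (plain Python arithmetic, no scheduling API involved): for a 3x2
# processing-time matrix and unscheduled jobs {0, 1}, the artificial job gets
# the rounded per-machine average of those jobs' processing times. Note that
# the averages are taken over the actual indices in `unscheduled_jobs`.
def example_artificial_job_times():
    processing_times = [[2, 3],   # job 0
                        [8, 3],   # job 1
                        [4, 6]]   # job 2
    unscheduled_jobs = [0, 1]
    artificial = [
        round(sum(processing_times[j][m] for j in unscheduled_jobs)
              / len(unscheduled_jobs))
        for m in range(2)
    ]
    # machine 1: (2 + 8) / 2 = 5, machine 2: (3 + 3) / 2 = 3
    assert artificial == [5, 3]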
def test_fgh_heuristic(file_name, expected_percent_ratio):
    """
    Function for research.

    Problem
    -------
    Flow shop problem.

    Abstract
    --------
    The experiment consists in comparing the results of the
    FGH(count_jobs / count_machines) heuristic with the best results
    obtained by many researchers for Taillard's flow shop problems,
    published on the website:
    http://mistic.heig-vd.ch/taillard/problemes.dir/ordonnancement.dir/ordonnancement.html

    Notes
    -----
    Starts as follows (from the root folder):
        `pytest amyachev_degree/tests/test_simple_heuristics.py\
            ::test_fgh_heuristic`

    All tests run about 1484 + _ sec.

    """
    frames = read_flow_shop_instances(FLOW_SHOP_INSTANCE_DIR + file_name)
    assert len(frames) == 10

    # these characteristics are the same for all frames
    count_jobs = frames[0].count_jobs
    count_machines = frames[0].count_machines

    # TODO use percentage_deviation func
    solutions_ratio = []
    for i in range(10):
        # TODO need a way to automatically define `count_alpha` variable
        solution = fgh_heuristic(frames[i],
                                 count_alpha=count_jobs // count_machines + 11)
        schedule_end_time = compute_end_time(frames[i], solution)

        end_time_diff = schedule_end_time - frames[i].upper_bound
        solutions_ratio.append(end_time_diff / frames[i].upper_bound)

    average_percent_ratio = sum(solutions_ratio) / len(solutions_ratio) * 100
    assert round(average_percent_ratio, 2) == expected_percent_ratio
def cds_heuristics(frame: JobSchedulingFrame) -> list:
    """
    Compute an approximate solution for an instance of the flow shop
    problem by the Campbell, Dudek, and Smith (CDS) heuristic.

    Parameters
    ----------
    frame: JobSchedulingFrame

    Returns
    -------
    solution: list
        sequence of job indexes

    Notes
    -----
    Developed by Campbell, Dudek, and Smith in 1970.

    """
    johnson_frame = JobSchedulingFrame([[]])
    johnson_solutions_with_end_time = []

    # Create `count_machines - 1` sub-problems,
    # each of which is solved by Johnson's algorithm
    for sub_problem in range(1, frame.count_machines):
        # create a processing-times matrix for all jobs on only 2 machines
        proc_times = cds_create_proc_times(frame, sub_problem)
        johnson_frame.set_processing_times(proc_times)

        johnson_solution = johnson_algorithm(johnson_frame)

        # the end time is computed for the original task, i.e. `frame`
        end_time = compute_end_time(frame, johnson_solution)
        johnson_solutions_with_end_time.append((johnson_solution, end_time))

    johnson_solutions_with_end_time.sort(key=lambda elem: elem[1])

    # return only the solution with the minimum makespan (end time)
    return johnson_solutions_with_end_time[0][0]
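# Illustrative usage of the CDS heuristic on the toy 6x2 instance from
# `test_johnson_algorithm`. With only two machines there is a single CDS
# sub-problem, so the result is expected to coincide with Johnson's optimal
# permutation (this expectation depends on how `cds_create_proc_times`
# builds the 2-machine surrogate and is stated here as an assumption).
def example_cds_heuristics():
    frame = JobSchedulingFrame([[2, 3], [8, 3], [4, 6],
                                [9, 5], [6, 8], [9, 7]])
    solution = cds_heuristics(frame)
    print("CDS solution:", solution,
          "makespan:", compute_end_time(frame, solution))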
def test_palmer_heuristics(file_name, expected_percent_ratio):
    """
    Function for research.

    Problem
    -------
    Flow shop problem.

    Abstract
    --------
    The experiment consists in comparing the results of Palmer's heuristic
    with the best results obtained by many researchers for Taillard's
    flow shop problems, published on the website:
    http://mistic.heig-vd.ch/taillard/problemes.dir/ordonnancement.dir/ordonnancement.html

    Notes
    -----
    Starts as follows (from the root folder):
        `pytest amyachev_degree/tests/test_simple_heuristics.py\
            ::test_palmer_heuristics`

    First 9 tests run about 1.25 sec.
    All tests run about 1.58 sec.

    """
    frames = read_flow_shop_instances(FLOW_SHOP_INSTANCE_DIR + file_name)
    assert len(frames) == 10

    solutions_ratio = []
    for i in range(10):
        solution = palmer_heuristics(frames[i])
        schedule_end_time = compute_end_time(frames[i], solution)

        end_time_diff = schedule_end_time - frames[i].upper_bound
        solutions_ratio.append(end_time_diff / frames[i].upper_bound)

    average_percent_ratio = sum(solutions_ratio) / len(solutions_ratio) * 100
    assert round(average_percent_ratio, 2) == expected_percent_ratio
def liu_reeves_heuristics(frame: JobSchedulingFrame, count_sequences: int):
    init_sequence = [idx_job for idx_job in range(frame.count_jobs)]
    unscheduled_jobs = copy(init_sequence)

    init_sequence.sort(key=lambda next_job: _index_function(
        frame, [], unscheduled_jobs, next_job))

    solutions = []
    for idx in range(count_sequences):
        solution = [init_sequence[idx]]
        unscheduled_jobs.remove(init_sequence[idx])

        for _ in range(frame.count_jobs - 1):
            min_job = min(unscheduled_jobs,
                          key=lambda next_job: _index_function(
                              frame, solution, unscheduled_jobs, next_job))
            solution.append(min_job)
            unscheduled_jobs.remove(min_job)

        solutions.append(solution)
        unscheduled_jobs = copy(init_sequence)

    solutions.sort(key=lambda solution: compute_end_time(frame, solution))
    return solutions[0]