def pre_processing(self, j):
    """Reset per-run state on job *j* and start the wall-clock timer for the run."""
    lg.msg(logging.INFO, 'Starting optimizer {} run {}'.format(j.oid, str(j.run)))
    self.exec_start_time = time.time()
    # Fresh trend history and population for this run; run-best starts empty
    j.rft, j.population = [], []
    j.rbest = Particle()
def __init__(self):
    """Boot the platform: create a timestamped results folder, switch logging
    to it, then run the heuristics manager end-to-end."""
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    self.results_path = 'results/hoprun_' + stamp
    self.create_results_folder()
    self.set_log_file()
    lg.msg(logging.INFO, 'Heuristic Optimizer Platform (HOP) starting...')
    self.hm = HeuristicsManager(results_path=self.results_path)
    self.vis = Visualisation()
    self.optimize()
    lg.msg(logging.INFO, 'Heuristic Optimizer Platform (HOP) completed')
def post_processing(self):
    """Report on the global best solution: bounds comparison, Gantt chart,
    and per-machine / per-job timing breakdowns."""
    best = self.hj.gbest
    self.hj.pid_lb_diff_pct, self.hj.pid_ub_diff_pct = Stats.bounds_compare(self.ilb, self.iub, best.fitness)
    # Re-evaluate the best candidate so machine assigned jobs reflect it
    fitness, _ = self.evaluator(best.candidate)
    gantt_file = self.hj.results_path + '/' + self.hj.pid + ' ' + self.hj.bid + ' ' + self.hj.oid + ' gbest Gantt chart'
    self.vis.solution_representation_gantt(fitness, self.machines, self.jobs, gantt_file)
    lg.msg(logging.INFO, 'Machine times for best fitness {}'.format(fitness))
    self.machines_times(best.candidate)
    lg.msg(logging.INFO, 'Job times for best fitness of {} with permutation {}'.format(fitness, best.candidate))
    self.jobs_times(best.candidate)
def __init__(self, results_path):
    """Initialise the manager: seeded RNG, configuration, and the job list."""
    lg.msg(logging.INFO, 'Initialising Heuristics Manager')
    self.results_path = results_path
    # Dedicated RNG instance, seeded with the module-level `seed` for repeatability
    self.random = Random()
    self.random.seed(seed)
    self.vis = Visualisation()
    self.settings = self.get_config()
    # Registries filled in later from configuration
    self.problems_optimizers, self.problems = [], []
    self.benchmarks, self.optimizers = [], []
    self.jobs = self.set_jobs()
    self.exec_start_time = 0
def optimize(self):
    """Entry point for the simulated-annealing optimizer.

    Lazily derives the initial temperature from an initial sample on first
    use, then runs the annealing loop and settles the one-off budget cost
    of that temperature calibration.
    """
    # Initial temperature is computed once (on the first run) from a sample
    # of the problem's solution space
    if self.initial_temp == 0:
        self.hj.pid_cls.initial_sample = self.hj.pid_cls.generate_initial_sample()
        self.initial_temp = self.set_initial_temp()
    lg.msg(logging.DEBUG, 'Initial temperature set to {}'.format(self.initial_temp))
    self.anneal()
    # Evaluating initial temperature has a one-time computational cost, so reduce budget if required.
    # NOTE(review): the code ADDS initial_temp_cost to the budget — presumably the
    # cost is stored as a negative value (or this is a refund of budget already
    # consumed by sampling); TODO confirm where initial_temp_cost is set.
    if self.initial_temp_cost != 0:
        self.hj.budget += self.initial_temp_cost
        self.initial_temp_cost = 0
def anneal(self):
    """Run the simulated-annealing loop until the budget is exhausted or the
    temperature falls below the threshold (unless reheating is enabled).

    Minimisation: a strictly better candidate is always accepted; a worse
    one is accepted with Metropolis probability exp(loss / temp), where
    loss = current_best - new (negative for worse candidates).
    """
    # Set initial solution candidate only if the run-best is still at its
    # default (i.e. nothing seeded this run, e.g. from the hyper-heuristic)
    if self.hj.rbest.fitness == self.hj.rbest.fitness_default:
        self.hj.rbest.candidate = self.get_generator()(lb=self.hj.pid_lb, ub=self.hj.pid_ub)
        self.hj.rbest.fitness, self.hj.budget = self.hj.pid_cls.evaluator(
            self.hj.rbest.candidate, self.hj.budget)
    self.temp = self.initial_temp
    while self.hj.budget > 0:
        # Below threshold: either reheat back to the initial temperature or stop
        if self.temp < self.temp_threshold:
            if self.hj.reheat:
                self.temp = self.initial_temp
            else:
                break
        new_p = Particle()
        # If continuous problem generate new solution otherwise perturb current candidate combination
        if self.hj.pid_type == 'continuous':
            new_p.candidate = self.get_generator()(lb=self.hj.pid_lb, ub=self.hj.pid_ub)
        else:
            new_p.candidate = self.hj.variator(self.hj.rbest.candidate)
        new_p.fitness, self.hj.budget = self.hj.pid_cls.evaluator(
            new_p.candidate, self.hj.budget)
        loss = self.hj.rbest.fitness - new_p.fitness
        # Clamp positive loss: probability is computed unconditionally below,
        # and exp(large/temp) would overflow when the new candidate is much
        # better. (A positive loss means the first branch of the acceptance
        # test fires anyway, so the clamp never changes which candidate wins.)
        if loss > 0.3:
            loss = 0.3
        probability = math.exp(loss / self.temp)
        # Accept if strictly better, or probabilistically if worse (Metropolis)
        if (new_p.fitness < self.hj.rbest.fitness) or (self.random.random() < probability):
            lg.msg(logging.DEBUG, 'Previous best {} replaced by new best {}'.format(
                self.hj.rbest.fitness, new_p.fitness))
            self.hj.rbest = copy.deepcopy(new_p)
            self.hj.rft.append(self.hj.rbest.fitness)
            # Improvement bookkeeping only for standalone runs, not when
            # driven as a low-level heuristic by the hyper-heuristic
            if not self.fromhyper:
                self.hj.iter_last_imp[self.hj.run] = self.hj.budget_total - self.hj.budget
                self.hj.imp_count[self.hj.run] += 1
        # Geometric cooling schedule
        self.temp *= self.cooling_rate
    lg.msg(logging.DEBUG, 'Completed annealing with temperature at {}'.format(self.temp))
def execute_jobs(self):
    """Execute every enabled problem/optimizer job, then emit the summary.

    For each job: run the optimizer `runs_per_optimizer` times with
    controller and problem pre/post-processing around each run, then let
    the problem class do its own completion tasks.
    """
    for j in self.jobs:
        # Guard clause (was `if ...: pass / else: continue`): skip jobs whose
        # problem or optimizer is disabled
        if not (j.pid_enabled and j.oid_enabled):
            continue
        j.start_time = time.time()
        if j.bid != 'na':
            lg.msg(logging.INFO, 'Benchmark {}'.format(j.bid))
        lg.msg(logging.INFO, 'Optimizing {} with {} ({})'.format(j.pid_desc, j.oid, j.oid_desc))
        lg.msg(logging.INFO, 'Executing {} sample runs'.format(j.runs_per_optimizer))
        for r in range(j.runs_per_optimizer):
            j.run = r
            self.pre_processing(j)  # Controller pre-processing
            j.pid_cls.pre_processing()  # Problem pre-processing
            j.oid_cls.run(jobs=self.jobs, fromhyper=False)  # Execute optimizer
            self.post_processing(j)  # Controller post-processing
        # NOTE(review): assumes runs_per_optimizer > 0; zero would divide by zero
        j.avg_comp_time_s = j.total_comp_time_s / j.runs_per_optimizer
        # Execute problem-specific tasks upon optimization completion e.g. generate Gantt chart of best schedule
        j.pid_cls.post_processing()
        lg.msg(logging.INFO, 'Completed optimizing {} with {} ({})'.format(j.pid_desc, j.oid, j.oid_desc))
    self.summary()
def hyper(self):
    """Main hyper-heuristic loop: repeatedly select a low-level heuristic
    (LLH), seed it with the current best, run it for a slice of the budget,
    and archive any improvement it produces.
    """
    # Warm-up: sample each LLH once and record the samples in the trend
    self.set_llh_samples()
    self.add_samples_to_trend()
    bcf, bc, llh = self.select_heuristic()  # best-candidate fitness, candidate, LLH index
    self.set_rbest(bcf, bc)
    while self.hj.budget > 0:
        bcf, bc, llh = self.select_heuristic()
        lg.msg(logging.DEBUG, 'Low level component {} seeding Hyper with best fitness {} and candidate {}'
               .format(self.low_level_heuristics[llh].oid, bcf, bc))
        self.set_rbest(bcf, bc)
        pop = self.set_pop()
        # Execute low level heuristic: give it a budget slice and the current
        # best as its starting point
        self.low_level_heuristics[llh].budget = self.hj.llh_budget
        self.low_level_heuristics[llh].rbest.fitness = self.hj.rbest.fitness
        self.low_level_heuristics[llh].rbest.candidate = self.hj.rbest.candidate
        self.low_level_heuristics[llh].population = pop
        self.low_level_heuristics[llh].oid_cls.run(fromhyper=True)
        self.low_level_heuristics[llh].llh_oid_run_count += 1
        # Charge the full slice against the outer budget regardless of what
        # the LLH actually consumed
        self.hj.budget = int(self.hj.budget - self.hj.llh_budget)
        # If the LLH improved on the global best, archive the result
        if self.low_level_heuristics[llh].rbest.fitness < self.hj.rbest.fitness:
            lg.msg(logging.INFO, 'Inserting fitness into archive {} by heuristic {}'.format(
                self.low_level_heuristics[llh].rbest.fitness, self.low_level_heuristics[llh].oid))
            # Credit the LLH with the size of the improvement it delivered
            self.low_level_heuristics[llh].llh_oid_aggr_imp += (
                self.hj.rbest.fitness - self.low_level_heuristics[llh].rbest.fitness)
            self.llh_fitness[llh].insert(0, self.low_level_heuristics[llh].rbest.fitness)  # Insert at start
            self.llh_candidates[llh].insert(0, self.low_level_heuristics[llh].rbest.candidate)
            self.hj.rft.append(self.low_level_heuristics[llh].rbest.fitness)
            self.set_rbest(self.low_level_heuristics[llh].rbest.fitness,
                           self.low_level_heuristics[llh].rbest.candidate)
            self.hj.iter_last_imp[self.hj.run] = self.hj.budget_total - self.hj.budget
            self.hj.imp_count[self.hj.run] += 1
def __init__(self, **kwargs):
    """Simulated-annealing optimizer: base Optimizer state plus cooling
    schedule parameters."""
    Optimizer.__init__(self, **kwargs)
    # Optimizer-specific annealing state; temperatures are derived lazily
    self.temp = 0
    self.initial_temp = 0
    self.initial_temp_cost = 0
    self.temp_threshold = 1
    lg.msg(logging.DEBUG, 'Temperature threshold set to {}'.format(self.temp_threshold))
    self.cooling_rate = 0.98
    lg.msg(logging.DEBUG, 'Cooling rate set to {}'.format(self.cooling_rate))
def evolve(self):
    """Genetic-algorithm main loop: build/complete the initial population,
    then evaluate, select, cross over and mutate until the budget is spent
    or parent selection signals convergence.
    """
    # Incoming population migrates to starting population, reset to fit GA
    if self.hj.population:
        self.reset_inherited_population_attr()
    # Complete assembly of initial population size, accounting for any incoming migrant population
    for i in range(self.hj.initial_pop_size - len(self.hj.population)):
        candidate = Particle()
        candidate.candidate = self.get_generator()(lb=self.hj.pid_lb, ub=self.hj.pid_ub)
        self.hj.population.append(candidate)
    while self.hj.budget > 0:
        # Evaluate any new candidates (those still at the default fitness)
        for ci, candidate in enumerate(self.hj.population):
            if candidate.fitness == candidate.fitness_default:
                c = copy.deepcopy(candidate.candidate)
                # Chromosome representations are decoded to floats before evaluation
                if self.get_generator().__name__ == 'generator_chromosome':
                    c = self.binary_to_float(c)
                candidate.fitness, self.hj.budget = self.hj.pid_cls.evaluator(c, self.hj.budget)
        # Sort population by fitness ascending (best first — minimisation)
        self.hj.population.sort(key=lambda x: x.fitness, reverse=False)
        if self.hj.population[0].fitness < self.hj.rbest.fitness:
            lg.msg(logging.DEBUG, 'Previous best is {}, now updated with new best {}'.format(
                self.hj.rbest.fitness, self.hj.population[0].fitness))
            self.hj.rbest.fitness = self.hj.population[0].fitness
            self.hj.rbest.candidate = self.hj.population[0].candidate
            self.hj.rft.append(self.hj.population[0].fitness)
            # Improvement bookkeeping only for standalone runs, not under the hyper-heuristic
            if not self.fromhyper:
                self.hj.iter_last_imp[self.hj.run] = self.hj.budget_total - self.hj.budget
                self.hj.imp_count[self.hj.run] += 1
        self.parents = self.parent_selection()
        if not self.parents:
            # Convergence: selection produced no parents, stop evolving
            break
        self.children = self.parent_crossover()
        self.children_mutate()
        self.hj.population = self.update_population()
def jobs_times(self, permutation):
    """Log and persist per-job start/finish/idle times for *permutation*.

    Walks each job down the machine sequence: start time is its slot on the
    first machine; idle time accumulates as the gap between its finish on
    one machine and its start on the next. Writes the table to a CSV in the
    job's results folder.

    Assumes machines['assigned_jobs'] entries are (job, start, finish)
    triples indexed by position in the permutation — TODO confirm against
    the evaluator.
    """
    jt = []
    total_idle_time = 0
    _ = self.evaluator(permutation)  # Evaluate best permutation to set machines
    cols = ['Job', 'Start Time', 'Finish Time', 'Idle Time']
    jt.append(cols)
    format_spec = "{:>15}" * 4
    lg.msg(logging.INFO, format_spec.format(*cols))
    for pi, p in enumerate(permutation):
        start_time = 0
        end_time = 0
        idle_time = 0
        for ji, j in enumerate(self.machines['assigned_jobs']):
            # First machine sets the baseline; no idle time possible yet
            if ji == 0:
                start_time = j[pi][1]
                end_time = j[pi][2]
                continue
            # Gap between finish on the previous machine and start on this one
            idle_time += j[pi][1] - end_time
            end_time = j[pi][2]
        lg.msg(logging.INFO, format_spec.format(str(p), str(start_time), str(end_time), str(idle_time)))
        jt.append([str(p), str(start_time), str(end_time), str(idle_time)])
        total_idle_time += idle_time
    lg.msg(logging.INFO, 'Jobs total idle time is {}'.format(total_idle_time))
    filename = self.hj.results_path + '/' + self.hj.pid + ' ' + self.hj.bid + ' ' + self.hj.oid + \
               ' jobs times run ' + str(self.hj.run) + '.csv'
    Helper.write_to_csv(jt, filename, header=True)
def post_processing(self, j):
    """Close out one optimizer run for job *j*: record timings, log and
    persist the run-best result, then restore the budget for the next run."""
    j.end_time = time.time()
    j.total_comp_time_s += time.time() - self.exec_start_time
    # Continuous-valued candidate for a combinatorial problem: map it back
    # to a discrete permutation before reporting
    if isinstance(j.rbest.candidate[0], float) and j.pid_type == 'combinatorial':
        j.rbest.candidate = j.pid_cls.candidate_spv_continuous_to_discrete(j.rbest.candidate)
    lg.msg(logging.INFO, 'Run {} best fitness is {} with candidate {}'.format(j.run, "{:.10f}".format(
        j.rbest.fitness), j.rbest.candidate))
    lg.msg(logging.INFO, 'Completed benchmark {} optimizer {} run {}'.format(j.bid, j.oid, str(j.run)))
    self.log_optimizer_fitness(j)
    trend_file = self.results_path + '/' + j.pid + ' ' + j.bid + ' ' + j.oid + ' rbest fitness trend run ' + str(j.run)
    self.vis.fitness_trend(j.rft, trend_file)  # Plot run-specific trend
    Helper.write_to_csv(j.rft, trend_file + '.csv', header=False)
    # Reinstate full computational budget for every run except the last;
    # the last run's remaining budget is used in summary reporting
    if j.run != j.runs_per_optimizer - 1:
        j.budget = j.budget_total
def machines_times(self, permutation):
    """Log and persist per-machine start/finish/idle times for *permutation*.

    Re-evaluates the permutation so machines['assigned_jobs'] reflects it,
    then derives each machine's idle time from consecutive job tuples and
    writes the table to a CSV in the job's results folder.
    """
    mt = []
    total_idle_time = 0
    _ = self.evaluator(permutation)  # side effect: populates machines['assigned_jobs']
    cols = ['Machine', 'Start Time', 'Finish Time', 'Idle Time']
    mt.append(cols)
    format_spec = "{:>15}" * 4
    lg.msg(logging.INFO, format_spec.format(*cols))
    # Calculate idle time from list tuples as start time(m+1) - finish time(m). Include last machine start time
    for mi, m in enumerate(self.machines['assigned_jobs']):
        finish_time = m[-1][2]
        # Pairs each job's finish time with the next job's start time; the
        # appended sentinel (0, finish, 0) closes the final gap at zero.
        # NOTE(review): tuples appear to be (job, start, finish) — confirm
        # against the evaluator before touching this expression.
        idle_time = sum([x[1]-x[0] for x in zip([x[2] for x in m],
                                                [x[1] for x in m[1:] + [(0, m[-1][2], 0)]])])
        total_idle_time += idle_time
        lg.msg(logging.INFO, format_spec.format(str(mi), str(m[0][1]), str(finish_time), str(idle_time)))
        mt.append([str(mi), str(m[0][1]), str(finish_time), str(idle_time)])
    lg.msg(logging.INFO, 'Machines total idle time is {}'.format(total_idle_time))
    filename = self.hj.results_path + '/' + self.hj.pid + ' ' + self.hj.bid + ' ' + self.hj.oid + \
               ' machines times run ' + str(self.hj.run) + '.csv'
    Helper.write_to_csv(mt, filename, header=True)
def machines_set_lower_bounds_taillard(self):
    """Compute the Taillard lower bound per machine and warn if the overall
    (max) bound disagrees with the benchmark instance file's lower bound.

    Per machine m: its total loadout plus the minimum work any job must do
    before reaching m and the minimum work remaining after m.
    """
    for m in range(self.machines['quantity']):
        lb = self.machines['loadout_times'][m]
        minimum_before_machine_start = []
        minimum_after_machine_start = []
        for j in self.jobs['list']:
            # First machine has no preceding work
            if m > 0:
                minimum_before_machine_start.append(sum(j[:m]))
            # Fixed: the original condition `m < quantity` was always true
            # inside range(quantity); only non-final machines have trailing
            # work. (Result unchanged: the last machine previously collected
            # empty-slice sums of 0, and min(0,...) == 0.)
            if m < self.machines['quantity'] - 1:
                minimum_after_machine_start.append(sum(j[m+1:]))
        if minimum_before_machine_start:
            lb += min(minimum_before_machine_start)
        if minimum_after_machine_start:
            lb += min(minimum_after_machine_start)
        self.machines['lower_bounds_taillard'].append(lb)
        lg.msg(logging.DEBUG, 'Machine {} Taillard lower bound is {} time units'.format(m, lb))
    lg.msg(logging.INFO, 'Calculated Taillard benchmark instance lower bound (max) is {} time units'.format(
        max(self.machines['lower_bounds_taillard'])))
    # Sanity check against the lower bound declared in the instance file
    if max(self.machines['lower_bounds_taillard']) != self.ilb:
        lg.msg(logging.WARNING, 'Calculated Taillard instance benchmark ({}) != lb in benchmark instance file '
                                '({})'.format(max(self.machines['lower_bounds_taillard']), self.ilb))
def summary(self):
    """Compile and report end-of-run statistics per problem/benchmark.

    For every enabled problem, benchmark and optimizer combination: gather
    the global fitness trends, bound-difference percentages and budget/
    improvement metrics from the matching jobs, log a formatted table of
    summary statistics, and write per-problem CSV summaries plus a combined
    fitness-trend plot.
    """
    lg.msg(logging.INFO, 'Statistics')
    summary = []
    for p in self.problems:
        if not self.settings['prb'][p]['enabled']:
            continue
        for b in self.benchmarks:
            _new_benchmark = True
            gbest_ft = {}   # per-optimizer global fitness trends
            bdp = {}        # Bounds diff pct
            other = {}      # per-optimizer budget / timing / improvement metrics
            for o in self.optimizers:
                if not self.settings['opt'][o]['enabled']:
                    continue
                for j in self.jobs:
                    # Only jobs matching this problem/benchmark/optimizer triple
                    if not (j.pid == p and j.bid == b and j.oid == o):
                        continue
                    if _new_benchmark:
                        lg.msg(logging.INFO, 'Summary for problem {} benchmark {}'.format(p, b))
                        _new_benchmark = False
                    gbest_ft[j.oid] = {}
                    gbest_ft[j.oid] = j.gft
                    bdp[j.oid] = {}
                    other[j.oid] = {}
                    other[j.oid]['avg_comp_time_s'] = j.avg_comp_time_s
                    other[j.oid]['budget'] = j.budget_total
                    other[j.oid]['budget_rem'] = j.budget
                    # Metrics below degrade to 'n/a' when no improvements were recorded
                    if j.iter_last_imp:
                        other[j.oid]['avg_iter_last_imp'] = int(statistics.mean(j.iter_last_imp))
                    else:
                        other[j.oid]['avg_iter_last_imp'] = 'n/a'
                    if other[j.oid]['avg_iter_last_imp'] != 'n/a':
                        other[j.oid]['budget_no_imp_pct'] = round(((j.budget_total - other[j.oid]['avg_iter_last_imp'])
                                                                   / j.budget_total) * 100, 2)
                    else:
                        other[j.oid]['budget_no_imp_pct'] = 'n/a'
                    if j.imp_count:
                        other[j.oid]['avg_imp_count'] = int(statistics.mean(j.imp_count))
                    else:
                        other[j.oid]['avg_imp_count'] = 'n/a'
                    # Bound comparisons only make sense for real benchmark instances
                    if j.bid != 'na':
                        bdp[j.oid] = [j.pid_cls.ilb, j.pid_lb_diff_pct, j.pid_cls.iub, j.pid_ub_diff_pct]
                    else:
                        bdp[j.oid] = ['na'] * 4
            # Only proceed for compiled stats for valid problem/benchmark/optimizer
            if not gbest_ft:
                continue
            stats_summary = Stats.get_summary(gbest_ft)
            format_spec = "{:>30}" * 16
            cols = ['Optimizer', 'Min Fitness', 'Max Fitness', 'Avg Fitness', 'StDev', 'Wilcoxon', 'LB',
                    'LB Diff %', 'UB', 'UB Diff %', 'Avg Cts', 'Budget', 'Budget Rem', 'Avg Iter Last Imp',
                    'Budget No Imp %', 'Avg Imp Count']
            summary.append(cols)
            lg.msg(logging.INFO, format_spec.format(*cols))
            for k, v in stats_summary.items():
                lg.msg(logging.INFO, format_spec.format(str(k), str(round(v['minf'], 3)), str(round(v['maxf'], 3)),
                                                        str(v['mean']), str(v['stdev']), str(v['wts']),
                                                        str(bdp[k][0]), str(bdp[k][1]), str(bdp[k][2]),
                                                        str(bdp[k][3]), str(round(other[k]['avg_comp_time_s'], 3)),
                                                        other[k]['budget'], other[k]['budget_rem'],
                                                        other[k]['avg_iter_last_imp'],
                                                        other[k]['budget_no_imp_pct'], other[k]['avg_imp_count']))
                summary.append([str(k), str(v['minf']), str(v['maxf']), str(v['mean']), str(v['stdev']),
                                str(v['wts']), str(bdp[k][0]), str(bdp[k][1]), str(bdp[k][2]), str(bdp[k][3]),
                                str(round(other[k]['avg_comp_time_s'], 3)), other[k]['budget'],
                                other[k]['budget_rem'], other[k]['avg_iter_last_imp'],
                                other[k]['budget_no_imp_pct'], other[k]['avg_imp_count']])
            # Summary per problem
            Helper.write_to_csv(summary, self.results_path + '/' + p + ' ' + b + ' problem summary.csv')
            # Fitness trend for all optimizers per problem
            filename = self.results_path + '/' + p + ' ' + b + ' all optimizers gbest fitness trend'
            self.vis.fitness_trend_all_optimizers(gbest_ft, filename)
            Helper.write_to_csv(gbest_ft, filename + '.csv')
def machines_set_loadout_times(self):
    """Record each machine's total load — the sum of that machine's
    processing time across every job — in machines['loadout_times']."""
    for machine_idx in range(self.machines['quantity']):
        # Column sum over the job processing-time matrix
        total_load = sum(row[machine_idx] for row in self.jobs['list'])
        self.machines['loadout_times'].append(total_load)
        lg.msg(logging.DEBUG, 'Machine {} loaded with {} time units'.format(machine_idx, total_load))
def jobs_set_total_units(self):
    """Store each job's total processing time (summed over all machines)
    in jobs['total_units']."""
    totals = list(map(sum, self.jobs['list']))
    self.jobs['total_units'] = totals
    # Per-job breakdown is only worth emitting when debug logging is active
    if logging.DEBUG >= self.logger.level:
        for job_idx, units in enumerate(totals):
            lg.msg(logging.DEBUG, 'Job {} allocated {} time units'.format(job_idx, units))