def jobs_times(self, permutation):
    jt = []
    total_idle_time = 0
    _ = self.evaluator(permutation)  # Evaluate best permutation to set machines
    cols = ['Job', 'Start Time', 'Finish Time', 'Idle Time']
    jt.append(cols)
    format_spec = "{:>15}" * 4
    lg.msg(logging.INFO, format_spec.format(*cols))
    for pi, p in enumerate(permutation):
        start_time = 0
        end_time = 0
        idle_time = 0
        for ji, j in enumerate(self.machines['assigned_jobs']):
            if ji == 0:
                # First machine sets the job's overall start and running end time
                start_time = j[pi][1]
                end_time = j[pi][2]
                continue
            # Idle time is the gap between finishing on the previous machine
            # and starting on this one
            idle_time += j[pi][1] - end_time
            end_time = j[pi][2]
        lg.msg(logging.INFO, format_spec.format(str(p), str(start_time), str(end_time), str(idle_time)))
        jt.append([str(p), str(start_time), str(end_time), str(idle_time)])
        total_idle_time += idle_time
    lg.msg(logging.INFO, 'Jobs total idle time is {}'.format(total_idle_time))
    filename = self.hj.results_path + '/' + self.hj.pid + ' ' + self.hj.bid + ' ' + self.hj.oid + \
               ' jobs times run ' + str(self.hj.run) + '.csv'
    Helper.write_to_csv(jt, filename, header=True)
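# A minimal worked example of the per-job idle time computed above, assuming
# each machine's assigned_jobs entry holds (job, start, end) tuples; the data
# and function name here are illustrative sketches, not part of the class API.
def _job_idle_time_sketch():
    # Tuples for one job (permutation position pi) across three machines
    per_machine = [(4, 0, 4), (4, 6, 9), (4, 9, 13)]  # (job, start, end)
    end_time = per_machine[0][2]  # Finish on the first machine: 4
    idle_time = 0
    for job, start, end in per_machine[1:]:
        idle_time += start - end_time  # Wait before the next machine starts
        end_time = end
    return idle_time  # (6 - 4) + (9 - 9) = 2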
def machines_times(self, permutation):
    mt = []
    total_idle_time = 0
    _ = self.evaluator(permutation)  # Evaluate best permutation to set machines
    cols = ['Machine', 'Start Time', 'Finish Time', 'Idle Time']
    mt.append(cols)
    format_spec = "{:>15}" * 4
    lg.msg(logging.INFO, format_spec.format(*cols))
    for mi, m in enumerate(self.machines['assigned_jobs']):
        finish_time = m[-1][2]
        # Idle time is the sum of gaps between consecutive jobs on the machine:
        # start time of job n+1 minus finish time of job n. The sentinel tuple
        # (0, m[-1][2], 0) appended to the shifted list pairs the last job's
        # finish time with itself, contributing a zero gap.
        idle_time = sum([x[1] - x[0] for x in zip([x[2] for x in m],
                                                  [x[1] for x in m[1:] + [(0, m[-1][2], 0)]])])
        total_idle_time += idle_time
        lg.msg(logging.INFO, format_spec.format(str(mi), str(m[0][1]), str(finish_time), str(idle_time)))
        mt.append([str(mi), str(m[0][1]), str(finish_time), str(idle_time)])
    lg.msg(logging.INFO, 'Machines total idle time is {}'.format(total_idle_time))
    filename = self.hj.results_path + '/' + self.hj.pid + ' ' + self.hj.bid + ' ' + self.hj.oid + \
               ' machines times run ' + str(self.hj.run) + '.csv'
    Helper.write_to_csv(mt, filename, header=True)
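# A self-contained sketch of the zip-based idle-time expression above, with
# illustrative data; assumes (job, start, end) tuples per machine.
def _machine_idle_time_sketch():
    m = [(0, 0, 3), (1, 5, 9), (2, 9, 12)]  # Jobs on one machine: (job, start, end)
    finishes = [x[2] for x in m]  # [3, 9, 12]
    # Starts shifted left by one; the sentinel supplies the final zero gap
    next_starts = [x[1] for x in m[1:] + [(0, m[-1][2], 0)]]  # [5, 9, 12]
    # Sum of gaps between consecutive jobs: (5-3) + (9-9) + (12-12) = 2
    return sum(s - f for f, s in zip(finishes, next_starts))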
def post_processing(self, j):
    j.end_time = time.time()
    j.total_comp_time_s += j.end_time - self.exec_start_time
    # Combinatorial problems solved in continuous space need the best candidate
    # mapped back to a discrete permutation
    if isinstance(j.rbest.candidate[0], float) and j.pid_type == 'combinatorial':
        j.rbest.candidate = j.pid_cls.candidate_spv_continuous_to_discrete(j.rbest.candidate)
    lg.msg(logging.INFO, 'Run {} best fitness is {} with candidate {}'.format(
        j.run, "{:.10f}".format(j.rbest.fitness), j.rbest.candidate))
    lg.msg(logging.INFO, 'Completed benchmark {} optimizer {} run {}'.format(j.bid, j.oid, str(j.run)))
    self.log_optimizer_fitness(j)
    filename = self.results_path + '/' + j.pid + ' ' + j.bid + ' ' + j.oid + \
               ' rbest fitness trend run ' + str(j.run)
    self.vis.fitness_trend(j.rft, filename)  # Plot run-specific trend
    Helper.write_to_csv(j.rft, filename + '.csv', header=False)
    if j.run == j.runs_per_optimizer - 1:
        return  # Last run keeps its remaining budget, as it is used in summary reporting
    j.budget = j.budget_total  # Reinstate full computational budget for the next run
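# Hedged sketch of the SPV (smallest position value) rule assumed to back
# candidate_spv_continuous_to_discrete: a continuous candidate maps to a
# permutation by ranking dimension indices in ascending order of their values.
# Illustrative only; the actual class method may differ.
def _spv_sketch(candidate=(0.7, -1.2, 0.1)):
    # Indices sorted by their continuous values: [-1.2, 0.1, 0.7] -> [1, 2, 0]
    return sorted(range(len(candidate)), key=lambda i: candidate[i])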
def summary(self):
    lg.msg(logging.INFO, 'Statistics')
    summary = []
    for p in self.problems:
        if not self.settings['prb'][p]['enabled']:
            continue
        for b in self.benchmarks:
            _new_benchmark = True
            gbest_ft = {}
            bdp = {}  # Bounds diff pct per optimizer
            other = {}
            for o in self.optimizers:
                if not self.settings['opt'][o]['enabled']:
                    continue
                for j in self.jobs:
                    if not (j.pid == p and j.bid == b and j.oid == o):
                        continue
                    if _new_benchmark:
                        lg.msg(logging.INFO, 'Summary for problem {} benchmark {}'.format(p, b))
                        _new_benchmark = False
                    gbest_ft[j.oid] = j.gft
                    other[j.oid] = {}
                    other[j.oid]['avg_comp_time_s'] = j.avg_comp_time_s
                    other[j.oid]['budget'] = j.budget_total
                    other[j.oid]['budget_rem'] = j.budget
                    if j.iter_last_imp:
                        other[j.oid]['avg_iter_last_imp'] = int(statistics.mean(j.iter_last_imp))
                        other[j.oid]['budget_no_imp_pct'] = round(
                            ((j.budget_total - other[j.oid]['avg_iter_last_imp']) / j.budget_total) * 100, 2)
                    else:
                        other[j.oid]['avg_iter_last_imp'] = 'n/a'
                        other[j.oid]['budget_no_imp_pct'] = 'n/a'
                    if j.imp_count:
                        other[j.oid]['avg_imp_count'] = int(statistics.mean(j.imp_count))
                    else:
                        other[j.oid]['avg_imp_count'] = 'n/a'
                    if j.bid != 'na':
                        bdp[j.oid] = [j.pid_cls.ilb, j.pid_lb_diff_pct, j.pid_cls.iub, j.pid_ub_diff_pct]
                    else:
                        bdp[j.oid] = ['na'] * 4
            # Only compile stats for a valid problem/benchmark/optimizer combination
            if not gbest_ft:
                continue
            stats_summary = Stats.get_summary(gbest_ft)
            format_spec = "{:>30}" * 16
            cols = ['Optimizer', 'Min Fitness', 'Max Fitness', 'Avg Fitness', 'StDev', 'Wilcoxon',
                    'LB', 'LB Diff %', 'UB', 'UB Diff %', 'Avg Cts', 'Budget', 'Budget Rem',
                    'Avg Iter Last Imp', 'Budget No Imp %', 'Avg Imp Count']
            summary.append(cols)
            lg.msg(logging.INFO, format_spec.format(*cols))
            for k, v in stats_summary.items():
                # Build each row once so the log and CSV stay consistent (rounded values)
                row = [str(k), str(round(v['minf'], 3)), str(round(v['maxf'], 3)), str(v['mean']),
                       str(v['stdev']), str(v['wts']), str(bdp[k][0]), str(bdp[k][1]), str(bdp[k][2]),
                       str(bdp[k][3]), str(round(other[k]['avg_comp_time_s'], 3)), other[k]['budget'],
                       other[k]['budget_rem'], other[k]['avg_iter_last_imp'],
                       other[k]['budget_no_imp_pct'], other[k]['avg_imp_count']]
                lg.msg(logging.INFO, format_spec.format(*row))
                summary.append(row)
            # Summary per problem/benchmark combination
            Helper.write_to_csv(summary, self.results_path + '/' + p + ' ' + b + ' problem summary.csv')
            # Fitness trend for all optimizers per problem/benchmark
            filename = self.results_path + '/' + p + ' ' + b + ' all optimizers gbest fitness trend'
            self.vis.fitness_trend_all_optimizers(gbest_ft, filename)
            Helper.write_to_csv(gbest_ft, filename + '.csv')
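# Hedged sketch of the aggregation assumed behind Stats.get_summary: for each
# optimizer, reduce its gbest fitness samples to min/max/mean/stdev. The
# Wilcoxon entry ('wts') is stubbed here, as computing it would need a paired
# baseline sample. Illustrative only, not the actual Stats implementation.
def _get_summary_sketch(gbest_ft):
    import statistics
    return {oid: {'minf': min(ft), 'maxf': max(ft),
                  'mean': round(statistics.mean(ft), 3),
                  'stdev': round(statistics.stdev(ft), 3),
                  'wts': 'n/a'}
            for oid, ft in gbest_ft.items()}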