def exchange2(self, arg_timetable):
    """Locate the first timetable slot rejected by the free-period check
    and swap it with a randomly drawn, swappable slot of the same class.

    arg_timetable is indexed as [grade][class][day][time] and is mutated
    in place; nothing is returned.
    """
    pena = penalty.Penalty()
    excha_flag = True
    # Scan every grade/class/day; remember only the FIRST violation found.
    for grade in range(1, GRADE + 1):
        for cla in range(1, CLASS + 1):
            for day in range(DAY):
                flag, time = pena.judge_free(arg_timetable, grade, cla, day)
                if flag is False:
                    if excha_flag:
                        excha_grade = grade
                        excha_class = cla
                        old_day = day
                        old_time = time
                        excha_flag = False
    if excha_flag:
        # Fix: no violating slot exists.  The original fell through and
        # crashed with a NameError on excha_grade below; bail out instead.
        return
    # Draw random candidate slots until a swappable one turns up.
    while True:
        new_day = random.randint(0, DAY - 1)
        new_time = random.randint(0, TIME - 2)
        if new_day == old_day and new_time == old_time:
            # Fix: the original compared `new_time == new_day`, which failed
            # to skip re-drawing the very slot we are trying to move.
            continue
        candidate = arg_timetable[excha_grade][excha_class][new_day][new_time]
        # Accept an empty slot (no 'subname' attribute) or one whose
        # fix_flag is set.
        # NOTE(review): accepting fix_flag=True slots looks suspicious if
        # fix_flag marks immovable lessons -- confirm the flag's meaning.
        if hasattr(candidate, 'subname') is False or candidate.fix_flag:
            break
    # Swap the violating slot with the chosen one.
    slot = arg_timetable[excha_grade][excha_class][old_day][old_time]
    arg_timetable[excha_grade][excha_class][old_day][old_time] = \
        arg_timetable[excha_grade][excha_class][new_day][new_time]
    arg_timetable[excha_grade][excha_class][new_day][new_time] = slot
def annealingoptimize1(self, arg_timetable, arg_teacher, temp=10000, cool=0.99):
    """Simulated-annealing search over the timetable.

    Repeatedly perturbs the timetable (exchange1/2/3 chosen by the current
    penalty magnitude) and accepts a neighbour when it improves the soft
    penalty, or otherwise with probability exp(-|delta| / temp).  The
    temperature decays by factor `cool` each iteration.

    Returns (timetable, teacher, penalty, penalty_history).
    """
    iteration = 0
    current_penalty = 1000000
    penalty_history = []
    exchanger = exchange.Exchange()
    evaluator = penalty.Penalty()
    arranger = arrangement.Arrangement()
    # NOTE(review): with `or`, the loop only stops once BOTH the penalty
    # target and the iteration budget are met (unless the stagnation break
    # below fires) -- confirm this is intended.
    while current_penalty > MIN_PENALTY or iteration < MAX_CNT:
        # Snapshot so a rejected move can be rolled back.
        old_timetable = copy.deepcopy(arg_timetable)
        # Use stronger moves while the penalty is still large.
        if current_penalty >= 1000000:
            exchanger.exchange1(arg_timetable)
        elif current_penalty >= 10000:
            exchanger.exchange2(arg_timetable)
        else:
            exchanger.exchange3(arg_timetable)
        # Re-derive teacher assignments for both candidate states.
        old_teacher = copy.deepcopy(arg_teacher)
        new_teacher = copy.deepcopy(arg_teacher)
        arranger.arrange_teacher(old_timetable, old_teacher)
        arranger.arrange_teacher(arg_timetable, new_teacher)
        old_pena = evaluator.calc_penalty_soft(old_timetable, old_teacher)
        new_pena = evaluator.calc_penalty_soft(arg_timetable, new_teacher)
        # Metropolis-style acceptance probability from the temperature.
        accept_prob = math.exp(-abs(new_pena - old_pena) / temp)
        if new_pena < old_pena or random.random() < accept_prob:
            current_penalty = new_pena
            teacher = copy.copy(new_teacher)
        else:
            # Rejected: restore the snapshot.
            arg_timetable = copy.deepcopy(old_timetable)
            current_penalty = old_pena
            teacher = copy.copy(old_teacher)
        # Cool down and advance.
        temp *= cool
        iteration += 1
        # Periodic progress report.
        if iteration % 100 == 0:
            print('penalty:', current_penalty)
            print('count :', iteration)
            print()
        if iteration > 300 and current_penalty > 1000000:
            # Search has stalled at a very high penalty; give up early.
            break
        penalty_history.append(current_penalty)
    print('----------------------')
    print('penalty: ', current_penalty)
    print('----------------------')
    return arg_timetable, teacher, current_penalty, penalty_history
def greatdeluge2opt(self, arg_timetable, arg_teacher):
    """Great-deluge search drawing one exchange3 (2-opt style) neighbour
    per iteration.

    A neighbour is accepted when it is no worse than the current cost or
    at/below the water level.  The water level sinks by DEC_WATER1 on
    improvement and re-rises by RERISE1 while the search stagnates.

    Returns (timetable, teacher, min_cost, penalty_history).
    """
    # cnt_stag: stagnation counter, penalty_history: cost per iteration.
    cnt_stag = 0
    penalty_history = []
    obj1 = exchange.Exchange()
    obj2 = penalty.Penalty()
    obj3 = arrangement.Arrangement()
    # Initial teacher arrangement and cost.
    teacher = copy.deepcopy(arg_teacher)
    obj3.arrange_teacher(arg_timetable, teacher)
    cost_current = obj2.calc_penalty_soft(arg_timetable, teacher)
    min_cost = cost_current
    water_level = cost_current
    penalty_history.append(cost_current)
    # Fix: initialise the best-so-far snapshot.  The original assigned
    # min_timetable only on the first improvement, so a run with no
    # improvement crashed with a NameError at arrange_teacher below.
    min_timetable = copy.deepcopy(arg_timetable)
    while min_cost > 0 and cnt_stag < MAX_STAG1:
        ### 2-opt neighbour ###
        timetable = copy.deepcopy(arg_timetable)
        teacher = copy.deepcopy(arg_teacher)
        obj1.exchange3(timetable)
        obj3.arrange_teacher(timetable, teacher)
        cost_neigh = obj2.calc_penalty_soft(timetable, teacher)
        # Accept when not worse, or when at/below the water level.
        if cost_current >= cost_neigh or water_level >= cost_neigh:
            arg_timetable = copy.deepcopy(timetable)
            cost_current = cost_neigh
            if cost_current < min_cost:
                # New best solution.
                min_cost = cost_current
                min_timetable = copy.deepcopy(arg_timetable)
                cnt_stag = 0
            if cost_current < water_level:
                # Lower the water level after a dive below it.
                water_level -= DEC_WATER1
            if cost_current == min_cost or cost_current == water_level:
                # Accepted but not strictly better: counts as stagnation.
                cnt_stag += 1
        else:
            cnt_stag += 1
        # Periodically raise the water level to escape stagnation.
        # NOTE(review): cnt_stag == 0 also satisfies the modulo test, so the
        # level re-rises right after an improvement -- confirm intended.
        if cnt_stag % FREQ_RERISE1 == 0:
            water_level += RERISE1
        print('min_pena', min_cost)
        print('cur_pena', cost_current)
        print('water_level', water_level)
        print('cnt_stag', cnt_stag)
        print()
        penalty_history.append(cost_current)
    # Arrange teachers for the best timetable into arg_teacher.
    obj3.arrange_teacher(min_timetable, arg_teacher)
    # NOTE(review): returns the CURRENT timetable while greatdelugels
    # returns its best -- confirm which one the callers expect.
    return arg_timetable, teacher, min_cost, penalty_history
def greatdelugels(self, arg_timetable, arg_teacher):
    """Great-deluge search with a local-search step: sample 5 exchange3
    neighbours per iteration and evaluate the best of them.

    A neighbour is accepted when it is no worse than the current cost or
    at/below the water level.  The water level sinks by DEC_WATER2 on
    improvement and re-rises by RERISE2 while the search stagnates.

    Returns (min_timetable, teacher, min_cost, penalty_history).
    """
    # cnt_stag: stagnation counter, penalty_history: cost per iteration.
    cnt_stag = 0
    penalty_history = []
    obj1 = exchange.Exchange()
    obj2 = penalty.Penalty()
    obj3 = arrangement.Arrangement()
    # Initial teacher arrangement and cost.
    teacher = copy.deepcopy(arg_teacher)
    obj3.arrange_teacher(arg_timetable, teacher)
    cost_current = obj2.calc_penalty_soft(arg_timetable, teacher)
    min_cost = cost_current
    water_level = cost_current
    penalty_history.append(cost_current)
    # Fix: initialise the best-so-far snapshot.  The original assigned
    # min_timetable only on the first improvement, so a run with no
    # improvement crashed with a NameError at arrange_teacher below.
    min_timetable = copy.deepcopy(arg_timetable)
    while min_cost > 0 and cnt_stag < MAX_STAG2:
        ### LocalSearch: sample 5 neighbours, keep the cheapest ###
        penal = []
        timetable = []
        teacher = []
        for x in range(5):
            timetable.append(copy.deepcopy(arg_timetable))
            obj1.exchange3(timetable[x])
            teacher.append(copy.deepcopy(arg_teacher))
            obj3.arrange_teacher(timetable[x], teacher[x])
            penal.append(obj2.calc_penalty_soft(timetable[x], teacher[x]))
        min_element = penal.index(min(penal))
        timetable = copy.deepcopy(timetable[min_element])
        teacher = copy.deepcopy(teacher[min_element])
        cost_neigh = min(penal)
        # Accept when not worse, or when at/below the water level.
        if cost_current >= cost_neigh or water_level >= cost_neigh:
            arg_timetable = copy.deepcopy(timetable)
            cost_current = cost_neigh
            if cost_current < min_cost:
                # New best solution.
                min_cost = cost_current
                min_timetable = copy.deepcopy(arg_timetable)
                cnt_stag = 0
            if cost_current < water_level:
                # Lower the water level after a dive below it.
                water_level -= DEC_WATER2
            if cost_current == min_cost or cost_current == water_level:
                # Accepted but not strictly better: counts as stagnation.
                cnt_stag += 1
        else:
            cnt_stag += 1
        # Fix: the original appended cost_current to penalty_history here
        # AND at the end of the loop, recording every iteration twice;
        # greatdeluge2opt records once per iteration, so the mid-loop
        # duplicate was dropped.
        # Periodically raise the water level to escape stagnation.
        if cnt_stag % FREQ_RERISE2 == 0:
            water_level += RERISE2
        print('min_pena', min_cost)
        print('cur_pena', cost_current)
        print('water_level', water_level)
        print('cnt_stag', cnt_stag)
        print()
        penalty_history.append(cost_current)
    # Arrange teachers for the best timetable into arg_teacher.
    obj3.arrange_teacher(min_timetable, arg_teacher)
    return min_timetable, arg_teacher, min_cost, penalty_history