def start_evolution(self, verbose):
    print("=== Pieces: {}\n".format(len(self._pieces)))

    if verbose:
        plot = Plot(self._image)

    ImageAnalysis.analyze_image(self._pieces)

    fittest = None
    best_fitness_score = float("-inf")
    termination_counter = 0

    for generation in range(self._generations):
        print_progress(generation, self._generations - 1,
                       prefix="=== Solving puzzle: ")

        new_population = []

        # Elitism
        elite = self._get_elite_individuals(elites=self._elite_size)
        new_population.extend(elite)

        selected_parents = roulette_selection(self._population,
                                              elites=self._elite_size)

        for first_parent, second_parent in selected_parents:
            crossover = Crossover(first_parent, second_parent)
            crossover.run()
            child = crossover.child()
            new_population.append(child)

        fittest = self._best_individual()

        if fittest.fitness <= best_fitness_score:
            termination_counter += 1
        else:
            best_fitness_score = fittest.fitness

        if termination_counter == self.TERMINATION_THRESHOLD:
            print("\n\n=== GA terminated")
            print("=== There was no improvement for {} generations".format(
                self.TERMINATION_THRESHOLD))
            return fittest

        self._population = new_population

        if verbose:
            plot.show_fittest(
                fittest.to_image(),
                "Generation: {} / {}".format(generation + 1, self._generations))

    return fittest
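
# A minimal sketch of the fitness-proportionate selection used above. It mirrors
# the interface of roulette_selection(population, elites) and returns
# (len(population) - elites) parent pairs; it is illustrative only, not
# necessarily the implementation shipped with the gaps package.
import bisect
import random


def roulette_selection_sketch(population, elites):
    fitness_values = [individual.fitness for individual in population]
    # Shift weights so they stay positive even if some fitness values are negative.
    lowest = min(fitness_values)
    offset = -lowest + 1e-9 if lowest < 0 else 0.0

    cumulative = []
    total = 0.0
    for value in fitness_values:
        total += value + offset
        cumulative.append(total)

    def pick():
        # Spin the wheel: find the first individual whose cumulative weight
        # exceeds a uniformly random point on [0, total].
        index = bisect.bisect_left(cumulative, random.uniform(0, total))
        return population[min(index, len(population) - 1)]

    return [(pick(), pick()) for _ in range(len(population) - elites)]
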
def analyze_image(cls, pieces):
    for piece in pieces:
        # For each edge we keep best matches as a sorted list.
        # Edges with lower dissimilarity_measure have higher priority.
        cls.best_match_table[piece.id] = {
            "T": [],
            "R": [],
            "D": [],
            "L": []
        }

    def update_best_match_table(first_piece, second_piece):
        measure = dissimilarity_measure(first_piece, second_piece, orientation)
        cls.put_dissimilarity((first_piece.id, second_piece.id), orientation, measure)
        cls.best_match_table[second_piece.id][orientation[0]].append(
            (first_piece.id, measure))
        cls.best_match_table[first_piece.id][orientation[1]].append(
            (second_piece.id, measure))

    # Calculate dissimilarity measures and best matches for each piece.
    iterations = len(pieces) - 1
    for first in range(iterations):
        print_progress(first, iterations - 1, prefix="=== Analyzing image:")
        for second in range(first + 1, len(pieces)):
            for orientation in ["LR", "TD"]:
                update_best_match_table(pieces[first], pieces[second])
                update_best_match_table(pieces[second], pieces[first])

    for piece in pieces:
        for orientation in ["T", "L", "R", "D"]:
            cls.best_match_table[piece.id][orientation].sort(key=lambda x: x[1])
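
# A hedged sketch of what dissimilarity_measure could look like for the two
# orientations used above: "LR" compares the right column of the first piece
# with the left column of the second, "TD" compares the bottom row of the first
# with the top row of the second. piece.image() (an H x W x 3 array accessor)
# is an assumption; the real gaps implementation may differ in detail.
import numpy as np


def dissimilarity_measure_sketch(first_piece, second_piece, orientation="LR"):
    a = first_piece.image().astype(float)   # assumed accessor, H x W x 3
    b = second_piece.image().astype(float)
    if orientation == "LR":
        color_difference = a[:, -1, :] - b[:, 0, :]
    else:  # "TD"
        color_difference = a[-1, :, :] - b[0, :, :]
    # Root of the summed squared color differences along the shared edge.
    return np.sqrt(np.sum(np.square(color_difference)))
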
def analyze_image(cls, pieces):  # Image analysis
    for piece in pieces:
        # For each edge we keep best matches as a sorted list.
        # Edges with lower dissimilarity_measure have higher priority.
        # Keep, for every piece, the candidate matches in each of the four
        # directions (e.g. 420 pieces * 4 edges).
        cls.best_match_table[piece.id] = {
            # "T": [(piece.id, dissimilarity)],
            "T": [],
            "R": [],
            "D": [],
            "L": []
        }

    cls.max_match_d = -1
    cls.max_match_mgc = -1

    def update_best_match_table(first_piece, second_piece):
        # Record the dissimilarity between the two pieces and append it to the
        # match lists (the edge distance is computed in color space).
        measure = dissimilarity_measure(first_piece, second_piece, orientation)
        if measure[0] > cls.max_match_d:
            cls.max_match_d = measure[0]
        if measure[1] > cls.max_match_mgc:
            cls.max_match_mgc = measure[1]
        # Save the dissimilarity for this pair.
        cls.put_dissimilarity((first_piece.id, second_piece.id), orientation, measure)
        cls.best_match_table[second_piece.id][orientation[0]].append(
            (first_piece.id, measure))
        cls.best_match_table[first_piece.id][orientation[1]].append(
            (second_piece.id, measure))

    # Calculate dissimilarity measures and best matches for each piece.
    iterations = len(pieces) - 1
    for first in range(iterations):
        print_progress(first, iterations - 1, prefix="=== Analyzing image:")
        for second in range(first + 1, len(pieces)):
            # Compute the dissimilarity for both orientations.
            for orientation in ["LR", "TD"]:
                update_best_match_table(pieces[first], pieces[second])
                update_best_match_table(pieces[second], pieces[first])

    # normalize d_mgc
    for piece in pieces:
        for orientation in ["T", "L", "R", "D"]:
            d_mgc = cls.best_match_table[piece.id][orientation]
            new_value = []
            for piece_id, measure in d_mgc:
                new_value.append((piece_id,
                                  measure[0] / cls.max_match_d + measure[1] / cls.max_match_mgc))
                # new_value.append((piece_id, measure[0] / cls.max_match_d))
            cls.best_match_table[piece.id][orientation] = new_value

    # normalize dissimilarity
    for id1, id2 in cls.dissimilarity_measures:
        for orientation in cls.dissimilarity_measures[(id1, id2)]:
            v = cls.dissimilarity_measures[(id1, id2)][orientation]
            new_value = v[0] / cls.max_match_d + v[1] / cls.max_match_mgc
            # new_value = v[0] / cls.max_match_d
            cls.dissimilarity_measures[(id1, id2)][orientation] = new_value

    for piece in pieces:
        for orientation in ["T", "L", "R", "D"]:
            # Sort each piece's matches in every direction by dissimilarity.
            cls.best_match_table[piece.id][orientation].sort(key=lambda x: x[1])
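
# In this variant dissimilarity_measure returns a pair: measure[0] is a plain
# color-difference term and measure[1] is a gradient-based (MGC-style) term,
# which analyze_image then normalizes by their respective maxima. A simplified
# illustration follows; piece.image() is an assumed accessor and a full MGC
# would use Mahalanobis distances over per-piece gradient statistics.
import numpy as np


def dissimilarity_pair_sketch(first_piece, second_piece, orientation="LR"):
    a = first_piece.image().astype(float)   # assumed accessor, H x W x 3
    b = second_piece.image().astype(float)
    if orientation == "LR":
        edge_a, edge_b = a[:, -1, :], b[:, 0, :]
        inner_gradient = a[:, -1, :] - a[:, -2, :]   # gradient just inside piece a
        cross_gradient = b[:, 0, :] - a[:, -1, :]    # gradient across the seam
    else:  # "TD"
        edge_a, edge_b = a[-1, :, :], b[0, :, :]
        inner_gradient = a[-1, :, :] - a[-2, :, :]
        cross_gradient = b[0, :, :] - a[-1, :, :]
    d = np.sqrt(np.sum((edge_a - edge_b) ** 2))                    # -> measure[0]
    mgc = np.sqrt(np.sum((cross_gradient - inner_gradient) ** 2))  # -> measure[1]
    return d, mgc
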
def start_evolution(self, verbose):
    with open('result_file_%d.csv' % Config.round_id, 'w') as f:
        line = "%s,%s,%s,%s,%s,%s,%s\n" % (
            'time', 'cog_index', 'correct_in_db', 'total_in_db',
            'correct_in_GA', 'total_in_GA', 'precision')
        f.write(line)

    # print("=== Pieces: {}\n".format(len(self._pieces)))

    if verbose:
        from gaps.plot import Plot
        plot = Plot(self._image)

    # ImageAnalysis.analyze_image(self._pieces)

    start_time = time.time()

    fittest = None
    best_fitness_score = float("-inf")
    solution_found = False

    if Config.multiprocess:
        data_q = Queue()
        res_q = Queue()
        processes = []
        for pid in range(Config.process_num):
            p = Process(target=worker, args=(pid, start_time, self._pieces[:], 0))
            p.start()
            processes.append(p)
            redis_key = 'round:%d:parents' % (Config.round_id)
            redis_cli.hdel(redis_key, 'process:%d' % pid)
            redis_key = 'round:%d:children' % (Config.round_id)
            redis_cli.hdel(redis_key, 'process:%d' % pid)

    old_crowd_edge_count = 1

    for generation in range(self._generations):
        if not Config.cli_args.online and not Config.cli_args.hide_detail:
            print_progress(generation,
                           self._generations - 1,
                           prefix="=== Solving puzzle offline: ",
                           start_time=start_time)
        refreshTimeStamp(start_time)

        # In the crowd-based algorithm we need to access the database to update
        # the fitness measure at the beginning of each generation.
        generation_start_time = time.time()
        db_update()
        if not Config.cli_args.hide_detail:
            print("edge_count:{}/edge_prop:{}".format(
                db_update.crowd_edge_count,
                db_update.crowd_edge_count / Config.total_edges))

        redis_key = 'round:%d:dissimilarity' % Config.round_id
        dissimilarity_json = json.dumps(dissimilarity_measure.measure_dict)
        # print(dissimilarity_json)
        redis_cli.set(redis_key, dissimilarity_json)

        # Calculate dissimilarity and best_match_table.
        ImageAnalysis.analyze_image(self._pieces)

        # Fitness of all individuals needs to be re-calculated.
        for _individual in self._population:
            _individual._objective = None
            _individual._fitness = None

        db_update_time = time.time()

        new_population = []

        # random.shuffle(self._population)
        self._population.sort(key=attrgetter("objective"))
        # print(','.join([str(ind.get_pieces_id_list()) for ind in self._population]))

        # Elitism
        # elite = self._get_elite_individuals(elites=self._elite_size)
        elite = self._population[-self._elite_size:]
        new_population.extend(elite)

        if Config.fitness_func_name == 'rank-based':
            # !!! self._population needs to be sorted first
            # for rank, indiv in enumerate(self._population):
            #     indiv.calc_rank_fitness(rank)
            self.calc_rank_fitness()

        select_elite_time = time.time()

        if solution_found:
            print("GA found a solution for round {}!".format(Config.round_id))
            if Config.cli_args.online:
                GA_time = time.time() - (
                    mongo_wrapper.get_round_start_milisecs() / 1000.0)
                print("GA time: %.3f" % GA_time)
            else:
                winner_time = mongo_wrapper.get_round_winner_time_milisecs() / 1000.0
                GA_time = time.time() - start_time + \
                    mongo_wrapper.get_round_winner_time_milisecs() * Config.offline_start_percent / 1000.0
                print("solved, winner time: %.3f, GA time: %.3f" %
                      (winner_time, GA_time))
            if Config.multiprocess:
                for p in processes:
                    p.terminate()
            notify_crowdjigsaw_server()
            exit(0)

        self._get_common_edges(elite[:4])

        selected_parents = roulette_selection(self._population,
                                              elites=self._elite_size)
        select_parent_time = time.time()

        result = set()
        if Config.multiprocess:
            # Multiprocessing: assign an equal share of the parent pairs to
            # each worker process via redis.
            worker_args = []
            redis_key = 'round:%d:parents' % (Config.round_id)
            redis_data = {}
            for pid in range(Config.process_num):
                parents_data = json.dumps([
                    (f_parent.get_pieces_id_list(), s_parent.get_pieces_id_list())
                    for (f_parent, s_parent) in
                    selected_parents[(len(selected_parents) // Config.process_num) * pid:
                                     (len(selected_parents) // Config.process_num) * (pid + 1)]
                ])
                redis_data['process:%d' % pid] = parents_data
            redis_cli.hmset(redis_key, redis_data)

            redis_key = 'round:%d:children' % (Config.round_id)
            for pid in range(Config.process_num):
                while True:
                    children_json = redis_cli.hget(redis_key, 'process:%d' % pid)
                    if children_json:
                        children_data = json.loads(children_json)
                        if children_data:
                            if len(children_data) != 49:
                                continue
                            redis_key = 'round:%d:parents' % (Config.round_id)
                            redis_cli.hdel(redis_key, 'process:%d' % pid)
                            redis_key = 'round:%d:children' % (Config.round_id)
                            redis_cli.hdel(redis_key, 'process:%d' % pid)
                            result.update(children_data)
                            break
        else:
            # Non-multiprocessing path: run crossover in this process.
            for first_parent, second_parent in selected_parents:
                crossover = Crossover(first_parent, second_parent)
                crossover.run()
                child = crossover.child()
                result.add(','.join(
                    [str(_) for _ in child.get_pieces_id_list()]))

        # Pad with random individuals if duplicates shrank the result set.
        while len(result) < len(selected_parents):
            random_child = [str(i) for i in range(len(self._pieces))]
            np.random.shuffle(random_child)
            result.add(','.join(random_child))

        result = list(map(lambda x: [int(_) for _ in x.split(',')], result))
        result = [
            Individual([self._pieces[_] for _ in c], Config.cli_args.rows,
                       Config.cli_args.cols, False) for c in result
        ]
        new_population.extend(result)

        for child in new_population:
            if child.is_solution():
                fittest = child
                redis_key = 'round:' + str(Config.round_id) + ':GA_edges'
                res = redis_cli.set(redis_key,
                                    json.dumps(list(child.edges_set())))
                solution_found = True
                break

        crossover_time = time.time()

        if not solution_found:
            fittest = self._best_individual()
            if fittest.fitness > best_fitness_score:
                best_fitness_score = fittest.fitness

        self._population = new_population

        if verbose:
            from gaps.plot import Plot
            plot.show_fittest(
                fittest.to_image(),
                "Generation: {} / {}".format(generation + 1, self._generations))

        times = {
            'generation_time': time.time() - generation_start_time,
            'db_update_time': db_update_time - generation_start_time,
            'select_elite_time': select_elite_time - db_update_time,
            'select_parent_time': select_parent_time - select_elite_time,
            'crossover_time': crossover_time - select_parent_time
        }
        print(times)

    return fittest
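
# A hedged sketch of the worker-process protocol implied above: each worker
# polls its own field of the round:<id>:parents hash, runs Crossover on every
# parent pair it receives, and publishes the children (comma-joined piece id
# strings) to the round:<id>:children hash. It assumes the same module-level
# Config, redis_cli, Individual and Crossover as the code above; the actual
# worker in this project may differ.
import json
import time


def worker_sketch(pid, start_time, pieces, _unused):
    parents_key = 'round:%d:parents' % Config.round_id
    children_key = 'round:%d:children' % Config.round_id
    field = 'process:%d' % pid
    while True:
        parents_json = redis_cli.hget(parents_key, field)
        if not parents_json:
            time.sleep(0.05)
            continue
        children = set()
        for first_ids, second_ids in json.loads(parents_json):
            # Rebuild the parent individuals from their piece id lists.
            first_parent = Individual([pieces[i] for i in first_ids],
                                      Config.cli_args.rows, Config.cli_args.cols, False)
            second_parent = Individual([pieces[i] for i in second_ids],
                                       Config.cli_args.rows, Config.cli_args.cols, False)
            crossover = Crossover(first_parent, second_parent)
            crossover.run()
            child = crossover.child()
            children.add(','.join(str(_) for _ in child.get_pieces_id_list()))
        redis_cli.hset(children_key, field, json.dumps(list(children)))
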
def start_evolution(self, verbose):
    print("=== Pieces: {}\n".format(len(self._pieces)))

    if verbose:
        plot = Plot(self._image)

    ImageAnalysis.analyze_image(self._pieces)

    fittest = None
    best_fitness_score = float("-inf")
    termination_counter = 0

    for generation in range(self._generations):
        print_progress(generation, self._generations - 1,
                       prefix="=== Solving puzzle: ")

        new_population = []

        # Elitism: keep the fittest individuals.
        elite = self._get_elite_individuals(elites=self._elite_size)
        new_population.extend(elite)

        # Select population_size - elite_size parent pairs from the population.
        selected_parents = roulette_selection(self._population,
                                              elites=self._elite_size)

        # Generate children from the parents and add them to new_population.
        for first_parent, second_parent in selected_parents:
            # Crossover to produce a child.
            crossover = Crossover(first_parent, second_parent)
            crossover.run()
            child = crossover.child()
            # child.mutate()
            new_population.append(child)

        # Pick the fittest individual from the previous generation and mutate it.
        fittest = self._best_individual()
        fittest.mutate()

        # min_fitness = 0
        # for index in range(len(new_population)):
        #     if new_population[index].fitness < new_population[min_fitness].fitness:
        #         min_fitness = index
        # fittest.clear_fitness()
        # if fittest.fitness > new_population[min_fitness].fitness:
        #     new_population[min_fitness] = fittest

        print("old_fittest : ", fittest.fitness, end="")

        # image = fittest.to_image()
        # rightImage = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # cv2.imwrite("temp_image_" + str(generation) + ".jpg", rightImage)

        best_adjoin = fittest.best_adjoin(self._piece_size)
        rightImage = cv2.cvtColor(best_adjoin, cv2.COLOR_RGB2BGR)
        cv2.imwrite("temp_image_best_adjoin_" + str(generation) + ".jpg", rightImage)

        # penalize = fittest.penalize()
        # print(" new_fittest : ", fittest.fitness)
        # rightImage = cv2.cvtColor(penalize, cv2.COLOR_RGB2BGR)
        # cv2.imwrite("temp_image_penalize_" + str(generation) + ".jpg", rightImage)

        # If this generation's best is worse than the historical best, increment
        # termination_counter; otherwise record the new best and reset the counter.
        if fittest.fitness < best_fitness_score:
            termination_counter += 1
        else:
            best_fitness_score = fittest.fitness
            termination_counter = 0

        if termination_counter % 4 == 2:
            predicate = Individual(fittest.pieces,
                                   fittest.rows,
                                   fittest.columns,
                                   shuffle=False)
            predicate.penalize_image = fittest.penalize_image
            # Try to escape a local optimum.
            predicate.manually_select()
            # predicate.shuffle_assembling()
            print("predicate_fitness : %s " % str(predicate.fitness))
            image = predicate.to_image()
            rightImage = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imwrite("predicate_image_" + str(generation) + ".jpg", rightImage)
            for index in range(len(new_population)):
                if new_population[index].fitness < predicate.fitness:
                    new_population[index] = predicate
                    break

        # If there has been no improvement for TERMINATION_THRESHOLD consecutive
        # generations, stop.
        if termination_counter == self.TERMINATION_THRESHOLD:
            print("\n\n=== GA terminated")
            print("=== There was no improvement for {} generations".format(
                self.TERMINATION_THRESHOLD))
            return fittest

        self._population = new_population

        if verbose:
            plot.show_fittest(
                fittest.to_image(),
                "Generation: {} / {}".format(generation + 1, self._generations))

    return fittest
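
# A minimal sketch of a swap-based mutation consistent with the fittest.mutate()
# call above, assuming Individual.pieces is a flat, mutable list of pieces in
# row-major order; the project's actual mutate() may be more elaborate, and a
# real implementation would also need to invalidate any cached fitness
# (cf. the commented-out clear_fitness() above).
import random


def mutate_sketch(individual, swaps=1):
    # Swap a few random piece positions to perturb the arrangement.
    for _ in range(swaps):
        i, j = random.sample(range(len(individual.pieces)), 2)
        individual.pieces[i], individual.pieces[j] = \
            individual.pieces[j], individual.pieces[i]
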