def search_tile_rectangle(self, image, threshold=10, encourage=5.1, close_kernel=(5, 10, 15, 20, 25)):
    """
    Search for the rectangles of empty tiles.
    This is a fallback method for the fallback method, almost never needed.

    Note: This method may have an error of about 2 pixels.

    Args:
        image (np.ndarray): Monochrome image.
        threshold (int): Minimum number of rectangles for a good match.
        encourage (int, float): Tolerance in pixels when matching rectangle sizes to the tile size.
        close_kernel (tuple[int]): Kernel sizes used in morphology close.

    Returns:
        bool: If success.
    """
    location = np.array([])
    for kernel in close_kernel:
        # Re-creating closed image
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel, kernel))
        image_closed = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)

        # Find rectangles
        contours, _ = cv2.findContours(image_closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        rectangle = np.array([
            cv2.boundingRect(cv2.convexHull(cont).astype(np.float32)) for cont in contours
        ])
        try:
            # Filter out correct rectangles
            rectangle = rectangle[(rectangle[:, 2] > 100) & (rectangle[:, 3] > 100)]
            shape = rectangle[:, 2:]
            diff = np.abs(shape - np.round(shape / self.config.HOMO_TILE) * self.config.HOMO_TILE)
            rectangle = rectangle[np.all(diff < encourage, axis=1)]
            location = np.append(location, rectangle[:, :2], axis=0) if len(location) else rectangle[:, :2]
        except IndexError:
            location = []

    if len(location) > threshold:
        self.homo_loca = fit_points(location, mod=self.config.HOMO_TILE, encourage=encourage)
        self.map_inner = get_map_inner(location)
        message = 'good match'
    else:
        message = 'bad match'

    # print(self.homo_loca % self.config.HOMO_TILE)
    logger.attr_align('tile_rectangle', f'{len(location)} rectangles ({message})')
    return message != 'bad match'
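
# Standalone sketch (illustrative, not part of the class above): the same
# morphology-close -> findContours -> boundingRect filtering idea on a synthetic
# monochrome image. `tile_size`, the 100 px minimum and the pixel tolerance are
# stand-ins for self.config.HOMO_TILE and the project's constants, not the real values.
import cv2
import numpy as np

def find_tile_rectangles(image, tile_size=140, tolerance=5.0, kernel_size=15):
    """Return top-left corners of contours whose width/height are close to a multiple of tile_size."""
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    corners = []
    for cont in contours:
        x, y, w, h = cv2.boundingRect(cont)
        if w > 100 and h > 100:
            # Keep rectangles whose size is close to a multiple of the tile size
            diff = np.abs([w, h] - np.round(np.array([w, h]) / tile_size) * tile_size)
            if np.all(diff < tolerance):
                corners.append((x, y))
    return np.array(corners)

# Example: a blank image with one bright 280x140 block yields one corner at (30, 50).
canvas = np.zeros((720, 1280), dtype=np.uint8)
canvas[50:50 + 140, 30:30 + 280] = 255
print(find_tile_rectangles(canvas))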
def load(self, image):
    """
    Args:
        image (np.ndarray):
    """
    self.load_globe_map()

    start_time = time.time()
    local = self.find_peaks(
        self.perspective_transform(image),
        para=self.config.OS_LOCAL_FIND_PEAKS_PARAMETERS)
    local = local.astype(np.uint8)
    local = cv2.resize(local, None,
                       fx=self.config.OS_GLOBE_IMAGE_RESIZE,
                       fy=self.config.OS_GLOBE_IMAGE_RESIZE)

    result = cv2.matchTemplate(self.globe, local, cv2.TM_CCOEFF_NORMED)
    _, similarity, _, loca = cv2.minMaxLoc(result)
    loca = np.array(loca) / self.config.OS_GLOBE_IMAGE_RESIZE
    loca = tuple(self.homo_center + loca - self.config.OS_GLOBE_IMAGE_PAD)
    self.center_loca = loca

    time_cost = round(time.time() - start_time, 3)
    logger.attr_align('globe_center', loca)
    logger.attr_align('similarity', float2str(similarity), front=float2str(time_cost) + 's')
    if similarity < 0.1:
        logger.warning('Low similarity when matching OS globe')
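
# Standalone sketch (illustrative): locating a small local view inside a larger
# "globe" image with cv2.matchTemplate, then scaling the match position back to
# full resolution. The 0.5 resize factor and image sizes are assumptions, not the
# OS_GLOBE_IMAGE_RESIZE / OS_GLOBE_IMAGE_PAD values used by the project.
import cv2
import numpy as np

resize = 0.5
globe_full = np.random.randint(0, 255, (1000, 1500), dtype=np.uint8)
local_full = globe_full[400:600, 700:1000]          # patch to locate, true top-left = (700, 400)

globe = cv2.resize(globe_full, None, fx=resize, fy=resize)
local = cv2.resize(local_full, None, fx=resize, fy=resize)

result = cv2.matchTemplate(globe, local, cv2.TM_CCOEFF_NORMED)
_, similarity, _, loca = cv2.minMaxLoc(result)      # loca is (x, y) of the best match in the resized globe
loca = np.array(loca) / resize                      # scale back to full resolution
print(similarity, loca)                             # similarity near 1.0, loca near (700, 400)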
def search_tile_corner(self, image, threshold=0.8, encourage=1.0):
    """
    Search for the corners of empty tiles.
    This is a fallback method, rarely needed.

    Note: This method may have an error of 0.5 ~ 1.0 pixel.

    Args:
        image (np.ndarray): Monochrome image.
        threshold (float): Minimum similarity for a match.
        encourage (int, float):

    Returns:
        bool: If success.
    """
    similarity = 0
    location = np.array([])
    for index in range(4):
        template = ASSETS.tile_corner_image_list[index]
        result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        similarity = max(similarity, np.max(result))
        loca = np.argwhere(result > threshold)[:, ::-1] - self.config.HOMO_CORNER_OFFSET_LIST[index]
        location = np.append(location, loca, axis=0) if len(location) else loca

    if similarity > threshold:
        self.homo_loca = fit_points(
            location, mod=self.config.HOMO_TILE, encourage=encourage) - self.config.HOMO_CENTER_OFFSET
        self.map_inner = get_map_inner(location)
        message = f'{len(location)} matches'
    else:
        message = 'bad match'

    # print(self.homo_loca % self.config.HOMO_TILE)
    logger.attr_align('tile_corner', f'{float2str(similarity)} ({message})')
    return message != 'bad match'
def search_tile_center(self, image, threshold_good=0.9, threshold=0.8, encourage=1.0):
    """
    Search for the centers of empty tiles.
    This is the main method.

    Note: `len(res[res > 0.8])` is 3x faster than `np.sum(res > 0.8)`.

    Args:
        image (np.ndarray): Monochrome image.
        threshold_good (float): Similarity above which the single best match is trusted.
        threshold (float): Minimum similarity for a match.
        encourage (int, float):

    Returns:
        bool: If success.
    """
    result = cv2.matchTemplate(image, ASSETS.tile_center_image, cv2.TM_CCOEFF_NORMED)
    _, similarity, _, loca = cv2.minMaxLoc(result)
    if similarity > threshold_good:
        self.homo_loca = np.array(loca) - self.config.HOMO_CENTER_OFFSET
        self.map_inner = np.array(loca)
        message = 'good match'
    elif similarity > threshold:
        location = np.argwhere(result > threshold)[:, ::-1]
        self.homo_loca = fit_points(
            location, mod=self.config.HOMO_TILE, encourage=encourage) - self.config.HOMO_CENTER_OFFSET
        self.map_inner = get_map_inner(location)
        message = f'{len(location)} matches'
    else:
        message = 'bad match'

    # print(self.homo_loca % self.config.HOMO_TILE)
    logger.attr_align('tile_center', f'{float2str(similarity)} ({message})')
    return message != 'bad match'
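
# Standalone sketch (illustrative): the good-match / multi-match fallback used above,
# with a simplified stand-in for fit_points that just averages candidate locations
# modulo the tile spacing. Thresholds, tile size and the helper name are assumptions.
import cv2
import numpy as np

def locate_tile_center(image, template, tile=140, threshold_good=0.9, threshold=0.8):
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    _, similarity, _, loca = cv2.minMaxLoc(result)
    if similarity > threshold_good:
        return np.array(loca), similarity                 # single confident match
    if similarity > threshold:
        # Several weaker matches: argwhere gives (row, col), flip to (x, y),
        # then average them on the tile grid as a crude fit_points substitute.
        location = np.argwhere(result > threshold)[:, ::-1]
        return np.mean(location % tile, axis=0), similarity
    return None, similarity

# Example: one bright block, template cropped from around it, matches at (55, 55).
image = np.zeros((400, 400), dtype=np.uint8)
image[60:80, 60:80] = 255
template = image[55:85, 55:85].copy()
loca, similarity = locate_tile_center(image, template)
print(loca, similarity)                                   # ~(55, 55), similarity ~1.0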
def predict(self):
    """
    Predict grid info.
    """
    start_time = time.time()
    for grid in self:
        grid.predict()
    logger.attr_align('predict', len(self.grids.keys()), front=float2str(time.time() - start_time) + 's')
def load(self, image):
    """
    Args:
        image:
    """
    image = self._image_clear_ui(np.array(image))
    self.image = image
    super().load(image)

    # Create local view map
    grids = {}
    grid_class = OSGrid if self.mode == 'os' else Grid
    for loca, points in self.generate():
        if area_in_area(area1=corner2area(points), area2=self.config.DETECTING_AREA):
            grids[loca] = grid_class(location=loca, image=image, corner=points, config=self.config)

    # Handle grids offset
    offset = np.min(list(grids.keys()), axis=0)
    if np.sum(np.abs(offset)) > 0:
        logger.attr_align('grids_offset', tuple(offset.tolist()))
        self.grids = {}
        for loca, grid in grids.items():
            x, y = np.subtract(loca, offset)
            grid.location = (x, y)
            self.grids[(x, y)] = grid
    else:
        self.grids = grids
    self.shape = np.max(list(self.grids.keys()), axis=0)

    # Find local view center
    for loca, grid in self.grids.items():
        offset = grid.screen2grid([self.config.SCREEN_CENTER])[0].astype(int)
        points = grid.grid2screen(np.add([[0.5, 0], [-0.5, 0], [0, 0.5], [0, -0.5]], offset))
        self.swipe_base = np.array([
            np.linalg.norm(points[0] - points[1]),
            np.linalg.norm(points[2] - points[3])
        ])
        self.center_loca = tuple(np.add(loca, offset).tolist())
        logger.attr_align('center_loca', self.center_loca)
        if self.center_loca in self:
            self.center_offset = self.grids[self.center_loca].screen2grid([self.config.SCREEN_CENTER])[0]
        else:
            x = max(self.center_loca[0] - self.shape[0], 0) if self.center_loca[0] > 0 else self.center_loca[0]
            y = max(self.center_loca[1] - self.shape[1], 0) if self.center_loca[1] > 0 else self.center_loca[1]
            self.center_offset = offset - self.center_loca
            raise MapDetectionError(f'Camera outside map: offset=({x}, {y})')
        break
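
# Standalone sketch (illustrative): the "Handle grids offset" step above, i.e.
# re-keying a dict of grids so the smallest (x, y) key becomes (0, 0). Grid values
# are plain strings here instead of Grid objects.
import numpy as np

grids = {(-1, 0): 'A', (0, 0): 'B', (1, 2): 'C'}
offset = np.min(list(grids.keys()), axis=0)          # -> array([-1, 0])
if np.sum(np.abs(offset)) > 0:
    grids = {tuple(np.subtract(loca, offset).tolist()): grid for loca, grid in grids.items()}
print(grids)                                         # {(0, 0): 'A', (1, 0): 'B', (2, 2): 'C'}
shape = np.max(list(grids.keys()), axis=0)           # -> array([2, 2])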
def update(self, camera=True):
    """Update map image

    Args:
        camera: True to update camera position and perspective data.
    """
    self.device.screenshot()
    if not camera:
        self.view.update(image=self.device.image)
        return True

    if not hasattr(self, 'view'):
        self.view = View(self.config)
    try:
        self.view.load(self.device.image)
    except MapDetectionError as e:
        if self.info_bar_count():
            logger.info('Perspective error caused by info bar. Waiting.')
            self.handle_info_bar()
            return self.update(camera=camera)
        elif self.is_in_stage():
            logger.warning('Image is in stage')
            raise CampaignEnd('Image is in stage')
        elif not self.appear(IN_MAP):
            logger.warning('Image to detect is not in_map')
            raise e
        elif 'Camera outside map' in str(e):
            string = str(e)
            logger.warning(string)
            x, y = string.split('=')[1].strip('() ').split(',')
            self._map_swipe((-int(x.strip()), -int(y.strip())))
        else:
            raise e

    if not self._correct_camera:
        self.show_camera()
        return False
    # Set camera position
    if self.view.left_edge:
        x = 0 + self.view.center_loca[0]
    elif self.view.right_edge:
        x = self.map.shape[0] - self.view.shape[0] + self.view.center_loca[0]
    else:
        x = self.camera[0]
    if self.view.lower_edge:
        y = 0 + self.view.center_loca[1]
    elif self.view.upper_edge:
        y = self.map.shape[1] - self.view.shape[1] + self.view.center_loca[1]
    else:
        y = self.camera[1]

    if self.camera != (x, y):
        logger.attr_align('camera_corrected', f'{location2node(self.camera)} -> {location2node((x, y))}')
    self.camera = (x, y)
    self.show_camera()
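
# Standalone sketch (illustrative): the "Set camera position" step above, reduced
# to plain numbers for one axis. When an edge of the map is visible in the view,
# the camera coordinate is anchored to that edge; otherwise the previous camera
# value is kept. The map/view shapes and center_loca values are made-up examples.
def correct_camera_x(camera_x, left_edge, right_edge, map_shape_x, view_shape_x, center_loca_x):
    if left_edge:
        return 0 + center_loca_x
    if right_edge:
        return map_shape_x - view_shape_x + center_loca_x
    return camera_x

print(correct_camera_x(3, left_edge=False, right_edge=True,
                       map_shape_x=7, view_shape_x=5, center_loca_x=2))   # -> 4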
def load(self, image):
    """
    Args:
        image (np.ndarray): Shape (720, 1280, 3)
    """
    start_time = time.time()
    self.image = image

    # Image initialisation
    image = self.load_image(image)

    # Lines detection
    inner_h = self.detect_lines(
        image,
        is_horizontal=True,
        param=self.config.INTERNAL_LINES_FIND_PEAKS_PARAMETERS,
        threshold=self.config.INTERNAL_LINES_HOUGHLINES_THRESHOLD,
        theta=self.config.HORIZONTAL_LINES_THETA_THRESHOLD
    ).move(*self.config.DETECTING_AREA[:2])
    inner_v = self.detect_lines(
        image,
        is_horizontal=False,
        param=self.config.INTERNAL_LINES_FIND_PEAKS_PARAMETERS,
        threshold=self.config.INTERNAL_LINES_HOUGHLINES_THRESHOLD,
        theta=self.config.VERTICAL_LINES_THETA_THRESHOLD
    ).move(*self.config.DETECTING_AREA[:2])
    edge_h = self.detect_lines(
        image,
        is_horizontal=True,
        param=self.config.EDGE_LINES_FIND_PEAKS_PARAMETERS,
        threshold=self.config.EDGE_LINES_HOUGHLINES_THRESHOLD,
        theta=self.config.HORIZONTAL_LINES_THETA_THRESHOLD,
        pad=self.config.DETECTING_AREA[2] - self.config.DETECTING_AREA[0]
    ).move(*self.config.DETECTING_AREA[:2])
    edge_v = self.detect_lines(
        image,
        is_horizontal=False,
        param=self.config.EDGE_LINES_FIND_PEAKS_PARAMETERS,
        threshold=self.config.EDGE_LINES_HOUGHLINES_THRESHOLD,
        theta=self.config.VERTICAL_LINES_THETA_THRESHOLD,
        pad=self.config.DETECTING_AREA[3] - self.config.DETECTING_AREA[1]
    ).move(*self.config.DETECTING_AREA[:2])

    # Lines pre-cleansing
    horizontal = inner_h.add(edge_h).group()
    vertical = inner_v.add(edge_v).group()
    edge_h = edge_h.group()
    edge_v = edge_v.group()
    if not self.config.TRUST_EDGE_LINES:
        edge_h = edge_h.delete(inner_h)  # Experimental, reduce edge lines.
        edge_v = edge_v.delete(inner_v)
    self.horizontal = horizontal
    self.vertical = vertical

    # Calculate perspective
    self.crossings = self.horizontal.cross(self.vertical)
    self.vanish_point = optimize.brute(self._vanish_point_value, self.config.VANISH_POINT_RANGE)
    distance_point_x = optimize.brute(self._distant_point_value, self.config.DISTANCE_POINT_X_RANGE)[0]
    self.distant_point = (distance_point_x, self.vanish_point[1])
    logger.attr_align('vanish_point', point2str(*self.vanish_point, length=5))
    logger.attr_align('distant_point', point2str(*self.distant_point, length=5))
    if np.linalg.norm(np.subtract(self.vanish_point, self.distant_point)) < 10:
        raise MapDetectionError('Vanish point and distant point too close')

    # Re-generate lines. Useless after mid_cleanse function added.
    # self.horizontal = self.crossings.link(None, is_horizontal=True).group()
    # self.vertical = self.crossings.link(self.vanish_point).group()
    # self.draw(self.crossings.link(self.distant_point))
    # print(edge_h)
    # print(inner_h.group())

    # Lines cleansing
    # self.draw()
    self.map_inner = get_map_inner(self.crossings.points)
    self.horizontal, self.lower_edge, self.upper_edge = self.line_cleanse(
        self.horizontal, inner=inner_h.group(), edge=edge_h)
    self.vertical, self.left_edge, self.right_edge = self.line_cleanse(
        self.vertical, inner=inner_v.group(), edge=edge_v)

    # self.draw()
    # print(self.horizontal)
    # print(self.lower_edge, self.upper_edge)
    # print(self.vertical)
    # print(self.left_edge, self.right_edge)

    # Log
    time_cost = round(time.time() - start_time, 3)
    logger.info('%ss %s Horizontal: %s (%s inner, %s edge)' % (
        float2str(time_cost), '_' if self.lower_edge else ' ',
        len(self.horizontal), len(horizontal), len(edge_h)))
    logger.info('Edges: %s%s%s Vertical: %s (%s inner, %s edge)' % (
        '/' if self.left_edge else ' ', '_' if self.upper_edge else ' ', '\\' if self.right_edge else ' ',
        len(self.vertical), len(vertical), len(edge_v)))
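
# Standalone sketch (illustrative): grid-searching a vanish point with
# scipy.optimize.brute, as the loader above does with self._vanish_point_value.
# The objective is a toy one (summed perpendicular distance from a candidate point
# to a few lines given as (x1, y1, x2, y2)); the search ranges are arbitrary.
import numpy as np
from scipy import optimize

lines = np.array([
    [300, 500, 640, 100],
    [700, 500, 640, 100],
    [500, 500, 640, 100],
])   # three lines that all pass through (640, 100)

def point_to_lines_distance(point):
    x1, y1, x2, y2 = lines.T
    # Perpendicular distance from `point` to each infinite line, summed.
    num = np.abs((y2 - y1) * point[0] - (x2 - x1) * point[1] + x2 * y1 - y2 * x1)
    den = np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
    return np.sum(num / den)

vanish = optimize.brute(point_to_lines_distance, ((0, 1280), (-100, 300)))
print(vanish)    # close to (640, 100)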
def show_camera(self):
    logger.attr_align('Camera', location2node(self.camera))
def update(self, camera=True):
    """Update map image

    Args:
        camera: True to update camera position and perspective data.
    """
    self.device.screenshot()
    if not camera:
        self.view.update(image=self.device.image)
        return True

    self._view_init()
    try:
        self.view.load(self.device.image)
    except MapDetectionError as e:
        if self.info_bar_count():
            logger.info('Perspective error caused by info bar. Waiting.')
            self.handle_info_bar()
            return self.update(camera=camera)
        elif self.appear(GET_ITEMS_1):
            logger.warning('Items got. Trying handling mystery.')
            self.handle_mystery()
            return self.update(camera=camera)
        elif self.handle_story_skip():
            logger.warning('Perspective error caused by story. Handling.')
            self.ensure_no_story(skip_first_screenshot=False)
            return self.update(camera=camera)
        elif self.is_in_stage():
            logger.warning('Image is in stage')
            raise CampaignEnd('Image is in stage')
        elif self.appear(AUTO_SEARCH_MENU_CONTINUE, offset=self._auto_search_menu_offset):
            logger.warning('Image is in auto search menu')
            self.ensure_auto_search_exit()
            raise CampaignEnd('Image is in auto search menu')
        elif not self.appear(IN_MAP):
            logger.warning('Image to detect is not in_map')
            if self.appear_then_click(GAME_TIPS, offset=(20, 20)):
                logger.warning('Game tips found, retrying')
                self.device.screenshot()
                self.view.load(self.device.image)
            else:
                raise e
        elif 'Camera outside map' in str(e):
            string = str(e)
            logger.warning(string)
            x, y = string.split('=')[1].strip('() ').split(',')
            self._map_swipe((-int(x.strip()), -int(y.strip())))
        else:
            raise e

    if self._prev_view is not None and np.linalg.norm(self._prev_swipe) > 0:
        if self.config.MAP_SWIPE_PREDICT:
            swipe = self._prev_view.predict_swipe(
                self.view,
                with_current_fleet=self.config.MAP_SWIPE_PREDICT_WITH_CURRENT_FLEET,
                with_sea_grids=self.config.MAP_SWIPE_PREDICT_WITH_SEA_GRIDS)
            if swipe is not None:
                self._prev_swipe = swipe
        self.camera = tuple(np.add(self.camera, self._prev_swipe))
        self._prev_view = None
        self._prev_swipe = None
        self.show_camera()

    # Set camera position
    if self.view.left_edge:
        x = 0 + self.view.center_loca[0]
    elif self.view.right_edge:
        x = self.map.shape[0] - self.view.shape[0] + self.view.center_loca[0]
    else:
        x = self.camera[0]
    if self.view.lower_edge:
        y = 0 + self.view.center_loca[1]
    elif self.view.upper_edge:
        y = self.map.shape[1] - self.view.shape[1] + self.view.center_loca[1]
    else:
        y = self.camera[1]

    if self.camera != (x, y):
        logger.attr_align('camera_corrected', f'{location2node(self.camera)} -> {location2node((x, y))}')
    self.camera = (x, y)
    self.show_camera()

    self.predict()
def _update(self, camera=True):
    """Update map image

    Args:
        camera: True to update camera position and perspective data.
    """
    self.device.screenshot()
    if not camera:
        self.view.update(image=self.device.image)
        return True

    self._view_init()
    try:
        if not self.is_in_map() \
                and not self.is_in_strategy_submarine_move():
            logger.warning('Image to detect is not in_map')
            raise MapDetectionError('Image to detect is not in_map')
        self.view.load(self.device.image)
    except MapDetectionError as e:
        if self.info_bar_count():
            logger.warning('Perspective error caused by info bar')
            self.handle_info_bar()
            return False
        elif self.appear(GET_ITEMS_1):
            logger.warning('Perspective error caused by get_items')
            self.handle_mystery()
            return False
        elif self.handle_story_skip():
            logger.warning('Perspective error caused by story')
            self.ensure_no_story(skip_first_screenshot=False)
            return False
        elif self.is_in_stage():
            logger.warning('Image is in stage')
            raise CampaignEnd('Image is in stage')
        elif self.appear(AUTO_SEARCH_MENU_CONTINUE, offset=self._auto_search_menu_offset):
            logger.warning('Image is in auto search menu')
            self.ensure_auto_search_exit()
            raise CampaignEnd('Image is in auto search menu')
        elif self.appear(GLOBE_GOTO_MAP, offset=(20, 20)):
            logger.warning('Image is in OS globe map')
            self.ui_click(GLOBE_GOTO_MAP, check_button=self.is_in_map, offset=(20, 20),
                          retry_wait=3, skip_first_screenshot=True)
            return False
        elif self.appear(AUTO_SEARCH_REWARD, offset=(50, 50)):
            logger.warning('Perspective error caused by AUTO_SEARCH_REWARD')
            if hasattr(self, 'os_auto_search_quit'):
                self.os_auto_search_quit()
                return False
            else:
                logger.warning('Cannot find method os_auto_search_quit(), use ui_click() instead')
                self.ui_click(AUTO_SEARCH_REWARD, check_button=self.is_in_map, offset=(50, 50),
                              retry_wait=3, skip_first_screenshot=True)
                return False
        elif 'opsi' in self.config.task.command.lower() and self.handle_popup_confirm('OPSI'):
            # Always confirm popups in OpSi, same popups in os_map_goto_globe()
            logger.warning('Perspective error caused by popups')
            return False
        elif not self.is_in_map() \
                and not self.is_in_strategy_submarine_move():
            if self.appear(GAME_TIPS, offset=(20, 20)):
                logger.warning('Perspective error caused by game tips')
                self.device.click(GAME_TIPS)
                return False
            else:
                raise e
        elif 'Camera outside map' in str(e):
            string = str(e)
            logger.warning(string)
            x, y = string.split('=')[1].strip('() ').split(',')
            self._map_swipe((-int(x.strip()), -int(y.strip())))
        else:
            raise e

    if self._prev_view is not None and np.linalg.norm(self._prev_swipe) > 0:
        if self.config.MAP_SWIPE_PREDICT:
            swipe = self._prev_view.predict_swipe(
                self.view,
                with_current_fleet=self.config.MAP_SWIPE_PREDICT_WITH_CURRENT_FLEET,
                with_sea_grids=self.config.MAP_SWIPE_PREDICT_WITH_SEA_GRIDS)
            if swipe is not None:
                self._prev_swipe = swipe
        self.camera = tuple(np.add(self.camera, self._prev_swipe))
        self._prev_view = None
        self._prev_swipe = None
        self.show_camera()

    # Set camera position
    if self.view.left_edge:
        x = 0 + self.view.center_loca[0]
    elif self.view.right_edge:
        x = self.map.shape[0] - self.view.shape[0] + self.view.center_loca[0]
    else:
        x = self.camera[0]
    if self.view.upper_edge:
        y = self.map.shape[1] - self.view.shape[1] + self.view.center_loca[1]
    elif self.view.lower_edge:
        y = 0 + self.view.center_loca[1]
    else:
        y = self.camera[1]

    if self.camera != (x, y):
        logger.attr_align('camera_corrected', f'{location2node(self.camera)} -> {location2node((x, y))}')
    self.camera = (x, y)
    self.show_camera()

    self.predict()
    return True
def update(self, camera=True):
    """Update map image

    Args:
        camera: True to update camera position and perspective data.
    """
    self.device.screenshot()
    if not camera:
        self.view.update(image=self.device.image)
        return True

    self._view_init()
    try:
        self.view.load(self.device.image)
    except (MapDetectionError, AttributeError) as e:
        if self.info_bar_count():
            logger.info('Perspective error caused by info bar. Waiting.')
            self.handle_info_bar()
            return self.update(camera=camera)
        elif self.appear(GET_ITEMS_1):
            logger.warning('Items got. Trying handling mystery.')
            self.handle_mystery()
            return self.update(camera=camera)
        elif self.is_in_stage():
            logger.warning('Image is in stage')
            raise CampaignEnd('Image is in stage')
        elif not self.appear(IN_MAP):
            logger.warning('Image to detect is not in_map')
            if self.appear_then_click(GAME_TIPS, offset=(20, 20)):
                logger.warning('Game tips found, retrying')
                self.device.screenshot()
                self.view.load(self.device.image)
            else:
                raise e
        elif 'Camera outside map' in str(e):
            string = str(e)
            logger.warning(string)
            x, y = string.split('=')[1].strip('() ').split(',')
            self._map_swipe((-int(x.strip()), -int(y.strip())))
        else:
            raise e

    if self._prev_view is not None and np.linalg.norm(self._prev_swipe) > 0:
        swipe = self._prev_view.predict_swipe(self.view)
        if swipe is not None:
            self._prev_swipe = swipe
        self.camera = tuple(np.add(self.camera, self._prev_swipe))
        self._prev_view = None
        self._prev_swipe = None
        self.show_camera()

    if not self._correct_camera:
        self.show_camera()
        return False
    # Set camera position
    if self.view.left_edge:
        x = 0 + self.view.center_loca[0]
    elif self.view.right_edge:
        x = self.map.shape[0] - self.view.shape[0] + self.view.center_loca[0]
    else:
        x = self.camera[0]
    if self.view.lower_edge:
        y = 0 + self.view.center_loca[1]
    elif self.view.upper_edge:
        y = self.map.shape[1] - self.view.shape[1] + self.view.center_loca[1]
    else:
        y = self.camera[1]

    if self.camera != (x, y):
        logger.attr_align('camera_corrected', f'{location2node(self.camera)} -> {location2node((x, y))}')
    self.camera = (x, y)
    self.show_camera()