def _size_discretization(size):
    """
    Converts a continuous size value to a discrete size category.

    :param size: continuous value of size, or None
    :return: discrete value of size (a member of Size); implicitly None when
             size is not None but falls below the tiny lower bound
    """
    # All bounds are read up front from configuration, as in the original
    # call order (config keys kept verbatim).
    config_prefix = 'image_processing_params\\size_discretization\\'
    bounds_table = (
        (Size.TINY, ares(config_prefix + 'tiny_bounds')),
        (Size.SMALL, ares(config_prefix + 'small_bounds')),
        (Size.MEDIUM, ares(config_prefix + 'medium_bounds')),
        (Size.BIG, ares(config_prefix + 'big_bounds')),
    )

    if size is None:
        return Size.NONE

    # Each bound is a (lower, upper] interval.
    for category, (lower, upper) in bounds_table:
        if lower < size <= upper:
            return category

    big_upper = bounds_table[-1][1][1]
    if size > big_upper:
        return Size.LARGE
def assume_size(distance, object_size_pixels, image_resolution, h_fov=None, v_fov=None):
    """
    Assumes the real width and height of an object and discretizes the values.

    :param distance: distance from objects scene - this value should be read
                     from sensor
    :param object_size_pixels: tuple representing width and height of object
                               in pixels
    :param image_resolution: resolution of camera image
    :param h_fov: horizontal field of view of agent's camera; this value
                  should be assigned in agent.yaml
    :param v_fov: vertical field of view of agent's camera; this value should
                  be assigned in agent.yaml
    :return: tuple representing discrete values of width and height of object
    """
    # Fall back to the configured camera field of view when not supplied.
    if h_fov is None:
        h_fov = ares('camera_info\\horizontal_field_of_view')
    if v_fov is None:
        v_fov = ares('camera_info\\vertical_field_of_view')

    ratios = (
        _calculate_pixel_per_metrics_ratio(distance, image_resolution[0], h_fov),
        _calculate_pixel_per_metrics_ratio(distance, image_resolution[1], v_fov),
    )

    # Convert pixel extents to real-world extents, then discretize each.
    real_sizes = (object_size_pixels[0] * ratios[0],
                  object_size_pixels[1] * ratios[1])
    return (_size_discretization(real_sizes[0]),
            _size_discretization(real_sizes[1]))
def find_contours(image, mode):
    """
    Finds contours in given image.

    :param image: image from which contours are to be found; it is best for
                  image to be binary image
    :param mode: openCV mode for detecting contours; mode can be one of the
                 following: cv2.RETR_EXTERNAL, cv2.RETR_FLOODFILL,
                 cv2.RETR_LIST, cv2.RETR_CCOMP, cv2.RETR_TREE
    :return: list of detected contours
    """
    # FIX: cv2.findContours returns (image, contours, hierarchy) in
    # OpenCV 3.x but only (contours, hierarchy) in OpenCV 4.x, so the
    # original 3-way unpacking raises ValueError on 4.x.  Indexing [-2]
    # selects the contours list in both versions.
    contours = cv2.findContours(image, mode, cv2.CHAIN_APPROX_SIMPLE)[-2]
    result_contours = []
    for single_contour in contours:
        perimeter = cv2.arcLength(single_contour, True)
        contour_area = cv2.contourArea(single_contour)
        # if contour area is about 98% of whole image size then contour is
        # just the frame of the image
        if contour_area / image.size > 0.98:
            continue
        # if contour area is less than given param (recommended 500) it is
        # considered to be noise and should be ignored
        if contour_area > ares(
                'image_processing_params\\contour_area_noise_border'):
            # approximate with 2% of the perimeter as epsilon
            result_contour = cv2.approxPolyDP(single_contour, perimeter * 0.02,
                                              closed=True)
            result_contours.append(result_contour)
    return result_contours
def maximum_bound():
    """
    Maximal possible color in hsv color space.

    :return: tuple in form of (hue, saturation, value)
    """
    raw_bound = ares('image_processing_params\\colors\\max_color_bound_hsv')
    return literal_eval(raw_bound)
def _assume_pattern(lines):
    """
    Given ndarray containing lines of pattern assume its pattern.

    :param lines: ndarray containing lines of pattern
    :return: pattern id defined in class Pattern from enums.py
    """
    line_count = lines.shape[0]
    angles = []
    for idx in range(line_count):
        begin_point = (lines[idx][0][0], lines[idx][0][1])
        end_point = (lines[idx][0][2], lines[idx][0][3])
        angles.append(_line_angle((begin_point, end_point)))

    eps = ares('image_processing_params\\pattern_recognition\\angle_epsilon')

    # Classify each line angle into one of four orientation buckets.
    horizontal = sum(1 for a in angles if a >= 180 - eps or a <= eps)
    vertical = sum(1 for a in angles if 90 - eps <= a <= 90 + eps)
    left_inclined = sum(1 for a in angles if 90 + eps < a < 180 - eps)
    right_inclined = sum(1 for a in angles if eps < a < 90 - eps)

    qualify_ratio = ares(
        'image_processing_params\\pattern_recognition\\'
        'percentage_of_line_type_to_qualify_as_pattern')
    total = float(len(angles))

    # A single dominant orientation qualifies as a simple line pattern.
    dominant_checks = (
        (horizontal, Pattern.HORIZONTAL_LINES),
        (vertical, Pattern.VERTICAL_LINES),
        (left_inclined, Pattern.LEFT_INCLINED_LINES),
        (right_inclined, Pattern.RIGHT_INCLINED_LINES),
    )
    for count, pattern in dominant_checks:
        if count / total > qualify_ratio:
            return pattern

    # Two complementary orientations each above half the ratio form a grid.
    half_ratio = qualify_ratio / 2
    if vertical / total >= half_ratio and horizontal / total >= half_ratio:
        return Pattern.GRID
    if left_inclined / total >= half_ratio and right_inclined / total >= half_ratio:
        return Pattern.INCLINED_GRID
    return Pattern.NONE
def _find_external_contours(self, image):
    """
    Find contours of combined objects in given image.

    :param image: raw image from camera or prepared for detection image
    :return: contours list of combined objects in the image
    """
    # NOTE: "detecion" is the key exactly as spelled in the configuration.
    threshold_low = ares(
        'image_processing_params\\combined_objects_detecion\\canny_threshold_1'
    )
    threshold_high = ares(
        'image_processing_params\\combined_objects_detecion\\canny_threshold_2'
    )
    # Edge-detect in HSV space, then close small gaps with dilate/erode.
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    edge_map = cv2.Canny(hsv_image, threshold_low, threshold_high)
    edge_map = cv2.dilate(edge_map, None, iterations=2)
    edge_map = cv2.erode(edge_map, None, iterations=2)
    return common.find_contours(edge_map, cv2.RETR_EXTERNAL)
def color_from_bounds(color):
    """
    Given a color tuple in the form of (hue, saturation, value) returns its id
    from Color in enums.py.

    :param color: tuple representing color in hsv color space in form of
                  (hue, saturation, value)
    :return: color id defined in class Color from enums.py
    """
    min_bound = minimum_bound()
    max_bound = maximum_bound()

    # Hue bounds are read up front, in the same order as before.
    red_bound = literal_eval(
        ares('image_processing_params\\colors\\red_hue_bound'))
    yellow_bound = literal_eval(
        ares('image_processing_params\\colors\\yellow_hue_bound'))
    green_bound = literal_eval(
        ares('image_processing_params\\colors\\green_hue_bound'))
    blue_bound = literal_eval(
        ares('image_processing_params\\colors\\blue_hue_bound'))
    violet_bound = literal_eval(
        ares('image_processing_params\\colors\\violet_hue_bound'))

    hue = color[0]
    saturation_in_range = min_bound[1] <= color[1] <= max_bound[1]
    value_in_range = min_bound[2] <= color[2] <= max_bound[2]
    # NOTE(review): this rejects a color only when BOTH saturation and value
    # are out of range (original used "or" inside the negation) — confirm
    # that "and" was not intended here.
    if not saturation_in_range and not value_in_range:
        return Color.NONE

    # Red hue wraps around the ends of the hue circle.
    if red_bound[0] <= hue <= max_bound[0] or min_bound[0] <= hue <= red_bound[1]:
        return Color.RED

    contiguous_ranges = (
        (Color.YELLOW, yellow_bound),
        (Color.GREEN, green_bound),
        (Color.BLUE, blue_bound),
        (Color.VIOLET, violet_bound),
    )
    for color_id, (lower, upper) in contiguous_ranges:
        if lower <= hue <= upper:
            return color_id
    return Color.NONE
def color_bounds(color_id):
    """
    Given color_id returns bounds of this color in hsv color space.

    :param color_id: color id defined in class Color from enums.py
    :return: tuple representing color bounds of given color
    """
    min_bound = minimum_bound()
    max_bound = maximum_bound()
    lower_sv = (min_bound[1], min_bound[2])
    upper_sv = (max_bound[1], max_bound[2])

    hue_config_keys = (
        (Color.RED, 'red_hue_bound'),
        (Color.YELLOW, 'yellow_hue_bound'),
        (Color.GREEN, 'green_hue_bound'),
        (Color.BLUE, 'blue_hue_bound'),
        (Color.VIOLET, 'violet_hue_bound'),
    )
    # Build the full bounds table; indexing afterwards preserves the
    # KeyError raised for an unknown color_id.
    bounds = {}
    for key, config_name in hue_config_keys:
        hue_bound = literal_eval(
            ares('image_processing_params\\colors\\' + config_name))
        bounds[key] = ((hue_bound[0], lower_sv[0], lower_sv[1]),
                       (hue_bound[1], upper_sv[0], upper_sv[1]))
    return bounds[color_id]
def _prepare_image_for_detection(self, im):
    """
    If the image is considered to be dark it is brightened. Afterwards the
    light gray / white background is removed from the image. Finally color
    quantization is applied to reduce color noise.

    :param im: raw image from camera
    :return: image ready for the process of object detection
    """
    # NOTE: "preparetion" / "quantizied" are the keys exactly as spelled in
    # the configuration.
    config_prefix = 'image_processing_params\\image_preparetion\\'
    brightness_border = ares(config_prefix + 'dark_pixels_percentage_border')
    gamma = ares(config_prefix + 'gamma_increase')
    quantized_colors = ares(config_prefix + 'number_of_quantizied_colors')

    # Brighten when the share of bright pixels is below the configured
    # border, i.e. the image counts as dark.
    if pt.percentage_of_bright_pixels(im, ColorSpace.BGR) < brightness_border:
        im = pt.adjust_gamma(im, gamma)

    im = pt.remove_light_gray_background(im)
    return pt.color_quantization_using_k_means(im, quantized_colors)
def find_pattern(image):
    """
    Find pattern and its color in the given image.

    :param image: image where only pattern is visible, image must be in BGR
                  color space
    :return: tuple in form of (pattern, pattern_color), where pattern is
             pattern id defined in class Pattern from enums.py, pattern_color
             is color id defined in class Color from enums.py
    """
    config_prefix = 'image_processing_params\\pattern_recognition\\'
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edge_map = cv2.Canny(grayscale,
                         ares(config_prefix + 'canny_threshold_1'),
                         ares(config_prefix + 'canny_threshold_2'))

    # Probabilistic Hough transform parameters come from configuration.
    min_line_length = ares(config_prefix + 'minimum_line_length')
    lines_threshold = ares(config_prefix + 'hough_lines_threshold')
    line_gap = ares(config_prefix + 'max_line_gap')
    detected_lines = cv2.HoughLinesP(image=edge_map,
                                     rho=1,
                                     theta=np.pi / 180,
                                     threshold=lines_threshold,
                                     lines=np.array([]),
                                     minLineLength=min_line_length,
                                     maxLineGap=line_gap)
    if detected_lines is None:
        return Pattern.NONE, Color.NONE

    pattern_color = _find_patterns_color(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))
    detected_pattern = _assume_pattern(detected_lines)
    return detected_pattern, pattern_color
def merge_pictures(pictures, color_space, ignore_dark_images=False):
    """
    Given a list of pictures merges them into one picture.

    :param pictures: list of pictures to be merged
    :param color_space: color space of given pictures
    :param ignore_dark_images: if True, dark images are skipped when merging
    :return: merged picture, or None when the list is empty
    """
    # FIX: use equality instead of identity — "len(x) is 0" relies on
    # CPython small-int caching and emits SyntaxWarning on modern Python.
    if len(pictures) == 0:
        return None
    if len(pictures) == 1:
        return pictures[0]
    dark_pixels_percentage_border = ares(
        'image_processing_params\\pictures_merge\\dark_pixels_percentage_border'
    )
    final_picture = pictures[0]
    for picture in pictures:
        # FIX: the original condition was inverted — it merged a picture
        # ONLY when ignore_dark_images was True AND the picture was dark,
        # and merged nothing at all when the flag was False (always
        # returning pictures[0]).  Per the docstring, dark images should be
        # SKIPPED when ignored and everything else merged.
        if ignore_dark_images and percentage_of_bright_pixels(
                picture, color_space) < dark_pixels_percentage_border:
            continue
        final_picture = cv2.addWeighted(final_picture, 0.5, picture, 0.5, 0)
    return final_picture