import cv2
import numpy as np

import vision_util


def find(img, hue_min=20, hue_max=175, sat_min=0, sat_max=255, val_min=0, val_max=255):
    """
    Detect the qualification gate.
    :param img: HSV image from the bottom camera
    :return: tuple with the location of the center of the gate in a "targeting" coordinate system:
             origin at the center of the image, axes ranging over [-1, 1]
    """
    img = np.copy(img)
    binary = vision_util.hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max)
    canny = vision_util.canny(binary, 50)

    # find contours after first processing the image with Canny edge detection
    if int(cv2.__version__.split('.')[0]) == 3:
        # OpenCV 3 returns three values from findContours; 2.x and 4.x return two
        _, contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    hulls = vision_util.convex_hulls(contours)
    cv2.drawContours(binary, hulls, -1, 255)
    cv2.imshow('bin', binary)

    # sort descending by score so the best candidates come first
    hulls.sort(key=hull_score, reverse=True)
    if len(hulls) < 2:
        return ()

    # take the two highest-scoring candidates
    left = cv2.minAreaRect(hulls[0])
    right = cv2.minAreaRect(hulls[1])
    # if we got left and right mixed up, switch them
    if right[0][0] < left[0][0]:
        left, right = right, left

    confidence = score_pair(left, right)
    if confidence < 80:
        # not confident enough in this pair; (0, 0) is the center of the targeting system
        return 0, 0

    # draw filled hulls in Blaze Orange
    cv2.drawContours(img, hulls, -1, (0, 102, 255), -1)
    # draw green outlines so we know it actually detected something
    cv2.drawContours(img, hulls, -1, (0, 255, 0), 2)
    cv2.imshow('img', img)

    center_actual = (np.mean([left[0][0], right[0][0]]), np.mean([left[0][1], right[0][1]]))
    # shape[0] is the number of rows (height); shape[1] is the number of columns (width)
    center = (center_actual[0] / img.shape[1], center_actual[1] / img.shape[0])
    # convert to the targeting coordinate system of [-1, 1]
    center = ((center[0] * 2) - 1, (center[1] * 2) - 1)
    return center
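# Hypothetical usage sketch (not part of the original module): shows one way
# find() might be exercised against a live camera. The camera index and the
# BGR -> HSV conversion step are assumptions; find() expects an HSV image.
if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    ok, frame = cap.read()
    cap.release()
    if ok:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        center = find(hsv)
        if center == ():
            print('no gate candidates found')
        else:
            # (0, 0) in targeting coordinates is the center of the image
            print('gate center (targeting coords): {0}'.format(center))
        cv2.waitKey(0)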
import cv2
import numpy as np

import vision_common


def find(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max, draw_output, output_images):
    """
    Detect high goals in the input image.
    :param img: HSV input image
    :param hue_min: minimum hue for the HSV threshold
    :param hue_max: maximum hue for the HSV threshold
    :param sat_min: minimum saturation for the HSV threshold
    :param sat_max: maximum saturation for the HSV threshold
    :param val_min: minimum value for the HSV threshold
    :param val_max: maximum value for the HSV threshold
    :param draw_output: whether to render the intermediate and result images
    :param output_images: dict populated with images showing the output of various stages of the detection process
    :return: a list of the detected targets
    """
    img = np.copy(img)
    binary = vision_common.hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max)

    # erode to remove stray dots
    erode_kernel = np.ones((1, 1), np.uint8)
    binary = cv2.erode(binary, erode_kernel, iterations=1)

    # dilate to fill any holes
    dilate_kernel = np.ones((5, 5), np.uint8)
    binary = cv2.dilate(binary, dilate_kernel, iterations=1)

    if draw_output:
        output_images['bin'] = np.copy(binary)

    if int(cv2.__version__.split('.')[0]) == 3:
        # OpenCV 3 returns three values from findContours; 2.x and 4.x return two
        _, contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # filter so we are only left with contours that look like targets
    original_count = len(contours)
    filtered_contours = [x for x in contours if contour_filter(contour=x, min_score=95, binary=binary)]
    # print 'contour filtered', original_count, 'to', len(filtered_contours)

    if draw_output:
        # convert img back to BGR so it looks right when displayed
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        # draw outlines so we know it actually detected something
        polys = [cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
                 for contour in filtered_contours]
        cv2.drawContours(img, polys, -1, (0, 0, 255), 2)

    original_targets = [(target_center(contour), cv2.boundingRect(contour)) for contour in filtered_contours]
    original_targets = [(center, (rect[2], rect[3])) for (center, rect) in original_targets]
    # original_targets is now a list of ((x, y), (width, height)) tuples
    targets = [to_targeting_coords(target, img.shape) for target in original_targets]

    if draw_output:
        # draw the targeting coordinate system on top of the result image
        imheight, imwidth, _ = img.shape
        # axes
        cv2.line(img, (int(imwidth / 2), 0), (int(imwidth / 2), int(imheight)), (255, 255, 255), 5)
        cv2.line(img, (0, int(imheight / 2)), (int(imwidth), int(imheight / 2)), (255, 255, 255), 5)
        # aiming reticle
        cv2.circle(img, (int(imwidth / 2), int(imheight / 2)), 50, (255, 255, 255), 5)
        # draw dots on the center of each target
        for target in original_targets:
            # use original_targets so we don't have to convert back to image coords
            x = int(target[0][0])
            y = int(target[0][1])
            cv2.circle(img, (x, y), 10, (0, 0, 255), -1)

    if draw_output:
        output_images['result'] = img

    output_targets = [{
        'pos': {
            'x': target[0][0],
            'y': target[0][1]
        },
        'size': {
            'width': target[1][0],
            'height': target[1][1]
        },
        'distance': target_distance(target),
        'elevation_angle': target_angle_of_elevation(target_distance(target)),
        'azimuth': target_azimuth(target)
    } for target in targets]
    return output_targets
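# Hypothetical usage sketch (not part of the original module): shows how find()
# might be invoked with draw_output enabled so the intermediate 'bin' and final
# 'result' stage images land in output_images. 'goal.jpg' and the HSV bounds are
# illustrative placeholders, not tuned values from the original code.
if __name__ == '__main__':
    frame = cv2.imread('goal.jpg')  # hypothetical test image
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    stages = {}
    detections = find(hsv, hue_min=60, hue_max=100, sat_min=100, sat_max=255,
                      val_min=100, val_max=255, draw_output=True, output_images=stages)
    for t in detections:
        print('azimuth {0}, distance {1}'.format(t['azimuth'], t['distance']))
    for name, image in stages.items():
        cv2.imshow(name, image)
    cv2.waitKey(0)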