def _detect_pocket(img, mask_tables, cue, display_list):
    PARA1 = int(float(img.shape[1]) / 640 * 2 + 0.5)
    mask_blue, mask_bluer, mask_table, mask_table_fat = mask_tables
    p_cue_top, p_cue_bottom, cue_length = cue

    mask_table_convex, _ = zc.make_convex(mask_table_fat.copy(),
                                          use_approxPolyDp=False)
    mask_pocket = cv2.subtract(cv2.subtract(mask_table_convex, mask_table_fat),
                               mask_bluer)
    mask_pocket = zc.shrink(mask_pocket, PARA1)
    mask_pocket[p_cue_top[1] - 5:, :] = 0
    zc.check_and_display_mask("pocket", img, mask_pocket, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    contours, hierarchy = cv2.findContours(mask_pocket, mode=cv2.RETR_CCOMP,
                                           method=cv2.CHAIN_APPROX_NONE)
    pocket = None
    cnt_pocket = None
    min_dist2cue = img.shape[0] + 1
    for cnt_idx, cnt in enumerate(contours):
        pocket_center = zc.get_contour_center(cnt)
        dist2cue = zc.calc_triangle_area(pocket_center, p_cue_top,
                                         p_cue_bottom) * 2 / cue_length
        if dist2cue < min_dist2cue:
            min_dist2cue = dist2cue
            pocket = pocket_center
            cnt_pocket = cnt
    if pocket is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find pocket'}
        return (rtn_msg, None)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, (pocket, cnt_pocket))
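# The dist2cue computation above relies on the identity that the distance from
# a point P to the line through A and B equals twice the area of triangle PAB
# divided by |AB|. Below is a minimal, self-contained sketch of that identity
# using plain numpy (no zc helpers); the function name is illustrative only and
# not part of this module's API. It assumes the module-level numpy import used
# elsewhere in this file.
def _point_to_line_dist_demo(p, a, b):
    p = np.asarray(p, dtype=float)
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    area2 = abs(np.cross(b - a, p - a))    # cross product magnitude = 2 * triangle area
    return area2 / np.linalg.norm(b - a)   # divide by base length |AB|
# e.g. _point_to_line_dist_demo((0, 5), (0, 0), (10, 0)) == 5.0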
def find_table(img, display_list):
    ## find white border
    DoB = zc.get_DoB(img, 1, 31, method='Average')
    zc.check_and_display('DoB', DoB, display_list,
                         resize_max=config.DISPLAY_MAX_PIXEL,
                         wait_time=config.DISPLAY_WAIT_TIME)
    mask_white = zc.color_inrange(DoB, 'HSV', V_L=10)
    zc.check_and_display_mask('mask_white_raw', img, mask_white, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## find purple table (roughly)
    #mask_table = zc.color_inrange(img, 'HSV', H_L = 130, H_U = 160, S_L = 50, V_L = 50, V_U = 220)
    mask_ground = zc.color_inrange(img, 'HSV', H_L=18, H_U=30, S_L=75, S_U=150,
                                   V_L=100, V_U=255)
    zc.check_and_display_mask('ground', img, mask_ground, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    # red car
    mask_red = zc.color_inrange(img, 'HSV', H_L=170, H_U=10, S_L=150)
    mask_ground = np.bitwise_or(mask_ground, mask_red)
    # ceiling
    mask_ceiling = np.zeros((360, 640), dtype=np.uint8)
    mask_ceiling[:40, :] = 255
    mask_ground = np.bitwise_or(mask_ground, mask_ceiling)
    # find the screen
    mask_screen1 = zc.color_inrange(img, 'HSV', H_L=15, H_U=45, S_L=30,
                                    S_U=150, V_L=40, V_U=150)
    mask_screen2 = ((img[:, :, 2] - 5) > img[:, :, 0]).astype(np.uint8) * 255
    mask_screen = np.bitwise_or(mask_screen1, mask_screen2)
    mask_screen = np.bitwise_and(np.bitwise_not(mask_ground), mask_screen)
    mask_screen = zc.shrink(mask_screen, 5, iterations=3)
    zc.check_and_display_mask('screen_raw', img, mask_screen, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    bool_screen = zc.mask2bool([mask_screen])[0]
    c_pixels = img[bool_screen].astype(np.int8)
    d_pixels = c_pixels[:, 2] - c_pixels[:, 0]
    rb_diff = np.median(d_pixels)
    print rb_diff
    if rb_diff > 20:
        print "Case 1"
        mask_table1 = zc.color_inrange(img, 'HSV', H_L=0, H_U=115, S_U=45,
                                       V_L=35, V_U=120)
        mask_table2 = zc.color_inrange(img, 'HSV', H_L=72, H_U=120, S_L=20,
                                       S_U=60, V_L=35, V_U=150)
        mask_table = np.bitwise_or(mask_table1, mask_table2)
        mask_screen = zc.color_inrange(img, 'HSV', H_L=15, H_U=45, S_L=60,
                                       S_U=150, V_L=40, V_U=150)
    elif rb_diff > 15:
        print "Case 2"
        mask_table1 = zc.color_inrange(img, 'HSV', H_L=0, H_U=115, S_U=45,
                                       V_L=35, V_U=120)
        mask_table2 = zc.color_inrange(img, 'HSV', H_L=72, H_U=120, S_L=20,
                                       S_U=60, V_L=35, V_U=150)
        mask_table = np.bitwise_or(mask_table1, mask_table2)
        mask_screen = zc.color_inrange(img, 'HSV', H_L=15, H_U=45, S_L=35,
                                       S_U=150, V_L=40, V_U=150)
    else:
        print "Case 3"
        mask_table1 = zc.color_inrange(img, 'HSV', H_L=0, H_U=115, S_U=20,
                                       V_L=35, V_U=115)
        mask_table2 = zc.color_inrange(img, 'HSV', H_L=72, H_U=120, S_L=20,
                                       S_U=60, V_L=35, V_U=150)
        mask_table = np.bitwise_or(mask_table1, mask_table2)
        mask_screen1 = zc.color_inrange(img, 'HSV', H_L=15, H_U=45, S_L=30,
                                        S_U=150, V_L=40, V_U=150)
        mask_screen2 = ((img[:, :, 2] - 1) > img[:, :, 0]).astype(np.uint8) * 255
        mask_screen = np.bitwise_or(mask_screen1, mask_screen2)
    mask_screen = np.bitwise_and(np.bitwise_not(mask_ground), mask_screen)
    zc.check_and_display_mask('screen', img, mask_screen, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    mask_table = np.bitwise_and(np.bitwise_not(mask_screen), mask_table)
    mask_table = np.bitwise_and(np.bitwise_not(zc.shrink(mask_ground, 3)),
                                mask_table)
    mask_table, _ = zc.get_big_blobs(mask_table, min_area=50)
    mask_table = cv2.morphologyEx(mask_table, cv2.MORPH_CLOSE,
                                  zc.generate_kernel(7, 'circular'),
                                  iterations=1)
    #mask_table, _ = zc.find_largest_CC(mask_table)
    zc.check_and_display_mask('table_purple_raw', img, mask_table,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    if mask_table is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find table'}
        return (rtn_msg, None)
    #mask_table_convex, _ = zc.make_convex(mask_table.copy(), app_ratio = 0.005)
    #mask_table = np.bitwise_or(mask_table, mask_table_convex)
    mask_table_raw = mask_table.copy()
    zc.check_and_display_mask('table_purple', img, mask_table, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    mask_table_convex, _ = zc.make_convex(
        zc.shrink(mask_table, 5, iterations=5), app_ratio=0.01)
    mask_table_shrunk = zc.shrink(mask_table_convex, 5, iterations=3)
    zc.check_and_display_mask('table_shrunk', img, mask_table_shrunk,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## fine tune the purple table based on white border
    mask_white = np.bitwise_and(np.bitwise_not(mask_table_shrunk), mask_white)
    if 'mask_white' in display_list:
        gray = np.float32(mask_white)
        dst = cv2.cornerHarris(gray, 10, 3, 0.04)
        dst = cv2.dilate(dst, None)
        img_white = img.copy()
        img_white[mask_white > 0, :] = [0, 255, 0]
        img_white[dst > 2.4e7] = [0, 0, 255]
        zc.check_and_display('mask_white', img_white, display_list,
                             resize_max=config.DISPLAY_MAX_PIXEL,
                             wait_time=config.DISPLAY_WAIT_TIME)
    #mask_table, _ = zc.make_convex(mask_table, app_ratio = 0.005)
    for i in xrange(15):
        mask_table = zc.expand(mask_table, 3)
        mask_table = np.bitwise_and(np.bitwise_not(mask_white), mask_table)
        mask_table, _ = zc.find_largest_CC(mask_table)
        if mask_table is None:
            rtn_msg = {'status': 'fail',
                       'message': 'Cannot find table, case 2'}
            return (rtn_msg, None)
        if i % 4 == 3:
            mask_table, _ = zc.make_convex(mask_table, app_ratio=0.01)
            #img_display = img.copy()
            #img_display[mask_table > 0, :] = [0, 0, 255]
            #zc.display_image('table%d-b' % i, img_display, resize_max = config.DISPLAY_MAX_PIXEL, wait_time = config.DISPLAY_WAIT_TIME)
            #mask_white = np.bitwise_and(np.bitwise_not(mask_table), mask_white)
    mask_table = np.bitwise_and(np.bitwise_not(mask_white), mask_table)
    mask_table, _ = zc.find_largest_CC(mask_table)
    mask_table, hull_table = zc.make_convex(mask_table, app_ratio=0.01)
    zc.check_and_display_mask('table_purple_fixed', img, mask_table,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## check if table is big enough
    table_area = cv2.contourArea(hull_table)
    table_area_percentage = float(table_area) / img.shape[0] / img.shape[1]
    if table_area_percentage < 0.06:
        rtn_msg = {'status': 'fail',
                   'message': "Detected table too small: %f" % table_area_percentage}
        return (rtn_msg, None)

    ## find top line of table
    hull_table = np.array(zc.sort_pts(hull_table[:, 0, :], order_first='y'))
    ul = hull_table[0]
    ur = hull_table[1]
    if ul[0] > ur[0]:
        t = ul
        ul = ur
        ur = t
    i = 2
    # the top two points in the hull are probably on the top line, but may not
    # be the corners
    while i < hull_table.shape[0] and hull_table[i, 1] - hull_table[0, 1] < 80:
        pt_tmp = hull_table[i]
        if pt_tmp[0] < ul[0] or pt_tmp[0] > ur[0]:
            # computing the area of the part of triangle that lies inside the
            # table
            triangle = np.vstack([pt_tmp, ul, ur]).astype(np.int32)
            mask_triangle = np.zeros_like(mask_table)
            cv2.drawContours(mask_triangle, [triangle], 0, 255, -1)
            pts = mask_table_raw[mask_triangle.astype(bool)]
            if np.sum(pts == 255) > 10:
                break
            if pt_tmp[0] < ul[0]:
                ul = pt_tmp
            else:
                ur = pt_tmp
            i += 1
        else:
            break
    ul = [int(x) for x in ul]
    ur = [int(x) for x in ur]
    if 'table' in display_list:
        img_table = img.copy()
        img_table[mask_table.astype(bool), :] = [255, 0, 255]
        #cv2.line(img_table, tuple(ul), tuple(ur), [0, 255, 0], 3)
        zc.check_and_display('table', img_table, display_list,
                             resize_max=config.DISPLAY_MAX_PIXEL,
                             wait_time=config.DISPLAY_WAIT_TIME)

    ## sanity checks about table top line detection
    if zc.euc_dist(ul, ur) ** 2 * 3.1 < table_area:
        rtn_msg = {'status': 'fail',
                   'message': "Table top line too short: %f, %f" % (
                       zc.euc_dist(ul, ur) ** 2 * 3.1, table_area)}
        return (rtn_msg, None)
    if abs(zc.line_angle(ul, ur)) > 0.4:
        rtn_msg = {'status': 'fail',
                   'message': "Table top line tilted too much"}
        return (rtn_msg, None)
    # check if two table sides form a reasonable angle
    mask_table_bottom = mask_table.copy()
    mask_table_bottom[:-30] = 0
    p_left_most = zc.get_edge_point(mask_table_bottom, (-1, 0))
    p_right_most = zc.get_edge_point(mask_table_bottom, (1, 0))
    if p_left_most is None or p_right_most is None:
        rtn_msg = {'status': 'fail',
                   'message': "Table doesn't occupy bottom part of image"}
        return (rtn_msg, None)
    left_side_angle = zc.line_angle(ul, p_left_most)
    right_side_angle = zc.line_angle(ur, p_right_most)
    angle_diff = zc.angle_dist(left_side_angle, right_side_angle,
                               angle_range=math.pi * 2)
    if abs(angle_diff) > 2.0:
        rtn_msg = {'status': 'fail',
                   'message': "Angle between two side edge not right: %f" % angle_diff}
        return (rtn_msg, None)
    if 'table' in display_list:
        img_table = img.copy()
        img_table[mask_table.astype(bool), :] = [255, 0, 255]
        cv2.line(img_table, tuple(ul), tuple(ur), [0, 255, 0], 3)
        zc.check_and_display('table', img_table, display_list,
                             resize_max=config.DISPLAY_MAX_PIXEL,
                             wait_time=config.DISPLAY_WAIT_TIME)

    ## rotate to make opponent upright, use table edge as reference
    pts1 = np.float32([ul, ur,
                       [ul[0] + (ur[1] - ul[1]), ul[1] - (ur[0] - ul[0])]])
    pts2 = np.float32([[0, config.O_IMG_HEIGHT],
                       [config.O_IMG_WIDTH, config.O_IMG_HEIGHT],
                       [0, 0]])
    M = cv2.getAffineTransform(pts1, pts2)
    img[np.bitwise_not(zc.get_mask(img, rtn_type="bool", th=3)), :] = [3, 3, 3]
    img_rotated = cv2.warpAffine(img, M,
                                 (config.O_IMG_WIDTH, config.O_IMG_HEIGHT))

    ## sanity checks about rotated opponent image
    bool_img_rotated_valid = zc.get_mask(img_rotated, rtn_type="bool")
    if float(bool_img_rotated_valid.sum()) / config.O_IMG_WIDTH / config.O_IMG_HEIGHT < 0.6:
        rtn_msg = {'status': 'fail',
                   'message': "Valid area too small after rotation: %f" % (
                       float(bool_img_rotated_valid.sum()) / config.O_IMG_WIDTH / config.O_IMG_HEIGHT)}
        return (rtn_msg, None)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, (img_rotated, mask_table, M))
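# The rotation step above warps the detected table top edge (ul -> ur) onto the
# bottom edge of an upright opponent frame with a three-point affine transform:
# the third source point is ul offset perpendicular to the top edge, which pins
# down rotation and scale rather than translation alone. A minimal sketch of
# that mapping with made-up corner coordinates and output size (values are
# illustrative only, not taken from config); assumes the module-level cv2 and
# numpy imports used elsewhere in this file.
def _affine_rotation_demo():
    out_w, out_h = 640, 360                       # hypothetical output size
    ul, ur = [100, 200], [500, 180]               # hypothetical table top corners
    # third source point: ul plus the top-edge vector rotated 90 degrees
    third = [ul[0] + (ur[1] - ul[1]), ul[1] - (ur[0] - ul[0])]
    pts_src = np.float32([ul, ur, third])
    pts_dst = np.float32([[0, out_h], [out_w, out_h], [0, 0]])
    M = cv2.getAffineTransform(pts_src, pts_dst)  # 2x3 affine matrix
    dummy = np.zeros((360, 640, 3), dtype=np.uint8)
    return cv2.warpAffine(dummy, M, (out_w, out_h))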
def find_opponent(img, img_prev, display_list):
    def draw_flow(img, flow, step=16):
        h, w = img.shape[:2]
        y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1)
        fx, fy = flow[y, x].T
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        cv2.polylines(vis, lines, 0, (0, 255, 0))
        for (x1, y1), (x2, y2) in lines:
            cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
        return vis

    def draw_rects(img, rects, color):
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

    #start_time = current_milli_time()

    ## General preparations
    if 'opponent' in display_list:
        img_opponent = img_prev.copy()
    zc.check_and_display('rotated', img, display_list, is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    zc.check_and_display('rotated_prev', img_prev, display_list,
                         is_resize=False, wait_time=config.DISPLAY_WAIT_TIME)
    bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bw_prev = cv2.cvtColor(img_prev, cv2.COLOR_BGR2GRAY)
    # valid part of img_prev
    mask_img_prev_valid = zc.get_mask(img_prev, rtn_type="mask")
    bool_img_prev_valid = zc.shrink(mask_img_prev_valid, 15,
                                    iterations=3).astype(bool)
    bool_img_prev_invalid = np.bitwise_not(bool_img_prev_valid)
    mask_white_prev = zc.color_inrange(img_prev, 'HSV', S_U=50, V_L=130)
    bool_white_prev = zc.shrink(mask_white_prev, 13, iterations=3,
                                method='circular').astype(bool)
    # valid part of img
    mask_img_valid = zc.get_mask(img, rtn_type="mask")
    bool_img_valid = zc.shrink(mask_img_valid, 15, iterations=3).astype(bool)
    bool_img_invalid = np.bitwise_not(bool_img_valid)
    mask_white = zc.color_inrange(img, 'HSV', S_U=50, V_L=130)
    bool_white = zc.shrink(mask_white, 13, iterations=3,
                           method='circular').astype(bool)
    # prior score according to height
    row_score, col_score = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    row_score = img.shape[0] * 1.2 - row_score.astype(np.float32)
    #print "time0: %f" % (current_milli_time() - start_time)

    ## method 1: optical flow - dense
    opt_flow = np.zeros((bw.shape[0], bw.shape[1], 2), dtype=np.float32)
    opt_flow[::2, ::2, :] = cv2.calcOpticalFlowFarneback(
        bw_prev[::2, ::2], bw[::2, ::2], pyr_scale=0.5, levels=1, winsize=15,
        iterations=3, poly_n=7, poly_sigma=1.5, flags=0)
    if 'denseflow' in display_list:
        zc.display_image('denseflow', draw_flow(bw, opt_flow, step=16),
                         is_resize=False, wait_time=config.DISPLAY_WAIT_TIME)
    # clean optical flow
    mag_flow = np.sqrt(np.sum(np.square(opt_flow), axis=2))
    bool_flow_valid = mag_flow > 2
    bool_flow_valid = np.bitwise_and(bool_flow_valid, bool_img_prev_valid)
    bool_flow_valid = np.bitwise_and(bool_flow_valid,
                                     np.bitwise_not(bool_white_prev))
    bool_flow_invalid = np.bitwise_not(bool_flow_valid)
    # subtract the flow average from all the flow
    x_ave = np.mean(opt_flow[bool_flow_valid, 0])
    y_ave = np.mean(opt_flow[bool_flow_valid, 1])
    opt_flow[:, :, 0] -= x_ave
    opt_flow[:, :, 1] -= y_ave
    opt_flow[bool_flow_invalid, :] = 0
    if 'denseflow_cleaned' in display_list:
        zc.display_image('denseflow_cleaned', draw_flow(bw, opt_flow, step=16),
                         is_resize=False, wait_time=config.DISPLAY_WAIT_TIME)
    # give the flow a "score"
    score_flow = np.sqrt(np.sum(np.square(opt_flow), axis=2))
    score_flow = score_flow * row_score
    score_horizonal = np.sum(score_flow, axis=0)
    low_pass_h = np.ones(120)
    low_pass_h /= low_pass_h.sum()
    score_horizonal_filtered_dense = np.convolve(score_horizonal, low_pass_h,
                                                 mode='same')
    if 'dense_hist' in display_list:
        plot_bar(score_horizonal_filtered_dense, name='dense_hist')
        print np.argmax(score_horizonal_filtered_dense)
    if 'opponent' in display_list:
        cv2.circle(img_opponent,
                   (np.argmax(score_horizonal_filtered_dense), 220), 20,
                   (0, 255, 0), -1)
    #print "time1: %f" % (current_milli_time() - start_time)

    ## method 2: optical flow - LK
    feature_params = dict(maxCorners=100, qualityLevel=0.03, minDistance=5,
                          blockSize=3)
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    p0 = cv2.goodFeaturesToTrack(bw_prev, mask=mask_img_prev_valid,
                                 useHarrisDetector=False, **feature_params)
    if p0 is None:
        # TODO: this is also a possible indication that the rally is not on
        rtn_msg = {'status': 'fail',
                   'message': 'No good featuresToTrack at all, probably no one '
                              'in the scene'}
        return (rtn_msg, None)
    p1, st, err = cv2.calcOpticalFlowPyrLK(bw_prev, bw, p0, None, **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    if 'LKflow' in display_list:
        img_LK = img_prev.copy()
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            cv2.line(img_LK, (a, b), (c, d), (0, 255, 0), 2)
            cv2.circle(img_LK, (c, d), 5, (0, 255, 0), -1)
        zc.display_image('LKflow', img_LK, is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    bool_flow_valid = np.bitwise_and(bool_img_valid,
                                     np.bitwise_not(bool_white))
    bool_flow_invalid = np.bitwise_not(bool_flow_valid)
    bool_flow_valid_prev = np.bitwise_and(bool_img_prev_valid,
                                          np.bitwise_not(bool_white_prev))
    bool_flow_invalid_prev = np.bitwise_not(bool_flow_valid_prev)
    is_reallygood = np.zeros((good_new.shape[0]), dtype=bool)
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        if bool_flow_invalid_prev[d, c] or max(a, b) > config.O_IMG_HEIGHT \
                or min(a, b) < 0 or bool_flow_invalid[b, a]:
            continue
        is_reallygood[i] = True
    reallygood_new = good_new[is_reallygood]
    reallygood_old = good_old[is_reallygood]
    motion = reallygood_new - reallygood_old
    motion_real = motion - np.mean(motion, axis=0)
    if 'LKflow_cleaned' in display_list:
        img_LK_cleaned = img_prev.copy()
        img_LK_cleaned[bool_flow_invalid_prev, :] = [0, 0, 255]
        for i, (new, old) in enumerate(zip(reallygood_new, reallygood_old)):
            c, d = old.ravel()
            cv2.line(img_LK_cleaned, (c, d),
                     (c + motion_real[i, 0], d + motion_real[i, 1]),
                     (0, 255, 0), 2)
            cv2.circle(img_LK_cleaned, (c, d), 5, (0, 255, 0), -1)
        zc.display_image('LKflow_cleaned', img_LK_cleaned, is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    score_flow = np.zeros(bw.shape, dtype=np.float32)
    score_flow[reallygood_old[:, 1].astype(np.int),
               reallygood_old[:, 0].astype(np.int)] = np.sqrt(
        np.sum(np.square(motion_real), axis=1))
    score_flow = score_flow * row_score
    score_horizonal = np.sum(score_flow, axis=0)
    low_pass_h = np.ones(120)
    low_pass_h /= low_pass_h.sum()
    score_horizonal_filtered_LK = np.convolve(score_horizonal, low_pass_h,
                                              mode='same')
    if 'LK_hist' in display_list:
        plot_bar(score_horizonal_filtered_LK, name='LK_hist')
        print np.argmax(score_horizonal_filtered_LK)

    # if motion too small, probably no one is there...
    if np.max(score_horizonal_filtered_LK) < 300:
        # TODO: this is also a possible indication that the rally is not on
        rtn_msg = {'status': 'fail',
                   'message': 'Motion too small, probably no one in the scene'}
        return (rtn_msg, None)
    if 'opponent' in display_list:
        cv2.circle(img_opponent,
                   (np.argmax(score_horizonal_filtered_LK), 220), 20,
                   (0, 0, 255), -1)
    #print "time2: %f" % (current_milli_time() - start_time)

    ## method 3: remove white wall
    mask_white = zc.color_inrange(img_prev, 'HSV', S_U=50, V_L=130)
    zc.check_and_display('mask_white_wall', mask_white, display_list,
                         resize_max=config.DISPLAY_MAX_PIXEL,
                         wait_time=config.DISPLAY_WAIT_TIME)
    score = row_score
    score[bool_img_invalid] = 0
    score[bool_white] = 0
    score_horizonal = np.sum(score, axis=0)
    low_pass_h = np.ones(120)
    low_pass_h /= low_pass_h.sum()
    score_horizonal_filtered_wall = np.convolve(score_horizonal, low_pass_h,
                                                mode='same')
    if 'wall_hist' in display_list:
        plot_bar(score_horizonal_filtered_wall, name='wall_hist')
        print np.argmax(score_horizonal_filtered_wall)
    if 'opponent' in display_list:
        cv2.circle(img_opponent,
                   (np.argmax(score_horizonal_filtered_wall), 220), 20,
                   (255, 0, 0), -1)
    #print "time3: %f" % (current_milli_time() - start_time)

    ## combining results of three methods
    #score_horizonal_filtered = score_horizonal_filtered_dense * score_horizonal_filtered_LK * score_horizonal_filtered_wall
    score_horizonal_filtered = score_horizonal_filtered_dense / 10 + \
                               score_horizonal_filtered_LK * 10
    opponent_x = np.argmax(score_horizonal_filtered)
    if 'opponent' in display_list:
        cv2.circle(img_opponent, (opponent_x, 220), 20, (200, 200, 200), -1)
        zc.check_and_display('opponent', img_opponent, display_list,
                             is_resize=False,
                             wait_time=config.DISPLAY_WAIT_TIME)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, opponent_x)
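# Each method above reduces a per-pixel motion/score map to a per-column score,
# smooths it with a ~120-pixel box filter via np.convolve, and takes the argmax
# as the opponent's x position. A minimal, numpy-only sketch of that
# column-scoring idea on a synthetic score map (the function name, window width
# default, and example values are illustrative only):
def _column_score_demo(score_map, window=120):
    col_score = np.sum(score_map, axis=0)            # collapse rows per column
    low_pass = np.ones(window) / float(window)       # box (moving-average) filter
    smoothed = np.convolve(col_score, low_pass, mode='same')
    return int(np.argmax(smoothed)), smoothed
# e.g. on a 360x640 map with a blob of "motion" around columns 360-440:
# x, _ = _column_score_demo(np.pad(np.ones((100, 80)), ((0, 260), (360, 200)), 'constant'))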
def _detect_table(img, display_list):
    SE1 = int(float(img.shape[1]) / 640 * 5 + 0.5)
    DOB_PARA = int(float(img.shape[1]) / 640 * 51 + 0.5)

    ## detect blue/purple table
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask_blue = zc.color_inrange(img, 'HSV', hsv=hsv, H_L=88, H_U=125, S_L=120,
                                 V_L=100)
    mask_blue = zc.expand(mask_blue, SE1)
    mask_table, _ = zc.find_largest_CC(mask_blue)
    if mask_table is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find table'}
        return (rtn_msg, None)
    mask_blue = zc.shrink(mask_blue, SE1)

    # revise table detection based on how blue the current table is
    table_hsv_ave = np.mean(hsv[mask_blue.astype(bool)], axis=0)
    mask_blue = zc.color_inrange(img, 'HSV', hsv=hsv,
                                 H_L=table_hsv_ave[0] - 15,
                                 H_U=table_hsv_ave[0] + 15,
                                 S_L=min(120, table_hsv_ave[1] - 30),
                                 V_L=min(80, table_hsv_ave[2] - 50))
    #mask_blue = cv2.morphologyEx(mask_blue, cv2.MORPH_OPEN, zc.generate_kernel(3, 'square'), iterations = 2)
    mask_table, _ = zc.find_largest_CC(mask_blue)
    if mask_table is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find table'}
        return (rtn_msg, None)
    mask_table_fat, _ = zc.find_largest_CC(
        zc.expand(mask_blue, SE1, iterations=2))
    mask_table_fat = zc.shrink(mask_table_fat, SE1, iterations=2)
    zc.check_and_display_mask("blue", img, mask_blue, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    zc.check_and_display_mask("table", img, mask_table, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## detect the part that is bluer than neighbors, which is likely table edge
    blue_dist = zc.color_dist(img, 'HSV', HSV_ref=table_hsv_ave, useV=False)
    blue_DoB = zc.get_DoB(blue_dist, DOB_PARA, 1, method='Average')
    mask_bluer = zc.color_inrange(blue_DoB, 'single', L=20)
    zc.check_and_display_mask("bluer", img, mask_bluer, display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, (mask_blue, mask_bluer, mask_table, mask_table_fat))
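# The table detection above is a two-pass adaptive color threshold: a fixed
# blue HSV range seeds the table region, then the thresholds are re-centered on
# the mean HSV actually observed over that seed. A minimal sketch of the same
# idea using cv2.inRange directly instead of zc.color_inrange (the ranges are
# illustrative, and unlike the helper above this sketch does not handle hue
# wrap-around); assumes the module-level cv2 and numpy imports.
def _adaptive_blue_mask_demo(img_bgr):
    hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
    # pass 1: fixed seed range
    seed = cv2.inRange(hsv, np.array([88, 120, 100], dtype=np.uint8),
                       np.array([125, 255, 255], dtype=np.uint8))
    if cv2.countNonZero(seed) == 0:
        return seed
    # pass 2: re-center thresholds on the observed table color
    h_ave, s_ave, v_ave = np.mean(hsv[seed.astype(bool)], axis=0)
    lower = np.array([max(h_ave - 15, 0),
                      min(120, max(s_ave - 30, 0)),
                      min(80, max(v_ave - 50, 0))], dtype=np.uint8)
    upper = np.array([min(h_ave + 15, 179), 255, 255], dtype=np.uint8)
    return cv2.inRange(hsv, lower, upper)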
def _detect_cue(img, mask_tables, mask_balls, display_list):
    CUE_MIN_LENGTH = int(float(img.shape[1]) / 640 * 40 + 0.5)
    PARA1 = int(float(img.shape[1]) / 640 * 2 + 0.5)
    mask_blue, mask_bluer, mask_table, mask_table_fat = mask_tables

    ### edges on the table
    #img_table = np.zeros(img.shape, dtype=np.uint8)
    #img_table = cv2.bitwise_and(img, img, dst = img_table, mask = mask_table_convex)
    #bw_table = cv2.cvtColor(img_table, cv2.COLOR_BGR2GRAY)
    #edge_table = cv2.Canny(bw_table, 80, 160)
    #edge_table = zc.expand(edge_table, 2)
    #zc.check_and_display("edge_table", edge_table, display_list, resize_max = config.DISPLAY_MAX_PIXEL, wait_time = config.DISPLAY_WAIT_TIME)

    ### detect cue
    #lines = cv2.HoughLinesP(edge_table, 1, np.pi/180, 30, minLineLength = 70, maxLineGap = 3)
    #if lines is None:
    #    rtn_msg = {'status': 'fail', 'message' : 'Cannot find cue'}
    #    return (rtn_msg, None)
    #lines = lines[0]
    #if 'cue_edge' in display_list:
    #    img_cue = img.copy()
    #    for line in lines:
    #        pt1 = (line[0], line[1])
    #        pt2 = (line[2], line[3])
    #        cv2.line(img_cue, pt1, pt2, (255, 0, 255), 2)
    #    zc.check_and_display("cue_edge", img_cue, display_list, resize_max = config.DISPLAY_MAX_PIXEL, wait_time = config.DISPLAY_WAIT_TIME)

    ## interesting parts on the table (pockets, cue, hand, etc.)
    mask_table_convex, _ = zc.make_convex(mask_table.copy(),
                                          use_approxPolyDp=False)
    zc.check_and_display_mask("table_convex", img, mask_table_convex,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    mask_interesting = cv2.subtract(
        cv2.subtract(mask_table_convex, mask_table), mask_bluer)
    mask_interesting = cv2.subtract(mask_interesting, mask_balls)
    mask_interesting = zc.shrink(mask_interesting, PARA1)
    zc.check_and_display_mask("interesting", img, mask_interesting,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)
    # find the blob with cue (and probably hand)
    # TODO: this may be more robust with find_largest_CC function, in the case
    # of half ball close to the bottom
    mask_cue_hand = zc.get_closest_blob(
        mask_interesting.copy(), (img.shape[0], img.shape[1] / 2),
        min_length=CUE_MIN_LENGTH,
        hierarchy_req='outer')  # cue must be close to the bottom

    ## find cue top
    p_cue_top = zc.get_edge_point(mask_cue_hand, (0, -1))
    if p_cue_top is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find cue top'}
        return (rtn_msg, None)

    ## find cue bottom
    # the cue detected initially may not have reached the bottom of the image
    for i in xrange(10):
        mask_cue_hand = zc.expand_with_bound(mask_cue_hand,
                                             cv2.bitwise_not(mask_bluer))
    mask_cue_bottom = mask_cue_hand.copy()
    mask_cue_bottom[:-2, :] = 0
    mask_cue_bottom[:, :p_cue_top[0] - 40] = 0
    mask_cue_bottom[:, p_cue_top[0] + 40:] = 0
    nonzero = np.nonzero(mask_cue_bottom)
    if len(nonzero) < 2 or len(nonzero[0]) == 0:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find cue bottom'}
        return (rtn_msg, None)
    rows, cols = nonzero
    p_cue_bottom = ((np.min(cols) + np.max(cols)) / 2, img.shape[0])

    ## cue info
    cue_length = zc.euc_dist(p_cue_top, p_cue_bottom)
    if 'cue' in display_list:
        img_cue = img.copy()
        img_cue[mask_cue_hand > 0, :] = [0, 255, 255]
        cv2.circle(img_cue, p_cue_top, 3, (255, 0, 255), -1)
        cv2.line(img_cue, p_cue_top, p_cue_bottom, (255, 0, 255), 2)
        zc.display_image("cue", img_cue, resize_max=config.DISPLAY_MAX_PIXEL,
                         wait_time=config.DISPLAY_WAIT_TIME)

    ## skeletonize
    #skeleton_cue_hand = zc.skeletonize(mask_cue_hand)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, (p_cue_top, p_cue_bottom, cue_length))
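# zc.get_edge_point above is used to find the extreme point of a binary mask in
# a given direction (here (0, -1) selects the top-most cue pixel). A minimal
# numpy-only sketch of that idea for the top-most case, assuming a 0/255 uint8
# mask; the function name is illustrative and the real zc implementation may
# differ.
def _topmost_point_demo(mask):
    rows, cols = np.nonzero(mask)
    if len(rows) == 0:
        return None
    idx = np.argmin(rows)              # smallest row index == top-most pixel
    return (cols[idx], rows[idx])      # (x, y), matching OpenCV point order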