def window(name):
    # Create a frame for this window and fill it with a nice color
    frame = np.zeros((200, 500, 3), np.uint8)
    frame[:] = (49, 52, 49)

    # Inform cvui that the components to be rendered from now on belong to
    # a particular window.
    #
    # If you don't inform that, cvui will assume the components belong to
    # the default window (informed in cvui.init()). In that case, the
    # interactions with all other windows being used will not work.
    cvui.context(name)

    # Show info regarding the window
    cvui.printf(frame, 110, 50, '%s - click the button', name)

    # Buttons return true if they are clicked
    if cvui.button(frame, 110, 90, 'Button'):
        cvui.printf(frame, 200, 95, 'Button clicked!')
        print('Button clicked on: ', name)

    # Tell cvui to update its internal structures regarding a particular window.
    #
    # If cvui is being used in multiple windows, you need to enclose all component
    # calls between the pair cvui.context(NAME)/cvui.update(NAME), where NAME is
    # the name of the window being worked on.
    cvui.update(name)

    # Show the content of this window on the screen
    cv2.imshow(name, frame)
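# A minimal sketch of a driver loop for window() above, assuming WINDOW1_NAME
# and WINDOW2_NAME are window-title strings defined elsewhere in the program.
# cvui.init() watches the default window; cvui.watch() registers mouse
# tracking for any additional one.
def main():
    cv2.namedWindow(WINDOW1_NAME)
    cv2.namedWindow(WINDOW2_NAME)

    cvui.init(WINDOW1_NAME)
    cvui.watch(WINDOW2_NAME)

    while True:
        # Each call renders one window inside its own context/update pair.
        window(WINDOW1_NAME)
        window(WINDOW2_NAME)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break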
def windows_show(self):
    cvui.update()
    cvui.context(self.mainwindow_name)
    cvui.imshow(self.mainwindow_name, self.mainframe)
    cv2.displayStatusBar(
        self.mainwindow_name,
        '{:3.2f}pmm X:{:03d} Y:{:03d} Focus:{:05d} Contours:{:02d} Xmm:{:7.2f} Ymm:{:7.2f} Color:{}'
        .format(self.cal_line_distance_total, self.mouse_x, self.mouse_y,
                int(self.focus_value), self.contours_found_quan,
                self.actual_mm_x, self.actual_mm_y, str(self.mouse_color)), 0)
def main():
    # We have one mat for each window.
    frame1 = np.zeros((1024, 768, 3), np.uint8)

    # Create variables used by some components
    window1_values = []
    window2_values = []

    img = cv2.imread('Images/yoga.jpg', cv2.IMREAD_COLOR)
    imgRed = cv2.imread('Images/mic.jpg', cv2.IMREAD_COLOR)
    imgGray = cv2.imread('Images/gamb.jpg', cv2.IMREAD_COLOR)

    img = cv2.resize(img, (200, 200))
    imgRed = cv2.resize(imgRed, (200, 200))
    imgGray = cv2.resize(imgGray, (200, 200))

    padding = 10

    # Fill the vector with a few random values
    for i in range(0, 20):
        window1_values.append(random.uniform(0., 300.0))
        window2_values.append(random.uniform(0., 300.0))

    # Start two OpenCV windows
    cv2.namedWindow(WINDOW1_NAME)
    cv2.namedWindow(WINDOW2_NAME)

    # Init cvui and inform it to use the first window as the default one.
    # cvui.init() will automatically watch the informed window.
    cvui.init(WINDOW1_NAME)

    # Tell cvui to keep track of mouse events in window2 as well.
    cvui.watch(WINDOW2_NAME)

    while True:
        # Inform cvui that all subsequent component calls and events are related to window 1.
        cvui.context(WINDOW1_NAME)

        # Fill the frame with a nice color
        frame1[:] = (49, 52, 49)

        cvui.beginRow(frame1, 10, 20, -1, -1, 10)
        cvui.image(img)
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()

        # Update all components of window1, e.g. mouse clicks, and show it.
        cvui.update(WINDOW1_NAME)
        cv2.imshow(WINDOW1_NAME, frame1)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
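# A hedged sketch of the window-2 rendering that the cvui.watch(WINDOW2_NAME)
# call above implies but the loop never performs. frame2 is assumed to be
# allocated like frame1; window2_values is the list filled earlier.
def render_window2(frame2, window2_values):
    cvui.context(WINDOW2_NAME)
    frame2[:] = (49, 52, 49)

    cvui.beginRow(frame2, 10, 20, -1, -1, 10)
    cvui.sparkline(window2_values, 200, 100)
    cvui.endRow()

    cvui.update(WINDOW2_NAME)
    cv2.imshow(WINDOW2_NAME, frame2)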
def compact(name):
    # Create a frame for this window and fill it with a nice color
    frame = np.zeros((200, 500, 3), np.uint8)
    frame[:] = (49, 52, 49)

    # Inform cvui that the components to be rendered from now on belong to
    # a particular window.
    #
    # If you don't inform that, cvui will assume the components belong to
    # the default window (informed in cvui.init()). In that case, the
    # interactions with all other windows being used will not work.
    cvui.context(name)

    cvui.printf(frame, 110, 50, '%s - click the button', name)

    if cvui.button(frame, 110, 90, 'Button'):
        cvui.printf(frame, 200, 95, 'Button clicked!')
        print('Button clicked on: ', name)

    # Tell cvui to update its internal structures regarding a particular window,
    # then show it. Below we are using cvui.imshow(), which is cvui's version of
    # the existing cv2.imshow(). They behave exactly the same; the only difference
    # is that cvui.imshow() will automatically call cvui.update(name) for you.
    cvui.imshow(name, frame)
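# For reference: per the comment above, cvui.imshow(name, frame) behaves like
# the explicit pair below, so compact() could equally end with these two calls.
def compact_explicit_ending(name, frame):
    cvui.update(name)        # refresh cvui's internal state for this window
    cv2.imshow(name, frame)  # plain OpenCV display call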
def run(self):
    self.main_frame = np.zeros((380, 400, 3), np.uint8)
    cv.namedWindow(self.main_window_name)
    cvui.init(self.main_window_name)

    while True:
        cvui.context(self.main_window_name)
        self.main_frame[:] = (49, 52, 49)

        cvui.beginColumn(self.main_frame, 50, 20, -1, -1, 10)
        cvui.text('Click to open an image')
        if cvui.button('Select Image'):
            self.load_image()
        cvui.text('Load a previously saved txt file to current image')
        if cvui.button('Read Txt file'):
            self.read_txt()
        cvui.text('Save to txt file')
        if cvui.button('Save'):
            self.save_to_txt()
        if cvui.button('Show/Hide Index'):
            self.has_index = not self.has_index
        if cvui.button('Clear All Points'):
            self.points.clear()
            self.high_light_point = None
        cvui.text('Max click distance to select one point')
        cvui.text('adjust smaller if you want to click two close points')
        cvui.trackbar(200, self.threshold, 0.000, 0.02, 2, '%.4Lf')
        cvui.endColumn()

        cvui.update(self.main_window_name)
        cv.imshow(self.main_window_name, self.main_frame)

        key = cv.waitKey(20)
        if key == 27 or not self.is_window_open(self.main_window_name):
            self.tkinter_root.destroy()
            break

        if self.is_window_open(self.image_window_name):
            cvui.context(self.image_window_name)
            self.show_image = cv.resize(
                self.origin_image, (self.actual_width, self.actual_height))
            for i, point in enumerate(self.points):
                if i not in self.deleted_list:
                    self.draw_intersection(self.show_image, point, i)
            if self.high_light_point is not None:
                point = self.points[self.high_light_point]
                cv.circle(self.show_image,
                          (int(point[0] * self.actual_width),
                           int(point[1] * self.actual_height)),
                          5, (0, 255, 255), 1)
            # if self.image_window_flag:
            cvui.update(self.image_window_name)
            cv.imshow(self.image_window_name, self.show_image)

        self.keyboard(key)
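# A hedged sketch of the is_window_open() helper used above but not shown in
# this snippet; it presumably wraps OpenCV's window-property query.
def is_window_open(self, name):
    # WND_PROP_VISIBLE drops below 1.0 once the user closes the window
    return cv.getWindowProperty(name, cv.WND_PROP_VISIBLE) >= 1.0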
def main():
    # We have one mat for each window.
    frame1 = np.zeros((200, 500, 3), np.uint8)
    frame2 = np.zeros((200, 500, 3), np.uint8)
    frame3 = np.zeros((200, 500, 3), np.uint8)

    # Init cvui, instructing it to create 3 OpenCV windows.
    windows = [WINDOW1_NAME, WINDOW2_NAME, WINDOW3_NAME]
    cvui.init(windows, 3)

    while True:
        # Clear all frames
        frame1[:] = (49, 52, 49)
        frame2[:] = (49, 52, 49)
        frame3[:] = (49, 52, 49)

        # Inform cvui that all subsequent component calls and events are related to window 1.
        # We do that by calling cvui.context().
        cvui.context(WINDOW1_NAME)
        cvui.printf(frame1, 10, 10,
                    'In window1, mouse is at: %d,%d (obtained from window name)',
                    cvui.mouse(WINDOW1_NAME).x, cvui.mouse(WINDOW1_NAME).y)
        if cvui.mouse(WINDOW1_NAME, cvui.LEFT_BUTTON, cvui.IS_DOWN):
            cvui.printf(frame1, 10, 30, 'In window1, mouse LEFT_BUTTON is DOWN')
        cvui.imshow(WINDOW1_NAME, frame1)

        # From this point on, we are going to render the second window. We need to inform cvui
        # that all updates and components from now on are connected to window 2.
        cvui.context(WINDOW2_NAME)
        cvui.printf(frame2, 10, 10,
                    'In window2, mouse is at: %d,%d (obtained from context)',
                    cvui.mouse().x, cvui.mouse().y)
        if cvui.mouse(cvui.LEFT_BUTTON, cvui.IS_DOWN):
            cvui.printf(frame2, 10, 30, 'In window2, mouse LEFT_BUTTON is DOWN')
        cvui.imshow(WINDOW2_NAME, frame2)

        # Finally we are going to render the third window. Again we need to inform cvui
        # that all updates and components from now on are connected to window 3.
        cvui.context(WINDOW3_NAME)
        cvui.printf(frame3, 10, 10, 'In window1, mouse is at: %d,%d',
                    cvui.mouse(WINDOW1_NAME).x, cvui.mouse(WINDOW1_NAME).y)
        cvui.printf(frame3, 10, 30, 'In window2, mouse is at: %d,%d',
                    cvui.mouse(WINDOW2_NAME).x, cvui.mouse(WINDOW2_NAME).y)
        cvui.printf(frame3, 10, 50, 'In window3, mouse is at: %d,%d',
                    cvui.mouse(WINDOW3_NAME).x, cvui.mouse(WINDOW3_NAME).y)
        if cvui.mouse(WINDOW1_NAME, cvui.LEFT_BUTTON, cvui.IS_DOWN):
            cvui.printf(frame3, 10, 90, 'window1: LEFT_BUTTON is DOWN')
        if cvui.mouse(WINDOW2_NAME, cvui.LEFT_BUTTON, cvui.IS_DOWN):
            cvui.printf(frame3, 10, 110, 'window2: LEFT_BUTTON is DOWN')
        if cvui.mouse(WINDOW3_NAME, cvui.LEFT_BUTTON, cvui.IS_DOWN):
            cvui.printf(frame3, 10, 130, 'window3: LEFT_BUTTON is DOWN')
        cvui.imshow(WINDOW3_NAME, frame3)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def teach_step(posed='Images/yoga.jpg', Check=False):
    Estimator = TfPoseEstimator
    frame1 = np.zeros((768, 1024, 3), np.uint8)
    WINDOW1_NAME = 'Dance Dance Pose'
    cv2.namedWindow(WINDOW1_NAME)
    cvui.init(WINDOW1_NAME)

    inferred = infer(posed)
    original = cv2.imread(posed)
    # time.sleep(5)
    if original.shape[0] != 480 or original.shape[1] != 640:
        original = cv2.resize(original, (368, 368))
    if Check:
        cv2.imwrite('check.jpg', inferred)
    inferred = inferred - original
    # inferred = cv2.copyMakeBorder(inferred[:, int(np.nonzero(inferred)[1][0] / 2):], 0, 0, 0,
    #                               int(np.nonzero(inferred)[1][0] / 2), cv2.BORDER_REPLICATE)

    timeout = time.time() + 10
    capture = cv2.VideoCapture(0)
    counter = [time.time()]
    x = 1

    while True:
        cvui.context(WINDOW1_NAME)
        ret, frame = capture.read()
        gray = frame
        if gray.shape[0] != 480 or gray.shape[1] != 640:
            gray = cv2.resize(gray, (368, 368))
        dst = cv2.addWeighted(inferred, 0.5, gray, 0.5, 0)

        frame1[:] = (49, 52, 49)
        cvui.beginRow(frame1, 10, 20, -1, -1, 30)
        cvui.image(dst)
        cvui.image(original)
        cvui.endRow()

        cvui.beginRow(frame1, 10, 400, -1, -1, 30)
        cvui.counter(frame1, 100, 410, counter, 0.1, '%.1f')
        counter = [timeout - time.time() for x in counter]
        cvui.text(frame1, 10, 410, 'Tick tick')
        cvui.endRow()

        cvui.update(WINDOW1_NAME)
        cv2.imshow(WINDOW1_NAME, frame1)

        if cv2.waitKey(1) & 0xFF == ord('q') or time.time() > timeout:
            filename = 'captures/capture' + str(int(x)) + '.png'
            x = x + 1
            cv2.imwrite(filename, frame)
            break

    inferred_capture = infer(filename)
    original_inferred = cv2.imread(filename)
    if original_inferred.shape[0] != 480 or original_inferred.shape[1] != 640:
        inferred_capture = cv2.resize(inferred_capture, (368, 368))
        original_inferred = cv2.resize(original_inferred, (368, 368))

    while True:
        final = cv2.addWeighted(inferred, 0.5, inferred_capture, 0.5, 0)
        cv2.imshow('final', final)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    diff_inferred = inferred_capture - original_inferred
    bw_inferred = cv2.cvtColor(diff_inferred, cv2.COLOR_BGR2GRAY)
    bw_inferred[bw_inferred >= 1] = 1
    bw_inferred[bw_inferred < 1] = 0
    bw_orig_inferred = cv2.cvtColor(inferred, cv2.COLOR_BGR2GRAY)
    bw_orig_inferred[bw_orig_inferred >= 1] = 1
    bw_orig_inferred[bw_orig_inferred < 1] = 0

    total = bw_orig_inferred == bw_inferred
    print('')
    print('')
    print('Overlap:' + str((1 - np.sum(total) / np.size(total)) * 10))
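# Toy illustration of the 'Overlap' score printed above: binarize two masks,
# take the fraction of pixels where they disagree, and scale by 10. The
# arrays here are made up purely for demonstration.
import numpy as np

a = np.array([[0, 1], [1, 1]], dtype=np.uint8)
b = np.array([[0, 1], [0, 1]], dtype=np.uint8)
total = (a == b)  # True where the two masks agree
score = (1 - np.sum(total) / np.size(total)) * 10
print(score)  # one of four pixels differs -> 0.25 * 10 = 2.5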
def main():
    global keep_running

    # This is for saving video *with* detection boxes on it.
    # To save raw video, use the CameraSaver.py script.
    save_video = True
    if save_video:
        sz = (p.IMG_WIDTH_SPOTTER, p.IMG_HEIGHT_SPOTTER)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        vout = cv2.VideoWriter()
        vout.open('track_output.mp4', fourcc, p.FPS_SPOTTER, sz, False)

    signal.signal(signal.SIGINT, sigint_handler)

    control_panes = ControlPanes()
    control_panes.stage_control_pane = EnhancedWindow(0, 0, 300, 500, 'Stage Control')
    control_panes.focus_control_pane = EnhancedWindow(0, 20, 300, 500, 'Focus Control')
    control_panes.tracker_select_pane = EnhancedWindow(0, 40, 300, 500, 'Tracker Select')
    control_panes.canny_settings_pane = EnhancedWindow(0, 60, 300, 500, 'Canny Tuning')
    control_panes.threshold_setting_pane = EnhancedWindow(0, 80, 300, 500, 'Threshold Tuning')

    cvui.init(p.CTRL_WINDOW_NAME)
    cvui.init(p.VIDEO_WINDOW_NAME)

    context = zmq.Context()
    (video_socket, focus_sub, stage_sub, focus_state_sub, macro_sharpness_sub,
     track_socket, roi_socket, af_pub) = setup_zmq(context)

    stage_zero_offset = np.load('tank_corners_offset.npy')
    world_points = np.load('tank_corners.npy')
    intrinsic = np.load('intrinsic_calibration/ll_65/intrinsic.npy')

    stage_x = None
    stage_y = None
    stage_z = None
    z_moving = True
    current_ll_focus = None
    object_distance_ll = 0
    target_pos_obs = None
    target_pos = np.array([1, 1])
    target_pos_slow = target_pos.copy()
    feature_delta = np.array([0, 0])
    target_track_init = False
    STAGE_MODE = 'PAUSED'
    FOCUS_MODE = 'MANUAL'
    tracker_type = 'KCF'  # options are KCF or CANNY

    # These three structs store the state information necessary for the trackers
    canny_tracker_state = CannyTracker()
    canny_tracker_state.canny_low = [50]
    canny_tracker_state.canny_high = [150]

    kcf_tracker_state = KCFTracker()
    kcf_tracker_state.kcf_box_anchor = cvui.Point()
    kcf_tracker_state.kcf_roi = cvui.Rect(0, 0, 0, 0)
    kcf_tracker_state.kcf_tracker_init = False

    threshold_tracker_state = ThresholdTracker()
    threshold_tracker_state.threshold = [30]
    threshold_tracker_state.roi = cvui.Rect(0, 0, 0, 0)
    threshold_tracker_state.box_anchor = cvui.Point()
    threshold_tracker_state.show_binary = [False]

    sharpness_focus_state = SharpnessFocusState()
    sharpness_focus_state.mode = 'COARSE'
    macro_sharpness = 0

    while keep_running:
        ctrl_frame = np.zeros((700, 300, 3), np.uint8)

        # Receive stage position updates
        try:
            stage_pos = stage_sub.recv_string()
            (stage_x, stage_y, stage_z_new) = [float(x) for x in stage_pos.split(' ')]
            if stage_z_new == stage_z:
                z_moving = False
            else:
                z_moving = True
            stage_z = stage_z_new
        except zmq.Again:
            # the stage publisher only publishes at ~10hz, so not having an update is common
            pass

        # Receive macro sharpness
        try:
            macro_sharpness_last = macro_sharpness
            macro_sharpness = float(macro_sharpness_sub.recv_string())
        except zmq.Again:
            # no sharpness value, which is unexpected
            print('No Macro Image Sharpness!')

        # Receive next frame
        try:
            frame = recv_img(video_socket)
        except zmq.Again:
            print('Timed Out!')
            time.sleep(1)
            continue

        cvui.context(p.VIDEO_WINDOW_NAME)
        if cvui.mouse(cvui.IS_DOWN):
            (target_pos, feature_delta) = reset_target_selection()
            target_pos_slow = target_pos.copy()
            target_track_init = True
        feature_delta += get_feature_2delta()

        if stage_x is not None:
            stage_pos = np.array([stage_x, stage_y, -stage_z], ndmin=2).T
            frame = tank_corners_clip(frame, stage_pos, stage_zero_offset,
                                      world_points, intrinsic)

        # This is where the tracking happens. tracker_type is controlled by a button on
        # the interface. Adding a new tracker is as simple as adding another case to this
        # if/else and adding a button in the UI to switch into the new tracking mode.
        if tracker_type == 'CANNY':
            canny_tracker_state.target_pos = target_pos
            (target_pos_obs, roi, canny_tracker_state) = update_canny_tracker(
                frame, canny_tracker_state)
        elif tracker_type == 'KCF':
            cvui.context(p.VIDEO_WINDOW_NAME)
            (target_pos_obs, roi, kcf_tracker_state) = update_kcf_tracker(
                frame, kcf_tracker_state)
        elif tracker_type == 'THRESHOLD':
            cvui.context(p.VIDEO_WINDOW_NAME)
            threshold_tracker_state.target_pos = target_pos
            (target_pos_obs, roi, threshold_tracker_state) = update_threshold_tracker(
                frame, threshold_tracker_state)
        else:
            print('Invalid tracker mode: %s' % tracker_type)
            roi = None
            keep_running = False

        # This roi_msg takes an roi that may have been identified around the animal and
        # sends it over zmq. This enables any cameras trying to autofocus to know which
        # roi to keep in focus. If no autofocusing is happening, these messages do nothing.
        if roi is not None:
            roi_msg = m.SetFocusROI(roi[0], roi[1])
        else:
            roi_msg = m.SetFocusROI(None, None)
        # tell the LL camera (or anything else, I guess) which ROI to focus
        roi_socket.send_pyobj(roi_msg)

        (target_track_ok, target_pos, target_pos_slow) = filter_target_position(
            target_pos, target_pos_slow, target_pos_obs)

        # This is probably where we want to use the other camera to estimate depth

        # Now we have a giant state machine. We need to structure the code this way,
        # because we want 2D tracking and user interaction to update even when we are
        # waiting on some slower action to occur related to object depth and focusing.
        # The state machine provides a mechanism to handle these slower processes while
        # not impeding the rest of the tracking process.
        #
        # STAGE_MODE = {MANUAL | AUTO | PAUSED}
        #   -- In MANUAL mode, dx, dy, dz are all set by keyboard input.
        #   -- In AUTO mode, dx and dy are set by the tracker. dz is set by autofocus
        #      if FOCUS_MODE is set to AUTO.
        #   -- In PAUSED mode, dx = dy = dz = 0. The tracker will keep tracking, but
        #      the stage won't move.
        #
        # FOCUS_MODE = {MANUAL | SHARPNESS | DEPTH}
        #   -- In MANUAL mode, dz is set by keyboard input.
        #   -- In SHARPNESS mode, dz is set by trying to maximize sharpness, although
        #      the final position can be tweaked by user input. SHARPNESS mode does
        #      nothing if STAGE_MODE is MANUAL.
        #   -- In DEPTH mode, dz is set by a target depth measurement that is estimated
        #      from a second camera (stereo or perpendicular).

        # Determine dx and dy
        if STAGE_MODE == 'PAUSED':  # -> Stage Control
            track_socket.send_string('0 0 0')
            dx = 0
            dy = 0
            dz = 0
        elif STAGE_MODE == 'MANUAL':
            # TODO: Probably tune this better
            (dx, dy) = get_feature_2delta()
            dx = 10 * dx
            dy = 10 * dy
            print('FULL_MANUAL %f, %f' % (dx, dy))
            dz = manual_focus_update()
        elif STAGE_MODE == 'AUTO':
            # The tracker makes a determination in pixel space, then we may decide to
            # filter it. We then determine the dx and dy based on the distance between
            # the feature of interest and the macro lens center (how much do we need to
            # move in pixel-space?). Note dx and dy are 0 if there are no target tracks.
            if stage_z is None:
                print('Waiting on stage node')
                dx = 0
                dy = 0
                dz = 0
            else:
                if target_pos_obs is not None:
                    if target_track_ok:
                        (dx, dy) = calculate_movement_offsets(
                            frame, target_pos, target_pos_slow, feature_delta)
                    else:
                        dx = 0
                        dy = 0
                else:
                    dx = 0
                    dy = 0
                    target_track_ok = False

                # When STAGE_MODE == 'AUTO', we need to determine how to handle the focusing
                if FOCUS_MODE == 'MANUAL':
                    dz = manual_focus_update()
                elif FOCUS_MODE == 'SHARPNESS':
                    sharpness_focus_state.stage_z = stage_z
                    sharpness_focus_state.macro_sharpness = macro_sharpness
                    sharpness_focus_state.z_moving = z_moving
                    dz, sharpness_focus_state = sharpness_focus(
                        sharpness_focus_state, af_pub, focus_state_sub,
                        video_socket, focus_sub)
                elif FOCUS_MODE == 'DEPTH':
                    # this is the mode when we have a second camera to estimate depth
                    dz = 0
                else:
                    # invalid focus mode
                    print('Invalid focus mode %s' % FOCUS_MODE)
                    sys.exit(1)
        else:
            print('Unknown stage mode: %s' % STAGE_MODE)
            dx = 0
            dy = 0
            dz = 0

        print(dx, dy, dz)
        # 'wasteful', but easier debugging for now
        track_socket.send_string('%f %f %f' % (dx, dy, dz))

        frame = cv2.resize(frame, (p.IMG_DISP_WIDTH_SPOTTER, p.IMG_DISP_HEIGHT_SPOTTER))

        # Draw dots on frame centers
        cv2.circle(frame,
                   (int(p.IMG_DISP_WIDTH_SPOTTER / 2), int(p.IMG_DISP_HEIGHT_SPOTTER / 2)),
                   5, (0, 0, 255), -1)  # center of frame
        cv2.circle(frame, (p.MACRO_LL_CENTER[0], p.MACRO_LL_CENTER[1]),
                   5, (255, 0, 255), -1)  # center of macro frame

        cvui.update(p.VIDEO_WINDOW_NAME)
        cv2.imshow(p.VIDEO_WINDOW_NAME, frame)
        if save_video:
            vout.write(frame)

        cvui.context(p.CTRL_WINDOW_NAME)
        STAGE_MODE, FOCUS_MODE, tracker_type, macro_resweep, ll_resweep = draw_settings(
            ctrl_frame, control_panes, canny_tracker_state, threshold_tracker_state,
            STAGE_MODE, FOCUS_MODE, tracker_type)

        if macro_resweep:
            p.BYPASS_LL_ESTIMATE = True
            sharpness_focus_state.mode = 'FINE_UNINITIALIZED'

        if ll_resweep:
            if stage_z is not None:
                print('Liquid Lens Refocus!')
                dist_to_tank = (300 - stage_z) + p.STAGE_TANK_OFFSET
                ll_max = 2953.5 * dist_to_tank ** -0.729
                ll_min = 2953.5 * (dist_to_tank + p.TANK_DEPTH_MM) ** -0.729
                print('llmin, llmax: (%f, %f)' % (ll_min, ll_max))
                af_pub.send_pyobj(m.AutofocusMessage(ll_min, ll_max, 1))
            else:
                print('Cannot refocus liquid lens until stage node is running')

        cvui.update(p.CTRL_WINDOW_NAME)
        cv2.imshow(p.CTRL_WINDOW_NAME, ctrl_frame)
        cv2.waitKey(1)

    if save_video:
        vout.release()
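# A hedged sketch of how a new tracker could be added, following the comment
# in main(): every update_*_tracker(frame, state) returns the triple
# (target_pos_obs, roi, state). MeanShiftTracker and update_meanshift_tracker
# are hypothetical names, not part of this codebase.
class MeanShiftTracker:
    def __init__(self):
        self.target_pos = None
        self.roi = None

def update_meanshift_tracker(frame, state):
    # ...locate the target near state.target_pos and update state.roi...
    target_pos_obs = state.target_pos  # placeholder observation
    return (target_pos_obs, state.roi, state)

# The dispatch in main() would then gain one more branch:
# elif tracker_type == 'MEANSHIFT':
#     (target_pos_obs, roi, meanshift_tracker_state) = update_meanshift_tracker(
#         frame, meanshift_tracker_state)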
def main():
    # We have one mat for each window.
    frame1 = np.zeros((600, 800, 3), np.uint8)
    frame2 = np.zeros((600, 800, 3), np.uint8)

    # Create variables used by some components
    window1_values = []
    window2_values = []
    window1_checked = [False]
    window1_checked2 = [False]
    window2_checked = [False]
    window2_checked2 = [False]
    window1_value = [1.0]
    window1_value2 = [1.0]
    window1_value3 = [1.0]
    window2_value = [1.0]
    window2_value2 = [1.0]
    window2_value3 = [1.0]

    img = cv2.imread('lena-face.jpg', cv2.IMREAD_COLOR)
    imgRed = cv2.imread('lena-face-red.jpg', cv2.IMREAD_COLOR)
    imgGray = cv2.imread('lena-face-gray.jpg', cv2.IMREAD_COLOR)

    padding = 10

    # Fill the vector with a few random values
    for i in range(0, 20):
        window1_values.append(random.uniform(0., 300.0))
        window2_values.append(random.uniform(0., 300.0))

    # Start two OpenCV windows
    cv2.namedWindow(WINDOW1_NAME)
    cv2.namedWindow(WINDOW2_NAME)

    # Init cvui and inform it to use the first window as the default one.
    # cvui.init() will automatically watch the informed window.
    cvui.init(WINDOW1_NAME)

    # Tell cvui to keep track of mouse events in window2 as well.
    cvui.watch(WINDOW2_NAME)

    while True:
        # Inform cvui that all subsequent component calls and events are related to window 1.
        cvui.context(WINDOW1_NAME)

        # Fill the frame with a nice color
        frame1[:] = (49, 52, 49)

        cvui.beginRow(frame1, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', window1_checked)
        cvui.window(80, 80, 'window')
        cvui.rect(50, 50, 0x00ff00, 0xff0000)
        cvui.sparkline(window1_values, 50, 50)
        cvui.counter(window1_value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(img)
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()

        padding = 50
        cvui.beginRow(frame1, 10, 150, 100, 50, padding)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', window1_checked2)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        cvui.printf('with 50px padding.')
        cvui.endRow()

        cvui.beginRow(frame1, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        cvui.trackbar(150, window1_value2, 0., 5.)
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()

        cvui.beginColumn(frame1, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()

        padding = 10
        cvui.beginColumn(frame1, 300, 330, 100, 200, padding)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.trackbar(150, window1_value3, 0., 5., 1, '%3.2Lf',
                      cvui.TRACKBAR_DISCRETE, 0.25)
        cvui.text('End of column 2')
        cvui.endColumn()

        cvui.beginColumn(frame1, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        cvui.text('End of column 3 (40px below)')
        cvui.endColumn()

        # Update all components of window1, e.g. mouse clicks, and show it.
        cvui.update(WINDOW1_NAME)
        cv2.imshow(WINDOW1_NAME, frame1)

        # From this point on, we are going to render the second window. We need to inform cvui
        # that all updates and components from now on are connected to window 2.
        # We do that by calling cvui.context().
        cvui.context(WINDOW2_NAME)
        frame2[:] = (49, 52, 49)

        cvui.beginRow(frame2, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', window2_checked)
        cvui.window(80, 80, 'window')
        cvui.rect(50, 50, 0x00ff00, 0xff0000)
        cvui.sparkline(window2_values, 50, 50)
        cvui.counter(window2_value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(img)
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()

        padding = 50
        cvui.beginRow(frame2, 10, 150, 100, 50, padding)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', window2_checked2)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        cvui.printf('with 50px padding.')
        cvui.endRow()

        # Another row mixing several components
        cvui.beginRow(frame2, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        cvui.trackbar(150, window2_value2, 0., 5.)
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()

        cvui.beginColumn(frame2, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()

        padding = 10
        cvui.beginColumn(frame2, 300, 330, 100, 200, padding)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.trackbar(150, window2_value3, 0., 5., 1, '%3.2Lf',
                      cvui.TRACKBAR_DISCRETE, 0.25)
        cvui.text('End of column 2')
        cvui.endColumn()

        cvui.beginColumn(frame2, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        cvui.text('End of column 3 (40px below)')
        cvui.endColumn()

        # Update all components of window2, e.g. mouse clicks, and show it.
        cvui.update(WINDOW2_NAME)
        cv2.imshow(WINDOW2_NAME, frame2)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    # We have one mat for each window.
    frame1 = np.zeros((150, 600, 3), np.uint8)
    frame2 = np.zeros((150, 600, 3), np.uint8)
    error_frame = np.zeros((100, 300, 3), np.uint8)

    # Flag to control if we should show an error window.
    error = False

    # Create two OpenCV windows
    cv2.namedWindow(GUI_WINDOW1_NAME)
    cv2.namedWindow(GUI_WINDOW2_NAME)

    # Init cvui and inform it to use the first window as the default one.
    # cvui.init() will automatically watch the informed window.
    cvui.init(GUI_WINDOW1_NAME)

    # Tell cvui to keep track of mouse events in window2 as well.
    cvui.watch(GUI_WINDOW2_NAME)

    while True:
        # Inform cvui that all subsequent component calls and events are related to window 1.
        cvui.context(GUI_WINDOW1_NAME)

        # Fill the frame with a nice color
        frame1[:] = (49, 52, 49)

        cvui.beginColumn(frame1, 50, 20, -1, -1, 10)
        cvui.text('[Win1] Use the buttons below to control the error window')

        if cvui.button('Close'):
            closeWindow(ERROR_WINDOW_NAME)

        # If the button is clicked, we open the error window.
        # The content and rendering of such error window will be performed
        # after we handled all other windows.
        if cvui.button('Open'):
            error = True
            openWindow(ERROR_WINDOW_NAME)
        cvui.endColumn()

        # Update all components of window1, e.g. mouse clicks, and show it.
        cvui.update(GUI_WINDOW1_NAME)
        cv2.imshow(GUI_WINDOW1_NAME, frame1)

        # From this point on, we are going to render the second window. We need to inform cvui
        # that all updates and components from now on are connected to window 2.
        # We do that by calling cvui.context().
        cvui.context(GUI_WINDOW2_NAME)
        frame2[:] = (49, 52, 49)

        cvui.beginColumn(frame2, 50, 20, -1, -1, 10)
        cvui.text('[Win2] Use the buttons below to control the error window')

        if cvui.button('Close'):
            closeWindow(ERROR_WINDOW_NAME)

        # If the button is clicked, we open the error window.
        # The content and rendering of such error window will be performed
        # after we handled all other windows.
        if cvui.button('Open'):
            openWindow(ERROR_WINDOW_NAME)
            error = True
        cvui.endColumn()

        # Update all components of window2, e.g. mouse clicks, and show it.
        cvui.update(GUI_WINDOW2_NAME)
        cv2.imshow(GUI_WINDOW2_NAME, frame2)

        # Handle the content and rendering of the error window,
        # if we have an active error and the window is actually open.
        if error and isWindowOpen(ERROR_WINDOW_NAME):
            # Inform cvui that all subsequent component calls and events are
            # related to the error window from now on
            cvui.context(ERROR_WINDOW_NAME)

            # Fill the error window with a vibrant color
            error_frame[:] = (10, 10, 49)

            cvui.text(error_frame, 70, 20, 'This is an error message', 0.4, 0xff0000)
            if cvui.button(error_frame, 110, 40, 'Close'):
                error = False

            if error:
                # We still have an active error.
                # Update all components of the error window, e.g. mouse clicks, and show it.
                cvui.update(ERROR_WINDOW_NAME)
                cv2.imshow(ERROR_WINDOW_NAME, error_frame)
            else:
                # No more active error. Let's close the error window.
                closeWindow(ERROR_WINDOW_NAME)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
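# Minimal sketches of the window helpers used above (openWindow, closeWindow,
# isWindowOpen are referenced but not defined in this snippet); they are
# assumed to be thin wrappers around OpenCV window management.
def openWindow(name):
    cv2.namedWindow(name)
    cvui.watch(name)  # let cvui track mouse events in the new window

def closeWindow(name):
    cv2.destroyWindow(name)
    cv2.waitKey(1)  # give HighGUI a chance to process the close request

def isWindowOpen(name):
    # WND_PROP_VISIBLE stays >= 1.0 while the window exists and is visible
    return cv2.getWindowProperty(name, cv2.WND_PROP_VISIBLE) >= 1.0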
def main():
    context = zmq.Context()

    # Receive video frames from camera
    video_socket = context.socket(zmq.SUB)
    video_socket.setsockopt(zmq.CONFLATE, 1)
    video_socket.setsockopt(zmq.RCVTIMEO, 1000)
    video_socket.connect('tcp://%s:%d' % (p.VIDEO_SPOTTER_IP, p.VIDEO_SPOTTER_PORT))
    topicfilter = ''
    video_socket.setsockopt_string(zmq.SUBSCRIBE, topicfilter)

    # Receive updates from stage
    stage_sub = context.socket(zmq.SUB)
    stage_sub.setsockopt(zmq.CONFLATE, 1)
    stage_sub.setsockopt(zmq.RCVTIMEO, 0)
    stage_sub.connect('tcp://%s:%d' % (p.STAGE_POSITION_IP, p.STAGE_POSITION_PORT))
    topicfilter = ''
    stage_sub.setsockopt_string(zmq.SUBSCRIBE, topicfilter)

    # TODO: We need to connect here instead of binding if we want to use this at the
    # same time as CameraDisplaySpotter.
    # Publish tracking deltas
    track_socket = context.socket(zmq.PUB)
    track_socket.bind('tcp://*:%s' % p.TRACK_PORT)

    intrinsic = np.load('intrinsic_calibration/ll_65/intrinsic.npy')

    cvui.init('MaskWindow1')

    # Need to wait for stage to be up and running
    while 1:
        try:
            stage_msg = stage_sub.recv_string()
            (stage_x, stage_y, stage_z) = [float(x) for x in stage_msg.split(' ')]
            break
        except zmq.Again:
            print('Waiting for stage controller node')
            time.sleep(1)

    stage_zero_offset = np.array([stage_x, stage_y, -stage_z], ndmin=2).T
    np.save('tank_corners_offset.npy', stage_zero_offset)

    while keep_running:
        MODE = '1'

        # Phase 1: Click corners in the first stage position
        corner_points = np.zeros((3, 8))  # first 4 for front, second 4 for back
        corner_ix = 0
        print('Click on corners at front of tank')
        while MODE == '1':
            try:
                frame = recv_img(video_socket)
            except zmq.Again:
                print('No new frame!')
                continue

            for pt_ix in range(corner_ix):
                center = (int(corner_points[0, pt_ix]), int(corner_points[1, pt_ix]))
                if pt_ix < 4:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)
                cv2.circle(frame, center, 5, color, -1)

            cvui.update('MaskWindow1')
            cv2.imshow('MaskWindow1', frame)
            cv2.waitKey(1)

            cvui.context('MaskWindow1')
            if cvui.mouse(cvui.CLICK):
                print('CLICK')
                corner_points[0, corner_ix] = cvui.mouse().x
                corner_points[1, corner_ix] = cvui.mouse().y
                corner_points[2, corner_ix] = 1
                corner_ix += 1
                if corner_ix == 4:
                    print('Click on corners at back of tank')
                if corner_ix == 8:
                    MODE = '2'

        # Move stage
        # TODO: Add a REP/REQ instead of PUB/SUB for one-off reliable stage movements
        stage_delta = 15.0
        track_socket.send_string('%f 0 0' % (stage_delta * p.X_MOVE_SCALE))
        print('Waiting for stage motion')
        time.sleep(2)

        # Essential matrix for a pure X translation, and the corresponding
        # fundamental matrix F = K^-T * E * K^-1
        E = np.array([[0, 0, 0],
                      [0, 0, -stage_delta],
                      [0, stage_delta, 0]])
        F = np.linalg.inv(intrinsic).T @ E @ np.linalg.inv(intrinsic)

        # Phase 2: Click corners in the second stage position
        corner_points2 = np.zeros((3, 8))
        corner_ix2 = 0
        print('Click on corners at front of tank')
        while MODE == '2':
            try:
                frame = recv_img(video_socket)
            except zmq.Again:
                print('No new frame!')

            # Plot the epipolar line for the current point, i.e.
            # p_r^T * F * p_l = 0, where F is the fundamental matrix and p_r, p_l are
            # points in (homogeneous) image space. y0 and y1 are the y coordinates of
            # this line for x = 0 and x = img_width.
            im1_pt = corner_points[:, corner_ix2:corner_ix2 + 1]
            d = im1_pt.T @ F
            y0 = int(-d[0, 2] / d[0, 1])
            y1 = int((-d[0, 0] * p.IMG_WIDTH_SPOTTER - d[0, 2]) / d[0, 1])
            cv2.line(frame, (0, y0), (640, y1), (0, 255, 0))

            for pt_ix in range(corner_ix2):
                center = (int(corner_points2[0, pt_ix]), int(corner_points2[1, pt_ix]))
                if pt_ix < 4:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)
                cv2.circle(frame, center, 5, color, -1)

            cvui.update('MaskWindow1')
            cv2.imshow('MaskWindow1', frame)
            cv2.waitKey(1)

            cvui.context('MaskWindow1')
            if cvui.mouse(cvui.CLICK):
                corner_points2[0, corner_ix2] = cvui.mouse().x
                corner_points2[1, corner_ix2] = cvui.mouse().y
                corner_points2[2, corner_ix2] = 1
                corner_ix2 += 1
                if corner_ix2 == 4:
                    print('Click on corners at back of tank')
                if corner_ix2 == 8:
                    MODE = '3'

        # Intermezzo: Compute the 3D location of the tank corners by triangulating
        # each pair of corresponding rays (closest-point midpoint method).
        world_rays1 = np.linalg.inv(intrinsic) @ corner_points
        world_rays2 = np.linalg.inv(intrinsic) @ corner_points2
        world_points = np.zeros((3, 8))
        A = np.zeros((2, 2))
        B = np.zeros((2, 1))
        for point_ix in range(8):
            p1 = np.zeros((3, 1))
            p2 = np.zeros((3, 1))
            v1 = world_rays1[:, point_ix:point_ix + 1]
            v2 = world_rays2[:, point_ix:point_ix + 1]
            p2[0] = -stage_delta
            A[0, 0] = -np.inner(v1.T, v1.T)
            A[0, 1] = np.inner(v1.T, v2.T)
            B[0, 0] = -(np.inner(p2.T, v1.T) - np.inner(p1.T, v1.T))
            A[1, 0] = -np.inner(v1.T, v2.T)
            A[1, 1] = np.inner(v2.T, v2.T)
            B[1, 0] = -(np.inner(p2.T, v2.T) - np.inner(p1.T, v2.T))
            sol = np.linalg.solve(A, B)
            point1 = p1 + sol[0] * v1
            point2 = p2 + sol[1] * v2
            mp = (point2 - point1) / 2.0 + point1
            world_points[:, point_ix] = mp.squeeze()

        print(world_points)
        np.save('tank_corners.npy', world_points)

        # Phase 3: Compute world-frame corners and update as the stage moves
        stage_pos_cur = np.zeros((3, 1))
        while MODE == '3':
            try:
                frame = recv_img(video_socket)
            except zmq.Again:
                print('No new frame!')

            try:
                stage_msg = stage_sub.recv_string()
                (stage_x, stage_y, stage_z) = [float(x) for x in stage_msg.split(' ')]
                stage_pos_cur[0] = stage_x
                stage_pos_cur[1] = stage_y
                stage_pos_cur[2] = -stage_z
            except zmq.Again:
                pass

            stage_delta = stage_pos_cur - stage_zero_offset
            corners_translated = world_points - stage_delta

            full_corners_img = np.zeros((2, 8))
            for pt_ix in range(8):
                corner_img = intrinsic @ corners_translated[:, pt_ix]
                corner_img = corner_img / corner_img[2]
                full_corners_img[0, pt_ix] = corner_img[0]
                full_corners_img[1, pt_ix] = corner_img[1]
                center = (int(corner_img[0]), int(corner_img[1]))
                if pt_ix < 4:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)
                cv2.circle(frame, center, 5, color, -1)

            cv2.imshow('MaskWindow1', frame)
            print([np.array([full_corners_img[:, 0], full_corners_img[:, 1],
                             full_corners_img[:, 2], full_corners_img[:, 3]])])

            poly_frame_front = np.zeros(frame.shape, dtype=np.uint8)
            poly_frame_back = np.zeros(frame.shape, dtype=np.uint8)
            poly_frame_front = cv2.fillPoly(
                poly_frame_front,
                [np.array([full_corners_img[:, 0], full_corners_img[:, 1],
                           full_corners_img[:, 2], full_corners_img[:, 3]],
                          dtype='int32')],
                (255, 255, 255))
            poly_frame_back = cv2.fillPoly(
                poly_frame_back,
                [np.array([full_corners_img[:, 4], full_corners_img[:, 5],
                           full_corners_img[:, 6], full_corners_img[:, 7]],
                          dtype='int32')],
                (255, 255, 255))
            clipped_frame = cv2.bitwise_and(frame, poly_frame_front)
            clipped_frame = cv2.bitwise_and(clipped_frame, poly_frame_back)
            cv2.imshow('MaskWindow2', clipped_frame)
            cv2.waitKey(1)
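# Standalone sanity check of the epipolar construction used in phase 2, with
# a made-up intrinsic matrix K and stage translation. For a pure X translation
# t = (tx, 0, 0), E = [t]_x, and any pair of corresponding homogeneous image
# points satisfies p_r^T * F * p_l = 0 (up to floating-point error).
import numpy as np

K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])  # hypothetical intrinsics
tx = 15.0  # stage translation along X
E = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, -tx],
              [0.0, tx, 0.0]])  # E = [t]_x for t = (tx, 0, 0)
F = np.linalg.inv(K).T @ E @ np.linalg.inv(K)

X = np.array([50.0, -20.0, 400.0])  # a 3D point in camera-1 coordinates
p_l = K @ X
p_l = p_l / p_l[2]
p_r = K @ (X - np.array([tx, 0.0, 0.0]))  # same point after the stage moves
p_r = p_r / p_r[2]

print(p_r @ F @ p_l)  # ~0: p_r lies on the epipolar line F @ p_l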