def main():
    """Demo: a button rendered from images next to a regular text button."""
    # Canvas where the whole UI is drawn each frame.
    frame = np.zeros((300, 600, 3), np.uint8)

    # The three visual states of the image button: idle, hover and pressed.
    idle_img = cv2.imread('lena-face.jpg', cv2.IMREAD_COLOR)
    pressed_img = cv2.imread('lena-face-red.jpg', cv2.IMREAD_COLOR)
    hover_img = cv2.imread('lena-face-gray.jpg', cv2.IMREAD_COLOR)

    # Init cvui and let it create the OpenCV window, i.e. cv2.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while True:
        # Clear the canvas with the standard cvui background color.
        frame[:] = (49, 52, 49)

        # Image-based button: cvui picks the image matching the cursor state
        # (outside / over / pressing the button area).
        if cvui.button(frame, 200, 80, idle_img, hover_img, pressed_img):
            print('Image button clicked!')

        cvui.text(frame, 150, 200, 'This image behaves as a button')

        # A plain text button for comparison.
        if cvui.button(frame, 360, 80, 'Button'):
            print('Regular button clicked!')

        # Must be called *AFTER* all UI components so cvui can process
        # mouse clicks and other events for this frame.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demo: a button with a keyboard shortcut ("&Quit" reacts to the Q key)."""
    frame = np.zeros((150, 650, 3), np.uint8)

    # Pass a waitKey delay (20 ms) to cvui.init(): cvui then calls
    # cv2.waitKey() itself inside cvui.update(), which is what makes
    # keyboard shortcuts (the "&" in a button label) work.
    cvui.init(WINDOW_NAME, 20)

    while True:
        # Standard cvui background color.
        frame[:] = (49, 52, 49)

        cvui.text(frame, 40, 40, 'To exit this app click the button below or press Q (shortcut for the button below).')

        # The button fires both on a mouse click and on the "q" key,
        # marked as a shortcut by the "&" in its label.
        if cvui.button(frame, 300, 80, "&Quit"):
            break

        # No explicit cv2.waitKey() here: cvui.update() handles it because
        # cvui.init() received a waitKey value.
        cvui.update()
        cv2.imshow(WINDOW_NAME, frame)
def main(): frame = np.zeros((150, 650, 3), np.uint8) # Init cvui and tell it to use a value of 20 for cv2.waitKey() # because we want to enable keyboard shortcut for # all components, e.g. button with label "&Quit". # If cvui has a value for waitKey, it will call # waitKey() automatically for us within cvui.update(). cvui.init(WINDOW_NAME, 20) while (True): # Fill the frame with a nice color frame[:] = (49, 52, 49) cvui.text( frame, 40, 40, 'To exit this app click the button below or press Q (shortcut for the button below).' ) # Exit the application if the quit button was pressed. # It can be pressed because of a mouse click or because # the user pressed the "q" key on the keyboard, which is # marked as a shortcut in the button label ("&Quit"). if cvui.button(frame, 300, 80, "&Quit"): break # Since cvui.init() received a param regarding waitKey, # there is no need to call cv.waitKey() anymore. cvui.update() # will do it automatically. cvui.update() cv2.imshow(WINDOW_NAME, frame)
def main():
    """Demo: UI pieces rendered by methods of two different classes."""
    frame = np.zeros((300, 500, 3), np.uint8)

    # Each object draws its own part of the UI onto the shared frame.
    info_renderer = Class1()
    message_renderer = Class2()

    # Init cvui and let it create the OpenCV window (cv2.namedWindow(WINDOW_NAME)).
    cvui.init(WINDOW_NAME)

    while True:
        # Standard cvui background color.
        frame[:] = (49, 52, 49)

        info_renderer.renderInfo(frame)
        message_renderer.renderMessage(frame)

        # Must run *AFTER* all components so cvui can process mouse events.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demo: three identical component groups stacked vertically."""
    band_height = 220
    gap = 10
    frame = np.zeros((band_height * 3, 1300, 3), np.uint8)

    # Init cvui and let it create the OpenCV window (cv2.namedWindow(WINDOW_NAME)).
    cvui.init(WINDOW_NAME)

    while True:
        # Standard cvui background color.
        frame[:] = (49, 52, 49)

        rows, cols, channels = frame.shape

        # Each group fills a full-width horizontal band, minus a small
        # bottom gap between bands.
        for band in range(3):
            group(frame, 0, band_height * band, cols, band_height - gap)

        # Must run *AFTER* all components so cvui can process mouse events.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demo: a trackbar with two knobs selecting a value range."""
    window_name = 'cvui-double-knob-trackbar'
    cvui.init(window_name)

    # cvui binds widget values through single-element mutable lists.
    low = [25.0]
    high = [75.0]

    while True:
        # Fresh canvas each frame, filled with the cvui background color.
        canvas = np.zeros((140, 460, 3), np.uint8)
        canvas[:] = (49, 52, 49)

        # Double-knob trackbar spanning the range [0, 100].
        cvui.trackbar2(canvas, 30, 30, 400, low, high, 0., 100.)

        cvui.text(canvas, 50, 100, 'value1 : ' + "{0:.1f}".format(low[0]))
        cvui.text(canvas, 160, 100, 'value2 : ' + "{0:.1f}".format(high[0]))

        cvui.update()
        cv.imshow(window_name, canvas)

        if cv.waitKey(50) == 27:  # ESC
            break
def window(name):
    """Render one complete UI frame for the OpenCV window called `name`.

    Assumes `name` refers to a window that cvui is already watching
    (via cvui.init() or cvui.watch()).
    """
    # Create a frame for this window and fill it with a nice color
    frame = np.zeros((200, 500, 3), np.uint8)
    frame[:] = (49, 52, 49)

    # Inform cvui that the components to be rendered from now on belong to
    # a window in particular.
    #
    # If you don't inform that, cvui will assume the components belong to
    # the default window (informed in cvui.init()). In that case, the
    # interactions with all other windows being used will not work.
    cvui.context(name)

    # Show info regarding the window
    cvui.printf(frame, 110, 50, '%s - click the button', name)

    # Buttons return true if they are clicked
    if cvui.button(frame, 110, 90, 'Button'):
        cvui.printf(frame, 200, 95, 'Button clicked!')
        print('Button clicked on: ', name)

    # Tell cvui to update its internal structures regarding a particular window.
    #
    # If cvui is being used in multiple windows, you need to enclose all component
    # calls between the pair cvui.context(NAME)/cvui.update(NAME), where NAME is
    # the name of the window being worked on.
    cvui.update(name)

    # Show the content of this window on the screen
    cvui.imshow(name, frame)
def anotate(img, out_path, wnd_height=800):
    """Interactively select a rectangular ROI on `img` and save it to `out_path`.

    The image is shown scaled to a window of height `wnd_height` (width keeps
    the aspect ratio); a 200px strip below it holds four widgets for fine
    tuning. Drag the mouse (top-left towards bottom-right) to set the ROI.
    Press ESC to write the ROI as fractions of the image size to `out_path`
    via np.savez and exit.
    """
    img_height, img_width = img.shape[:2]
    # Window width that preserves the image aspect ratio.
    wnd_width = int(wnd_height * img_width / img_height)
    # Extra 200 rows below the image hold the parameter widgets.
    frame = np.zeros((wnd_height + 200, wnd_width, 3), np.uint8)

    # NOTE(review): roi_size / roi_shiftx / roi_shifty appear unused below —
    # confirm before removing.
    roi_size = [img_height // 10 * 5]
    roi_shiftx = [0]
    roi_shifty = [0]

    # ROI corners in image coordinates, wrapped in lists for the widgets.
    x1 = [0]
    x2 = [img_width // 2]
    y1 = [0]
    y2 = [img_height // 2]

    WINDOW_NAME = 'Roi Anotater'
    cvui.init(WINDOW_NAME)
    anchor = cvui.Point()  # drag start position, in window coordinates

    while (True):
        y = cvui.mouse().y
        x = cvui.mouse().x
        # Only react to the mouse while it is over the image area.
        if (x > 0 and x < wnd_width and y > 0 and y < wnd_height):
            if cvui.mouse(cvui.DOWN):
                # Start a new drag at the press position.
                anchor.y = y
                anchor.x = x
            if cvui.mouse(cvui.IS_DOWN):
                # Only top-left -> bottom-right drags update the ROI;
                # coordinates are rescaled from window to image space.
                if x > anchor.x and y > anchor.y:
                    x1[0] = anchor.x * img_width // wnd_width
                    x2[0] = x * img_width // wnd_width
                    y1[0] = anchor.y * img_height // wnd_height
                    y2[0] = y * img_height // wnd_height

        frame[:] = (49, 52, 49)
        # Draw the current ROI on a copy so the source image stays clean.
        img_tmp = img.copy()
        img_tmp = cv2.rectangle(img_tmp, (x1[0], y1[0]), (x2[0], y2[0]), (0, 0, 255), 1)
        cvui.image(frame, 0, 0, cv2.resize(img_tmp, (wnd_width, wnd_height)))

        # Manual adjustment widgets; the min/max limits passed for each bound
        # come from the opposite corner — presumably param() enforces
        # x1 <= x2 and y1 <= y2 (param is defined elsewhere; confirm).
        param(frame, 50, wnd_height + 10, wnd_width - 400, x1, 0, x2[0], 'x1', 1)
        param(frame, 50, wnd_height + 50, wnd_width - 400, x2, x1[0], img_width, 'x2', 1)
        param(frame, 50, wnd_height + 90, wnd_width - 400, y1, 0, y2[0], 'y1', 1)
        param(frame, 50, wnd_height + 130, wnd_width - 400, y2, y1[0], img_height, 'y2', 1)

        cvui.update()
        cv2.imshow(WINDOW_NAME, frame)

        # ESC: persist the ROI normalized to [0, 1] and quit.
        if cv2.waitKey(20) == 27:
            np.savez(out_path,
                     x1=x1[0] / img_width,
                     x2=x2[0] / img_width,
                     y1=y1[0] / img_height,
                     y2=y2[0] / img_height)
            break
def main(camera_toml_path, enable_distortion_correction, scale_val = 0.65):
    """Live camera preview; click to move the auto-exposure RoI.

    Requires camera_config.roi_size == 4 and auto_exposure == "roi".
    The preview is scaled by `scale_val`. ESC or 'q' quits.
    """
    camera_config = get_config(camera_toml_path)
    camera = Camera(camera_config)
    print(camera)
    # Helper that scales a pixel dimension by scale_val (int result).
    scaling = partial(scaling_int, scale=scale_val)
    if camera_config.roi_size != 4:
        sys.exit('This script is only supported on "camera_config.roi_size == 4" ')
    if camera_config.auto_exposure != "roi":
        sys.exit('This script is only supported on "camera_config.auto_exposure == roi" ')
    image_width = camera.image_width
    image_height = camera.image_height
    roi = cvui.Rect(0, 0, 0, 0)
    WINDOW_NAME = "Capture"
    cvui.init(WINDOW_NAME)
    # Default RoI center: middle of the image (until the first click).
    click_pos_x = image_width // 2
    click_pos_y = image_height // 2
    while True:
        key = cv2.waitKey(10)
        frame = np.zeros((scaling(image_height), scaling(image_width), 3), np.uint8)
        frame[:] = (49, 52, 49)
        status = camera.update()
        if status:
            # WARNING: If distortion correction is enabled, the rectangle on
            # screen doesn't indicate the actual RoI area used for auto exposure.
            see3cam_rgb_image = camera.remap_image if enable_distortion_correction else camera.image
            scaled_width = scaling(image_width)
            scaled_height = scaling(image_height)
            see3cam_rgb_image_resized = cv2.resize(see3cam_rgb_image, (scaled_width, scaled_height))
            frame[:scaled_height, :scaled_width, :] = see3cam_rgb_image_resized
            # RoI window is half the image in each dimension (roi_size == 4).
            window_w = image_width // 2
            window_h = image_height // 2
            if cvui.mouse(cvui.DOWN):
                # Convert the click from display to full-resolution coordinates
                # and push the new RoI center to the camera.
                click_pos_x = int(cvui.mouse().x / scale_val)
                click_pos_y = int(cvui.mouse().y / scale_val)
                camera.set_roi_properties(click_pos_x, click_pos_y, win_size=4)
            # Rectangle (display coordinates) centered on the last click.
            roi = cvui.Rect(scaling(click_pos_x - image_width // 4),
                            scaling(click_pos_y - image_height // 4),
                            scaling(window_w), scaling(window_h))
            # Ensure ROI is within bounds
            roi.x = 0 if roi.x < 0 else roi.x
            roi.y = 0 if roi.y < 0 else roi.y
            roi.width = roi.width + scaled_width - (roi.x + roi.width) if roi.x + roi.width > scaled_width else roi.width
            roi.height = roi.height + scaled_height - (roi.y + roi.height) if roi.y + roi.height > scaled_height else roi.height
            cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, 0xFF0000)
        if key == 27 or key == ord("q"):
            break
        cvui.update()
        cvui.imshow(WINDOW_NAME, frame)
    cv2.destroyAllWindows()
def main(save_dir, laser_off):
    """RealSense capture tool: preview streams and save frame sets on demand.

    's' / "Save Result Image" stores color, depth, aligned depth and both IR
    frames into `save_dir`; "Clear" empties the directory; 'q' quits.
    """
    make_save_dir(save_dir)
    rs_mng = RealSenseManager()  # default image size = (1280, 720)
    # Projector laser on/off as requested by the caller.
    if laser_off:
        rs_mng.laser_turn_off()
    else:
        rs_mng.laser_turn_on()
    image_width, image_height = rs_mng.image_size
    # Preview tiles are drawn at 2/3 sensor size; the window is sized to
    # leave room below them for the buttons and the counter text.
    res_image_width = int(image_width * 2 / 3)
    res_image_height = int(image_height * 2 / 3)
    window_image_width = int(image_width * 4 / 3)
    window_image_height = int(image_height)
    cvui.init("capture")
    frame = np.zeros((window_image_height, window_image_width, 3), np.uint8)
    # Resume the counter from whatever is already stored in save_dir.
    captured_frame_count = count_images(save_dir)
    while True:
        key = cv2.waitKey(10)
        frame[:] = (49, 52, 49)
        status = rs_mng.update()
        if status:
            # Get Images
            ir_image_left = rs_mng.ir_frame_left
            ir_image_right = rs_mng.ir_frame_right
            color_image = rs_mng.color_frame
            depth_image = rs_mng.depth_frame
            depth_image_aligned2color = rs_mng.depth_frame_aligned2color
            # Visualize Images
            frame = draw_frames(frame, color_image, depth_image, res_image_width, res_image_height)
            # Save the whole frame set on button press or the "s" key.
            if cvui.button(frame, 50, res_image_height + 50, 130, 50, "Save Result Image") or key & 0xFF == ord("s"):
                save_images(color_image, depth_image, depth_image_aligned2color, ir_image_left, ir_image_right, save_dir)
                captured_frame_count += 1
            if cvui.button(frame, 200, res_image_height + 50, 130, 50, "Clear"):
                clean_save_dir(save_dir)
                captured_frame_count = 0
            cvui.printf(frame, 50, res_image_height + 150, 0.8, 0x00FF00, "Number of Captured Images : %d", captured_frame_count)
        if key & 0xFF == ord("q"):
            break
        cvui.update()
        cvui.imshow("capture", frame)
    cv2.destroyAllWindows()
    del rs_mng  # drop the manager (and its device handles) explicitly
def main():
    """Live webcam thresholding demo with a cvui control column.

    A trackbar selects the threshold value and a counter selects the
    thresholding mode (indexing the module-level chooseArr/strArr tables).
    The thresholded image is alpha-blended with the UI and displayed.
    Press the Quit button (or its 'q' shortcut) to exit.
    """
    thresholdValue = [108]  # trackbar-bound threshold (cvui needs a list)
    chooseInt = [0]         # counter-bound index into chooseArr/strArr
    # Size of trackbars
    trackbar_width = 400

    # Init cvui with a 20 ms waitKey so keyboard shortcuts ("&Quit") work
    # and cvui.update() pumps key events itself.
    cvui.init(WINDOW_NAME, 20)
    ui_frame = np.zeros((480, 640, 3), np.uint8)

    # Camera setup. BUGFIX: the legacy cv2.cv.CV_CAP_PROP_* constants were
    # removed in OpenCV 3; use the modern cv2.CAP_PROP_* names instead.
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    # Main loop
    while True:
        ret, frame = cap.read()
        # Grayscale copy used for thresholding.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        ui_frame[:] = (49, 52, 49)
        cvui.beginColumn(ui_frame, 20, 20, -1, -1, 6)
        cvui.text('Threshold Adject')
        cvui.trackbar(trackbar_width, thresholdValue, 0, 255)
        # NOTE(review): chooseInt is not clamped to len(strArr)/len(chooseArr);
        # the counter can step the index out of range — confirm upstream.
        cvui.counter(chooseInt)
        cvui.space(5)
        cvui.text(strArr[chooseInt[0]])
        if cvui.button('&Quit'):
            break
        cvui.space(5)
        cvui.endColumn()

        # Apply the selected threshold mode, then blend the result under the UI.
        th1, gray = cv2.threshold(gray, thresholdValue[0], 255, chooseArr[chooseInt[0]])
        merged = cv2.merge([gray, gray, gray])
        dst = cv2.addWeighted(ui_frame, 1.0, merged, 0.4, 0.0)

        cvui.update()
        cv2.imshow(WINDOW_NAME, dst)

    # Release the camera and close windows once the loop exits.
    cap.release()
    cv2.destroyAllWindows()
def windows_show(self):
    """Flush cvui state and repaint the main window, refreshing its status bar.

    The status bar aggregates values kept up to date elsewhere on the
    instance: calibration distance, mouse position, focus value, contour
    count, real-world mm coordinates and the color under the cursor.
    """
    cvui.update()
    # Make subsequent cvui calls (and the imshow below) target the main window.
    cvui.context(self.mainwindow_name)
    cvui.imshow(self.mainwindow_name, self.mainframe)
    cv2.displayStatusBar(
        self.mainwindow_name,
        "{:3.2f}pmm X:{:03d} Y:{:03d} Focus:{:05d} Contours:{:02d} Xmm:{:7.2f} Ymm{:7.2f} Color:{}"
        .format(self.cal_line_distance_total, self.mouse_x, self.mouse_y,
                int(self.focus_value), self.contours_found_quan,
                self.actual_mm_x, self.actual_mm_y, str(self.mouse_color)),
        0)
def main():
    """Two-window cvui demo: a row layout mixing an image and an image button."""
    # We have one mat for each window.
    frame1 = np.zeros((1024, 768, 3), np.uint8)

    # Create variables used by some components
    window1_values = []
    window2_values = []

    img = cv2.imread('Images/yoga.jpg', cv2.IMREAD_COLOR)
    imgRed = cv2.imread('Images/mic.jpg', cv2.IMREAD_COLOR)
    imgGray = cv2.imread('Images/gamb.jpg', cv2.IMREAD_COLOR)

    # BUGFIX: resize each image from itself. The original code resized
    # imgRed from imgGray and imgGray from the freshly overwritten imgRed,
    # so the 'mic' image was lost and both ended up showing 'gamb'.
    img = cv2.resize(img, (200, 200))
    imgRed = cv2.resize(imgRed, (200, 200))
    imgGray = cv2.resize(imgGray, (200, 200))

    padding = 10

    # Fill the vectors with a few random values
    for i in range(0, 20):
        window1_values.append(random.uniform(0., 300.0))
        window2_values.append(random.uniform(0., 300.0))

    # Start two OpenCV windows
    cv2.namedWindow(WINDOW1_NAME)
    cv2.namedWindow(WINDOW2_NAME)

    # Init cvui and inform it to use the first window as the default one.
    # cvui.init() will automatically watch the informed window.
    cvui.init(WINDOW1_NAME)

    # Tell cvui to keep track of mouse events in window2 as well.
    cvui.watch(WINDOW2_NAME)

    while True:
        # Inform cvui that all subsequent component calls and events are
        # related to window 1.
        cvui.context(WINDOW1_NAME)

        # Fill the frame with a nice color
        frame1[:] = (49, 52, 49)

        cvui.beginRow(frame1, 10, 20, -1, -1, 10)
        cvui.image(img)
        # Image button with idle / over / down images.
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()

        # Update all components of window1, e.g. mouse clicks, and show it.
        cvui.update(WINDOW1_NAME)
        cv2.imshow(WINDOW1_NAME, frame1)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demo: a row layout combining a trackbar, a sparkline and a button."""
    frame = np.zeros((300, 800, 3), np.uint8)
    value = [2.25]   # trackbar-bound value (cvui needs a mutable list)
    values = []      # history of edits, fed to the sparkline

    # A 20 ms waitKey value makes cvui pump key events inside cvui.update().
    cvui.init(WINDOW_NAME, 20)

    while True:
        # Standard cvui background color.
        frame[:] = (49, 52, 49)

        # Inside beginRow()/endRow() components carry no (x, y): cvui lays
        # them out left-to-right automatically. This row sits at (20, 80)
        # with automatic width/height and 10px padding.
        cvui.beginRow(frame, 20, 80, -1, -1, 10)

        # trackbar() returns True whenever the value changed this frame.
        if cvui.trackbar(150, value, 0., 5.):
            print('Trackbar was modified, value : ', value[0])
            values.append(value[0])

        if len(values) > 5:
            cvui.text('Your edits on a sparkline ->')
            cvui.sparkline(values, 240, 60)
            if cvui.button('Clear sparkline'):
                values = []
        else:
            cvui.text('<- Move the trackbar')

        cvui.endRow()

        # Must run *AFTER* all components so cvui can process events.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def button_click(frame, win_name_cvui):
    """Show a settings panel with object-selection buttons over `frame`.

    Draws a green crosshair through the frame center, then loops forever
    rendering five buttons (Ball/Rubick/Up/Down/Send) in a "Setting"
    window; each click updates the selected object code and logs it.
    Note: cv.waitKey(0) blocks until a key is pressed on every iteration.
    """
    cvui.init(win_name_cvui)
    h, w = frame.shape[:2]

    # Button column geometry (right-hand side of the frame).
    start_col = w - 100
    start_row = 50
    button_width = 50
    button_height = 30

    # Enclosing "Setting" window geometry.
    setting_col = start_col - 20
    setting_row = start_row - 30
    setting_width = button_width + 40
    setting_height = button_height * 12

    # Green crosshair through the center: horizontal band of rows...
    frame[int(h / 2 - 1):int(h / 2 + 1)][:] = [0, 255, 0]
    # ...and a vertical band of columns (vectorized; replaces the original
    # per-row Python loop with an equivalent single slice assignment).
    frame[:, int(w / 2 - 1):int(w / 2 + 1)] = [0, 255, 0]

    # Renamed from `object` to avoid shadowing the builtin.
    selected = "undefined"
    while True:
        cvui.window(frame, setting_col, setting_row, setting_width, setting_height, "Setting")
        if cvui.button(frame, start_col, start_row, button_width, button_height, "Ball"):
            selected = "B"
            print("ball clicked")
        if cvui.button(frame, start_col, start_row + 50, button_width, button_height, "Rubick"):
            selected = "R"
            print("rubick clicked")
        if cvui.button(frame, start_col, start_row + 100, button_width, button_height, "Up"):
            selected = "U"
            print("Up clicked")
        if cvui.button(frame, start_col, start_row + 150, button_width, button_height, "Down"):
            selected = "D"
            print("Down clicked")
        if cvui.button(frame, start_col, start_row + 250, button_width, button_height, "Send"):
            selected = "S"
            print("Send clicked")
        cvui.update()
        cv.namedWindow(win_name_cvui, cv.WINDOW_NORMAL)
        cv.imshow(win_name_cvui, frame)
        cv.waitKey(0)
def main(toml_path, directory_for_save, config_name, rgb_rate, scale_for_visualization):
    """Capture tool: show the raw RGB stream next to a segmentation overlay.

    Left pane: raw camera image; right pane: the same image masked via the
    segmentation inference. 's' / the "capture image" button saves the raw
    frame to `directory_for_save`; "erase" clears the directory; ESC or 'q'
    quits.
    """
    rgb_manager = RGBCaptureManager(toml_path)
    inference = create_inference(config_name)
    width, height = rgb_manager.size
    # Scale factor applies to the preview panes only; saving uses full size.
    scaling = partial(scaling_int, scale=scale_for_visualization)
    width_resized = scaling(width)
    height_resized = scaling(height)
    # Two side-by-side panes plus a 300px strip for buttons and the counter.
    frame = np.zeros((height_resized + 300, width_resized * 2, 3), np.uint8)
    WINDOW_NAME = "Capture"
    cvui.init(WINDOW_NAME)
    while True:
        frame[:] = (49, 52, 49)
        key = cv2.waitKey(10)
        status = rgb_manager.update()
        if not status:
            continue  # no new frame available; try again
        rgb_image_raw = rgb_manager.read()
        rgb_image_masked = get_masked_image_with_segmentation(
            rgb_image_raw, rgb_manager, inference, rgb_rate)
        number_of_saved_frame = get_number_of_saved_image(directory_for_save)
        cvui.printf(frame, 50, height_resized + 50, 0.8, 0x00FF00,
                    "Number of Captured Images : %d", number_of_saved_frame)
        # Save on button press or the "s" key.
        if cvui.button(frame, 50, height_resized + 110, 200, 100, "capture image") or key & 0xFF == ord("s"):
            save_image(rgb_image_raw, directory_for_save)
        if cvui.button(frame, 300, height_resized + 110, 200, 100, "erase"):
            clean_save_dir(directory_for_save)
        # Compose the two preview panes.
        rgb_image_resized = cv2.resize(rgb_image_raw, (width_resized, height_resized))
        masked_image_resized = cv2.resize(rgb_image_masked, (width_resized, height_resized))
        frame[0:height_resized, 0:width_resized, :] = rgb_image_resized
        frame[0:height_resized, width_resized:(width_resized * 2), :] = masked_image_resized
        if key == 27 or key == ord("q"):
            break
        cvui.update()
        cvui.imshow(WINDOW_NAME, frame)
    cv2.destroyAllWindows()
def main():
    """Demo: query the mouse status over a rectangular interaction area."""
    frame = np.zeros((300, 600, 3), np.uint8)

    # Init cvui and let it create the OpenCV window (cv::namedWindow(WINDOW_NAME)).
    cvui.init(WINDOW_NAME)

    while True:
        # Standard cvui background color.
        frame[:] = (49, 52, 49)

        # The rectangle we draw doubles as our interaction area.
        area = cvui.Rect(50, 50, 100, 100)
        cvui.rect(frame, area.x, area.y, area.width, area.height, 0xff0000)

        # iarea() reports the mouse status relative to that region:
        #   CLICK - just clicked inside it
        #   DOWN  - button pressed on it, not released yet
        #   OVER  - cursor hovering over it
        #   OUT   - cursor outside it
        status = cvui.iarea(area.x, area.y, area.width, area.height)

        # The statuses are mutually exclusive, so an elif chain suffices.
        if status == cvui.CLICK:
            print('Rectangle was clicked!')
        elif status == cvui.DOWN:
            cvui.printf(frame, 240, 70, "Mouse is: DOWN")
        elif status == cvui.OVER:
            cvui.printf(frame, 240, 70, "Mouse is: OVER")
        elif status == cvui.OUT:
            cvui.printf(frame, 240, 70, "Mouse is: OUT")

        # Always show where the pointer currently is.
        cvui.printf(frame, 240, 50, "Mouse pointer is at (%d,%d)", cvui.mouse().x, cvui.mouse().y)

        # Must run *AFTER* all components so cvui can process events.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demo: a draggable, minimizable settings window controlling Canny edge."""
    fruits = cv2.imread('fruits.jpg', cv2.IMREAD_COLOR)
    frame = np.zeros(fruits.shape, np.uint8)

    # Widget-bound state (cvui mutates these lists in place).
    low_threshold = [50]
    high_threshold = [150]
    use_canny = [False]

    # Create a settings window using the EnhancedWindow class.
    settings = EnhancedWindow(10, 50, 270, 180, 'Settings')

    # Init cvui and tell it to create a OpenCV window, i.e. cv2.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while True:
        # Should we apply Canny edge?
        if use_canny[0]:
            # Yes: grayscale -> Canny -> back to BGR so it fits the frame.
            frame = cv2.cvtColor(fruits, cv2.COLOR_BGR2GRAY)
            frame = cv2.Canny(frame, low_threshold[0], high_threshold[0], 3)
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        else:
            # No, so just copy the original image to the displaying frame.
            frame[:] = fruits[:]

        # Render the settings window and its content, if it is not minimized.
        settings.begin(frame)
        if not settings.isMinimized():  # idiomatic (was: == False)
            cvui.checkbox('Use Canny Edge', use_canny)
            cvui.trackbar(settings.width() - 20, low_threshold, 5, 150)
            cvui.trackbar(settings.width() - 20, high_threshold, 80, 300)
            cvui.space(20)  # add 20px of empty space
            cvui.text('Drag and minimize this settings window', 0.4, 0xff0000)
        settings.end()

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def user_interface(self, global_image):
    """Draw the handsign/recording checkboxes and run the selected actions.

    Reads the ROI (x, y, w, h) from the "Control" window trackbars; when
    "handsign" is checked the ROI is captured via handsign_capture(), and
    when "start recording" is checked frames are written via record_images().
    """
    # Copy made *before* the checkboxes are drawn, so captured/recorded
    # frames don't contain the UI overlay.
    handsign = global_image.copy()
    # Checkbox states live in the mutable lists self.handsign_state / self.record.
    cvui.checkbox(global_image, 0, 50, "handsign", self.handsign_state)
    cvui.checkbox(global_image, 0, 75, "start recording", self.record)
    # ROI selected via the trackbars of the "Control" window.
    x = cv2.getTrackbarPos("x", "Control")
    y = cv2.getTrackbarPos("y", "Control")
    w = cv2.getTrackbarPos("w", "Control")
    h = cv2.getTrackbarPos("h", "Control")
    if self.handsign_state[0] == True:
        self.handsign_capture(handsign, global_image, x, y, w, h)
    if self.record[0] == True:
        self.record_images(handsign)
    #cv2.destroyWindow("Image")
    cv2.imshow("Control", global_image)
    cvui.update()
def main():
    """Demo: sparklines with various sizes, colors and data densities."""
    frame = np.zeros((600, 800, 3), np.uint8)

    # Init cvui and let it create the OpenCV window (cv::namedWindow(WINDOW_NAME)).
    cvui.init(WINDOW_NAME)

    # Load some data points from a file
    points = load('sparkline.csv')

    # A sparsely populated series and a completely empty one.
    sparse_points = [random.uniform(0., 300.0) for _ in range(30)]
    empty_points = []

    while True:
        # Standard cvui background color.
        frame[:] = (49, 52, 49)

        # The same series rendered three times with different geometry/colors.
        cvui.sparkline(frame, points, 0, 0, 800, 200)
        cvui.sparkline(frame, points, 0, 200, 800, 100, 0xff0000)
        cvui.sparkline(frame, points, 0, 300, 400, 100, 0x0000ff)

        # Few points still render fine.
        cvui.sparkline(frame, sparse_points, 10, 400, 790, 80, 0xff00ff)

        # No data at all: cvui renders a visual warning instead.
        cvui.sparkline(frame, empty_points, 10, 500, 750, 100, 0x0000ff)

        # Must run *AFTER* all components so cvui can process events.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demo: Canny edge detection toggled and tuned through cvui widgets."""
    lena = cv2.imread('lena.jpg', cv2.IMREAD_COLOR)
    frame = np.zeros(lena.shape, np.uint8)

    # Widget-bound state (cvui mutates these lists in place).
    low_threshold = [50]
    high_threshold = [150]
    use_canny = [False]

    # Init cvui and let it create the OpenCV window (cv2.namedWindow(WINDOW_NAME)).
    cvui.init(WINDOW_NAME)

    while True:
        if use_canny[0]:
            # Canny pipeline: gray -> edges -> back to 3 channels for display.
            frame = cv2.cvtColor(lena, cv2.COLOR_BGR2GRAY)
            frame = cv2.Canny(frame, low_threshold[0], high_threshold[0], 3)
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        else:
            # Edge detection disabled: just show the original image.
            frame[:] = lena[:]

        # A settings window housing the checkbox and the two trackbars
        # that control the low/high Canny thresholds.
        cvui.window(frame, 10, 50, 180, 180, 'Settings')
        cvui.checkbox(frame, 15, 80, 'Use Canny Edge', use_canny)
        cvui.trackbar(frame, 15, 110, 165, low_threshold, 5, 150)
        cvui.trackbar(frame, 15, 180, 165, high_threshold, 80, 300)

        # Must run *AFTER* all components so cvui can process events.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
def main(): frame = np.zeros((300, 600, 3), np.uint8) # Init cvui and tell it to create a OpenCV window, i.e. cv::namedWindow(WINDOW_NAME). cvui.init(WINDOW_NAME) while (True): # Fill the frame with a nice color frame[:] = (49, 52, 49) # Render a rectangle on the screen. rectangle = cvui.Rect(50, 50, 100, 100) cvui.rect(frame, rectangle.x, rectangle.y, rectangle.width, rectangle.height, 0xff0000) # Check what is the current status of the mouse cursor # regarding the previously rendered rectangle. status = cvui.iarea(rectangle.x, rectangle.y, rectangle.width, rectangle.height); # cvui::iarea() will return the current mouse status: # CLICK: mouse just clicked the interaction are # DOWN: mouse button was pressed on the interaction area, but not released yet. # OVER: mouse cursor is over the interaction area # OUT: mouse cursor is outside the interaction area if status == cvui.CLICK: print('Rectangle was clicked!') if status == cvui.DOWN: cvui.printf(frame, 240, 70, "Mouse is: DOWN") if status == cvui.OVER: cvui.printf(frame, 240, 70, "Mouse is: OVER") if status == cvui.OUT: cvui.printf(frame, 240, 70, "Mouse is: OUT") # Show the coordinates of the mouse pointer on the screen cvui.printf(frame, 240, 50, "Mouse pointer is at (%d,%d)", cvui.mouse().x, cvui.mouse().y) # This function must be called *AFTER* all UI components. It does # all the behind the scenes magic to handle mouse clicks, etc. cvui.update() # Show everything on the screen cv2.imshow(WINDOW_NAME, frame) # Check if ESC key was pressed if cv2.waitKey(20) == 27: break
def fizeau(frame, expr):
    """Run the animated Fizeau speed-of-light experiment screen.

    Draws the rotating sprocket animation, the light-intensity graphs and
    the frequency/experiment control windows onto `frame` until the user
    either presses ESC (returns -1) or switches to another experiment via
    the checkboxes (returns the new experiment index).
    """
    curr_expr = expr
    # One [bool] flag per selectable experiment (cvui checkbox bindings).
    experiments = [[False] for i in range(2)]

    # Static artwork and sprite sources.
    fizeau_default = cv2.imread(resource_path("images/fizeau_default.png"), 3)
    fizeau_not_default = cv2.imread(resource_path("images/fizeau_not_default.png"), 3)
    fizeau_line = cv2.imread(resource_path("images/fizeau_line.png"), 3)
    fizeau_distance = cv2.imread(resource_path("images/fizeau_distance.png"), 3)
    fizeau_dot = cv2.imread(resource_path("images/fizeau_dot.png"), cv2.IMREAD_UNCHANGED)
    fizeau_dot_grey = cv2.imread(resource_path("images/fizeau_dot_grey.png"), cv2.IMREAD_UNCHANGED)
    fizeau_sprocket = cv2.imread(resource_path("images/fizeau_sprocket.png"), cv2.IMREAD_UNCHANGED)
    fizeau_metre = cv2.imread(resource_path("images/fizeau_metre.png"), cv2.IMREAD_UNCHANGED)
    fizeau_blank = cv2.imread(resource_path("images/fizeau_blank.png"), cv2.IMREAD_UNCHANGED)
    fizeau_darkstripes = cv2.imread(resource_path("images/fizeau_darkstripes.png"), cv2.IMREAD_UNCHANGED)
    fizeau_lightstripes = cv2.imread(resource_path("images/fizeau_lightstripes.png"), cv2.IMREAD_UNCHANGED)

    # Build the two 120-frame wheel animations ("pos" and "neg" variants),
    # stamping dots at four successive 30-degree rotations each.
    wheel_sprites_pos = get_120(fizeau_default, fizeau_sprocket)
    wheel_sprites_pos = put_dots_pos(wheel_sprites_pos, fizeau_dot, fizeau_dot_grey)
    wheel_sprites_pos.rotate(30)
    wheel_sprites_pos = put_dots_pos(wheel_sprites_pos, fizeau_dot, fizeau_dot_grey)
    wheel_sprites_pos.rotate(30)
    wheel_sprites_pos = put_dots_pos(wheel_sprites_pos, fizeau_dot, fizeau_dot_grey)
    wheel_sprites_pos.rotate(30)
    wheel_sprites_pos = put_dots_pos(wheel_sprites_pos, fizeau_dot, fizeau_dot_grey)

    wheel_sprites_neg = get_120(fizeau_not_default, fizeau_sprocket)
    wheel_sprites_neg = put_dots_neg(wheel_sprites_neg, fizeau_dot, fizeau_dot_grey)
    wheel_sprites_neg.rotate(30)
    wheel_sprites_neg = put_dots_neg(wheel_sprites_neg, fizeau_dot, fizeau_dot_grey)
    wheel_sprites_neg.rotate(30)
    wheel_sprites_neg = put_dots_neg(wheel_sprites_neg, fizeau_dot, fizeau_dot_grey)
    wheel_sprites_neg.rotate(30)
    wheel_sprites_neg = put_dots_neg(wheel_sprites_neg, fizeau_dot, fizeau_dot_grey)

    animation = 0          # current animation frame index (0..119)
    frequency_tr = [6.0]   # sprocket rotation frequency (Hz), trackbar-bound

    while (True):
        frame[:] = (49, 52, 49)

        # Animation window
        coord = get_coord(frequency_tr[0])
        if coord == 23:
            # Proper frequency found: show the "neg" animation and the
            # full speed-of-light calculation.
            cvui.image(frame, 0, 0, wheel_sprites_neg[animation])
            cvui.text(frame, 33, 200, "Calculation of the speed of light", 0.5)
            cvui.text(frame, 34, 220, "distance ... s = {:,} m".format(8633), 0.5)
            cvui.text(frame, 34, 240, "number of teeth in sprocket ... N = 720", 0.5)
            # BUGFIX: use N = 720 teeth (matching the line above and the
            # historical apparatus). The previous value 7200 displayed a
            # result ten times the speed of light; with 720 the result at
            # the proper frequency (~12.06 Hz) is ~299,792 km/s, i.e. c.
            cvui.text(frame, 34, 260,
                      "c=4*s*N*f=4*{:,}*{:,}*{:,}={:,} km/s".format(
                          8633, 720, round(frequency_tr[0], 2),
                          round((4 * 8633 * 720 * frequency_tr[0]) / 1000, 2)), 0.5)
        else:
            cvui.image(frame, 0, 0, wheel_sprites_pos[animation])
            cvui.text(frame, 33, 200, "Calculation of the speed of light", 0.5)
            cvui.text(frame, 34, 220, "distance ... s = {:,} m".format(8633), 0.5)
            cvui.text(frame, 34, 240, "number of teeth in sprocket ... N = 720", 0.5)
            cvui.text(frame, 34, 260, "c=4*s*N*f ... proper frequency wasn't found", 0.5)

        cvui.image(frame, 872, 499, fizeau_line)
        cvui.image(frame, 874, 687, fizeau_distance)

        # Advance (and wrap) the 120-frame wheel animation.
        animation += 1
        if animation == 120:
            animation = 0

        # Diagram labels.
        cvui.text(frame, 58, 660, "Light source")
        cvui.text(frame, 473, 660, "Semipermeable mirror")
        cvui.text(frame, 840, 660, "Sprocket")
        cvui.text(frame, 994, 673, "Distance")
        cvui.text(frame, 1145, 660, "Mirror")

        # Light-intensity graphs before/after the sprocket.
        cvui.text(frame, 892, 277, "-> Light behind sprocket")
        img = get_graph(fizeau_metre, fizeau_blank, fizeau_darkstripes, fizeau_lightstripes, coord=0, crop_light=False)
        cvui.image(frame, 896, 298, img)
        cvui.text(frame, 892, 377, "<- Reflected light before sprocket")
        img = get_graph(fizeau_metre, fizeau_blank, fizeau_darkstripes, fizeau_lightstripes, coord=coord, crop_light=False)
        cvui.image(frame, 896, 397, img)
        cvui.text(frame, 434, 100, "Reflected light behind sprocket")
        img = get_graph(fizeau_metre, fizeau_blank, fizeau_darkstripes, fizeau_lightstripes, coord=coord, crop_light=True)
        cvui.image(frame, 434, 120, img)

        # Experiment settings window
        cvui.window(frame, 1033.5, 2, 243.5, 104, 'Experiment settings')
        cvui.trackbar(frame, 1030, 39, 249, frequency_tr, 0.01, 12.0577)
        # Background-colored rects drawn over the trackbar area — presumably
        # hiding its default chrome/labels (confirm against cvui rendering).
        cvui.rect(frame, 1035, 39, 240, 12, 0x313131, 0x313131)
        cvui.rect(frame, 1035, 74, 240, 25, 0x313131, 0x313131)
        cvui.text(frame, 1041, 32, "Frequency")
        cvui.text(frame, 1042, 82, "{:,} Hz".format(round(frequency_tr[0], 2)))

        # Experiments window
        cvui.window(frame, 2, 2, 155, 75, 'Experiments')
        cvui.checkbox(frame, 10, 30, "1638 - Galileo", experiments[0])
        cvui.checkbox(frame, 10, 53, "1849 - Fizeau", experiments[1])
        # Resolve the checkbox states into a single selected experiment and
        # re-arm the flags so exactly one stays checked next frame.
        curr_expr = exp_type(curr_expr, experiments)
        experiments = [[False] for i in range(2)]
        experiments[curr_expr] = [True]

        cvui.update()
        cv2.imshow('Speed of Light Measurement', frame)

        # ESC aborts; selecting another experiment returns its index.
        if cv2.waitKey(20) == 27:
            return -1
        if curr_expr != expr:
            return curr_expr
def main():
    """Let the user drag out up to three ROIs over Lena's image, one per mouse button.

    Each mouse button (left/middle/right) owns its own anchor, ROI and color;
    every valid ROI is outlined on the main frame and mirrored into its own
    'ROI button<n>' window. Runs until ESC is pressed.
    """
    lena = cv2.imread('lena.jpg')
    frame = np.zeros(lena.shape, np.uint8)
    anchors = [cvui.Point() for i in range(3)]  # one anchor for each mouse button
    rois = [cvui.Rect() for i in range(3)]  # one ROI for each mouse button
    colors = [0xff0000, 0x00ff00, 0x0000ff]  # one outline color per button

    # Init cvui and tell it to create a OpenCV window, i.e. cv.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while (True):
        # Fill the frame with Lena's image
        frame[:] = lena[:]

        # Show the coordinates of the mouse pointer on the screen
        cvui.text(frame, 10, 10, 'Click (any) mouse button then drag the pointer around to select a ROI.')
        cvui.text(frame, 10, 25, 'Use different mouse buttons (right, middle and left) to select different ROIs.')

        # Iterate all mouse buttons (left, middle and right button)
        button = cvui.LEFT_BUTTON
        while button <= cvui.RIGHT_BUTTON:
            # Get the anchor, ROI and color associated with the mouse button
            anchor = anchors[button]
            roi = rois[button]
            color = colors[button]

            # The function 'bool cvui.mouse(int button, int query)' allows you to query a particular mouse button for events.
            # E.g. cvui.mouse(cvui.RIGHT_BUTTON, cvui.DOWN)
            #
            # Available queries:
            # - cvui.DOWN: mouse button was pressed. cvui.mouse() returns true for single frame only.
            # - cvui.UP: mouse button was released. cvui.mouse() returns true for single frame only.
            # - cvui.CLICK: mouse button was clicked (went down then up, no matter the amount of frames in between). cvui.mouse() returns true for single frame only.
            # - cvui.IS_DOWN: mouse button is currently pressed. cvui.mouse() returns true for as long as the button is down/pressed.

            # Did the mouse button go down?
            if cvui.mouse(button, cvui.DOWN):
                # Position the anchor at the mouse pointer.
                anchor.x = cvui.mouse().x
                anchor.y = cvui.mouse().y

            # Is any mouse button down (pressed)?
            if cvui.mouse(button, cvui.IS_DOWN):
                # Adjust roi dimensions according to mouse pointer.
                # The anchor is the corner where the drag started; a negative
                # width/height means the pointer moved left/up from it.
                width = cvui.mouse().x - anchor.x
                height = cvui.mouse().y - anchor.y

                roi.x = anchor.x + width if width < 0 else anchor.x
                roi.y = anchor.y + height if height < 0 else anchor.y
                roi.width = abs(width)
                roi.height = abs(height)

                # Show the roi coordinates and size
                cvui.printf(frame, roi.x + 5, roi.y + 5, 0.3, color, '(%d,%d)', roi.x, roi.y)
                cvui.printf(frame, cvui.mouse().x + 5, cvui.mouse().y + 5, 0.3, color, 'w:%d, h:%d', roi.width, roi.height)

            # Ensure ROI is within bounds: clamp the origin to >= 0 and shrink
            # width/height so x+width (y+height) never exceeds the image.
            lenaRows, lenaCols, lenaChannels = lena.shape
            roi.x = 0 if roi.x < 0 else roi.x
            roi.y = 0 if roi.y < 0 else roi.y
            roi.width = roi.width + lenaCols - (roi.x + roi.width) if roi.x + roi.width > lenaCols else roi.width
            roi.height = roi.height + lenaRows - (roi.y + roi.height) if roi.y + roi.height > lenaRows else roi.height

            # If the ROI is valid, render it in the frame and show in a window.
            if roi.area() > 0:
                cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, color)
                cvui.printf(frame, roi.x + 5, roi.y - 10, 0.3, color, 'ROI %d', button)

                lenaRoi = lena[roi.y : roi.y + roi.height, roi.x : roi.x + roi.width]
                cv2.imshow('ROI button' + str(button), lenaRoi)

            button += 1

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Drive three OpenCV windows from a single loop using cvui contexts.

    Window 1 and window 2 each show Open/Close buttons that control a shared
    error popup window; the popup itself carries its own Close button.
    Runs until ESC is pressed in a watched window.
    """
    # We have one mat for each window.
    frame1 = np.zeros((150, 600, 3), np.uint8)
    frame2 = np.zeros((150, 600, 3), np.uint8)
    error_frame = np.zeros((100, 300, 3), np.uint8)

    # Flag to control if we should show an error window.
    error = False

    # Create two OpenCV windows
    cv2.namedWindow(GUI_WINDOW1_NAME)
    cv2.namedWindow(GUI_WINDOW2_NAME)

    # Init cvui and inform it to use the first window as the default one.
    # cvui.init() will automatically watch the informed window.
    cvui.init(GUI_WINDOW1_NAME)

    # Tell cvui to keep track of mouse events in window2 as well.
    cvui.watch(GUI_WINDOW2_NAME)

    while (True):
        # Inform cvui that all subsequent component calls and events are related to window 1.
        cvui.context(GUI_WINDOW1_NAME)

        # Fill the frame with a nice color
        frame1[:] = (49, 52, 49)

        cvui.beginColumn(frame1, 50, 20, -1, -1, 10)
        cvui.text('[Win1] Use the buttons below to control the error window')

        if cvui.button('Close'):
            closeWindow(ERROR_WINDOW_NAME)

        # If the button is clicked, we open the error window.
        # The content and rendering of such error window will be performed
        # after we handled all other windows.
        if cvui.button('Open'):
            error = True
            openWindow(ERROR_WINDOW_NAME)
        cvui.endColumn()

        # Update all components of window1, e.g. mouse clicks, and show it.
        cvui.update(GUI_WINDOW1_NAME)
        cv2.imshow(GUI_WINDOW1_NAME, frame1)

        # From this point on, we are going to render the second window. We need to inform cvui
        # that all updates and components from now on are connected to window 2.
        # We do that by calling cvui.context().
        cvui.context(GUI_WINDOW2_NAME)
        frame2[:] = (49, 52, 49)

        cvui.beginColumn(frame2, 50, 20, -1, -1, 10)
        cvui.text('[Win2] Use the buttons below to control the error window')

        if cvui.button('Close'):
            closeWindow(ERROR_WINDOW_NAME)

        # If the button is clicked, we open the error window.
        # The content and rendering of such error window will be performed
        # after we handled all other windows.
        if cvui.button('Open'):
            openWindow(ERROR_WINDOW_NAME)
            error = True
        cvui.endColumn()

        # Update all components of window2, e.g. mouse clicks, and show it.
        cvui.update(GUI_WINDOW2_NAME)
        cv2.imshow(GUI_WINDOW2_NAME, frame2)

        # Handle the content and rendering of the error window,
        # if we have an active error and the window is actually open.
        if error and isWindowOpen(ERROR_WINDOW_NAME):
            # Inform cvui that all subsequent component calls and events are
            # related to the error window from now on
            cvui.context(ERROR_WINDOW_NAME)

            # Fill the error window with a vibrant color
            error_frame[:] = (10, 10, 49)

            cvui.text(error_frame, 70, 20, 'This is an error message', 0.4, 0xff0000)

            if cvui.button(error_frame, 110, 40, 'Close'):
                error = False

            if error:
                # We still have an active error.
                # Update all components of the error window, e.g. mouse clicks, and show it.
                cvui.update(ERROR_WINDOW_NAME)
                cv2.imshow(ERROR_WINDOW_NAME, error_frame)
            else:
                # No more active error. Let's close the error window.
                closeWindow(ERROR_WINDOW_NAME)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Let the user drag a ROI over Lena's image and show the cropped region.

    The cropped region is displayed in ROI_WINDOW once the drag ends
    (mouse released). Runs until ESC is pressed.

    Bug fix: the out-of-bounds clamping used ``lena.cols`` / ``lena.rows``,
    which are C++ cv::Mat members — numpy arrays have no such attributes, so
    the original raised AttributeError on every frame. The clamping now uses
    the shape values unpacked just above.
    """
    lena = cv2.imread('lena.jpg')
    frame = np.zeros(lena.shape, np.uint8)
    anchor = cvui.Point()          # corner where the current drag started
    roi = cvui.Rect(0, 0, 0, 0)    # the selection being built
    working = False                # True while a drag is in progress

    # Init cvui and tell it to create a OpenCV window, i.e. cv.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while (True):
        # Fill the frame with Lena's image
        frame[:] = lena[:]

        # Show instructions on the screen
        cvui.text(frame, 10, 10, 'Click (any) mouse button and drag the pointer around to select a ROI.')

        # Queries supported by 'bool cvui.mouse(int query)':
        # - cvui.DOWN / cvui.UP / cvui.CLICK: true for a single frame only.
        # - cvui.IS_DOWN: true for as long as any button is pressed.

        # Did any mouse button go down?
        if cvui.mouse(cvui.DOWN):
            # Position the anchor at the mouse pointer.
            anchor.x = cvui.mouse().x
            anchor.y = cvui.mouse().y

            # Inform we are working, so the ROI window is not updated every frame
            working = True

        # Is any mouse button down (pressed)?
        if cvui.mouse(cvui.IS_DOWN):
            # Adjust roi dimensions according to mouse pointer; a negative
            # width/height means the pointer moved left/up from the anchor.
            width = cvui.mouse().x - anchor.x
            height = cvui.mouse().y - anchor.y

            roi.x = anchor.x + width if width < 0 else anchor.x
            roi.y = anchor.y + height if height < 0 else anchor.y
            roi.width = abs(width)
            roi.height = abs(height)

            # Show the roi coordinates and size
            cvui.printf(frame, roi.x + 5, roi.y + 5, 0.3, 0xff0000, '(%d,%d)', roi.x, roi.y)
            cvui.printf(frame, cvui.mouse().x + 5, cvui.mouse().y + 5, 0.3, 0xff0000, 'w:%d, h:%d', roi.width, roi.height)

        # Was any mouse button released?
        if cvui.mouse(cvui.UP):
            # We are done working with the ROI.
            working = False

        # Ensure ROI is within bounds: clamp the origin to >= 0 and shrink
        # width/height so the ROI never extends past the image.
        lenaRows, lenaCols, lenaChannels = lena.shape
        roi.x = 0 if roi.x < 0 else roi.x
        roi.y = 0 if roi.y < 0 else roi.y
        # FIX: use lenaCols/lenaRows instead of the nonexistent lena.cols/lena.rows.
        roi.width = roi.width + lenaCols - (roi.x + roi.width) if roi.x + roi.width > lenaCols else roi.width
        roi.height = roi.height + lenaRows - (roi.y + roi.height) if roi.y + roi.height > lenaRows else roi.height

        # Render the roi
        cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, 0xff0000)

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # If the ROI is valid, show it.
        if roi.area() > 0 and working == False:
            lenaRoi = lena[roi.y:roi.y + roi.height, roi.x:roi.x + roi.width]
            cv2.imshow(ROI_WINDOW, lenaRoi)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main(): lena = cv2.imread('lena.jpg', cv2.IMREAD_COLOR) frame = np.zeros(lena.shape, np.uint8) doubleBuffer = np.zeros(lena.shape, np.uint8) trackbarWidth = 130 # Adjustments values for RGB and HSV rgb = [[1.], [1.], [1]] hsv = [[1.], [1.], [1]] # Copy the loaded image to the buffer doubleBuffer[:] = lena[:] # Init cvui and tell it to use a value of 20 for cv2.waitKey() # because we want to enable keyboard shortcut for # all components, e.g. button with label '&Quit'. # If cvui has a value for waitKey, it will call # waitKey() automatically for us within cvui.update(). cvui.init(WINDOW_NAME, 20) while (True): frame[:] = doubleBuffer[:] frameRows, frameCols, frameChannels = frame.shape # Exit the application if the quit button was pressed. # It can be pressed because of a mouse click or because # the user pressed the 'q' key on the keyboard, which is # marked as a shortcut in the button label ('&Quit'). if cvui.button(frame, frameCols - 100, frameRows - 30, '&Quit'): break # RGB HUD cvui.window(frame, 20, 50, 180, 240, 'RGB adjust') # Within the cvui.beginColumns() and cvui.endColumn(), # all elements will be automatically positioned by cvui. # In a columns, all added elements are vertically placed, # one under the other (from top to bottom). # # Notice that all component calls within the begin/end block # below DO NOT have (x,y) coordinates. 
# # Let's create a row at position (35,80) with automatic # width and height, and a padding of 10 cvui.beginColumn(frame, 35, 80, -1, -1, 10) rgbModified = False # Trackbar accept a pointer to a variable that controls their value # They return true upon edition if cvui.trackbar(trackbarWidth, rgb[0], 0., 2., 2, '%3.02Lf'): rgbModified = True if cvui.trackbar(trackbarWidth, rgb[1], 0., 2., 2, '%3.02Lf'): rgbModified = True if cvui.trackbar(trackbarWidth, rgb[2], 0., 2., 2, '%3.02Lf'): rgbModified = True cvui.space(2) cvui.printf(0.35, 0xcccccc, ' RGB: %3.02lf,%3.02lf,%3.02lf', rgb[0][0], rgb[1][0], rgb[2][0]) if (rgbModified): b, g, r = cv2.split(lena) b = b * rgb[2][0] g = g * rgb[1][0] r = r * rgb[0][0] cv2.merge((b, g, r), doubleBuffer) cvui.endColumn() # HSV lenaRows, lenaCols, lenaChannels = lena.shape cvui.window(frame, lenaCols - 200, 50, 180, 240, 'HSV adjust') cvui.beginColumn(frame, lenaCols - 180, 80, -1, -1, 10) hsvModified = False if cvui.trackbar(trackbarWidth, hsv[0], 0., 2., 2, '%3.02Lf'): hsvModified = True if cvui.trackbar(trackbarWidth, hsv[1], 0., 2., 2, '%3.02Lf'): hsvModified = True if cvui.trackbar(trackbarWidth, hsv[2], 0., 2., 2, '%3.02Lf'): hsvModified = True cvui.space(2) cvui.printf(0.35, 0xcccccc, ' HSV: %3.02lf,%3.02lf,%3.02lf', hsv[0][0], hsv[1][0], hsv[2][0]) if hsvModified: hsvMat = cv2.cvtColor(lena, cv2.COLOR_BGR2HSV) h, s, v = cv2.split(hsvMat) h = h * hsv[0][0] s = s * hsv[1][0] v = v * hsv[2][0] cv2.merge((h, s, v), hsvMat) doubleBuffer = cv2.cvtColor(hsvMat, cv2.COLOR_HSV2BGR) cvui.endColumn() # Display the lib version at the bottom of the screen cvui.printf(frame, frameCols - 300, frameRows - 20, 0.4, 0xCECECE, 'cvui v.%s', cvui.VERSION) # This function must be called *AFTER* all UI components. It does # all the behind the scenes magic to handle mouse clicks, etc. # # Since cvui.init() received a param regarding waitKey, # there is no need to call cv2.waitKey() anymore. cvui.update() # will do it automatically. 
cvui.update() # Show everything on the screen cv2.imshow(WINDOW_NAME, frame)
def coord_get_video(path): a, b = [0], [0] main_ui = "mainui" win_name_cvui = "cvui" frame = None def on_EVENT_LBUTTONDOWN(event, x, y, flags, param): if event == cv.EVENT_LBUTTONDOWN: xy = "%d,%d" % (x, y) a.append(x) b.append(y) cv.circle(frame, (x, y), 1, (0, 0, 255), thickness=-1) cv.putText(frame, xy, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=1) cv.imshow("image", frame) print(a[-1], b[-1]) return a, b cap = cv.VideoCapture(path) cvui.init(win_name_cvui) while True: ret, frame = cap.read() h, w = frame.shape[:2] start_col = w - 100 start_row = 50 button_width = 50 button_height = 30 setting_col = start_col - 20 setting_row = start_row - 30 setting_width = button_width + 40 setting_height = button_height * 12 object = "undefined" frame[int(h / 2 - 1):int(h / 2 + 1)][:] = [0, 255, 0] for i in range(0, h): frame[i][int(w / 2 - 1):int(w / 2 + 1)] = [0, 255, 0] cvui.window(frame, setting_col, setting_row, setting_width, setting_height, "Setting") if (cvui.button(frame, start_col, start_row, button_width, button_height, "Ball")): object = "B" print("ball clicked") if (cvui.button(frame, start_col, start_row + 50, button_width, button_height, "Rubick")): object = "R" print("rubick clicked") if (cvui.button(frame, start_col, start_row + 100, button_width, button_height, "Up")): object = "U" print("Up clicked") if (cvui.button(frame, start_col, start_row + 150, button_width, button_height, "Down")): object = "D" print("Down clicked") if (cvui.button(frame, start_col, start_row + 250, button_width, button_height, "Send")): object = "S" print("Send clicked") cvui.update() cv.namedWindow(win_name_cvui, cv.WINDOW_NORMAL) cv.imshow(win_name_cvui, frame) cv.namedWindow("image", cv.WINDOW_NORMAL) cv.setMouseCallback("image", on_EVENT_LBUTTONDOWN) cv.imshow("image", frame) print(a[-1], b[-1], "ss") if cv.waitKey(300) & 0xff == 27: cv.destroyAllWindows() break
def main():
    """Capture RGB + depth frames from a PicoZense camera and save PNG pairs.

    Shows RGB and a hue-colorized depth image side by side; 'Capture' (or the
    's' key) saves the current pair under DATA_SAVE_DIR/{color,depth},
    'Erase Images' wipes both directories. Quit with 'q' or ESC.

    Bug fix: the loop condition was
    ``(key != ord('q')) or (key != 27)`` — always True, since a key cannot
    equal both 'q' and ESC at once, so the 'q' shortcut never quit.
    It now uses ``and``.
    """
    global TOML_PATH_ZENSE
    global DATA_SAVE_DIR
    global WINDOW_NAME
    global IMAGE_WIDTH
    global IMAGE_HEIGHT

    # Make sure the output directory tree exists.
    if not os.path.exists(DATA_SAVE_DIR):
        os.mkdir(DATA_SAVE_DIR)
    if not os.path.exists(osp.join(DATA_SAVE_DIR, "depth")):
        os.mkdir(osp.join(DATA_SAVE_DIR, "depth"))
    if not os.path.exists(osp.join(DATA_SAVE_DIR, "color")):
        os.mkdir(osp.join(DATA_SAVE_DIR, "color"))

    # Resume numbering from whatever is already on disk.
    number_of_saved_frame = len(
        glob.glob(osp.join(DATA_SAVE_DIR, "depth", "*.png")))

    zense_mng = PyPicoZenseManager(0)
    cvui.init(WINDOW_NAME)

    key = cv2.waitKey(20)
    # FIX: 'and' instead of 'or' — the old condition was a tautology.
    while (key & 0xFF != ord('q')) and (key & 0xFF != 27):
        status = zense_mng.update()
        if status:
            rgb_img = zense_mng.getRGBImage()
            depth_img = zense_mng.getDepthImage()
            rgb_img_resized = cv2.resize(rgb_img, (IMAGE_WIDTH, IMAGE_HEIGHT))

            # Colorize depth: hue encodes depth scaled from [0, 2000] onto
            # [0, 255]; saturation/value are fixed at 255. Pixels that are
            # zero or beyond 2000 are painted black afterwards.
            depth_img_colorized = np.zeros([IMAGE_HEIGHT, IMAGE_WIDTH, 3]).astype(np.uint8)
            depth_img_colorized[:, :, 1] = 255
            depth_img_colorized[:, :, 2] = 255

            _depth_img_zense_hue = depth_img.copy().astype(np.float32)
            _depth_img_zense_hue[np.where(_depth_img_zense_hue > 2000)] = 0
            # After the zeroing above, the '> 2000' term only exists for
            # symmetry; '== 0' is what actually selects the masked pixels.
            zero_idx = np.where((_depth_img_zense_hue > 2000) | (_depth_img_zense_hue == 0))
            _depth_img_zense_hue *= 255.0 / 2000.0

            depth_img_colorized[:, :, 0] = _depth_img_zense_hue.astype(np.uint8)
            depth_img_colorized = cv2.cvtColor(depth_img_colorized, cv2.COLOR_HSV2RGB)
            depth_img_colorized[zero_idx[0], zero_idx[1], :] = 0

            # Compose the UI frame: RGB on the left, colorized depth on the right.
            frame = np.zeros((IMAGE_HEIGHT * 2, IMAGE_WIDTH * 2, 3), np.uint8)
            frame[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH, :] = rgb_img_resized
            frame[0:IMAGE_HEIGHT, IMAGE_WIDTH:IMAGE_WIDTH * 2, :] = depth_img_colorized

            cvui.printf(frame, 50, IMAGE_HEIGHT + 50, 0.8, 0x00ff00,
                        "Number of Captured Images : %d", number_of_saved_frame)

            # Save the current pair on button press or the 's' shortcut.
            if (cvui.button(frame, 100, IMAGE_HEIGHT + 100, 200, 100, "Capture")) or (key & 0xFF == ord('s')):
                cv2.imwrite(
                    osp.join(DATA_SAVE_DIR, "depth",
                             "%06d.png" % (number_of_saved_frame)), depth_img)
                cv2.imwrite(
                    osp.join(DATA_SAVE_DIR, "color",
                             "%06d.png" % (number_of_saved_frame)), rgb_img)
                number_of_saved_frame += 1

            # Wipe everything saved so far and restart numbering from zero.
            if cvui.button(frame, 350, IMAGE_HEIGHT + 100, 200, 100, "Erase Images"):
                shutil.rmtree(osp.join(DATA_SAVE_DIR, "depth"))
                os.mkdir(osp.join(DATA_SAVE_DIR, "depth"))
                shutil.rmtree(osp.join(DATA_SAVE_DIR, "color"))
                os.mkdir(osp.join(DATA_SAVE_DIR, "color"))
                number_of_saved_frame = 0

            cvui.update()
            cv2.imshow(WINDOW_NAME, frame)
            key = cv2.waitKey(20)
            if key == 27:
                break

    cv2.destroyAllWindows()
def main():
    """Showcase the basic cvui components: text, printf, buttons, window,
    counters, a trackbar and checkboxes. Runs until ESC is pressed.
    """
    frame = np.zeros((300, 600, 3), np.uint8)

    # Component state is passed as mutable single-element lists so cvui
    # can update the values in place.
    checked = [False]
    checked2 = [True]
    count = [0]
    countFloat = [0.0]
    trackbarValue = [0.0]

    # Init cvui and tell it to create a OpenCV window, i.e. cv::namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while (True):
        # Fill the frame with a nice color
        frame[:] = (49, 52, 49)

        # Show some pieces of text.
        cvui.text(frame, 50, 30, 'Hey there!')

        # You can also specify the size of the text and its color
        # using hex 0xRRGGBB CSS-like style.
        cvui.text(frame, 200, 30, 'Use hex 0xRRGGBB colors easily', 0.4, 0xff0000)

        # Sometimes you want to show text that is not that simple, e.g. strings + numbers.
        # You can use cvui.printf for that. It accepts a variable number of parameter, pretty
        # much like printf does.
        cvui.printf(frame, 200, 50, 0.4, 0x00ff00, 'Use printf formatting: %d + %.2f = %f', 2, 3.2, 5.2)

        # Buttons will return true if they were clicked, which makes
        # handling clicks a breeze.
        if cvui.button(frame, 50, 60, 'Button'):
            print('Button clicked')

        # If you do not specify the button width/height, the size will be
        # automatically adjusted to properly house the label.
        cvui.button(frame, 200, 70, 'Button with large label')

        # You can tell the width and height you want
        cvui.button(frame, 410, 70, 15, 15, 'x')

        # Window components are useful to create HUDs and similars. At the
        # moment, there is no implementation to constraint content within a
        # a window.
        cvui.window(frame, 50, 120, 120, 100, 'Window')

        # The counter component can be used to alter int variables. Use
        # the 4th parameter of the function to point it to the variable
        # to be changed.
        cvui.counter(frame, 200, 120, count)

        # Counter can be used with doubles too. You can also specify
        # the counter's step (how much it should change
        # its value after each button press), as well as the format
        # used to print the value.
        cvui.counter(frame, 320, 120, countFloat, 0.1, '%.1f')

        # The trackbar component can be used to create scales.
        # It works with all numerical types (including chars).
        cvui.trackbar(frame, 420, 110, 150, trackbarValue, 0., 50.)

        # Checkboxes also accept a pointer to a variable that controls
        # the state of the checkbox (checked or not). cvui.checkbox() will
        # automatically update the value of the boolean after all
        # interactions, but you can also change it by yourself. Just
        # do "checked = [True]" somewhere and the checkbox will change
        # its appearance.
        cvui.checkbox(frame, 200, 160, 'Checkbox', checked)
        cvui.checkbox(frame, 200, 190, 'A checked checkbox', checked2)

        # Display the lib version at the bottom of the screen
        cvui.printf(frame, 600 - 80, 300 - 20, 0.4, 0xCECECE, 'cvui v.%s', cvui.VERSION)

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demonstrate nested cvui rows and columns, with fixed and automatic
    sizing. Exits via the '&Quit' button or the ESC key.
    """
    frame = np.zeros((600, 800, 3), np.uint8)

    values = []
    checked = [False]
    value = [1.0]

    # Fill the vector with a few random values
    for i in range(0, 20):
        values.append(random.uniform(0., 300.0))

    # Init cvui and tell it to create a OpenCV window, i.e. cv.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while (True):
        # Fill the frame with a nice color
        frame[:] = (49, 52, 49)

        # Define a row at position (10, 50) with width 100 and height 150.
        cvui.beginRow(frame, 10, 50, 100, 150)

        # The components below will be placed one beside the other.
        cvui.text('Row starts')
        cvui.button('here')

        # When a column or row is nested within another, it behaves like
        # an ordinary component with the specified size. In this case,
        # let's create a column with width 100 and height 50. The
        # next component added will behave like it was added after
        # a component with width 100 and heigth 150.
        cvui.beginColumn(100, 150)
        cvui.text('Column 1')
        cvui.button('button1')
        cvui.button('button2')
        cvui.button('button3')
        cvui.text('End of column 1')
        cvui.endColumn()

        # Add two pieces of text
        cvui.text('Hi again,')
        cvui.text('its me!')

        # Start a new column
        cvui.beginColumn(100, 50)
        cvui.text('Column 2')
        cvui.button('button1')
        cvui.button('button2')
        cvui.button('button3')
        cvui.space()
        cvui.text('Another text')
        cvui.space(40)
        cvui.text('End of column 2')
        cvui.endColumn()

        # Add more text
        cvui.text('this is the ')
        cvui.text('end of the row!')
        cvui.endRow()

        # Here is another nested row/column
        cvui.beginRow(frame, 50, 300, 100, 150)

        # If you don't want to calculate the size of any row/column WITHIN
        # a begin*()/end*() block, just use negative width/height when
        # calling beginRow() or beginColumn() (or don't provide width/height at all!)
        # For instance, the following column will have its width/height
        # automatically adjusted according to its content.
        cvui.beginColumn()
        cvui.text('Column 1')
        cvui.button('button with very large label')
        cvui.text('End of column 1')
        cvui.endColumn()

        # Add two pieces of text
        cvui.text('Hi again,')
        cvui.text('its me!')

        # Start a new column
        cvui.beginColumn()
        cvui.text('Column 2')
        cvui.button('btn')
        cvui.space()
        cvui.text('text')
        cvui.button('btn2')
        cvui.text('text2')

        # '&Quit' gives the button a 'q' keyboard shortcut as well.
        if cvui.button('&Quit'):
            break
        cvui.endColumn()

        # Add more text
        cvui.text('this is the ')
        cvui.text('end of the row!')
        cvui.endRow()

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Render the same complex row/column component showcase into two
    cvui-watched OpenCV windows. Runs until ESC is pressed.

    Bug fixes (user-visible labels only, in both windows):
    - 'with 50px paddin7hg.' -> 'with 50px padding.'
    - column 3 closing label wrongly read 'End of column 2 (40px below)';
      it now reads 'End of column 3 (40px below)'.
    """
    # We have one mat for each window.
    frame1 = np.zeros((600, 800, 3), np.uint8)
    frame2 = np.zeros((600, 800, 3), np.uint8)

    # Create variables used by some components
    window1_values = []
    window2_values = []
    window1_checked = [False]
    window1_checked2 = [False]
    window2_checked = [False]
    window2_checked2 = [False]
    window1_value = [1.0]
    window1_value2 = [1.0]
    window1_value3 = [1.0]
    window2_value = [1.0]
    window2_value2 = [1.0]
    window2_value3 = [1.0]

    img = cv2.imread('lena-face.jpg', cv2.IMREAD_COLOR)
    imgRed = cv2.imread('lena-face-red.jpg', cv2.IMREAD_COLOR)
    imgGray = cv2.imread('lena-face-gray.jpg', cv2.IMREAD_COLOR)

    padding = 10

    # Fill the vector with a few random values
    for i in range(0, 20):
        window1_values.append(random.uniform(0., 300.0))
        window2_values.append(random.uniform(0., 300.0))

    # Start two OpenCV windows
    cv2.namedWindow(WINDOW1_NAME)
    cv2.namedWindow(WINDOW2_NAME)

    # Init cvui and inform it to use the first window as the default one.
    # cvui.init() will automatically watch the informed window.
    cvui.init(WINDOW1_NAME)

    # Tell cvui to keep track of mouse events in window2 as well.
    cvui.watch(WINDOW2_NAME)

    while (True):
        # Inform cvui that all subsequent component calls and events are related to window 1.
        cvui.context(WINDOW1_NAME)

        # Fill the frame with a nice color
        frame1[:] = (49, 52, 49)

        # A row mixing text, printf, checkbox, window, rect, sparkline,
        # counter, a fixed-size button, an image and an image-based button.
        cvui.beginRow(frame1, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', window1_checked)
        cvui.window(80, 80, 'window')
        cvui.rect(50, 50, 0x00ff00, 0xff0000)
        cvui.sparkline(window1_values, 50, 50)
        cvui.counter(window1_value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(img)
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()

        padding = 50
        cvui.beginRow(frame1, 10, 150, 100, 50, padding)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', window1_checked2)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        # FIX: label used to read 'with 50px paddin7hg.'
        cvui.printf('with 50px padding.')
        cvui.endRow()

        # Another row mixing several components
        cvui.beginRow(frame1, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        cvui.trackbar(150, window1_value2, 0., 5.)
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()

        cvui.beginColumn(frame1, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()

        padding = 10
        cvui.beginColumn(frame1, 300, 330, 100, 200, padding)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.trackbar(150, window1_value3, 0., 5., 1, '%3.2Lf', cvui.TRACKBAR_DISCRETE, 0.25)
        cvui.text('End of column 2')
        cvui.endColumn()

        cvui.beginColumn(frame1, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        # FIX: label used to read 'End of column 2 (40px below)'
        cvui.text('End of column 3 (40px below)')
        cvui.endColumn()

        # Update all components of window1, e.g. mouse clicks, and show it.
        cvui.update(WINDOW1_NAME)
        cv2.imshow(WINDOW1_NAME, frame1)

        # From this point on, we are going to render the second window. We need to inform cvui
        # that all updates and components from now on are connected to window 2.
        # We do that by calling cvui.context().
        cvui.context(WINDOW2_NAME)
        frame2[:] = (49, 52, 49)

        cvui.beginRow(frame2, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', window2_checked)
        cvui.window(80, 80, 'window')
        cvui.rect(50, 50, 0x00ff00, 0xff0000)
        cvui.sparkline(window2_values, 50, 50)
        cvui.counter(window2_value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(img)
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()

        padding = 50
        cvui.beginRow(frame2, 10, 150, 100, 50, padding)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', window2_checked2)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        # FIX: label used to read 'with 50px paddin7hg.'
        cvui.printf('with 50px padding.')
        cvui.endRow()

        # Another row mixing several components
        cvui.beginRow(frame2, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        cvui.trackbar(150, window2_value2, 0., 5.)
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()

        cvui.beginColumn(frame2, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()

        padding = 10
        cvui.beginColumn(frame2, 300, 330, 100, 200, padding)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.trackbar(150, window2_value3, 0., 5., 1, '%3.2Lf', cvui.TRACKBAR_DISCRETE, 0.25)
        cvui.text('End of column 2')
        cvui.endColumn()

        cvui.beginColumn(frame2, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        # FIX: label used to read 'End of column 2 (40px below)'
        cvui.text('End of column 3 (40px below)')
        cvui.endColumn()

        # Update all components of window2, e.g. mouse clicks, and show it.
        cvui.update(WINDOW2_NAME)
        cv2.imshow(WINDOW2_NAME, frame2)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
import cvui import os path = os.path.abspath(os.path.dirname(__file__)) src = cv2.imread(path + "/image/1.jpg") hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV) cvui.init("opencv") frame = np.zeros((800, 800, 3), np.uint8) floatValue_min = [20.] floatValue_max = [30.] h_min = [20.] h_max = [140.] while True: frame[:] = (49, 52, 49) cvui.trackbar(frame, 10, 60, 300, h_min, 0, 179) cvui.trackbar(frame, 320, 60, 300, h_max, 0, 179) lower_blue = np.array([h_min[0], 90, 90]) upper_blue = np.array([h_max[0], 255, 255]) mask = cv2.inRange(hsv, lower_blue, upper_blue) mask = ~mask dest = cv2.bitwise_and(src, src, mask=mask) cvui.image(frame, 20, 120, dest) cvui.update() cv2.imshow("opencv", frame) cv2.waitKey(20)
def main(): intValue = [30] ucharValue = [30] charValue = [30] floatValue = [12.] doubleValue1 = [15.] doubleValue2 = [10.3] doubleValue3 = [2.25] frame = np.zeros((650, 450, 3), np.uint8) # Size of trackbars width = 400 # Init cvui and tell it to use a value of 20 for cv2.waitKey() # because we want to enable keyboard shortcut for # all components, e.g. button with label '&Quit'. # If cvui has a value for waitKey, it will call # waitKey() automatically for us within cvui.update(). cvui.init(WINDOW_NAME, 20) while (True): # Fill the frame with a nice color frame[:] = (49, 52, 49) cvui.beginColumn(frame, 20, 20, -1, -1, 6) cvui.text('int trackbar, no customization') cvui.trackbar(width, intValue, 0, 100) cvui.space(5) cvui.text('uchar trackbar, no customization') cvui.trackbar(width, ucharValue, 0, 255) cvui.space(5) cvui.text('signed char trackbar, no customization') cvui.trackbar(width, charValue, -128, 127) cvui.space(5) cvui.text('float trackbar, no customization') cvui.trackbar(width, floatValue, 10., 15.) cvui.space(5) cvui.text('float trackbar, 4 segments') cvui.trackbar(width, doubleValue1, 10., 20., 4) cvui.space(5) cvui.text('double trackbar, label %.1Lf, TRACKBAR_DISCRETE') cvui.trackbar(width, doubleValue2, 10., 10.5, 1, '%.1Lf', cvui.TRACKBAR_DISCRETE, 0.1) cvui.space(5) cvui.text('double trackbar, label %.2Lf, 2 segments, TRACKBAR_DISCRETE') cvui.trackbar(width, doubleValue3, 0., 4., 2, '%.2Lf', cvui.TRACKBAR_DISCRETE, 0.25) cvui.space(10) # Exit the application if the quit button was pressed. # It can be pressed because of a mouse click or because # the user pressed the 'q' key on the keyboard, which is # marked as a shortcut in the button label ('&Quit'). if cvui.button('&Quit'): break cvui.endColumn() # Since cvui.init() received a param regarding waitKey, # there is no need to call cv.waitKey() anymore. cvui.update() # will do it automatically. cvui.update() cv2.imshow(WINDOW_NAME, frame)
def main():
    """Let the user drag a rectangle over Lena's image and show the
    selected region in a separate ROI window once the drag finishes."""
    lena = cv2.imread('lena.jpg')
    frame = np.zeros(lena.shape, np.uint8)

    # anchor: where the drag started; roi: the current selection.
    anchor = cvui.Point()
    roi = cvui.Rect(0, 0, 0, 0)
    # True while the user is dragging, so the ROI window is not
    # refreshed on every frame.
    working = False

    # Init cvui and tell it to create a OpenCV window, i.e. cv.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while True:
        # Fill the frame with Lena's image
        frame[:] = lena[:]

        cvui.text(frame, 10, 10, 'Click (any) mouse button and drag the pointer around to select a ROI.')

        # The function 'bool cvui.mouse(int query)' queries the mouse for events:
        # - cvui.DOWN: any mouse button was pressed. True for a single frame only.
        # - cvui.UP: any mouse button was released. True for a single frame only.
        # - cvui.CLICK: any mouse button was clicked. True for a single frame only.
        # - cvui.IS_DOWN: any mouse button is currently pressed. True while held.

        # Did any mouse button go down?
        if cvui.mouse(cvui.DOWN):
            # Position the anchor at the mouse pointer.
            anchor.x = cvui.mouse().x
            anchor.y = cvui.mouse().y
            # Inform we are working, so the ROI window is not updated every frame
            working = True

        # Is any mouse button down (pressed)?
        if cvui.mouse(cvui.IS_DOWN):
            # Adjust roi dimensions according to mouse pointer
            width = cvui.mouse().x - anchor.x
            height = cvui.mouse().y - anchor.y
            roi.x = anchor.x + width if width < 0 else anchor.x
            roi.y = anchor.y + height if height < 0 else anchor.y
            roi.width = abs(width)
            roi.height = abs(height)
            # Show the roi coordinates and size
            cvui.printf(frame, roi.x + 5, roi.y + 5, 0.3, 0xff0000, '(%d,%d)', roi.x, roi.y)
            cvui.printf(frame, cvui.mouse().x + 5, cvui.mouse().y + 5, 0.3, 0xff0000, 'w:%d, h:%d', roi.width, roi.height)

        # Was any mouse button released (drag finished)?
        if cvui.mouse(cvui.UP):
            # We are done working with the ROI.
            working = False

            # Ensure ROI is within bounds.
            # BUG FIX: the original used lena.cols / lena.rows here, which do
            # not exist on numpy arrays (AttributeError on the first mouse-up);
            # use the values unpacked from lena.shape instead.
            lenaRows, lenaCols, lenaChannels = lena.shape
            roi.x = 0 if roi.x < 0 else roi.x
            roi.y = 0 if roi.y < 0 else roi.y
            roi.width = roi.width + lenaCols - (roi.x + roi.width) if roi.x + roi.width > lenaCols else roi.width
            roi.height = roi.height + lenaRows - (roi.y + roi.height) if roi.y + roi.height > lenaRows else roi.height

        # Render the roi
        cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, 0xff0000)

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # If the ROI is valid and the drag has ended, show it.
        if roi.area() > 0 and working == False:
            lenaRoi = lena[roi.y: roi.y + roi.height, roi.x: roi.x + roi.width]
            cv2.imshow(ROI_WINDOW, lenaRoi)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Demonstrate cvui row/column layout: components inside
    beginRow/endRow and beginColumn/endColumn blocks are positioned
    automatically and take no (x, y) coordinates."""
    frame = np.zeros((600, 800, 3), np.uint8)

    # State mutated in place by the components below.
    row1_checked = [False]
    row2_checked = [False]
    counter_value = [1.0]
    value2 = [1.0]   # kept for the commented-out trackbar below
    value3 = [1.0]   # kept for the commented-out trackbar below

    face_img = cv2.imread('lena-face.jpg', cv2.IMREAD_COLOR)
    face_red = cv2.imread('lena-face-red.jpg', cv2.IMREAD_COLOR)
    face_gray = cv2.imread('lena-face-gray.jpg', cv2.IMREAD_COLOR)

    # A few random points for the sparkline component.
    spark_values = [random.uniform(0., 300.0) for _ in range(20)]

    # Init cvui and tell it to create a OpenCV window, i.e. cv::namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while True:
        frame[:] = (49, 52, 49)

        # Row 1 at (10, 20): components flow left to right.
        cvui.beginRow(frame, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', row1_checked)
        cvui.window(80, 80, 'window')
        cvui.rect(50, 50, 0x00ff00, 0xff0000)
        cvui.sparkline(spark_values, 50, 50)
        cvui.counter(counter_value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(face_img)
        cvui.button(face_img, face_gray, face_red)
        cvui.endRow()

        # Row 2: same idea, but with 50px of padding between components.
        cvui.beginRow(frame, 10, 150, 100, 50, 50)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', row2_checked)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        cvui.printf('with 50px padding.')
        cvui.endRow()

        # Row 3: mixing several components.
        cvui.beginRow(frame, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        #cvui.trackbar(150, &value2, 0., 5.);
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()

        # Column 1 at (50, 330): components flow top to bottom.
        cvui.beginColumn(frame, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()

        # Column 2: with 10px of vertical padding between components.
        cvui.beginColumn(frame, 300, 330, 100, 200, 10)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        #cvui.trackbar(150, &value3, 0., 5., 1, '%3.2Lf', cvui.TRACKBAR_DISCRETE, 0.25);
        cvui.text('End of column 2')
        cvui.endColumn()

        # Column 3: explicit gaps via cvui.space(), which is
        # context-aware (vertical inside a column, horizontal in a row).
        cvui.beginColumn(frame, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        cvui.text('End of column 2 (40px below)')
        cvui.endColumn()

        # Must be called *AFTER* all UI components: handles mouse
        # clicks and the rest of cvui's internal bookkeeping.
        cvui.update()

        cv2.imshow(WINDOW_NAME, frame)

        # Exit on ESC.
        if cv2.waitKey(20) == 27:
            break
def main():
    """Track one ROI per mouse button (left, middle, right), each drawn
    in its own color and mirrored into its own OpenCV window."""
    lena = cv2.imread('lena.jpg')
    frame = np.zeros(lena.shape, np.uint8)

    # Per-button drag state: drag start point, selection rect, color.
    anchors = [cvui.Point() for _ in range(3)]
    rois = [cvui.Rect() for _ in range(3)]
    colors = [0xff0000, 0x00ff00, 0x0000ff]

    # Init cvui and tell it to create a OpenCV window, i.e. cv.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while True:
        # Fill the frame with Lena's image
        frame[:] = lena[:]

        cvui.text(
            frame, 10, 10,
            'Click (any) mouse button then drag the pointer around to select a ROI.'
        )
        cvui.text(
            frame, 10, 25,
            'Use different mouse buttons (right, middle and left) to select different ROIs.'
        )

        # 'bool cvui.mouse(int button, int query)' queries one button:
        # - cvui.DOWN / cvui.UP / cvui.CLICK: true for a single frame.
        # - cvui.IS_DOWN: true for as long as the button is held.
        for button in range(cvui.LEFT_BUTTON, cvui.RIGHT_BUTTON + 1):
            anchor = anchors[button]
            roi = rois[button]
            color = colors[button]

            # Drag started: anchor the selection at the pointer.
            if cvui.mouse(button, cvui.DOWN):
                anchor.x = cvui.mouse().x
                anchor.y = cvui.mouse().y

            # Drag in progress: grow/shrink the ROI toward the pointer.
            if cvui.mouse(button, cvui.IS_DOWN):
                drag_w = cvui.mouse().x - anchor.x
                drag_h = cvui.mouse().y - anchor.y

                roi.x = anchor.x + drag_w if drag_w < 0 else anchor.x
                roi.y = anchor.y + drag_h if drag_h < 0 else anchor.y
                roi.width = abs(drag_w)
                roi.height = abs(drag_h)

                # Live readout of the ROI origin and size.
                cvui.printf(frame, roi.x + 5, roi.y + 5, 0.3, color, '(%d,%d)', roi.x, roi.y)
                cvui.printf(frame, cvui.mouse().x + 5, cvui.mouse().y + 5, 0.3, color, 'w:%d, h:%d', roi.width, roi.height)

            # Clamp the ROI to the image bounds.
            lena_rows, lena_cols, _ = lena.shape
            roi.x = max(roi.x, 0)
            roi.y = max(roi.y, 0)
            roi.width = min(roi.width, lena_cols - roi.x)
            roi.height = min(roi.height, lena_rows - roi.y)

            # A non-empty ROI gets drawn on the frame and mirrored
            # into its own window.
            if roi.area() > 0:
                cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, color)
                cvui.printf(frame, roi.x + 5, roi.y - 10, 0.3, color, 'ROI %d', button)

                lena_roi = lena[roi.y:roi.y + roi.height, roi.x:roi.x + roi.width]
                cv2.imshow('ROI button' + str(button), lena_roi)

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def main():
    """Main tracking/UI loop: receive frames and stage state over ZMQ, run the
    selected 2D tracker, and publish stage motion (dx, dy, dz) commands.

    Uses two cvui windows: a video window (tracking overlay, mouse target
    selection) and a control window (mode/tracker settings panes). Runs until
    `keep_running` is cleared by SIGINT or an invalid tracker mode.

    NOTE(review): this function was reconstructed from a single collapsed
    source line; nesting of a few statements is inferred and should be
    confirmed against the original file.
    """
    global keep_running
    # This is for saving video *with* detection boxes on it
    # To save raw video, use the CameraSaver.py script
    save_video = True
    if save_video:
        sz = (p.IMG_WIDTH_SPOTTER, p.IMG_HEIGHT_SPOTTER)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        vout = cv2.VideoWriter()
        # Last arg False: grayscale writer — TODO confirm frames written below match.
        vout.open('track_output.mp4', fourcc, p.FPS_SPOTTER, sz, False)

    # SIGINT clears keep_running so the loop can exit cleanly.
    signal.signal(signal.SIGINT, sigint_handler)

    # Collapsible UI panes for the control window.
    control_panes = ControlPanes()
    control_panes.stage_control_pane = EnhancedWindow(0, 0, 300, 500, 'Stage Control')
    control_panes.focus_control_pane = EnhancedWindow(0, 20, 300, 500, 'Focus Control')
    control_panes.tracker_select_pane = EnhancedWindow(0, 40, 300, 500, 'Tracker Select')
    control_panes.canny_settings_pane = EnhancedWindow(0, 60, 300, 500, 'Canny Tuning')
    control_panes.threshold_setting_pane = EnhancedWindow(
        0, 80, 300, 500, 'Threshold Tuning')

    # Two cvui contexts: one per window.
    cvui.init(p.CTRL_WINDOW_NAME)
    cvui.init(p.VIDEO_WINDOW_NAME)

    context = zmq.Context()
    (video_socket, focus_sub, stage_sub, focus_state_sub, macro_sharpness_sub,
     track_socket, roi_socket, af_pub) = setup_zmq(context)

    # Calibration data for clipping the view to the tank corners.
    stage_zero_offset = np.load('tank_corners_offset.npy')
    world_points = np.load('tank_corners.npy')
    intrinsic = np.load('intrinsic_calibration/ll_65/intrinsic.npy')

    # Stage position (unknown until the first stage message arrives).
    stage_x = None
    stage_y = None
    stage_z = None
    z_moving = True
    current_ll_focus = None
    object_distance_ll = 0

    # Tracking state: raw observation, filtered position, and a slow-moving
    # average; feature_delta is a user-adjustable offset from the target.
    target_pos_obs = None
    target_pos = np.array([1, 1])
    target_pos_slow = target_pos.copy()
    feature_delta = np.array([0, 0])
    target_track_init = False

    STAGE_MODE = 'PAUSED'
    FOCUS_MODE = 'MANUAL'
    tracker_type = 'KCF'  # options are KCF or CANNY

    # These three structs store the state information necessary for the trackers
    canny_tracker_state = CannyTracker()
    canny_tracker_state.canny_low = [50]
    canny_tracker_state.canny_high = [150]

    kcf_tracker_state = KCFTracker()
    kcf_tracker_state.kcf_box_anchor = cvui.Point()
    kcf_tracker_state.kcf_roi = cvui.Rect(0, 0, 0, 0)
    kcf_tracker_state.kcf_tracker_init = False

    threshold_tracker_state = ThresholdTracker()
    threshold_tracker_state.threshold = [30]
    threshold_tracker_state.roi = cvui.Rect(0, 0, 0, 0)
    # NOTE(review): assigns the cvui.Point class itself, not an instance —
    # looks like a missing (); confirm against ThresholdTracker's usage.
    threshold_tracker_state.box_anchor = cvui.Point
    threshold_tracker_state.show_binary = [False]

    sharpness_focus_state = SharpnessFocusState()
    sharpness_focus_state.mode = 'COARSE'
    macro_sharpness = 0

    while keep_running:
        ctrl_frame = np.zeros((700, 300, 3), np.uint8)

        # Receive stage position updates
        try:
            stage_pos = stage_sub.recv_string()
            (stage_x, stage_y, stage_z_new) = [float(x) for x in stage_pos.split(' ')]
            # z is "moving" whenever consecutive readings differ.
            if stage_z_new == stage_z:
                z_moving = False
            else:
                z_moving = True
            stage_z = stage_z_new
        except zmq.Again:
            # the stage publisher only publishes at ~10hz, so not having an update is common
            pass

        # Receive macro sharpness
        try:
            macro_sharpness_last = macro_sharpness
            macro_sharpness = float(macro_sharpness_sub.recv_string())
        except zmq.Again:
            # no sharpness value, which is unexpected
            print('No Macro Image Sharpness!')

        # receive next frame
        try:
            frame = recv_img(video_socket)
        except zmq.Again:
            print('Timed Out!')
            time.sleep(1)
            continue

        # Mouse input applies to the video window's cvui context.
        cvui.context(p.VIDEO_WINDOW_NAME)
        if cvui.mouse(cvui.IS_DOWN):
            # Clicking in the video window re-seeds the target selection.
            (target_pos, feature_delta) = reset_target_selection()
            target_pos_slow = target_pos.copy()
            target_track_init = True

        # Accumulate user adjustment of the feature offset every frame.
        feature_delta += get_feature_2delta()

        if stage_x is not None:
            # Clip the frame to the tank region once stage position is known.
            stage_pos = np.array([stage_x, stage_y, -stage_z], ndmin=2).T
            frame = tank_corners_clip(frame, stage_pos, stage_zero_offset,
                                      world_points, intrinsic)

        # This is where the tracking happens. tracker_type is controlled by a button on the interface
        # Adding a new tracker is as simple as adding another case to this if/else and adding a button in
        # the UI to switch into the new tracking mode
        if tracker_type == 'CANNY':
            canny_tracker_state.target_pos = target_pos
            (target_pos_obs, roi, canny_tracker_state) = update_canny_tracker(
                frame, canny_tracker_state)
        elif tracker_type == 'KCF':
            cvui.context(p.VIDEO_WINDOW_NAME)
            (target_pos_obs, roi,
             kcf_tracker_state) = update_kcf_tracker(frame, kcf_tracker_state)
        elif tracker_type == 'THRESHOLD':
            cvui.context(p.VIDEO_WINDOW_NAME)
            threshold_tracker_state.target_pos = target_pos
            (target_pos_obs, roi,
             threshold_tracker_state) = update_threshold_tracker(
                 frame, threshold_tracker_state)
        else:
            print('Invalid tracker mode: %s' % tracker_type)
            roi = None
            keep_running = False

        # This roi_msg takes an roi that may have been identified around the animal and sends it over zmq
        # This enables any cameras trying to autofocus to know which roi to keep in focus
        # if no autofocusing is happening, then these messages don't do anything
        if roi is not None:
            roi_msg = m.SetFocusROI(roi[0], roi[1])
        else:
            roi_msg = m.SetFocusROI(None, None)
        roi_socket.send_pyobj(
            roi_msg
        )  # tell the LL camera (or anything else I guess) which ROI to focus

        (target_track_ok, target_pos,
         target_pos_slow) = filter_target_position(target_pos, target_pos_slow,
                                                   target_pos_obs)

        # This is probably where we want to use the other camera to estimate depth

        # Now we have a giant state machine. We need to structure the code this way, because we want 2D tracking and
        # user interaction to update even when we are waiting on some slower action to occur related to object depth
        # and focusing. The state machine provides a mechanism to handle these slower processes while not impeding the
        # rest of the tracking process.
        #
        # STAGE_MODE = {MANUAL | AUTO | PAUSED}
        # -- In MANUAL mode, dx,dy,dz all set by keyboard input.
        # -- In AUTO mode, dx and dy are set by tracker. dz is set by autofocus if FOCUS_MODE is set to AUTO
        # -- In PAUSED mode, dx = dy = dz = 0. The tracker will keep tracking, but the stage won't move
        #
        # FOCUS_MODE = {MANUAL | SHARPNESS | DEPTH}
        # -- In MANUAL mode, dz is set by keyboard input
        # -- In SHARPNESS mode, dz is set by trying to maximize sharpness, although the final position can be tweaked
        #    by user input. SHARPNESS mode does nothing if STAGE_MODE is MANUAL
        # -- In DEPTH mode, dz is set by a target depth measurement that is estimated from a second camera
        #    (stereo or perpendicular)

        # Determine dx and dy
        if STAGE_MODE == 'PAUSED':  # -> Stage Control
            track_socket.send_string('0 0 0')
            dx = 0
            dy = 0
            dz = 0
        elif STAGE_MODE == 'MANUAL':
            # TODO: Probably tune this better
            (dx, dy) = get_feature_2delta()
            dx = 10 * dx
            dy = 10 * dy
            print('FULL_MANUAL %f, %f' % (dx, dy))
            dz = manual_focus_update()
        elif STAGE_MODE == 'AUTO':
            # The tracker makes a determination in pixel space, then we may decide to filter it. We then determine the
            # dx and dy based on the distance between the feature of interest and the macro lens center
            # how much do we need to move in pixel-space?
            # Note dx and dy are 0 if there are no target tracks
            if stage_z is None:
                print('Waiting on stage node')
                dx = 0
                dy = 0
                dz = 0
            else:
                if target_pos_obs is not None:
                    if target_track_ok:
                        (dx, dy) = calculate_movement_offsets(
                            frame, target_pos, target_pos_slow, feature_delta)
                    else:
                        dx = 0
                        dy = 0
                else:
                    dx = 0
                    dy = 0
                    target_track_ok = False

                # When STAGE_MODE == 'AUTO', we need to determine how to handle the focusing
                if FOCUS_MODE == 'MANUAL':
                    dz = manual_focus_update()
                elif FOCUS_MODE == 'SHARPNESS':
                    sharpness_focus_state.stage_z = stage_z
                    sharpness_focus_state.macro_sharpness = macro_sharpness
                    sharpness_focus_state.z_moving = z_moving
                    dz, sharpness_focus_state = sharpness_focus(
                        sharpness_focus_state, af_pub, focus_state_sub,
                        video_socket, focus_sub)
                elif FOCUS_MODE == 'DEPTH':
                    # this is the mode when we have a second camera to estimate depth
                    dz = 0
                else:
                    # invalid focus mode
                    print('Invalid focus mode %s' % FOCUS_MODE)
                    sys.exit(1)
        else:
            print('Unknown stage mode: %s' % STAGE_MODE)
            dx = 0
            dy = 0
            dz = 0

        print(dx, dy, dz)
        track_socket.send_string(
            '%f %f %f' % (dx, dy, dz))  # 'wasteful', but easier debugging for now

        frame = cv2.resize(
            frame, (p.IMG_DISP_WIDTH_SPOTTER, p.IMG_DISP_HEIGHT_SPOTTER))

        # draw dots on frame centers
        cv2.circle(frame, (int(
            p.IMG_DISP_WIDTH_SPOTTER / 2), int(p.IMG_DISP_HEIGHT_SPOTTER / 2)),
                   5, (0, 0, 255), -1)  # center of frame
        cv2.circle(frame, (p.MACRO_LL_CENTER[0], p.MACRO_LL_CENTER[1]), 5,
                   (255, 0, 255), -1)  # center of macro frame frame

        cvui.update(p.VIDEO_WINDOW_NAME)
        cv2.imshow(p.VIDEO_WINDOW_NAME, frame)
        if save_video:
            vout.write(frame)

        # Switch to the control window's cvui context and draw the settings UI.
        cvui.context(p.CTRL_WINDOW_NAME)
        STAGE_MODE, FOCUS_MODE, tracker_type, macro_resweep, ll_resweep = draw_settings(
            ctrl_frame, control_panes, canny_tracker_state,
            threshold_tracker_state, STAGE_MODE, FOCUS_MODE, tracker_type)

        if macro_resweep:
            # Restart the fine sharpness sweep, bypassing the LL estimate.
            p.BYPASS_LL_ESTIMATE = True
            sharpness_focus_state.mode = 'FINE_UNINITIALIZED'

        if ll_resweep:
            # Liquid-lens refocus needs a known stage z to bound the sweep.
            if stage_z is not None:
                print('Liquid Lens Refocus!')
                dist_to_tank = (300 - stage_z) + p.STAGE_TANK_OFFSET
                # Empirical power-law mapping from distance to LL bounds —
                # TODO confirm source of the 2953.5 / -0.729 constants.
                ll_max = 2953.5 * dist_to_tank**-0.729
                ll_min = 2953.5 * (dist_to_tank + p.TANK_DEPTH_MM)**-0.729
                print('llmin, llmax: (%f, %f)' % (ll_min, ll_max))
                af_pub.send_pyobj(m.AutofocusMessage(ll_min, ll_max, 1))
            else:
                print('Cannot refocus liquid lens until stage node is running')

        cvui.update(p.CTRL_WINDOW_NAME)
        cv2.imshow(p.CTRL_WINDOW_NAME, ctrl_frame)
        cv2.waitKey(1)

    if save_video:
        vout.release()
def main():
    """Interactive RGB and HSV channel-gain adjustment of Lena's image.

    Two trackbar columns scale the R/G/B and H/S/V channels; the adjusted
    image is kept in a double buffer so the UI is drawn over the latest
    result every frame.
    """
    lena = cv2.imread('lena.jpg', cv2.IMREAD_COLOR)
    frame = np.zeros(lena.shape, np.uint8)
    doubleBuffer = np.zeros(lena.shape, np.uint8)
    trackbarWidth = 130

    # Per-channel gains for RGB and HSV, mutated in place by the trackbars.
    # BUG FIX: the third element was the int 1 (instead of 1.), making the
    # blue/value channel's initial type inconsistent with the float
    # trackbars that operate on it.
    rgb = [[1.], [1.], [1.]]
    hsv = [[1.], [1.], [1.]]

    # Copy the loaded image to the buffer
    doubleBuffer[:] = lena[:]

    # Init cvui with a waitKey value of 20 so cvui.update() calls
    # cv2.waitKey() itself and keyboard shortcuts (the 'q' in '&Quit') work.
    cvui.init(WINDOW_NAME, 20)

    while True:
        frame[:] = doubleBuffer[:]
        frameRows, frameCols, frameChannels = frame.shape

        # Exit on the '&Quit' button (mouse click or 'q' shortcut).
        if cvui.button(frame, frameCols - 100, frameRows - 30, '&Quit'):
            break

        # RGB HUD
        cvui.window(frame, 20, 50, 180, 240, 'RGB adjust')

        # Inside beginColumn()/endColumn() components are positioned
        # automatically, one under the other, and take no (x, y) coordinates.
        cvui.beginColumn(frame, 35, 80, -1, -1, 10)
        rgbModified = False

        # Trackbars take a one-element list holding the value and
        # return True when the value was edited this frame.
        if cvui.trackbar(trackbarWidth, rgb[0], 0., 2., 2, '%3.02Lf'):
            rgbModified = True
        if cvui.trackbar(trackbarWidth, rgb[1], 0., 2., 2, '%3.02Lf'):
            rgbModified = True
        if cvui.trackbar(trackbarWidth, rgb[2], 0., 2., 2, '%3.02Lf'):
            rgbModified = True

        cvui.space(2)
        cvui.printf(0.35, 0xcccccc, '   RGB: %3.02lf,%3.02lf,%3.02lf', rgb[0][0], rgb[1][0], rgb[2][0])

        if rgbModified:
            # Re-derive the buffer from the pristine image so gains
            # don't compound across frames.
            b, g, r = cv2.split(lena)
            b = b * rgb[2][0]
            g = g * rgb[1][0]
            r = r * rgb[0][0]
            cv2.merge((b, g, r), doubleBuffer)
        cvui.endColumn()

        # HSV HUD
        lenaRows, lenaCols, lenaChannels = lena.shape
        cvui.window(frame, lenaCols - 200, 50, 180, 240, 'HSV adjust')
        cvui.beginColumn(frame, lenaCols - 180, 80, -1, -1, 10)
        hsvModified = False

        if cvui.trackbar(trackbarWidth, hsv[0], 0., 2., 2, '%3.02Lf'):
            hsvModified = True
        if cvui.trackbar(trackbarWidth, hsv[1], 0., 2., 2, '%3.02Lf'):
            hsvModified = True
        if cvui.trackbar(trackbarWidth, hsv[2], 0., 2., 2, '%3.02Lf'):
            hsvModified = True

        cvui.space(2)
        cvui.printf(0.35, 0xcccccc, '   HSV: %3.02lf,%3.02lf,%3.02lf', hsv[0][0], hsv[1][0], hsv[2][0])

        if hsvModified:
            # Scale in HSV space, then convert back to BGR for display.
            hsvMat = cv2.cvtColor(lena, cv2.COLOR_BGR2HSV)
            h, s, v = cv2.split(hsvMat)
            h = h * hsv[0][0]
            s = s * hsv[1][0]
            v = v * hsv[2][0]
            cv2.merge((h, s, v), hsvMat)
            doubleBuffer = cv2.cvtColor(hsvMat, cv2.COLOR_HSV2BGR)
        cvui.endColumn()

        # Display the lib version at the bottom of the screen
        cvui.printf(frame, frameCols - 300, frameRows - 20, 0.4, 0xCECECE, 'cvui v.%s', cvui.VERSION)

        # This function must be called *AFTER* all UI components. Since
        # cvui.init() received a waitKey param, cvui.update() also calls
        # cv2.waitKey() for us.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)
def main():
    """Showcase cvui.trackbar with several value types, segment counts,
    custom labels and option flags, laid out as caption/trackbar pairs."""
    # One-element lists: cvui.trackbar writes the value back into
    # element 0. Be explicit about value/min/max types — e.g. use
    # 100.0 (not 100) for double trackbars.
    int_value = [30]
    uchar_value = [30]
    char_value = [30]
    float_value = [12.]
    double_value = [45.]
    double_value2 = [15.]
    double_value3 = [10.3]

    frame = np.zeros((770, 350, 3), np.uint8)

    # Shared width and x position for every trackbar.
    width = 300
    x = 10

    # Init cvui and tell it to create a OpenCV window, i.e. cv::namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    # Each entry: (caption y, caption text, trackbar args after width).
    # Custom labels always format long double numbers (e.g. %.2Lf),
    # regardless of the value type. Options are a bitfield, e.g.
    # TRACKBAR_DISCRETE | TRACKBAR_HIDE_SEGMENT_LABELS.
    demos = [
        (10, 'double, step 1.0 (default)',
         (doubleValue_args := (double_value, 0., 100.))),
        (120, 'float, step 1.0 (default)',
         (float_value, 10., 15.)),
        (230, 'double, 4 segments, custom label %.2Lf',
         (double_value2, 0., 20., 4, '%.2Lf')),
        (340, 'uchar, custom label %.0Lf',
         (uchar_value, 0, 255, 0, '%.0Lf')),
        (450, 'double, step 0.1, option TRACKBAR_DISCRETE',
         (double_value3, 10., 10.5, 1, '%.1Lf', cvui.TRACKBAR_DISCRETE, 0.1)),
        (560, 'int, 3 segments, DISCRETE | HIDE_SEGMENT_LABELS',
         (int_value, 10, 50, 3, '%.0Lf',
          cvui.TRACKBAR_DISCRETE | cvui.TRACKBAR_HIDE_SEGMENT_LABELS, 2)),
        (670, 'char, 2 segments, custom label %.0Lf',
         (char_value, -128, 127, 2, '%.0Lf')),
    ]

    while True:
        # Fill the frame with a nice color
        frame[:] = (49, 52, 49)

        # Caption at y, trackbar 30px below it.
        for y, caption, extra_args in demos:
            cvui.text(frame, x, y, caption)
            cvui.trackbar(frame, x, y + 30, width, *extra_args)

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
def run(self):
    """Main event loop: draw the control window, mirror the annotated
    image window, and dispatch keyboard input.

    Runs until ESC is pressed or the main window is closed, at which
    point the tkinter root is destroyed and the loop exits.
    """
    self.main_frame = np.zeros((380, 400, 3), np.uint8)
    cv.namedWindow(self.main_window_name)
    cvui.init(self.main_window_name)
    while True:
        # All components below belong to the main (control) window's context.
        cvui.context(self.main_window_name)
        self.main_frame[:] = (49, 52, 49)
        cvui.beginColumn(self.main_frame, 50, 20, -1, -1, 10)
        cvui.text('Click to open an image')
        if cvui.button('Select Image'):
            self.load_image()
        cvui.text('Load a previously saved txt file to current image')
        if cvui.button("Read Txt file"):
            self.read_txt()
        cvui.text('Save to txt file')
        if cvui.button('Save'):
            self.save_to_txt()
        if cvui.button("Show/Hide Index"):
            # Toggle point-index labels in the image window.
            self.has_index = not self.has_index
        if cvui.button("Clear All Points"):
            self.points.clear()
            self.high_light_point = None
        cvui.text('Max click distance to select one point')
        cvui.text('adjust smaller if you want to click two close points')
        # self.threshold is a one-element list mutated in place by cvui.
        cvui.trackbar(200, self.threshold, 0.000, 0.02, 2, '%.4Lf')
        cvui.endColumn()
        cvui.update(self.main_window_name)
        cv.imshow(self.main_window_name, self.main_frame)
        key = cv.waitKey(20)
        # Exit on ESC or when the user closes the main window.
        if key == 27 or not self.is_window_open(self.main_window_name):
            self.tkinter_root.destroy()
            break
        if self.is_window_open(self.image_window_name):
            # Redraw the annotated image in its own cvui context.
            cvui.context(self.image_window_name)
            self.show_image = cv.resize(
                self.origin_image, (self.actual_width, self.actual_height))
            for i, point in enumerate(self.points):
                if i not in self.deleted_list:
                    self.draw_intersection(self.show_image, point, i)
            if self.high_light_point is not None:
                # Ring-highlight the selected point. Points are stored as
                # normalized (0-1) coordinates scaled by the window size —
                # presumably set that way in load_image; verify there.
                point = self.points[self.high_light_point]
                cv.circle(self.show_image,
                          (int(point[0] * self.actual_width),
                           int(point[1] * self.actual_height)), 5,
                          (0, 255, 255), 1)
            # Fill the error window if a vibrant color
            # if self.image_window_flag:
            cvui.update(self.image_window_name)
            cv.imshow(self.image_window_name, self.show_image)
            self.keyboard(key)
def teach_step(posed='Images/yoga.jpg', Check=False):
    """Show a reference pose next to the live webcam feed, give the user
    10 seconds to imitate it, then score the overlap between the
    reference skeleton and the captured one.

    posed: path to the reference pose image.
    Check: when True, also write the raw inference to 'check.jpg'.
    """
    frame1 = np.zeros((768, 1024, 3), np.uint8)
    WINDOW1_NAME = 'Dance Dance Pose'
    cv2.namedWindow(WINDOW1_NAME)
    cvui.init(WINDOW1_NAME)

    # Skeleton overlay for the reference pose.
    inferred = infer(posed)
    original = cv2.imread(posed)
    #time.sleep(5)
    if original.shape[0] != 480 or original.shape[1] != 640:
        original = cv2.resize(original, (368, 368))
    if Check:
        cv2.imwrite('check.jpg', inferred)
    # Isolate the drawn skeleton by subtracting the source image.
    inferred = inferred - original
    #inferred=cv2.copyMakeBorder(inferred[:,int(np.nonzero(inferred)[1][0]/2):],0,0,0,int(np.nonzero(inferred)[1][0]/2),cv2.BORDER_REPLICATE)

    # Give the user 10 seconds of webcam preview with the skeleton overlaid.
    timeout = time.time() + 10
    capture = cv2.VideoCapture(0)
    counter = [time.time()]
    x = 1
    while True:
        cvui.context(WINDOW1_NAME)
        ret, frame = capture.read()
        gray = frame
        # BUG FIX: the original compared against 4810 — clearly a typo for
        # 480 (the same check on `original` above uses 480).
        if gray.shape[0] != 480 or gray.shape[1] != 640:
            gray = cv2.resize(gray, (368, 368))
        # Blend the reference skeleton over the live feed.
        dst = cv2.addWeighted(inferred, 0.5, gray, 0.5, 0)
        frame1[:] = (49, 52, 49)
        cvui.beginRow(frame1, 10, 20, -1, -1, 30)
        cvui.image(dst)
        cvui.image(original)
        cvui.endRow()
        cvui.beginRow(frame1, 10, 400, -1, -1, 30)
        # Countdown display: the counter list holds the remaining seconds.
        cvui.counter(frame1, 100, 410, counter, 0.1, '%.1f')
        counter = [timeout - time.time() for _ in counter]
        cvui.text(frame1, 10, 410, "Tick tick")
        cvui.endRow()
        cvui.update(WINDOW1_NAME)
        cv2.imshow(WINDOW1_NAME, frame1)
        # Capture the user's pose on 'q' or when the 10 seconds are up.
        if cv2.waitKey(1) & 0xFF == ord('q') or time.time() > timeout:
            filename = 'captures/capture' + str(int(x)) + ".png"
            x = x + 1
            cv2.imwrite(filename, frame)
            break

    # Run inference on the captured image and isolate its skeleton.
    inferred_capture = infer(filename)
    original_inferred = cv2.imread(filename)
    # BUG FIX: the original compared against 4380 — typo for 480.
    if original_inferred.shape[0] != 480 or original_inferred.shape[1] != 640:
        inferred_capture = cv2.resize(inferred_capture, (368, 368))
        original_inferred = cv2.resize(original_inferred, (368, 368))

    # Show both skeletons blended together until the user presses 'q'.
    # (The blend is loop-invariant, so compute it once.)
    final = cv2.addWeighted(inferred, 0.5, inferred_capture, 0.5, 0)
    while True:
        cv2.imshow('final', final)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Binarize both skeletons and score how much they agree.
    diff_inferred = inferred_capture - original_inferred
    bw_inferred = cv2.cvtColor(diff_inferred, cv2.COLOR_BGR2GRAY)
    bw_inferred[bw_inferred >= 1] = 1
    bw_inferred[bw_inferred < 1] = 0
    bw_orig_inferred = cv2.cvtColor(inferred, cv2.COLOR_BGR2GRAY)
    bw_orig_inferred[bw_orig_inferred >= 1] = 1
    bw_orig_inferred[bw_orig_inferred < 1] = 0
    total = bw_orig_inferred == bw_inferred
    print('')
    print('')
    print('Overlap:' + str((1 - np.sum(total) / np.size(total)) * 10))
def main():
    """cvui layout demo: rows place widgets left-to-right, columns place
    them top-to-bottom, and cvui.space() inserts gaps in either direction.

    Widgets inside beginRow/endRow and beginColumn/endColumn blocks take no
    (x, y) coordinates -- cvui positions them automatically.
    """
    canvas = np.zeros((600, 800, 3), np.uint8)

    # Widget state: cvui mutates these single-element lists in place.
    row1_check = [False]
    row2_check = [False]
    counter_value = [1.0]
    trackbar_value = [1.0]   # for the (currently disabled) row trackbar
    discrete_value = [1.0]   # for the (currently disabled) column trackbar

    face = cv2.imread('lena-face.jpg', cv2.IMREAD_COLOR)
    face_red = cv2.imread('lena-face-red.jpg', cv2.IMREAD_COLOR)
    face_gray = cv2.imread('lena-face-gray.jpg', cv2.IMREAD_COLOR)

    # A handful of random samples for the sparkline widget.
    spark_data = [random.uniform(0., 300.0) for _ in range(20)]

    # Create the OpenCV window and hand it to cvui.
    cvui.init(WINDOW_NAME)

    while True:
        canvas[:] = (49, 52, 49)

        # Row 1 at (10, 20), width 100, height 50: widgets flow left to
        # right with no extra padding.
        cvui.beginRow(canvas, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', row1_check)
        cvui.window(80, 80, 'window')
        cvui.rect(50, 50, 0x00ff00, 0xff0000)
        cvui.sparkline(spark_data, 50, 50)
        cvui.counter(counter_value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(face)
        cvui.button(face, face_gray, face_red)
        cvui.endRow()

        # Row 2 at (10, 150): same idea, but with 50px between widgets.
        cvui.beginRow(canvas, 10, 150, 100, 50, 50)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', row2_check)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        cvui.printf('with 50px padding.')
        cvui.endRow()

        # Row 3 at (10, 250): mixed text and a button; the trackbar from
        # the original C++ demo remains disabled.
        cvui.beginRow(canvas, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        # cvui.trackbar(150, trackbar_value, 0., 5.)
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()

        # Column 1 at (50, 330), width 100, height 200: widgets flow top
        # to bottom.
        cvui.beginColumn(canvas, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()

        # Column 2 at (300, 330): 10px of vertical padding per widget.
        cvui.beginColumn(canvas, 300, 330, 100, 200, 10)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        # cvui.trackbar(150, discrete_value, 0., 5., 1, '%3.2Lf', cvui.TRACKBAR_DISCRETE, 0.25)
        cvui.text('End of column 2')
        cvui.endColumn()

        # Column 3 at (550, 330): explicit gaps via cvui.space(), which is
        # vertical inside a column and horizontal inside a row.
        cvui.beginColumn(canvas, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        cvui.text('End of column 2 (40px below)')
        cvui.endColumn()

        # Must be called *after* all widgets so cvui can process mouse
        # clicks and keyboard input behind the scenes.
        cvui.update()
        cv2.imshow(WINDOW_NAME, canvas)

        # ESC exits the demo.
        if cv2.waitKey(20) == 27:
            break