コード例 #1
0
def main(camera_toml_path, enable_distortion_correction, scale_val = 0.65):
    """Interactive preview that lets the user click to re-center the camera's
    auto-exposure RoI.

    The live image is shown at ``scale_val`` of native resolution; a red
    rectangle marks the quarter-frame metering window. Press ESC or 'q' to
    quit. Requires ``roi_size == 4`` and ``auto_exposure == "roi"`` in the
    camera config.
    """
    cfg = get_config(camera_toml_path)
    cam = Camera(cfg)
    print(cam)

    scaling = partial(scaling_int, scale=scale_val)
    # The click-to-RoI math below assumes a quarter-frame metering window.
    if cfg.roi_size != 4:
        sys.exit('This script is only supported on "camera_config.roi_size == 4" ')
    if cfg.auto_exposure != "roi":
        sys.exit('This script is only supported on "camera_config.auto_exposure == roi" ')

    full_w = cam.image_width
    full_h = cam.image_height

    roi = cvui.Rect(0, 0, 0, 0)
    window_name = "Capture"
    cvui.init(window_name)
    # Start with the RoI centered in the frame (full-resolution coordinates).
    click_x = full_w // 2
    click_y = full_h // 2

    while True:
        key = cv2.waitKey(10)
        canvas = np.zeros((scaling(full_h), scaling(full_w), 3), np.uint8)
        canvas[:] = (49, 52, 49)

        if cam.update():
            # WARNING: if distortion correction is enabled, the rectangle on
            # screen doesn't indicate the actual RoI area used for auto exposure.
            if enable_distortion_correction:
                rgb = cam.remap_image
            else:
                rgb = cam.image
            disp_w = scaling(full_w)
            disp_h = scaling(full_h)
            canvas[:disp_h, :disp_w, :] = cv2.resize(rgb, (disp_w, disp_h))

            # A click repositions the RoI center (converted back to full-res).
            if cvui.mouse(cvui.DOWN):
                click_x = int(cvui.mouse().x / scale_val)
                click_y = int(cvui.mouse().y / scale_val)

            cam.set_roi_properties(click_x, click_y, win_size=4)
            # The metering window is half the frame in each dimension,
            # centered on the clicked point; drawn in display coordinates.
            roi = cvui.Rect(
                scaling(click_x - full_w // 4),
                scaling(click_y - full_h // 4),
                scaling(full_w // 2),
                scaling(full_h // 2),
            )

            # Clamp the rectangle so it stays inside the preview image.
            roi.x = max(roi.x, 0)
            roi.y = max(roi.y, 0)
            if roi.x + roi.width > disp_w:
                roi.width = disp_w - roi.x
            if roi.y + roi.height > disp_h:
                roi.height = disp_h - roi.y

            cvui.rect(canvas, roi.x, roi.y, roi.width, roi.height, 0xFF0000)

        if key == 27 or key == ord("q"):
            break

        cvui.update()
        cvui.imshow(window_name, canvas)
    cv2.destroyAllWindows()
コード例 #2
0
def update_kcf_tracker(frame, kcf_tracker_state):
    """Advance the KCF tracker by one frame of user input and video.

    Runs the interactive roi_tool so the user can draw or adjust a bounding
    box. A freshly committed box (re)initialises an OpenCV KCF tracker;
    otherwise, if a tracker is live, it is updated with the new frame and the
    observed target centre is reported.

    Returns a tuple ``(target_pos_obs, roi, kcf_tracker_state)`` where
    ``target_pos_obs`` is an (x, y) numpy array or None, and ``roi`` is
    ``((x1, y1), (x2, y2))`` or None.
    """
    state = kcf_tracker_state
    anchor, kcf_roi, drawing, new_bb = roi_tool(
        frame, state.kcf_box_anchor, state.kcf_roi)

    observed_centre = None
    box = None

    # While the user is still dragging, any existing tracker is invalid.
    if drawing:
        state.kcf_tracker_init = False

    if new_bb:
        # A new bounding box was committed: start a fresh tracker on it.
        state.kcf_tracker_init = True
        state.kcf_tracker = cv2.TrackerKCF_create()
        state.kcf_tracker.init(
            frame, (kcf_roi.x, kcf_roi.y, kcf_roi.width, kcf_roi.height))
    elif state.kcf_tracker_init:
        track_ok, new_roi = state.kcf_tracker.update(frame)
        x1 = kcf_roi.x
        y1 = kcf_roi.y
        w = kcf_roi.width
        h = kcf_roi.height
        # Only report an observation when the tracker still has a lock.
        if track_ok:
            observed_centre = np.array([x1 + w / 2., y1 + h / 2])
        # Carry the tracker's latest box forward for the next frame.
        state.kcf_roi = cvui.Rect(new_roi[0], new_roi[1],
                                  new_roi[2], new_roi[3])
        box = ((x1, y1), (x1 + w, y1 + h))
    return observed_centre, box, state
コード例 #3
0
	def begin(self, frame):
		"""Render the window chrome and open a cvui row/column for its content.

		Handles dragging via the title bar, clamps the window inside *frame*
		when idle, draws the window frame and title, and (if minimizable)
		renders a minimize/restore toggle button. Opens a cvui row and column
		that the caller is expected to fill with widgets and later close.
		"""
		# The top 20px strip of the window acts as the draggable title bar.
		mouseInsideTitleArea = cvui.mouse().inside(cvui.Rect(self.__x, self.__y, self.__width, 20))
		self.__height = 20 if self.__minimized else self.__heightNotMinimized

		# Begin a drag when the mouse goes down inside the title bar,
		# remembering the grab offset so the window doesn't jump.
		if self.__isMoving == False and cvui.mouse(cvui.DOWN) and mouseInsideTitleArea:
			self.__deltaX = cvui.mouse().x - self.__x
			self.__deltaY = cvui.mouse().y - self.__y
			self.__isMoving = True

		# While the drag is active, follow the pointer (minus the grab offset).
		elif self.__isMoving and cvui.mouse(cvui.IS_DOWN):
			self.__x = cvui.mouse().x - self.__deltaX
			self.__y = cvui.mouse().y - self.__deltaY

		# Not dragging: end any move and clamp the window within the frame.
		else:
			frameRows,frameCols,frameChannels = frame.shape
			self.__isMoving = False
			self.__x = max(0, self.__x)
			self.__y = max(0, self.__y)
			self.__x = min(frameCols - self.__width, self.__x)
			self.__y = min(frameRows - 20, self.__y)

		cvui.window(frame, self.__x, self.__y, self.__width, self.__height, self.__title)

		# Minimize/restore toggle in the top-right corner of the title bar.
		if self.__minimizable and cvui.button(frame, self.__x + self.__width - 20, self.__y + 1, 18, 18, '+' if self.__minimized else '-'):
			self.__minimized = not self.__minimized

		# Open the layout group the caller will populate with widgets.
		cvui.beginRow(frame, self.__x + 10, self.__y + 30, self.__width - 20, self.__height - 20)
		cvui.beginColumn(self.__width - 10, self.__height - 20)
コード例 #4
0
def main():
    """cvui.iarea() demo: report the mouse state relative to a fixed rectangle."""
    frame = np.zeros((300, 600, 3), np.uint8)

    # Init cvui and have it create the OpenCV window (cv::namedWindow).
    cvui.init(WINDOW_NAME)

    while True:
        # Background fill.
        frame[:] = (49, 52, 49)

        # The rectangle the user interacts with.
        box = cvui.Rect(50, 50, 100, 100)
        cvui.rect(frame, box.x, box.y, box.width,
                  box.height, 0xff0000)

        # Query the current mouse status for that region.
        state = cvui.iarea(box.x, box.y, box.width, box.height)

        # iarea() reports exactly one of:
        #   CLICK: mouse just clicked the interaction area
        #   DOWN:  mouse button pressed on the area, not yet released
        #   OVER:  mouse cursor is over the interaction area
        #   OUT:   mouse cursor is outside the interaction area
        if state == cvui.CLICK:
            print('Rectangle was clicked!')
        elif state == cvui.DOWN:
            cvui.printf(frame, 240, 70, "Mouse is: DOWN")
        elif state == cvui.OVER:
            cvui.printf(frame, 240, 70, "Mouse is: OVER")
        elif state == cvui.OUT:
            cvui.printf(frame, 240, 70, "Mouse is: OUT")

        # Live mouse pointer coordinates.
        cvui.printf(frame, 240, 50, "Mouse pointer is at (%d,%d)",
                    cvui.mouse().x,
                    cvui.mouse().y)

        # Must run *after* all UI components so cvui can process input.
        cvui.update()

        # Show everything on the screen.
        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
コード例 #5
0
def main():
    """Demo: drag with any of the three mouse buttons to select per-button ROIs.

    Each mouse button (left, middle, right) owns its own anchor, rectangle
    and colour; any non-empty selection is previewed in its own window.
    """
    lena = cv2.imread('lena.jpg')
    frame = np.zeros(lena.shape, np.uint8)
    # One anchor point, ROI rectangle and colour per mouse button.
    anchors = [cvui.Point() for _ in range(3)]
    rois = [cvui.Rect() for _ in range(3)]
    colors = [0xff0000, 0x00ff00, 0x0000ff]

    # Init cvui and have it create the OpenCV window (cv.namedWindow).
    cvui.init(WINDOW_NAME)

    while True:
        # Fill the frame with Lena's image.
        frame[:] = lena[:]

        cvui.text(
            frame, 10, 10,
            'Click (any) mouse button then drag the pointer around to select a ROI.'
        )
        cvui.text(
            frame, 10, 25,
            'Use different mouse buttons (right, middle and left) to select different ROIs.'
        )

        # Image bounds used for clamping (constant across the button loop).
        lenaRows, lenaCols, _ = lena.shape

        # Handle each mouse button independently.
        # cvui.mouse(button, query) supports DOWN / UP / CLICK (single-frame
        # events) and IS_DOWN (true while the button stays pressed).
        for button in range(cvui.LEFT_BUTTON, cvui.RIGHT_BUTTON + 1):
            anchor = anchors[button]
            roi = rois[button]
            color = colors[button]

            # Button just went down: drop the anchor at the pointer.
            if cvui.mouse(button, cvui.DOWN):
                anchor.x = cvui.mouse().x
                anchor.y = cvui.mouse().y

            # Button held: stretch the ROI between anchor and pointer.
            if cvui.mouse(button, cvui.IS_DOWN):
                dx = cvui.mouse().x - anchor.x
                dy = cvui.mouse().y - anchor.y

                # Normalise so the rect origin is always the top-left corner.
                roi.x = anchor.x + dx if dx < 0 else anchor.x
                roi.y = anchor.y + dy if dy < 0 else anchor.y
                roi.width = abs(dx)
                roi.height = abs(dy)

                # Show the roi coordinates and size.
                cvui.printf(frame, roi.x + 5, roi.y + 5, 0.3, color, '(%d,%d)',
                            roi.x, roi.y)
                cvui.printf(frame,
                            cvui.mouse().x + 5,
                            cvui.mouse().y + 5, 0.3, color, 'w:%d, h:%d',
                            roi.width, roi.height)

            # Clamp the ROI so it stays inside the image.
            roi.x = max(roi.x, 0)
            roi.y = max(roi.y, 0)
            if roi.x + roi.width > lenaCols:
                roi.width = lenaCols - roi.x
            if roi.y + roi.height > lenaRows:
                roi.height = lenaRows - roi.y

            # Draw and preview any non-empty selection.
            if roi.area() > 0:
                cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, color)
                cvui.printf(frame, roi.x + 5, roi.y - 10, 0.3, color, 'ROI %d',
                            button)

                lenaRoi = lena[roi.y:roi.y + roi.height,
                               roi.x:roi.x + roi.width]
                cv2.imshow('ROI button' + str(button), lenaRoi)

        # Must run *AFTER* all UI components so cvui can process input.
        cvui.update()

        # Show everything on the screen.
        cv2.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break
コード例 #6
0
def main():
    """Demo: drag with any mouse button to select a ROI from an image.

    While the mouse is held down the rectangle follows the pointer; once the
    button is released, the selected region of the image is displayed in a
    separate window (ROI_WINDOW).
    """
    lena = cv2.imread('lena.jpg')
    frame = np.zeros(lena.shape, np.uint8)
    anchor = cvui.Point()
    roi = cvui.Rect(0, 0, 0, 0)
    working = False  # True while the user is still dragging the selection

    # Init cvui and tell it to create a OpenCV window, i.e. cv.namedWindow(WINDOW_NAME).
    cvui.init(WINDOW_NAME)

    while (True):
        # Fill the frame with Lena's image
        frame[:] = lena[:]

        cvui.text(
            frame, 10, 10,
            'Click (any) mouse button and drag the pointer around to select a ROI.'
        )

        # cvui.mouse(query) supports DOWN / UP / CLICK (true for a single
        # frame) and IS_DOWN (true for as long as a button stays pressed).

        # Did any mouse button go down? Anchor the selection at the pointer.
        if cvui.mouse(cvui.DOWN):
            anchor.x = cvui.mouse().x
            anchor.y = cvui.mouse().y

            # Inform we are working, so the ROI window is not updated every frame
            working = True

        # Is any mouse button down (pressed)? Stretch the ROI to the pointer.
        if cvui.mouse(cvui.IS_DOWN):
            width = cvui.mouse().x - anchor.x
            height = cvui.mouse().y - anchor.y

            # Normalise so the rect origin is always the top-left corner.
            roi.x = anchor.x + width if width < 0 else anchor.x
            roi.y = anchor.y + height if height < 0 else anchor.y
            roi.width = abs(width)
            roi.height = abs(height)

            # Show the roi coordinates and size
            cvui.printf(frame, roi.x + 5, roi.y + 5, 0.3, 0xff0000, '(%d,%d)',
                        roi.x, roi.y)
            cvui.printf(frame,
                        cvui.mouse().x + 5,
                        cvui.mouse().y + 5, 0.3, 0xff0000, 'w:%d, h:%d',
                        roi.width, roi.height)

        # Was any mouse button released? The selection is final.
        if cvui.mouse(cvui.UP):
            working = False

        # Ensure ROI is within bounds.
        # BUG FIX: the original used `lena.cols` / `lena.rows`, which do not
        # exist on numpy arrays and raised AttributeError whenever the ROI ran
        # past the image edge; use the shape-derived lenaCols / lenaRows.
        lenaRows, lenaCols, lenaChannels = lena.shape
        roi.x = 0 if roi.x < 0 else roi.x
        roi.y = 0 if roi.y < 0 else roi.y
        roi.width = roi.width + lenaCols - (
            roi.x + roi.width) if roi.x + roi.width > lenaCols else roi.width
        roi.height = roi.height + lenaRows - (
            roi.y +
            roi.height) if roi.y + roi.height > lenaRows else roi.height

        # Render the roi
        cvui.rect(frame, roi.x, roi.y, roi.width, roi.height, 0xff0000)

        # This function must be called *AFTER* all UI components. It does
        # all the behind the scenes magic to handle mouse clicks, etc.
        cvui.update()

        # Show everything on the screen
        cv2.imshow(WINDOW_NAME, frame)

        # If the ROI is valid and the drag is finished, show the selection.
        if roi.area() > 0 and working == False:
            lenaRoi = lena[roi.y:roi.y + roi.height, roi.x:roi.x + roi.width]
            cv2.imshow(ROI_WINDOW, lenaRoi)

        # Check if ESC key was pressed
        if cv2.waitKey(20) == 27:
            break
コード例 #7
0
def main():
    """Main spotter-camera tracking loop.

    Wires together the cvui control/video windows, ZMQ subscriptions for
    stage position, focus and sharpness data, and one of several 2D trackers
    (KCF / CANNY / THRESHOLD). Each iteration grabs a frame, updates the
    active tracker, publishes the autofocus ROI, and converts tracking error
    into stage motion commands (dx, dy, dz) according to the STAGE_MODE /
    FOCUS_MODE state machine documented inline below. Runs until
    ``keep_running`` is cleared (e.g. by the SIGINT handler) or an invalid
    tracker mode is encountered.
    """

    global keep_running

    # This is for saving video *with* detection boxes on it
    # To save raw video, use the CameraSaver.py script
    save_video = True
    if save_video:
        sz = (p.IMG_WIDTH_SPOTTER, p.IMG_HEIGHT_SPOTTER)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        vout = cv2.VideoWriter()
        # NOTE(review): the last argument (isColor) is False, yet the frames
        # written below appear to be color — confirm the writer configuration.
        vout.open('track_output.mp4', fourcc, p.FPS_SPOTTER, sz, False)

    signal.signal(signal.SIGINT, sigint_handler)

    # Collapsible UI panes rendered into the control window each frame.
    control_panes = ControlPanes()
    control_panes.stage_control_pane = EnhancedWindow(0, 0, 300, 500,
                                                      'Stage Control')
    control_panes.focus_control_pane = EnhancedWindow(0, 20, 300, 500,
                                                      'Focus Control')
    control_panes.tracker_select_pane = EnhancedWindow(0, 40, 300, 500,
                                                       'Tracker Select')
    control_panes.canny_settings_pane = EnhancedWindow(0, 60, 300, 500,
                                                       'Canny Tuning')
    control_panes.threshold_setting_pane = EnhancedWindow(
        0, 80, 300, 500, 'Threshold Tuning')

    cvui.init(p.CTRL_WINDOW_NAME)
    cvui.init(p.VIDEO_WINDOW_NAME)

    # All inter-process communication goes over ZMQ sockets.
    context = zmq.Context()
    (video_socket, focus_sub, stage_sub, focus_state_sub, macro_sharpness_sub,
     track_socket, roi_socket, af_pub) = setup_zmq(context)

    # Calibration data for mapping stage coordinates into the camera view.
    stage_zero_offset = np.load('tank_corners_offset.npy')
    world_points = np.load('tank_corners.npy')
    intrinsic = np.load('intrinsic_calibration/ll_65/intrinsic.npy')

    # Stage pose; None until the first stage position message arrives.
    stage_x = None
    stage_y = None
    stage_z = None
    z_moving = True
    current_ll_focus = None
    object_distance_ll = 0

    # 2D tracking state: raw observation, filtered position, and a slow
    # (smoothed) copy used for movement-offset calculation.
    target_pos_obs = None
    target_pos = np.array([1, 1])
    target_pos_slow = target_pos.copy()
    feature_delta = np.array([0, 0])
    target_track_init = False
    STAGE_MODE = 'PAUSED'
    FOCUS_MODE = 'MANUAL'
    tracker_type = 'KCF'  # options are KCF or CANNY

    # These three structs store the state information necessary for the trackers
    canny_tracker_state = CannyTracker()
    canny_tracker_state.canny_low = [50]
    canny_tracker_state.canny_high = [150]

    kcf_tracker_state = KCFTracker()
    kcf_tracker_state.kcf_box_anchor = cvui.Point()
    kcf_tracker_state.kcf_roi = cvui.Rect(0, 0, 0, 0)
    kcf_tracker_state.kcf_tracker_init = False

    threshold_tracker_state = ThresholdTracker()
    threshold_tracker_state.threshold = [30]
    threshold_tracker_state.roi = cvui.Rect(0, 0, 0, 0)
    # NOTE(review): `cvui.Point` (no parentheses) assigns the class itself,
    # not an instance — likely a missing `()`; confirm against usage.
    threshold_tracker_state.box_anchor = cvui.Point
    threshold_tracker_state.show_binary = [False]

    sharpness_focus_state = SharpnessFocusState()
    sharpness_focus_state.mode = 'COARSE'
    macro_sharpness = 0

    while keep_running:
        ctrl_frame = np.zeros((700, 300, 3), np.uint8)

        # Receive stage position updates
        try:
            stage_pos = stage_sub.recv_string()
            (stage_x, stage_y,
             stage_z_new) = [float(x) for x in stage_pos.split(' ')]
            # Track whether the z axis is currently in motion (used by the
            # sharpness autofocus logic).
            if stage_z_new == stage_z:
                z_moving = False
            else:
                z_moving = True
            stage_z = stage_z_new
        except zmq.Again:
            # the stage publisher only publishes at ~10hz, so not having an update is common
            pass

        # Receive macro sharpness
        # NOTE(review): macro_sharpness_last is assigned but never read in
        # this function.
        try:
            macro_sharpness_last = macro_sharpness
            macro_sharpness = float(macro_sharpness_sub.recv_string())
        except zmq.Again:
            # no sharpness value, which is unexpected
            print('No Macro Image Sharpness!')

        # receive next frame
        try:
            frame = recv_img(video_socket)
        except zmq.Again:
            print('Timed Out!')
            time.sleep(1)
            continue

        # Clicking in the video window re-selects the target position.
        cvui.context(p.VIDEO_WINDOW_NAME)
        if cvui.mouse(cvui.IS_DOWN):
            (target_pos, feature_delta) = reset_target_selection()
            target_pos_slow = target_pos.copy()
            target_track_init = True
        feature_delta += get_feature_2delta()

        # Overlay the tank corner geometry once the stage pose is known.
        if stage_x is not None:
            stage_pos = np.array([stage_x, stage_y, -stage_z], ndmin=2).T
            frame = tank_corners_clip(frame, stage_pos, stage_zero_offset,
                                      world_points, intrinsic)

        # This is where the tracking happens. tracker_type is controlled by a button on the interface
        # Adding a new tracker is as simple as adding another case to this if/else and adding a button in
        # the UI to switch into the new tracking mode
        if tracker_type == 'CANNY':
            canny_tracker_state.target_pos = target_pos
            (target_pos_obs, roi, canny_tracker_state) = update_canny_tracker(
                frame, canny_tracker_state)

        elif tracker_type == 'KCF':
            cvui.context(p.VIDEO_WINDOW_NAME)
            (target_pos_obs, roi,
             kcf_tracker_state) = update_kcf_tracker(frame, kcf_tracker_state)

        elif tracker_type == 'THRESHOLD':
            cvui.context(p.VIDEO_WINDOW_NAME)
            threshold_tracker_state.target_pos = target_pos
            (target_pos_obs, roi,
             threshold_tracker_state) = update_threshold_tracker(
                 frame, threshold_tracker_state)

        else:
            print('Invalid tracker mode: %s' % tracker_type)
            roi = None
            keep_running = False

        # This roi_msg takes an roi that may have been identified around the animal and sends it over zmq
        # This enables any cameras trying to autofocus to know which roi to keep in focus
        # if no autofocusing is happening, then these messages don't do anything
        if roi is not None:
            roi_msg = m.SetFocusROI(roi[0], roi[1])
        else:
            roi_msg = m.SetFocusROI(None, None)
        roi_socket.send_pyobj(
            roi_msg
        )  # tell the LL camera (or anything else I guess) which ROI to focus

        (target_track_ok, target_pos,
         target_pos_slow) = filter_target_position(target_pos, target_pos_slow,
                                                   target_pos_obs)

        # This is probably where we want to use the other camera to estimate depth

        # Now we have a giant state machine. We need to structure the code this way, because we want 2D tracking and
        # user interaction to update even when we are waiting on some slower action to occur related to object depth
        # and focusing. The state machine provides a mechanism to handle these slower processes while not impeding the
        # rest of the tracking process.

        # STAGE_MODE = {MANUAL | AUTO | PAUSED}
        #   -- In MANUAL mode, dx,dy,dz all set by keyboard input.
        #   -- In AUTO mode, dx and dy are set by tracker. dz is set by autofocus if FOCUS_MODE is set to AUTO
        #   -- In PAUSED mode, dx = dy = dz = 0. The tracker will keep tracking, but the stage won't move
        #
        # FOCUS_MODE = {MANUAL | SHARPNESS | DEPTH}
        #   -- In MANUAL mode, dz is set by keyboard input
        #   -- In SHARPNESS mode, dz is set by trying to maximize sharpness, although the final position can be tweaked
        #      by user input. SHARPNESS mode does nothing if STAGE_MODE is MANUAL
        #   -- In DEPTH mode, dz is set by a target depth measurement that is estimated from a second camera
        #      (stereo or perpendicular)

        # Determine dx and dy
        if STAGE_MODE == 'PAUSED':  # -> Stage Control
            track_socket.send_string('0 0 0')
            dx = 0
            dy = 0
            dz = 0
        elif STAGE_MODE == 'MANUAL':  # TODO: Probably tune this better
            (dx, dy) = get_feature_2delta()
            dx = 10 * dx
            dy = 10 * dy
            print('FULL_MANUAL %f, %f' % (dx, dy))
            dz = manual_focus_update()
        elif STAGE_MODE == 'AUTO':
            # The tracker makes a determination in pixel space, then we may decide to filter it. We then determine the
            # dx and dy based on the distance between the feature of interest and the macro lens center
            # how much do we need to move in pixel-space?
            # Note dx and dy are 0 if there are no target tracks

            if stage_z is None:
                print('Waiting on stage node')
                dx = 0
                dy = 0
                dz = 0
            else:
                if target_pos_obs is not None:
                    if target_track_ok:
                        (dx, dy) = calculate_movement_offsets(
                            frame, target_pos, target_pos_slow, feature_delta)
                    else:
                        dx = 0
                        dy = 0
                else:
                    dx = 0
                    dy = 0
                    target_track_ok = False

                # When STAGE_MODE == 'AUTO', we need to determine how to handle the focusing
                if FOCUS_MODE == 'MANUAL':
                    dz = manual_focus_update()
                elif FOCUS_MODE == 'SHARPNESS':

                    sharpness_focus_state.stage_z = stage_z
                    sharpness_focus_state.macro_sharpness = macro_sharpness
                    sharpness_focus_state.z_moving = z_moving
                    dz, sharpness_focus_state = sharpness_focus(
                        sharpness_focus_state, af_pub, focus_state_sub,
                        video_socket, focus_sub)
                elif FOCUS_MODE == 'DEPTH':
                    # this is the mode when we have a second camera to estimate depth
                    dz = 0
                else:
                    # invalid focus mode
                    print('Invalid focus mode %s' % FOCUS_MODE)
                    sys.exit(1)
        else:
            print('Unknown stage mode: %s' % STAGE_MODE)
            dx = 0
            dy = 0
            dz = 0

        print(dx, dy, dz)
        track_socket.send_string(
            '%f %f %f' %
            (dx, dy, dz))  # 'wasteful', but easier debugging for now

        frame = cv2.resize(
            frame, (p.IMG_DISP_WIDTH_SPOTTER, p.IMG_DISP_HEIGHT_SPOTTER))

        # draw dots on frame centers
        cv2.circle(frame, (int(
            p.IMG_DISP_WIDTH_SPOTTER / 2), int(p.IMG_DISP_HEIGHT_SPOTTER / 2)),
                   5, (0, 0, 255), -1)  # center of frame
        cv2.circle(frame, (p.MACRO_LL_CENTER[0], p.MACRO_LL_CENTER[1]), 5,
                   (255, 0, 255), -1)  # center of macro frame frame

        cvui.update(p.VIDEO_WINDOW_NAME)
        cv2.imshow(p.VIDEO_WINDOW_NAME, frame)
        if save_video:
            vout.write(frame)

        # Render the control window UI and read back any mode changes.
        cvui.context(p.CTRL_WINDOW_NAME)
        STAGE_MODE, FOCUS_MODE, tracker_type, macro_resweep, ll_resweep = draw_settings(
            ctrl_frame, control_panes, canny_tracker_state,
            threshold_tracker_state, STAGE_MODE, FOCUS_MODE, tracker_type)

        # User requested a macro-lens sharpness sweep.
        if macro_resweep:
            p.BYPASS_LL_ESTIMATE = True
            sharpness_focus_state.mode = 'FINE_UNINITIALIZED'

        # User requested a liquid-lens refocus sweep; requires stage pose.
        if ll_resweep:
            if stage_z is not None:
                print('Liquid Lens Refocus!')
                dist_to_tank = (300 - stage_z) + p.STAGE_TANK_OFFSET
                # Empirical power-law mapping from distance to lens setting.
                ll_max = 2953.5 * dist_to_tank**-0.729
                ll_min = 2953.5 * (dist_to_tank + p.TANK_DEPTH_MM)**-0.729
                print('llmin, llmax: (%f, %f)' % (ll_min, ll_max))
                af_pub.send_pyobj(m.AutofocusMessage(ll_min, ll_max, 1))
            else:
                print('Cannot refocus liquid lens until stage node is running')

        cvui.update(p.CTRL_WINDOW_NAME)
        cv2.imshow(p.CTRL_WINDOW_NAME, ctrl_frame)
        cv2.waitKey(1)

    if save_video:
        vout.release()
コード例 #8
0
def main():
    """Demo: rubber-band a rectangle with the mouse and report mouse events."""
    frame = np.zeros((300, 600, 3), np.uint8)

    # Init cvui and have it create the OpenCV window (cv::namedWindow).
    cvui.init(WINDOW_NAME)

    # Rectangle driven by mouse interaction.
    selection = cvui.Rect(0, 0, 0, 0)

    while True:
        # Background fill.
        frame[:] = (49, 52, 49)

        cvui.text(
            frame, 10, 30,
            'Click (any) mouse button and drag the pointer around to select an area.'
        )
        cvui.printf(frame, 10, 50, 'Mouse pointer is at (%d,%d)',
                    cvui.mouse().x,
                    cvui.mouse().y)

        # cvui.mouse(query) supports DOWN / UP / CLICK (true for a single
        # frame only) and IS_DOWN (true while any button stays pressed).

        # Any button just pressed: anchor the rectangle at the pointer.
        if cvui.mouse(cvui.DOWN):
            selection.x = cvui.mouse().x
            selection.y = cvui.mouse().y

        # Button held: stretch toward the pointer and show live geometry.
        if cvui.mouse(cvui.IS_DOWN):
            selection.width = cvui.mouse().x - selection.x
            selection.height = cvui.mouse().y - selection.y

            cvui.printf(frame, selection.x + 5, selection.y + 5, 0.3, 0xff0000,
                        '(%d,%d)', selection.x, selection.y)
            cvui.printf(frame,
                        cvui.mouse().x + 5,
                        cvui.mouse().y + 5, 0.3, 0xff0000, 'w:%d, h:%d',
                        selection.width, selection.height)

        # Button released: collapse (hide) the rectangle.
        if cvui.mouse(cvui.UP):
            selection.x = 0
            selection.y = 0
            selection.width = 0
            selection.height = 0

        # A full click (down then up) happened this frame.
        if cvui.mouse(cvui.CLICK):
            cvui.text(frame, 10, 70, 'Mouse was clicked!')

        # Draw the current selection.
        cvui.rect(frame, selection.x, selection.y, selection.width,
                  selection.height, 0xff0000)

        # Handles input processing behind the scenes and shows the frame in a
        # window, like cv2.imshow() does. Must come after all UI components.
        cvui.imshow(WINDOW_NAME, frame)

        # ESC quits.
        if cv2.waitKey(20) == 27:
            break