Example #1
def main():
    args = get_args()

    # Check for calib_images folder
    if not os.path.exists('calib_images'):
        print('Please create a directory "calib_images"')
        return

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        video_disp.refresh(frame)

        cur_frame_num = video.get_cur_frame_num()

        # Service the key events
        # if s is pressed, save image
        # if b is pressed, go back 1s
        # if n is pressed, go ahead 1s
        if video_disp.key_pressed('s'):
            video_file = os.path.basename(args['video']).lower()
            img_file_name = 'calib_images/{}_{}.png'.format(
                os.path.splitext(video_file)[0], cur_frame_num)
            if cv2.imwrite(img_file_name, frame):
                print('Saved', img_file_name)
        elif video_disp.key_pressed('n'):
            seek_callback(
                min(((cur_frame_num + 60) * 100) // num_frames, 100))
        elif video_disp.key_pressed('b'):
            seek_callback(max((((cur_frame_num - 60) * 100) // num_frames), 0))

        # Add quitting event
        if video_disp.can_quit():
            break
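These examples lean on thin `Video` and `Display` wrappers that the listing never shows. A minimal sketch of the interface they appear to expose, reconstructed from the calls above (the class bodies are assumptions over plain OpenCV, not the original implementation):

# Hypothetical sketch of the Video/Display wrappers used by these
# examples. Method names match the calls above; the bodies are
# assumptions built on bare OpenCV, not the original library code.
import cv2


class Video(object):
    def __init__(self, file_name):
        self.cap = cv2.VideoCapture(file_name)
        self.ended = False

    def get_num_frames(self):
        return int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def get_cur_frame_num(self):
        return int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))

    def next_frame(self):
        ok, frame = self.cap.read()
        self.ended = not ok
        return frame

    def get_frame(self, frame_num):
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_num))
        return self.next_frame()

    def end_reached(self):
        return self.ended


class Display(object):
    def __init__(self, params=None):
        self.name = (params or {}).get('name', 'Display')
        cv2.namedWindow(self.name)
        self.last_key = -1

    def refresh(self, img):
        cv2.imshow(self.name, img)
        self.last_key = cv2.waitKey(1) & 0xFF

    def key_pressed(self, key):
        return self.last_key == ord(key)

    def can_quit(self):
        return self.last_key == ord('q')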
Example #2
def main():
    args = get_args()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    thresh_video_disp = Display({'name': 'Thresholded_Video'})

    # Setup controls
    setup_trackbars(controls_window_name)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    # To communicate with seek callback
    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(
            frame_hsv, (h_min, s_min, v_min), (h_max, s_max, v_max))

        # Refresh thresholded video display
        thresh_video_disp.refresh(frame_thresh)

        # Add quitting event
        if orig_video_disp.can_quit() or thresh_video_disp.can_quit():
            break

    # On quit, save the thresholds
    save_config = SaveConfig('new_thresholds', 'thresholds')
    save_config.save(h_min=h_min, h_max=h_max, s_min=s_min,
                     s_max=s_max, v_min=v_min, v_max=v_max)
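`SaveConfig` and `LoadConfig` are also external to this listing. Given the `.npz` paths and keyword-style `save(...)` calls, they are most plausibly thin wrappers over NumPy's archive functions; a hedged sketch:

# Hypothetical stand-ins for SaveConfig/LoadConfig, assumed to wrap
# np.savez/np.load based on the .npz file names in these examples.
import numpy as np


class SaveConfig(object):
    def __init__(self, file_name, desc):
        self.file_name = file_name  # np.savez appends .npz if missing
        self.desc = desc

    def save(self, **kwargs):
        np.savez(self.file_name, **kwargs)


class LoadConfig(object):
    def __init__(self, file_name, desc):
        self.file_name = file_name
        self.desc = desc

    def load(self):
        # NpzFile supports dict-style access, e.g. calib['camera_matrix']
        return np.load(self.file_name)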
Example #3
def scheme_1(file_name):
    video = Video(file_name)
    display = Display()

    # Emulate a do-while loop
    # with "while True" and breaking
    # if condition fails after executing
    while True:
        frame = video.next_frame()
        if video.end_reached():
            break

        # Do some operation
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Refresh display with new image
        display.refresh(gray)
        if display.can_quit():
            break
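For comparison, here is what scheme_1 boils down to with bare cv2.VideoCapture and no wrappers (a sketch, assuming file_name is a plain video path):

# scheme_1 without the wrappers; a functionally equivalent sketch.
import cv2


def scheme_1_plain(file_name):
    cap = cv2.VideoCapture(file_name)
    while True:
        ok, frame = cap.read()
        if not ok:  # end of video reached
            break

        # Do some operation
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Refresh display and poll for 'q' to quit
        cv2.imshow('Display', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()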
Example #4
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('config/thresholds.npz', 'thresholds')
    thresholds = load_config.load()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    processed_video_disp = Display({'name': 'Processed_Video'})

    # Setup controls
    setup_trackbars(controls_window_name, thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max, erode_size, dilate_size = get_params(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(frame_hsv, (h_min, s_min, v_min),
                                   (h_max, s_max, v_max))

        # Apply erosion
        # Create a kernel first and then apply kernel
        erode_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (erode_size + 1, erode_size + 1))
        frame_erode = cv2.erode(frame_thresh, erode_kernel)

        # Apply dilate
        # Create a kernel first and then apply kernel
        dilate_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (dilate_size + 1, dilate_size + 1))
        frame_dilate = cv2.dilate(frame_erode, dilate_kernel)

        # Refresh thresholded video display
        processed_video_disp.refresh(frame_dilate)

        # Add quitting event
        if orig_video_disp.can_quit() or processed_video_disp.can_quit():
            break

    # On quit, save the params
    save_config = SaveConfig('new_erode_dilate', 'erode_dilate')
    save_config.save(dilate_size=dilate_size, erode_size=erode_size)
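A side note on the erode/dilate pair above: applying erosion and then dilation with one shared kernel is exactly morphological opening, so when the two trackbar sizes coincide the pair collapses into a single cv2.morphologyEx call. A small self-contained check of that equivalence (the toy image and kernel size are illustrative):

# Demonstrates that erode-then-dilate with one kernel == MORPH_OPEN.
import cv2
import numpy as np

# Toy binary image: one blob worth keeping plus one speckle
frame_thresh = np.zeros((100, 100), np.uint8)
frame_thresh[40:60, 40:60] = 255   # blob
frame_thresh[5, 5] = 255           # speckle that opening removes

size = 4
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size + 1, size + 1))

opened = cv2.morphologyEx(frame_thresh, cv2.MORPH_OPEN, kernel)
eroded_then_dilated = cv2.dilate(cv2.erode(frame_thresh, kernel), kernel)
assert (opened == eroded_then_dilated).all()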
Example #5
def main_worker(id, video_file, camera_model, K, D, R, T, measurements, quit_event):
    # Setup video displays
    video_disp = Display({'name': 'Camera_{}'.format(id)})

    # Get input video
    video = Video(video_file)

    # Setup the undistortion stuff
    if camera_model == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region
        new_K = cv2.getOptimalNewCameraMatrix(K, D, img_size, 0.35)[0]

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size = (int(img_size[0] + (new_K[0, 2] - K[0, 2])), int(
            img_size[1] + (new_K[1, 2] - K[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.initUndistortRectifyMap(
            K, D, None, new_K, new_img_size, cv2.CV_16SC2)

    elif camera_model == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region. The alpha
        # parameter in pinhole model is equivalent to balance parameter here.
        new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K, D, img_size, np.eye(3), balance=1)

        # Then calculate new image size according to the scaling
        # Well, if they forgot this in the pinhole Python API,
        # we can't complain about the fisheye model. Note the
        # reversed indexing here too.
        new_img_size = (int(img_size[0] + (new_K[0, 2] - K[0, 2])), int(
            img_size[1] + (new_K[1, 2] - K[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            K, D, np.eye(3), new_K, new_img_size, cv2.CV_16SC2)

    # Set up foreground and background separation
    fgbg = cv2.createBackgroundSubtractorMOG2()

    # Averaging kernel that will be used in opening
    kernel = np.ones((6, 6), np.uint8)

    # Code commented out because not using
    # confidence currently, but could be 
    # used again with changes later
    # # Will be used for histogram comparison
    # # (Confidence measure)
    # ball_image_file = 'ball_image.jpg'
    # ball_image = cv2.imread(ball_image_file)


    # 2D ball detection and 3D ball tracking setup
    ball_position_frame = None
    ball_wc = [0, 0, 0]

    while not video.end_reached() and not quit_event.value:
        # Get each frame
        frame = video.next_frame()

        # Undistort the current frame
        img_undistorted = cv2.remap(
            frame, map1, map2, cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_CONSTANT)

        # Convert to HSV and threshold range of ball
        img_hsv = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(img_hsv, np.array(
            (15, 190, 200)), np.array((25, 255, 255)))

        # Foreground and background separation mask
        fgmask = fgbg.apply(img_undistorted)
        mask_color_bgs = cv2.bitwise_and(mask, mask, mask=fgmask)
        masked_and_opened = cv2.morphologyEx(
            mask_color_bgs, cv2.MORPH_OPEN, kernel)

        # Hough transform to detect ball (circle)
        circles = cv2.HoughCircles(masked_and_opened, cv2.HOUGH_GRADIENT, dp=3,
                                   minDist=2500, param1=300, param2=5, minRadius=3, maxRadius=30)
        if circles is not None:
            # Make indexing easier and
            # convert everything to int
            circles = circles[0, :]
            circles = np.round(circles).astype("int")

            # Take only the first
            # (and hopefully largest)
            # circle detected
            x, y, r = circles[0]
            ball_position_frame = [x - r, y - r, 2 * r, 2 * r]
        else:
            ball_position_frame = None

        # Determine the correct ball radius
        mask_ball_radius = cv2.bitwise_and(fgmask, fgmask, mask=cv2.inRange(
            img_hsv, np.array((10, 150, 180)), np.array((40, 255, 255))))
        if ball_position_frame:
            x1, y1, w1, h1 = ball_position_frame
            ball_crop_temp = mask_ball_radius[(
                y1 + h1 // 2 - 50):(y1 + h1 // 2 + 50), (x1 + w1 // 2 - 50):(x1 + w1 // 2 + 50)]   
            height, width = ball_crop_temp.shape
            if height and width:
                # Successfully cropped image
                ball_crop = ball_crop_temp
                cnts = cv2.findContours(
                    ball_crop.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    ellipse = cv2.fitEllipse(c)
                    width = min(ellipse[1])
                    ball_position_frame = [
                        ball_position_frame[0], ball_position_frame[1], 2 * width, 2 * width]

                # Code commented out because not using
                # confidence currently, but could be 
                # used again with changes later
                # # Calculate confidence
                # confidence = histogram_comparison(ball_image, img_undistorted, ball_position_frame)
                # print confidence

        if ball_position_frame:
            x1, y1, w1, h1 = ball_position_frame
            pixels_per_mm = (
                K[0, 0] + K[1, 1]) / 2 / DEFAULT_FOCAL_LENGTH
            z = PING_PONG_DIAMETER * \
                DEFAULT_FOCAL_LENGTH / (w1 / pixels_per_mm)
            x = ((x1 - K[0, 2]) /
                 pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH
            y = ((y1 - K[1, 2]) /
                 pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH

            ball_cc = np.array([x, y, z]) / 1000
            ball_wc = np.dot(R.T, ball_cc - T.ravel())

        # Push measurements to be processed/visualized
        measurement = {
            'id': id,
            'frame_num': video.get_cur_frame_num(),
            'ball_ic': ball_position_frame,
            'ball_wc': ball_wc
        }
        measurements.put(measurement)

        # Update video display
        video_disp.refresh(img_undistorted)

        # Add quitting event
        if video_disp.can_quit():
            break

    # Setting this will signal
    # the other parallel process
    # to exit too.
    quit_event.value = 1
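The 2D-to-3D step inside main_worker is the pinhole similar-triangles relation: with the real ball diameter D and focal length f known, the depth is z = f*D/d for an apparent diameter d in sensor units, and x, y then follow from the offsets to the principal point. A worked sketch with illustrative numbers (the constants here are assumptions, not the script's actual DEFAULT_FOCAL_LENGTH or calibration):

# Worked example of the depth-from-known-diameter math used above.
# All numeric values below are illustrative assumptions.
import numpy as np

PING_PONG_DIAMETER = 40.0      # mm (standard ball)
DEFAULT_FOCAL_LENGTH = 4.0     # mm, assumed lens focal length
K = np.array([[1400.0, 0.0, 960.0],
              [0.0, 1400.0, 540.0],
              [0.0, 0.0, 1.0]])  # assumed intrinsics, in pixels

x1, y1, w1 = 1200, 600, 28     # detected box: top-left and width (px)

# fx (pixels) divided by f (mm) gives the sensor scale in px per mm
pixels_per_mm = (K[0, 0] + K[1, 1]) / 2 / DEFAULT_FOCAL_LENGTH

# Similar triangles: z = D * f / (apparent diameter in mm)
z = PING_PONG_DIAMETER * DEFAULT_FOCAL_LENGTH / (w1 / pixels_per_mm)

# Back-project the pixel offset from the principal point
x = ((x1 - K[0, 2]) / pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH
y = ((y1 - K[1, 2]) / pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH

ball_cc = np.array([x, y, z]) / 1000   # camera coordinates, metres
print(ball_cc)                         # ~[0.34, 0.09, 2.0] here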
Example #6
def main():
    args = get_args()

    # Read in intrinsic calibration
    load_config = LoadConfig(
        'config/intrinsic_calib_{}.npz'.format(args['model'].lower()), 'calib')
    calib = load_config.load()

    # Read in extrinsic calibration
    load_config_e = LoadConfig('config/extrinsic_calib_p_camera_1.npz',
                               'extrinsics')
    extrinsics = load_config_e.load()

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame; to see
    # if video framework works
    frame = video.next_frame()

    shared_var = Array('d', [0, 0, 0])
    visu = Process(target=visualize_table, args=(shared_var, ))
    visu.start()

    # Setup the undistortion stuff

    if args['model'].upper() == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region
        new_calib_matrix, _ = cv2.getOptimalNewCameraMatrix(
            calib['camera_matrix'], calib['dist_coeffs'], img_size, 0.35)

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size = (
            int(img_size[0] +
                (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
            int(img_size[1] +
                (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.initUndistortRectifyMap(calib['camera_matrix'],
                                                 calib['dist_coeffs'], None,
                                                 new_calib_matrix,
                                                 new_img_size, cv2.CV_16SC2)

    elif args['model'].upper() == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)
        # First create scaled intrinsics because we will undistort
        # into region beyond original image region. The alpha
        # parameter in pinhole model is equivalent to balance parameter here.
        new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            calib['camera_matrix'],
            calib['dist_coeffs'],
            img_size,
            np.eye(3),
            balance=1)

        # Then calculate new image size according to the scaling
        # Well, if they forgot this in the pinhole Python API,
        # we can't complain about the fisheye model. Note the
        # reversed indexing here too.
        new_img_size = (
            int(img_size[0] +
                (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
            int(img_size[1] +
                (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            calib['camera_matrix'], calib['dist_coeffs'], np.eye(3),
            new_calib_matrix, new_img_size, cv2.CV_16SC2)

    # Ball detection setup
    corr_threshold = -1
    radius_change_threshold = 5
    ball_image_file = 'ball_image.jpg'
    # will be used for histogram comparison
    ball_image = cv2.imread(ball_image_file)

    fgbg2 = cv2.createBackgroundSubtractorMOG2()

    kernel = np.ones((6, 6), np.uint8)
    ball_position_frame2 = None
    prev_frame2 = None

    # Get the rotation matrix
    R = cv2.Rodrigues(extrinsics['rvec'])[0]

    while not video.end_reached():

        frame = video.next_frame()
        img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_CONSTANT)

        # Ball detection: HSV threshold + background subtraction
        img_hsv = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(img_hsv, np.array((15, 190, 200)),
                            np.array((25, 255, 255)))
        fgmask2 = fgbg2.apply(img_undistorted)
        mask2_color_bgs = cv2.bitwise_and(mask2, mask2, mask=fgmask2)
        frame2_hsv_bgs = cv2.bitwise_and(img_hsv,
                                         img_hsv,
                                         mask=mask2_color_bgs)

        frame_hsv = img_hsv
        frame_gray = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2GRAY)
        mask = cv2.inRange(frame_hsv, np.array((10, 150, 150)),
                           np.array((40, 255, 255)))
        open_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        frame_thresholded_opened_gray = cv2.bitwise_and(frame_gray,
                                                        frame_gray,
                                                        mask=open_mask)
        frame_thresholded_opened_gray_smoothed = cv2.GaussianBlur(
            frame_thresholded_opened_gray, (11, 11), 0)
        # opening
        a = cv2.inRange(frame_thresholded_opened_gray_smoothed, 10, 256)
        b = cv2.morphologyEx(mask2_color_bgs, cv2.MORPH_OPEN, kernel)
        circles = cv2.HoughCircles(b,
                                   cv2.HOUGH_GRADIENT,
                                   dp=3,
                                   minDist=2500,
                                   param1=300,
                                   param2=5,
                                   minRadius=3,
                                   maxRadius=30)
        if circles is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles = np.round(circles[0, :]).astype("int")
            x, y, r = circles[0]
            ball_position_frame2 = [x - r, y - r, 2 * r, 2 * r]
            # loop over the (x, y) coordinates and radius of the circles
        else:
            ball_position_frame2 = None

        frame2 = img_undistorted

        mask2_ball_radius = cv2.bitwise_and(fgmask2,
                                            fgmask2,
                                            mask=cv2.inRange(
                                                img_hsv,
                                                np.array((10, 150, 180)),
                                                np.array((40, 255, 255))))
        if ball_position_frame2 is not None:
            x2, y2, w2, h2 = ball_position_frame2
            ball_crop_temp = mask2_ball_radius[
                (y2 + h2 // 2 - 30):(y2 + h2 // 2 + 30),
                (x2 + w2 // 2 - 30):(x2 + w2 // 2 + 30)]
            ball_crop_color = frame2[
                (y2 + h2 // 2 - 30):(y2 + h2 // 2 + 30),
                (x2 + w2 // 2 - 30):(x2 + w2 // 2 + 30)]
            height, width = ball_crop_temp.shape
        else:
            ball_crop_temp = []
            height = 0
            width = 0

        cnts = []
        if height != 0 and width != 0:
            ball_crop = ball_crop_temp
            cnts = cv2.findContours(ball_crop.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        if len(cnts) > 0 and ball_position_frame2:
            c = max(cnts, key=cv2.contourArea)
            rect = cv2.minAreaRect(c)
            width, height = rect[1]
            box = cv2.boxPoints(rect)
            box = np.intp(box)
            cv2.drawContours(ball_crop_color, [box], 0, (0, 0, 255), 2)
            ball_position_frame2 = [
                ball_position_frame2[0], ball_position_frame2[1],
                min(width, height),
                min(width, height)
            ]

        prev_frame2 = frame2

        # print ball_position_frame2
        if ball_position_frame2:
            x2, y2, w2, h2 = ball_position_frame2
            # x = (x2 - 960) / PIXELS_PER_MM
            # y = (y2 - 540) / PIXELS_PER_MM
            pixels_per_mm = (new_calib_matrix[0, 0] +
                             new_calib_matrix[1, 1]) / 2 / FOCAL_LENGTH
            z = PING_PONG_DIAMETER * FOCAL_LENGTH / (w2 / pixels_per_mm)
            x = ((x2 - new_calib_matrix[0, 2]) /
                 pixels_per_mm) * z / FOCAL_LENGTH
            y = ((y2 - new_calib_matrix[1, 2]) /
                 pixels_per_mm) * z / FOCAL_LENGTH

            ball_cc = np.array([x, y, z]) / 1000
            #ball_wc = np.dot(R.T, extrinsics['tvec'].ravel() - ball_cc)
            ball_wc = np.dot(R.T, ball_cc - extrinsics['tvec'].ravel())

            print('Ball IC', np.array([x2, y2]), 'Dia', w2)
            print('Ball CC', ball_cc)
            print('Ball WC', ball_wc)

            shared_var[0] = ball_wc[0]
            shared_var[1] = ball_wc[1]
            shared_var[2] = ball_wc[2]

        # Update GUI with new image

        video_disp.refresh(frame2)

        #print "Pixels", ball_position_frame2

        # Add quitting event
        if video_disp.can_quit():
            break

    global main_process_end_reached
    main_process_end_reached = True
    visu.join()
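The Array('d', [0, 0, 0]) handoff to the visualization process is a plain producer/consumer over shared memory; the next example swaps Process for a Thread plus a shared Value end flag, which is the more portable variant of the same idea. A minimal self-contained sketch of the pattern, with a placeholder consumer standing in for visualize_table (which this page does not show):

# Minimal sketch of the shared-Array producer/consumer pattern above.
# `consumer` is a stand-in for visualize_table, which is not shown.
import time
from multiprocessing import Array, Process, Value


def consumer(end_reached, shared_var):
    while not end_reached.value:
        # Read the most recent ball position written by the producer
        print('ball_wc:', shared_var[0], shared_var[1], shared_var[2])
        time.sleep(0.1)


if __name__ == '__main__':
    shared_var = Array('d', [0, 0, 0])
    end_reached = Value('b', False)
    visu = Process(target=consumer, args=(end_reached, shared_var))
    visu.start()

    for i in range(10):          # stand-in for the per-frame loop
        shared_var[0] = i * 0.1  # producer updates the shared state
        time.sleep(0.05)

    end_reached.value = True     # signal the consumer to exit
    visu.join()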
Example #7
def main():
    args = get_args()

    # Read in intrinsic calibration
    load_config_1 = LoadConfig(
        'config/intrinsic_calib_{}_camera_1.npz'.format(args['model'].lower()),
        'calib_camera_1')
    intrinsic_1 = load_config_1.load()
    K_1 = intrinsic_1['camera_matrix']
    D_1 = intrinsic_1['dist_coeffs']
    load_config_2 = LoadConfig(
        'config/intrinsic_calib_{}_camera_2.npz'.format(args['model'].lower()),
        'calib_camera_2')
    intrinsic_2 = load_config_2.load()
    K_2 = intrinsic_2['camera_matrix']
    D_2 = intrinsic_2['dist_coeffs']

    # Read in extrinsic calibration
    load_config_e_1 = LoadConfig(
        'config/extrinsic_calib_{}_camera_1.npz'.format(args['model'].lower()),
        'extrinsic_camera_1')
    extrinsic_1 = load_config_e_1.load()
    R_1 = cv2.Rodrigues(extrinsic_1['rvec'])[0]
    T_1 = extrinsic_1['tvec']
    load_config_e_2 = LoadConfig(
        'config/extrinsic_calib_{}_camera_2.npz'.format(args['model'].lower()),
        'extrinsic_camera_2')
    extrinsic_2 = load_config_e_2.load()
    R_2 = cv2.Rodrigues(extrinsic_2['rvec'])[0]
    T_2 = extrinsic_2['tvec']

    # Setup video displays
    video_disp_1 = Display({'name': 'Camera_1'})
    video_disp_2 = Display({'name': 'Camera_2'})

    # Get input video
    video_1 = Video(args['video_1'])
    video_2 = Video(args['video_2'])

    # Get the first frame; to see
    # if video framework works
    frame_1 = video_1.next_frame()
    frame_2 = video_2.next_frame()

    # Original code used multiprocessing, but
    # macOS doesn't like forking processes
    # with GUIs. However, retaining Array from
    # multiprocessing for future use.
    shared_var = Array('d', [0, 0, 0])
    end_reached = Value('b', False)
    # visu = Process(target=visualize_table, args=(shared_var,))
    # visu.start()
    visu = Thread(target=visualize_table, args=(end_reached, shared_var))
    visu.daemon = True
    visu.start()

    # Setup the undistortion stuff

    if args['model'].upper() == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region
        new_K_1 = cv2.getOptimalNewCameraMatrix(K_1, D_1, img_size, 0.35)[0]
        new_K_2 = cv2.getOptimalNewCameraMatrix(K_2, D_2, img_size, 0.35)[0]

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size_1 = (int(img_size[0] + (new_K_1[0, 2] - K_1[0, 2])),
                          int(img_size[1] + (new_K_1[1, 2] - K_1[1, 2])))
        new_img_size_2 = (int(img_size[0] + (new_K_2[0, 2] - K_2[0, 2])),
                          int(img_size[1] + (new_K_2[1, 2] - K_2[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1_1, map2_1 = cv2.initUndistortRectifyMap(K_1, D_1, None, new_K_1,
                                                     new_img_size_1,
                                                     cv2.CV_16SC2)
        map1_2, map2_2 = cv2.initUndistortRectifyMap(K_2, D_2, None, new_K_2,
                                                     new_img_size_2,
                                                     cv2.CV_16SC2)

    elif args['model'].upper() == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)
        # First create scaled intrinsics because we will undistort
        # into region beyond original image region. The alpha
        # parameter in pinhole model is equivalent to balance parameter here.
        new_K_1 = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K_1, D_1, img_size, np.eye(3), balance=1)
        new_K_2 = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K_2, D_2, img_size, np.eye(3), balance=1)

        # Then calculate new image size according to the scaling
        # Well, if they forgot this in the pinhole Python API,
        # we can't complain about the fisheye model. Note the
        # reversed indexing here too.
        new_img_size_1 = (int(img_size[0] + (new_K_1[0, 2] - K_1[0, 2])),
                          int(img_size[1] + (new_K_1[1, 2] - K_1[1, 2])))
        new_img_size_2 = (int(img_size[0] + (new_K_2[0, 2] - K_2[0, 2])),
                          int(img_size[1] + (new_K_2[1, 2] - K_2[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1_1, map2_1 = cv2.fisheye.initUndistortRectifyMap(
            K_1, D_1, np.eye(3), new_K_1, new_img_size_1, cv2.CV_16SC2)
        map1_2, map2_2 = cv2.fisheye.initUndistortRectifyMap(
            K_2, D_2, np.eye(3), new_K_2, new_img_size_2, cv2.CV_16SC2)

    # Ball detection setup
    corr_threshold = -1
    radius_change_threshold = 5
    ball_image_file = 'ball_image.jpg'
    # will be used for histogram comparison
    ball_image = cv2.imread(ball_image_file)

    fgbg1 = cv2.createBackgroundSubtractorMOG2()
    fgbg2 = cv2.createBackgroundSubtractorMOG2()

    kernel = np.ones((6, 6), np.uint8)
    ball_position_frame1 = None
    ball_position_frame2 = None
    prev_frame1 = None
    prev_frame2 = None
    ball_wc = [0, 0, 0]

    while not video_1.end_reached() and not video_2.end_reached():

        frame_1 = video_1.next_frame()
        frame_2 = video_2.next_frame()

        img_undistorted_1 = cv2.remap(frame_1, map1_1, map2_1,
                                      cv2.INTER_LINEAR,
                                      borderMode=cv2.BORDER_CONSTANT)
        img_undistorted_2 = cv2.remap(frame_2, map1_2, map2_2,
                                      cv2.INTER_LINEAR,
                                      borderMode=cv2.BORDER_CONSTANT)

        # Ball detection for camera 1
        frame_1_hsv = cv2.cvtColor(img_undistorted_1, cv2.COLOR_BGR2HSV)
        mask1 = cv2.inRange(frame_1_hsv, np.array((15, 190, 200)),
                            np.array((25, 255, 255)))
        fgmask1 = fgbg1.apply(img_undistorted_1)
        mask1_color_bgs = cv2.bitwise_and(mask1, mask1, mask=fgmask1)
        frame1_hsv_bgs = cv2.bitwise_and(frame_1_hsv,
                                         frame_1_hsv,
                                         mask=mask1_color_bgs)

        # opening
        b1 = cv2.morphologyEx(mask1_color_bgs, cv2.MORPH_OPEN, kernel)
        circles1 = cv2.HoughCircles(b1,
                                    cv2.HOUGH_GRADIENT,
                                    dp=3,
                                    minDist=2500,
                                    param1=300,
                                    param2=5,
                                    minRadius=3,
                                    maxRadius=30)
        if circles1 is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles1 = np.round(circles1[0, :]).astype("int")
            x, y, r = circles1[0]
            ball_position_frame1 = [x - r, y - r, 2 * r, 2 * r]
            # loop over the (x, y) coordinates and radius of the circles
        else:
            ball_position_frame1 = None

        mask_ball_radius1 = cv2.bitwise_and(fgmask1,
                                            fgmask1,
                                            mask=cv2.inRange(
                                                frame_1_hsv,
                                                np.array((10, 150, 180)),
                                                np.array((40, 255, 255))))

        # determine the correct radius
        if ball_position_frame1 is not None:
            x1, y1, w1, h1 = ball_position_frame1
            ball_crop_temp1 = mask_ball_radius1[(y1 + h1 // 2 -
                                                 30):(y1 + h1 // 2 + 30),
                                                (x1 + w1 // 2 -
                                                 30):(x1 + w1 // 2 + 30)]
            height, width = ball_crop_temp1.shape
            if height != 0 and width != 0:
                # successfully cropped image
                ball_crop1 = ball_crop_temp1
                cnts = cv2.findContours(ball_crop1.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    rect = cv2.minAreaRect(c)
                    width, height = rect[1]
                    ball_position_frame1 = [
                        ball_position_frame1[0], ball_position_frame1[1],
                        min(width, height),
                        min(width, height)
                    ]

        prev_frame1 = img_undistorted_1

        if ball_position_frame1:
            x1, y1, w1, h1 = ball_position_frame1
            pixels_per_mm = (K_1[0, 0] + K_1[1, 1]) / 2 / FOCAL_LENGTH
            z = PING_PONG_DIAMETER * FOCAL_LENGTH / (w1 / pixels_per_mm)
            x = ((x1 - K_1[0, 2]) / pixels_per_mm) * z / FOCAL_LENGTH
            y = ((y1 - K_1[1, 2]) / pixels_per_mm) * z / FOCAL_LENGTH

            ball_cc1 = np.array([x, y, z]) / 1000
            ball_wc1 = np.dot(R_1.T, ball_cc1 - T_1.ravel())

        # Ball detection for camera 2
        frame_2_hsv = cv2.cvtColor(img_undistorted_2, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(frame_2_hsv, np.array((15, 190, 200)),
                            np.array((25, 255, 255)))
        fgmask2 = fgbg2.apply(img_undistorted_2)
        mask2_color_bgs = cv2.bitwise_and(mask2, mask2, mask=fgmask2)
        frame2_hsv_bgs = cv2.bitwise_and(frame_2_hsv,
                                         frame_2_hsv,
                                         mask=mask2_color_bgs)

        # opening
        b2 = cv2.morphologyEx(mask2_color_bgs, cv2.MORPH_OPEN, kernel)
        circles2 = cv2.HoughCircles(b2,
                                    cv2.HOUGH_GRADIENT,
                                    dp=3,
                                    minDist=2500,
                                    param1=300,
                                    param2=5,
                                    minRadius=3,
                                    maxRadius=30)
        if circles2 is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles2 = np.round(circles2[0, :]).astype("int")
            x, y, r = circles2[0]
            ball_position_frame2 = [x - r, y - r, 2 * r, 2 * r]
            # loop over the (x, y) coordinates and radius of the circles
        else:
            ball_position_frame2 = None

        mask_ball_radius2 = cv2.bitwise_and(fgmask2,
                                            fgmask2,
                                            mask=cv2.inRange(
                                                frame_2_hsv,
                                                np.array((10, 150, 180)),
                                                np.array((40, 255, 255))))

        # determine the correct radius
        if ball_position_frame2 is not None:
            x2, y2, w2, h2 = ball_position_frame2
            ball_crop_temp2 = mask_ball_radius2[(y2 + h2 // 2 -
                                                 30):(y2 + h2 // 2 + 30),
                                                (x2 + w2 // 2 -
                                                 30):(x2 + w2 // 2 + 30)]
            height, width = ball_crop_temp2.shape
            if height != 0 and width != 0:
                # successfully cropped image
                ball_crop2 = ball_crop_temp2
                cnts = cv2.findContours(ball_crop2.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    rect = cv2.minAreaRect(c)
                    width, height = rect[1]
                    ball_position_frame2 = [
                        ball_position_frame2[0], ball_position_frame2[1],
                        min(width, height),
                        min(width, height)
                    ]

        prev_frame2 = img_undistorted_2

        if ball_position_frame2:
            x2, y2, w2, h2 = ball_position_frame2
            pixels_per_mm = (K_2[0, 0] + K_2[1, 1]) / 2 / FOCAL_LENGTH
            z = PING_PONG_DIAMETER * FOCAL_LENGTH / (w2 / pixels_per_mm)
            x = ((x2 - K_2[0, 2]) / pixels_per_mm) * z / FOCAL_LENGTH
            y = ((y2 - K_2[1, 2]) / pixels_per_mm) * z / FOCAL_LENGTH

            ball_cc2 = np.array([x, y, z]) / 1000
            ball_wc2 = np.dot(R_2.T, ball_cc2 - T_2.ravel())

            # Additional rotation for absolute coordinates
            R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
            ball_wc2 = np.dot(R, ball_wc2)

        # Combine the two camera estimates:
        # if a position is available from only
        # one camera, use that estimate;
        # if both are available, use the average
        if ball_position_frame1 and ball_position_frame2:
            ball_wc = (ball_wc1 + ball_wc2) / 2
        elif ball_position_frame1:
            ball_wc = ball_wc1
        elif ball_position_frame2:
            ball_wc = ball_wc2

        # print 'Ball IC', np.array([x2, y2]), 'Dia', w2
        # print 'Ball CC', ball_cc2
        # print 'Ball WC', ball_wc2

        shared_var[0] = ball_wc[0]
        shared_var[1] = ball_wc[1]
        shared_var[2] = ball_wc[2]

        # Update GUI with new image
        video_disp_1.refresh(img_undistorted_1)
        video_disp_2.refresh(img_undistorted_2)

        # Add quitting event
        if video_disp_2.can_quit() or video_disp_1.can_quit():
            break

    end_reached.value = True

    visu.join()
Example #8
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('new_calib_{}.npz'.format(args['model'].lower()),
                             'calib')
    calib = load_config.load()

    # Setup video displays
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Undistort according to pinhole model
        if args['model'].upper() == 'P':
            # Make sure the distortion coefficients
            # follow the pinhole model
            if calib['dist_coeffs'].shape[1] != 5:
                print('Input configuration probably not pinhole')
                return

            # Hardcoded image size as
            # this is a test script
            img_size = (1920, 1080)

            # First create scaled intrinsics because we will undistort
            # into region beyond original image region
            new_calib_matrix, _ = cv2.getOptimalNewCameraMatrix(
                calib['camera_matrix'], calib['dist_coeffs'], img_size, 0.35)

            # Then calculate new image size according to the scaling
            # Unfortunately the Python API doesn't directly provide
            # the new image size. They forgot?
            new_img_size = (
                int(img_size[0] +
                    (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
                int(img_size[1] +
                    (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.initUndistortRectifyMap(calib['camera_matrix'],
                                                     calib['dist_coeffs'],
                                                     None, new_calib_matrix,
                                                     new_img_size,
                                                     cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)

        # Undistort according to fisheye model
        elif args['model'].upper() == 'F':
            # Make sure the distortion coefficients
            # follow the fisheye model
            if calib['dist_coeffs'].shape[0] != 4:
                print('Input configuration probably not fisheye')
                return

            # Hardcoded image size as
            # this is a test script.
            # As already ranted before
            # someone messed with the image
            # size indexing and reversed it.
            img_size = (1920, 1080)

            # Also, the basic undistortion DOES NOT work
            # with the fisheye module
            # img_undistorted = cv2.fisheye.undistortImage(
            #   frame, calib['camera_matrix'], calib['dist_coeffs'])

            # First create scaled intrinsics because we will undistort
            # into region beyond original image region. The alpha
            # parameter in pinhole model is equivalent to balance parameter here.
            new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
                calib['camera_matrix'],
                calib['dist_coeffs'],
                img_size,
                np.eye(3),
                balance=1)

            # Then calculate new image size according to the scaling
            # Well, if they forgot this in the pinhole Python API,
            # we can't complain about the fisheye model. Note the
            # reversed indexing here too.
            new_img_size = (
                int(img_size[0] +
                    (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
                int(img_size[1] +
                    (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(
                calib['camera_matrix'], calib['dist_coeffs'], np.eye(3),
                new_calib_matrix, new_img_size, cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)

        # Update GUI with new image
        video_disp.refresh(img_undistorted)

        # Service the s key to save image
        if video_disp.key_pressed('s'):
            cur_frame_num = video.get_cur_frame_num()
            orig_img_file_name = 'image_for_markers_orig.png'
            undistorted_img_file_name = 'image_for_markers_undistorted.png'
            if cv2.imwrite(orig_img_file_name, frame):
                print('Saved original {} at frame {}'.format(
                    orig_img_file_name, cur_frame_num))
            if cv2.imwrite(undistorted_img_file_name, img_undistorted):
                print('Saved undistorted {} at frame {}'.format(
                    undistorted_img_file_name, cur_frame_num))

        # Add quitting event
        if video_disp.can_quit():
            break
Example #9
def main():
    args = get_args()

    # Read in configuration
    # load_config = LoadConfig('config/thresholds.npz', 'thresholds')
    # thresholds = load_config.load()

    # Setup video displays
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    # Deque for storing calib images
    calib_img_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)

    # Deque for storing ChArUco info
    charuco_corners_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)
    charuco_ids_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)

    skip_count = 0

    test_camera_matrix = np.array([
        [11096.77, 0, 540],
        [0, 11096.77, 960],
        [0, 0, 1]
    ])

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        corners, ids, rejected_img_points = cv2.aruco.detectMarkers(
            frame, dictionary)

        if ids is not None:
            img_markers = cv2.aruco.drawDetectedMarkers(frame, corners, ids)
            num_charuco, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
                corners, ids, frame, board, cameraMatrix=test_camera_matrix)

            if charuco_corners is not None:
                img_markers = cv2.aruco.drawDetectedCornersCharuco(
                    img_markers, charuco_corners, charuco_ids)

                if ids.shape[0] == MAX_ARUCO_IDS \
                        and num_charuco == MAX_CHARUCO_IDS \
                        and skip_count % 15 == 0:
                    calib_img_deck.append(frame)
                    charuco_corners_deck.append(charuco_corners)
                    charuco_ids_deck.append(charuco_ids)
        else:
            img_markers = frame

        cv2.putText(img_markers, '{}/{}'.format(len(calib_img_deck), MAX_NUM_IMAGES_FOR_CALIB),
                    (200, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 5)

        video_disp.refresh(img_markers)
        if video_disp.key_pressed('s'):
            pass

        skip_count = skip_count + 1

        # Add quitting event
        if video_disp.can_quit():
            break

    # On quit, save the params
    # save_config = SaveConfig('new_erode_dilate', 'erode_dilate')
    # save_config.save(dilate_size=dilate_size, erode_size=erode_size)
    img_size = calib_img_deck[0].shape[:2]
    # print charuco_ids_deck
    # error, camera_matrix, dist_coeffs = cv2.aruco.calibrateCameraCharuco(
    #    charuco_corners_deck, charuco_ids_deck, board, img_size, test_camera_matrix, None)[:3]

    objPoints = [board.chessboardCorners.reshape(
        1, -1, 3)] * len(charuco_corners_deck)
    imgPoints = charuco_corners_deck

    calibration_flags = cv2.fisheye.CALIB_USE_INTRINSIC_GUESS + \
        cv2.fisheye.CALIB_FIX_PRINCIPAL_POINT + cv2.fisheye.CALIB_FIX_SKEW
    error, camera_matrix, dist_coeffs = cv2.fisheye.calibrate(
        objPoints, imgPoints, img_size, test_camera_matrix, np.zeros(4), flags=calibration_flags)[:3]

    print(error, camera_matrix)

    save_config = SaveConfig('new_calib', 'calib')
    save_config.save(camera_matrix=camera_matrix, dist_coeffs=dist_coeffs)
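`dictionary` and `board` are module-level globals not shown in this example. A hedged sketch of how such a pair is typically constructed for a ChArUco calibration with the legacy aruco API used above (the dictionary choice and board geometry are guesses, not the original values):

# Assumed construction of the module-level `dictionary` and `board`.
# Dictionary type, square counts and sizes are illustrative guesses.
import cv2

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
# Arguments: squaresX, squaresY, squareLength (m), markerLength (m)
board = cv2.aruco.CharucoBoard_create(7, 5, 0.04, 0.02, dictionary)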
Example #10
def main():
    args = get_args()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    thresh_video_disp = Display({'name': 'Thresholded_Video'})
    mean_shift_video_disp = Display({'name': 'Mean-Shift Tracking Video'})

    # Setup controls
    setup_trackbars(controls_window_name)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    # setup initial location of window
    top, length, left, width = 450, 36, 1000, 43  # simply hardcoded the values
    track_window = (left, top, width, length)

    # set up the ROI for tracking
    roi = frame[top:top + length, left:left + width]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
        controls_window_name)
    mask = cv2.inRange(hsv_roi, np.array(
        (h_min, s_min, v_min)), np.array((h_max, s_max, v_max)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Setup the termination criteria: either 20 iterations
    # or move by at least 1 pt
    term_criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 1)

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        frame_thresh = cv2.inRange(
            frame_hsv, (h_min, s_min, v_min), (h_max, s_max, v_max))

        # threshold image in hsv domain
        frame_hsv_threshold = cv2.bitwise_and(
            frame_hsv, frame_hsv, mask=frame_thresh)

        # Refresh thresholded video display
        thresh_video_disp.refresh(frame_hsv_threshold)

        # Find the backprojection of the histogram
        dst = cv2.calcBackProject([frame_hsv_threshold], [0],
                                  roi_hist, [0, 180], 1)

        # apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_criteria)

        # Draw it on image
        x, y, w, h = track_window
        frame_mean_shift = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)

        # Refresh mean shift tracking video display
        mean_shift_video_disp.refresh(frame_mean_shift)

        # Add quitting event
        if orig_video_disp.can_quit() or thresh_video_disp.can_quit() \
                or mean_shift_video_disp.can_quit():
            break
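cv2.meanShift keeps the search window at a fixed size, which suits a ball whose apparent size barely changes. Where the tracked object's scale does vary, cv2.CamShift is the adaptive variant; a hedged sketch of the swap, reusing the `dst`, `track_window` and `term_criteria` from the loop above:

# Hedged sketch: CamShift variant of the tracking step above. Reuses
# the back-projection `dst`, `track_window` and `term_criteria` from
# the loop; only the update and the drawing differ.
ret, track_window = cv2.CamShift(dst, track_window, term_criteria)
pts = np.intp(cv2.boxPoints(ret))   # rotated rectangle corners
frame_cam_shift = cv2.polylines(frame, [pts], True, 255, 2)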