Example #1
def main():
    args = get_args()

    # Check for calib_images folder
    if not os.path.exists('calib_images'):
        print('Please create a directory "calib_images"')
        return

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        video_disp.refresh(frame)

        cur_frame_num = video.get_cur_frame_num()

        # Service the key events
        # if s is pressed, save image
        # if b is pressed, go back 1s
        # if n is pressed, go ahead 1s
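        # Note: seek_callback() below takes a seek position in percent
        # (0-100), which is why the frame offsets are rescaled by
        # 100 / num_frames. The +/-60 frame offsets correspond to one
        # second only under the assumption of ~60 fps footage.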
        if video_disp.key_pressed('s'):
            video_file = os.path.basename(args['video']).lower()
            img_file_name = 'calib_images/{}_{}.png'.format(
                os.path.splitext(video_file)[0], cur_frame_num)
            if cv2.imwrite(img_file_name, frame):
                print('Saved', img_file_name)
        elif video_disp.key_pressed('n'):
            seek_callback(
                min((((cur_frame_num + 60) * 100) // num_frames), 100))
        elif video_disp.key_pressed('b'):
            seek_callback(max((((cur_frame_num - 60) * 100) // num_frames), 0))

        # Add quitting event
        if video_disp.can_quit():
            break
Example #2
def main_worker(id, video_file, camera_model, K, D, R, T, measurements, quit_event):
    # Setup video displays
    video_disp = Display({'name': 'Camera_{}'.format(id)})

    # Get input video
    video = Video(video_file)

    # Setup the undistortion stuff
    if camera_model == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into the region beyond the original image region
        new_K = cv2.getOptimalNewCameraMatrix(K, D, img_size, 0.35)[0]
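        # (With getOptimalNewCameraMatrix, alpha=0 keeps only valid
        # pixels while alpha=1 keeps every source pixel; 0.35 is a
        # compromise between the two.)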

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size = (int(img_size[0] + (new_K[0, 2] - K[0, 2])), int(
            img_size[1] + (new_K[1, 2] - K[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.initUndistortRectifyMap(
            K, D, None, new_K, new_img_size, cv2.CV_16SC2)
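        # CV_16SC2 stores the maps in a compact fixed-point representation,
        # which speeds up the per-frame cv2.remap() call below.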

    elif camera_model == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into the region beyond the original image region. The alpha
        # parameter in the pinhole model is equivalent to the balance parameter here.
        new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K, D, img_size, np.eye(3), balance=1)
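        # (Roughly speaking, balance=0 crops to only the valid pixels,
        # while balance=1 keeps the full original field of view at the
        # cost of black border regions.)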

        # Then calculate new image size according to the scaling
        # Well, if they forgot this in the pinhole Python API,
        # we can't complain about the fisheye model either. Note
        # the reversed indexing here too.
        new_img_size = (int(img_size[0] + (new_K[0, 2] - K[0, 2])), int(
            img_size[1] + (new_K[1, 2] - K[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            K, D, np.eye(3), new_K, new_img_size, cv2.CV_16SC2)

    # Set up foreground and background separation
    fgbg = cv2.createBackgroundSubtractorMOG2()

    # Structuring element that will be used for morphological opening
    # (erosion followed by dilation) to remove small noise specks
    kernel = np.ones((6, 6), np.uint8)

    # Code commented out because confidence is not
    # currently used, but it could be re-enabled
    # with changes later
    # # Will be used for histogram comparison
    # # (Confidence measure)
    # ball_image_file = 'ball_image.jpg'
    # ball_image = cv2.imread(ball_image_file)


    # 2D ball detection and 3D ball tracking setup
    ball_position_frame = None
    ball_wc = [0, 0, 0]

    while not video.end_reached() and not quit_event.value:
        # Get each frame
        frame = video.next_frame()

        # Undistort the current frame
        img_undistorted = cv2.remap(
            frame, map1, map2, cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

        # Convert to HSV and threshold range of ball
        img_hsv = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(img_hsv, np.array(
            (15, 190, 200)), np.array((25, 255, 255)))

        # Foreground and background separation mask
        fgmask = fgbg.apply(img_undistorted)
        mask_color_bgs = cv2.bitwise_and(mask, mask, mask=fgmask)
        masked_and_opened = cv2.morphologyEx(
            mask_color_bgs, cv2.MORPH_OPEN, kernel)

        # Hough transform to detect ball (circle)
        circles = cv2.HoughCircles(masked_and_opened, cv2.HOUGH_GRADIENT, dp=3,
                                   minDist=2500, param1=300, param2=5, minRadius=3, maxRadius=30)
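        # Note: minDist=2500 exceeds the image diagonal, so at most one
        # circle is returned per frame; the low accumulator threshold
        # param2=5 presumably works because the opened mask is already sparse.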
        if circles is not None:
            # Make indexing easier and
            # convert everything to int
            circles = circles[0, :]
            circles = np.round(circles).astype("int")

            # Take only the first
            # (and hopefully largest)
            # circle detected
            x, y, r = circles[0]
            ball_position_frame = [x - r, y - r, 2 * r, 2 * r]
        else:
            ball_position_frame = None

        # Determine the correct ball radius
        mask_ball_radius = cv2.bitwise_and(fgmask, fgmask, mask=cv2.inRange(
            img_hsv, np.array((10, 150, 180)), np.array((40, 255, 255))))
        if ball_position_frame:
            x1, y1, w1, h1 = ball_position_frame
            ball_crop_temp = mask_ball_radius[(
                y1 + h1 // 2 - 50):(y1 + h1 // 2 + 50), (x1 + w1 // 2 - 50):(x1 + w1 // 2 + 50)]
            height, width = ball_crop_temp.shape
            if height and width:
                # Successfully cropped image
                ball_crop = ball_crop_temp
                cnts = cv2.findContours(
                    ball_crop.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    ellipse = cv2.fitEllipse(c)
                    width = min(ellipse[1])
                    ball_position_frame = [
                        ball_position_frame[0], ball_position_frame[1], 2 * width, 2 * width]

                # Code commented out because confidence is not
                # currently used, but it could be re-enabled
                # with changes later
                # # Calculate confidence
                # confidence = histogram_comparison(ball_image, img_undistorted, ball_position_frame)
                # print(confidence)

        if ball_position_frame:
            x1, y1, w1, h1 = ball_position_frame
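            # Back-project the detection into camera coordinates with the
            # pinhole model: depth z follows from the known ball diameter
            # and its apparent size (z = f * D / d), and x, y come from
            # inverting the projection u = f * x / z + c_x, with lengths
            # converted between pixels and mm via pixels_per_mm.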
            pixels_per_mm = (
                K[0, 0] + K[1, 1]) / 2 / DEFAULT_FOCAL_LENGTH
            z = PING_PONG_DIAMETER * \
                DEFAULT_FOCAL_LENGTH / (w1 / pixels_per_mm)
            x = ((x1 - K[0, 2]) /
                 pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH
            y = ((y1 - K[1, 2]) /
                 pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH
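            # R and T are assumed to be the world-to-camera extrinsics, so
            # the camera-to-world transform below is X_w = R^T (X_c - T);
            # the division by 1000 presumably converts mm to metres.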

            ball_cc = np.array([x, y, z]) / 1000
            ball_wc = np.dot(R.T, ball_cc - T.ravel())

        # Push measurements to be processed/visualized
        measurement = {
            'id': id,
            'frame_num': video.get_cur_frame_num(),
            'ball_ic': ball_position_frame,
            'ball_wc': ball_wc
        }
        measurements.put(measurement)

        # Update video display
        video_disp.refresh(img_undistorted)

        # Add quitting event
        if video_disp.can_quit():
            break

    # Setting this will signal
    # the other parallel process
    # to exit too.
    quit_event.value = 1
Example #3
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('new_calib_{}.npz'.format(args['model'].lower()),
                             'calib')
    calib = load_config.load()

    # Setup video displays
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames // 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Undistort according to pinhole model
        if args['model'].upper() == 'P':
            # Make sure the distortion coefficients
            # follow the pinhole model
            if calib['dist_coeffs'].shape[1] != 5:
                print('Input configuration probably not pinhole')
                return

            # Hardcoded image size as
            # this is a test script
            img_size = (1920, 1080)

            # First create scaled intrinsics because we will undistort
            # into the region beyond the original image region
            new_calib_matrix, _ = cv2.getOptimalNewCameraMatrix(
                calib['camera_matrix'], calib['dist_coeffs'], img_size, 0.35)

            # Then calculate new image size according to the scaling
            # Unfortunately the Python API doesn't directly provide
            # the new image size. They forgot?
            new_img_size = (
                int(img_size[0] +
                    (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
                int(img_size[1] +
                    (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.initUndistortRectifyMap(calib['camera_matrix'],
                                                     calib['dist_coeffs'],
                                                     None, new_calib_matrix,
                                                     new_img_size,
                                                     cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)
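            # Note: unlike the worker script above, these maps are rebuilt
            # every frame even though they only depend on the calibration;
            # outside a test script they could be computed once before the
            # loop (the same applies to the fisheye branch below).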

        # Undistort according to fisheye model
        elif args['model'].upper() == 'F':
            # Make sure the distortion coefficients
            # follow the fisheye model
            if calib['dist_coeffs'].shape[0] != 4:
                print('Input configuration probably not fisheye')
                return

            # Hardcoded image size as
            # this is a test script.
            # As already ranted, someone
            # messed with the image size
            # indexing and reversed it.
            img_size = (1920, 1080)

            # Also, the basic undistortion DOES NOT work
            # with the fisheye module
            # img_undistorted = cv2.fisheye.undistortImage(
            #   frame, calib['camera_matrix'], calib['dist_coeffs'])

            # First create scaled intrinsics because we will undistort
            # into the region beyond the original image region. The alpha
            # parameter in the pinhole model is equivalent to the balance parameter here.
            new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
                calib['camera_matrix'],
                calib['dist_coeffs'],
                img_size,
                np.eye(3),
                balance=1)

            # Then calculate new image size according to the scaling
            # Well, if they forgot this in the pinhole Python API,
            # we can't complain about the fisheye model either. Note
            # the reversed indexing here too.
            new_img_size = (
                int(img_size[0] +
                    (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
                int(img_size[1] +
                    (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(
                calib['camera_matrix'], calib['dist_coeffs'], np.eye(3),
                new_calib_matrix, new_img_size, cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)

        # Update GUI with new image
        video_disp.refresh(img_undistorted)

        # Service the s key to save image
        if video_disp.key_pressed('s'):
            cur_frame_num = video.get_cur_frame_num()
            orig_img_file_name = 'image_for_markers_orig.png'
            undistorted_img_file_name = 'image_for_markers_undistorted.png'
            if cv2.imwrite(orig_img_file_name, frame):
                print('Saved original {} at frame {}'.format(
                    orig_img_file_name, cur_frame_num))
            if cv2.imwrite(undistorted_img_file_name, img_undistorted):
                print('Saved undistorted {} at frame {}'.format(
                    undistorted_img_file_name, cur_frame_num))

        # Add quitting event
        if video_disp.can_quit():
            break