Example #1
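Helper that processes a single frame: it builds the green-carpet mask on first use, extracts the ball mask, derives a motion mask by comparing against the previous frame's mask, and runs the ball detector.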
def process_frame(frame, prev_mask, carpet_mask):
    # Relies on cv2, numpy (np), img_util, get_mask and the HLS bounds
    # (ball_lowerb, ball_upperb, carpet_lowerb, carpet_upperb) defined in
    # the enclosing scope.
    if carpet_mask is None:
        carpet_mask = img_util.green_carpet_mask(frame,
                                                 carpet_lowerb,
                                                 carpet_upperb)
    mask = get_mask(frame, lowerb=ball_lowerb, upperb=ball_upperb,
                    carpet_mask=carpet_mask)
    if prev_mask is None:
        move_mask = np.zeros_like(mask)
    else:
        # Within the current mask, invert the previous mask: pixels that
        # were not set before mark where the ball has moved to.
        move_mask = cv2.bitwise_not(prev_mask, mask=mask)

    prev_mask = mask
    is_ball, move_mask = img_util.detect_ball(move_mask)
    return is_ball, move_mask, prev_mask, frame
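A minimal driver sketch for this helper, assuming the same enclosing names (img_util, get_mask and the HLS bounds) are available; the clip name is a placeholder:

cap = cv2.VideoCapture('throw.avi')  # placeholder file name
ret, first_frame = cap.read()
# Precompute the carpet mask once so process_frame does not rebuild it.
carpet_mask = img_util.green_carpet_mask(first_frame, carpet_lowerb,
                                         carpet_upperb)
prev_mask = None
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    is_ball, move_mask, prev_mask, frame = process_frame(frame, prev_mask,
                                                         carpet_mask)
    if is_ball:
        cv2.imshow('ball motion', move_mask)
        cv2.waitKey(1)
cap.release()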
Example #2
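Small test routine: it builds a carpet mask from one reference frame, runs the orange-ball preprocessing over a whole clip, and writes both the mask image and the resulting video to disk.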
def test_orange_mask(input_path, out_prefix=None):
    if out_prefix is None:
        out_prefix = os.path.join('output', os.path.basename(input_path))

    frames_orig = video_io.get_frames(input_path)
    # Build the carpet mask once from an arbitrary reference frame.
    carpet_mask = img_util.green_carpet_mask(frames_orig[30],
                                             lowerb_hls=(55, 0, 0),
                                             upperb_hls=(70, 255, 255))
    cv2.imwrite(out_prefix + '_carpet.jpg', carpet_mask)

    # Apply the orange-ball preprocessing to every frame of the clip.
    frames_orange = [
        video_util.preprocess(frame, carpet_mask=carpet_mask)
        for frame in frames_orig]

    write_video(out_prefix + '_orange_mask.avi', frames_orange)
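A hypothetical call, assuming a recorded clip at throws/orange.avi; with the default prefix this writes files like output/orange.avi_carpet.jpg and output/orange.avi_orange_mask.avi:

test_orange_mask('throws/orange.avi')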
Example #3
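Full demo program: it calibrates the camera-to-plane perspective transform, lets the user pick HLS ranges for the carpet, the target and the ball, then repeatedly makes the TRIK robot shoot, records the throw and estimates the ball trajectory and landing point.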
def program(out_dir):
    cap = cv2.VideoCapture(0)
    # Legacy OpenCV 2.x property names (cv2.cv.*); this is Python 2 code.
    cap.set(cv2.cv.CV_CAP_PROP_FPS, 30.0)
    # Other resolutions that were tried: 1920x1080 and 1280x720.
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 864)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    # Calibrate the perspective transform from the camera image to the plane.
    perspective_matrix = get_perspective_transform(cap)
    logger.info('matrix of perspective transform: {}'.format(
        perspective_matrix))

    img = read_from_capture(cap)
    cv2.imwrite(os.path.join(out_dir, 'img.jpg'), img)

    # Find carpet
    lowerb, upperb = gui.get_hls_range(img,
                                       winname='Choose HLS range for carpet')

    logger.info('Carpet HLS range: {} - {}'.format(lowerb, upperb))

    carpet_mask = img_util.green_carpet_mask(img, lowerb, upperb)
    cv2.imwrite(os.path.join(out_dir, 'carpet_mask.jpg'), carpet_mask)

    # Find target
    img = read_from_capture(cap)
    lowerb, upperb = gui.get_hls_range(img,
                                       winname='Choose HLS range for target')
    logger.info('Target HLS range: {} - {}'.format(lowerb, upperb))

    ret, target_contour = img_util.target_contour(img, lowerb, upperb,
                                                  carpet_mask)
    logger.debug('target contour: {}'.format(target_contour))
    if not ret:
        raise ValueError('Cannot find target')

    target_contour_plane = perspective_transform_contour(target_contour,
                                                         perspective_matrix)
    m = cv2.moments(target_contour_plane)
    mass = m['m00']
    # x,y coordinates on plane
    if mass > 0:
        target_coords = m['m10'] / mass, m['m01'] / mass
        logger.info(
            'target coordinates on plane: (x,y) = {}'.format(target_coords))
    else:
        raise ValueError('Cannot find target center')

    cv2.drawContours(img, contours=[target_contour], contourIdx=-1,
                     color=(255, 255, 255),
                     thickness=2)
    img_carpet = cv2.bitwise_and(img, img, mask=carpet_mask)
    cv2.addWeighted(img, 0.25, img_carpet, 0.75, 0, dst=img_carpet)

    cv2.imwrite(os.path.join(out_dir, 'carpet_target.jpg'), img_carpet)

    # Find ball
    img = read_from_capture(cap)
    ball_lowerb, ball_upperb = gui.get_hls_range(img,
                                                 winname='Choose HLS range for ball')

    logger.info('Ball HLS range: {} - {}'.format(ball_lowerb, ball_upperb))

    conn = robot.RobotConnector(winscp_path='C:/TRIKStudio/winscp/WinSCP.com')
    trik = robot.Robot(conn, angle_to_encoder=180 / np.pi)

    traj = None

    def get_coords():
        # Reads the most recent trajectory computed in the loop below and
        # maps its landing point onto the ground plane.
        if traj is None:
            return False, (0, 0)
        else:
            idx, landing_point = tr.get_landing_point(traj)
            if landing_point is not None:
                return True, perspective_transform(landing_point,
                                                   perspective_matrix)
        return False, (0, 0)

    gun_params = {'x': 300, 'y': 50, 'z': 97, 'v': 300, 'g': 981,
                  'alpha_0': 15 * pi / 180, 'phi_0': 0.0, 'gun_length': 10}
    gun_layer = GunLayer(rotate_and_shoot=trik.rotate_and_shoot,
                         get_coords=get_coords, target=target_coords,
                         gun_params=gun_params)

    # Main loop: shoot, record the throw, estimate trajectory and landing point.
    for i in xrange(10):
        out_prefix = os.path.join(out_dir, str(i))

        commands = tuple()
        stdout, stderr = trik.open_and_trikRun(*commands)
        logger.debug('trik: sent command = {}'.format(commands))
        logger.debug('trik: stdout = {}'.format(stdout))
        logger.debug('trik: stderr = {}'.format(stderr))

        logger.info('ready to shoot...')
        gun_layer.shoot_at_target()
        logger.info('ready to capture...')
        frames, mask_ball = video_util.extract_ball_from_capture(cap,
                                                                 max_frames_count=30,
                                                                 skip_count=0,
                                                                 carpet_mask=carpet_mask,
                                                                 get_mask=video_util.get_ball_mask,
                                                                 ball_lowerb=ball_lowerb,
                                                                 ball_upperb=ball_upperb,
                                                                 ball_size=3
                                                                 )

        logger.debug("Frames captured: {}".format(len(frames)))

        traj = tr.get_ball_trajectory(mask_ball)
        if not traj:
            logger.info('trajectory not found')
        else:
            logger.info('trajectory found')
            for x, y in traj:
                center = (int(round(x)), int(round(y)))
                for frame in frames:
                    cv2.circle(frame, center, 3, (0, 0, 255), thickness=-1)
            idx, landing_point = tr.get_landing_point(traj)
            if landing_point is not None:
                logger.info('landing point on plane: {}'.format(
                    perspective_transform(landing_point, perspective_matrix)))

                for frame in frames:
                    x, y = landing_point
                    center = (int(round(x)), int(round(y)))
                    cv2.circle(frame, center, radius=10, color=(255, 255, 255),
                               thickness=1)
        video_io.write_video(out_prefix + '_frames_ball.avi', frames)
        video_io.write_video(out_prefix + '_ball_mask.avi', mask_ball)

    cap.release()
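A hypothetical entry point for this program; the surrounding module is assumed to configure logger via the standard logging package:

if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    out_dir = 'out'  # placeholder output directory
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    program(out_dir)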
Example #4
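Capture routine called from Example #3: it reads frames until ball motion starts and keeps collecting them until the ball disappears again, deriving motion either from the difference of consecutive colour masks or from a MOG background subtractor.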
def extract_ball_from_capture(cap, max_frames_count=-1, skip_count=0,
                              carpet_mask=None, get_mask=None,
                              carpet_lowerb=None,
                              carpet_upperb=None,
                              ball_lowerb=None,
                              ball_upperb=None,
                              ball_size=3):
    """

    Read frames from capture until we detect motion of the ball

    Return tuple of (original frames, ball mask frames)

    :param carpet_lowerb:
    :rtype : (list(np.ndarray), list(np.ndarray))
    """
    frames = []
    mask_frames = []
    # mog = cv2.BackgroundSubtractorMOG(history=5, nmixtures=4,backgroundRatio=0.7)
    mog = cv2.BackgroundSubtractorMOG()
    motion_started = False
    for _ in xrange(skip_count):
        if cap.isOpened():
            cap.read()

    prev_mask = None
    while cap.isOpened():
        if 0 < max_frames_count <= len(frames):
            logger.debug('max frames count reached')
            break

        ret, frame = cap.read()
        if ret:
            if get_mask is not None:
                if carpet_mask is None:
                    carpet_mask = img_util.green_carpet_mask(frame,
                                                             carpet_lowerb,
                                                             carpet_upperb)
                mask = get_mask(frame, lowerb=ball_lowerb, upperb=ball_upperb,
                                carpet_mask=carpet_mask)
                if prev_mask is None:
                    move_mask = np.zeros_like(mask)
                else:
                    move_mask = cv2.bitwise_not(prev_mask, mask=mask)
                prev_mask = mask
            else:
                logger.debug('get_mask = None')
                move_mask = mog.apply(frame)

            is_ball, move_mask = img_util.detect_ball(move_mask, ball_size)
            if is_ball:
                if not motion_started:
                    logger.debug('ball appeared')
                    motion_started = True
                frames.append(frame)
                mask_frames.append(move_mask)
            else:
                if motion_started:
                    logger.debug(
                        'ball disappeared. Frames count: {}'.format(
                            len(frames)))
                    break
                else:
                    continue
        else:
            logger.debug('Cannot read more frames')
            break
    return frames, mask_frames
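A minimal call sketch with placeholder ball HLS bounds (in practice the ranges are picked interactively, as in Example #3); the carpet range is taken from Example #2, and get_ball_mask is assumed to be the same helper referenced there as video_util.get_ball_mask:

cap = cv2.VideoCapture(0)
frames, masks = extract_ball_from_capture(cap,
                                          max_frames_count=30,
                                          carpet_lowerb=(55, 0, 0),
                                          carpet_upperb=(70, 255, 255),
                                          get_mask=get_ball_mask,
                                          ball_lowerb=(5, 40, 40),     # placeholder orange range
                                          ball_upperb=(20, 255, 255),  # placeholder orange range
                                          ball_size=3)
cap.release()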