Example #1
def get_mask(i_subject, i_seq, i_cam, i_frame):
    chroma_frame = improc.imread_jpeg(
        f'{paths.DATA_ROOT}/3dhp/S{i_subject}/Seq{i_seq}/FGmasks/img_{i_cam}_{i_frame:06d}.jpg'
    )
    person_box = get_box(i_subject, i_seq, i_cam, i_frame)

    is_fg = chroma_frame[..., 0] > 100
    n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        is_fg.astype(np.uint8), 4, cv2.CV_32S)
    component_boxes = stats[:, :4]  # [x, y, w, h] of each connected component
    ious = [
        boxlib.iou(component_box, person_box)
        for component_box in component_boxes
    ]
    ious[0] = 0  # component 0 is the background, exclude it from the matching
    person_label = np.argmax(ious)
    mask = (labels == person_label).astype(np.uint8)

    # Remove foreground pixels that are far from the person box
    intbox = boxlib.intersect(boxlib.full_box((2048, 2048)),
                              boxlib.expand(person_box, 1.3)).astype(int)
    mask[:intbox[1]] = 0
    mask[:, :intbox[0]] = 0
    mask[:, intbox[0] + intbox[2]:] = 0
    mask[intbox[1] + intbox[3]:] = 0
    encoded_mask = improc.encode_mask(mask)
    return encoded_mask
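The component-to-person matching above hinges on boxlib.iou over [x, y, w, h] boxes. Since boxlib is not shown in this section, the following is a minimal sketch of what that computation is assumed to look like (iou_xywh is a hypothetical helper name, not the project's API):

def iou_xywh(box_a, box_b):
    # Intersection-over-union of two [x, y, width, height] boxes,
    # which is the convention the component matching above appears to use.
    ax, ay, aw, ah = box_a
    bx, by, bw, bh = box_b
    ix1, iy1 = max(ax, bx), max(ay, by)
    ix2, iy2 = min(ax + aw, bx + bw), min(ay + ah, by + bh)
    inter = max(ix2 - ix1, 0) * max(iy2 - iy1, 0)
    union = aw * ah + bw * bh - inter
    return inter / union if union > 0 else 0.0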
Example #2
def make_efficient_example(ex,
                           further_expansion_factor=1,
                           further_scale_up=1,
                           dir_suffix=''):
    """Make example by storing the image in a cropped and resized version for efficient loading"""

    # Determine which area we will need from the image
    # This is a bit larger than the tight crop because of the geometric augmentations
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85 * further_scale_up
    scale_down_factor = 1 / 0.85
    shift_factor = 1.1
    base_dst_side = 256

    box_center = boxlib.center(ex.bbox)
    s, c = np.sin(max_rotate), np.cos(max_rotate)
    w, h = ex.bbox[2:]
    rot_bbox_side = max(c * w + s * h, c * h + s * w)
    rot_bbox = boxlib.box_around(box_center, rot_bbox_side)

    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor,
                       1)
    expansion_factor = (padding_factor * shift_factor * scale_down_factor *
                        further_expansion_factor)
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox,
                                     np.array([0, 0, 1000, 1000]))

    new_camera = copy.deepcopy(ex.camera)
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()

    new_im_relpath = ex.image_path.replace('h36m',
                                           f'h36m_downscaled{dir_suffix}')
    new_im_path = f'{paths.DATA_ROOT}/{new_im_relpath}'
    if not (util.is_file_newer(new_im_path, "2019-11-14T23:33:14")
            and improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(ex.image_path)
        dst_shape = improc.rounded_int_tuple(scale_factor *
                                             expanded_bbox[[3, 2]])
        new_im = cameralib.reproject_image(im, ex.camera, new_camera,
                                           dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(
        ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])
    ex = ps3d.Pose3DExample(new_im_relpath,
                            ex.world_coords,
                            new_bbox,
                            new_camera,
                            activity_name=ex.activity_name)
    return ex
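The factors defined at the top of this function reserve headroom for the geometric augmentations applied at training time: rotating a w-by-h box by max_rotate enlarges its bounding square, and padding, shifting and scale-down each add a further multiplicative margin. A small numeric sketch of that arithmetic, with made-up box dimensions purely for illustration:

import numpy as np

max_rotate = np.pi / 6
w, h = 300.0, 600.0  # illustrative person box size
s, c = np.sin(max_rotate), np.cos(max_rotate)
rot_bbox_side = max(c * w + s * h, c * h + s * w)  # bounding square side after rotating by max_rotate
expansion_factor = (1 / 0.85) * 1.1 * (1 / 0.85)   # padding * shift * scale-down margins
print(rot_bbox_side, rot_bbox_side * expansion_factor)  # approx. 669.6 and 1019.5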
Example #3
def make_efficient_example(ex, root_muco, i_person):
    image_relpath = ex.image_path
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.2
    base_dst_side = 256
    box_center = boxlib.center(ex.bbox)
    s = np.sin(max_rotate)
    c = np.cos(max_rotate)
    rot_bbox_size = (np.array([[c, s], [s, c]]) @ ex.bbox[2:, np.newaxis])[:, 0]
    side = np.max(rot_bbox_size)
    rot_bbox_size = np.array([side, side])
    rot_bbox = boxlib.box_around(box_center, rot_bbox_size)

    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor, 1)
    expansion_factor = padding_factor * shift_factor * scale_down_factor
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox, boxlib.full_box([2048, 2048]))

    new_camera = ex.camera.copy()
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()

    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])
    new_im_path = f'{root_muco}_downscaled/{image_relpath[:-4]}_{i_person:01d}.jpg'
    if not util.is_file_newer(new_im_path, "2020-02-15T23:28:26"):
        im = improc.imread_jpeg(f'{root_muco}/{image_relpath}')
        new_im = cameralib.reproject_image(im, ex.camera, new_camera, dst_shape, antialias_factor=4)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im, quality=95)

    new_bbox_topleft = cameralib.reproject_image_points(ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])

    if ex.mask is None:
        noext = image_relpath[:-4].replace('unaugmented_set_001/', '')
        mask = improc.decode_mask(util.load_pickle(f'{root_muco}/masks/{noext}.pkl'))
    else:
        mask = ex.mask

    if mask is False:
        new_mask_encoded = None
    else:
        new_mask = cameralib.reproject_image(mask, ex.camera, new_camera, dst_shape)
        new_mask_encoded = improc.encode_mask(new_mask)

    return p3ds.Pose3DExample(
        os.path.relpath(new_im_path, paths.DATA_ROOT), ex.world_coords.astype(np.float32),
        new_bbox.astype(np.float32), new_camera, mask=new_mask_encoded,
        univ_coords=ex.univ_coords.astype(np.float32))
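This variant re-crops the person mask together with the image and stores it re-encoded via improc.encode_mask. The project's actual mask format is not shown here; a rough, hypothetical run-length encoding for binary masks could look like the sketch below (encode_mask_sketch / decode_mask_sketch are made-up names):

import numpy as np

def encode_mask_sketch(mask):
    # Store the lengths of alternating runs of the flattened 0/1 mask.
    mask = np.asarray(mask, np.uint8)
    flat = mask.ravel()
    change_points = np.flatnonzero(np.diff(flat)) + 1
    runs = np.diff(np.concatenate([[0], change_points, [flat.size]]))
    return {'shape': mask.shape, 'first_value': int(flat[0]), 'runs': runs}

def decode_mask_sketch(encoded):
    # Rebuild the mask by repeating alternating values according to the run lengths.
    values = np.arange(len(encoded['runs'])) % 2
    if encoded['first_value'] == 1:
        values = 1 - values
    return np.repeat(values.astype(np.uint8), encoded['runs']).reshape(encoded['shape'])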
Example #4
def get_expanded_crop_box(bbox, full_box, further_expansion_factor):
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.1
    s, c = np.sin(max_rotate), np.cos(max_rotate)
    w, h = bbox[2:]
    box_center = boxlib.center(bbox)
    rot_bbox_side = max(c * w + s * h, c * h + s * w)
    rot_bbox = boxlib.box_around(box_center, rot_bbox_side)
    expansion_factor = (padding_factor * shift_factor * scale_down_factor *
                        further_expansion_factor)
    expanded_bbox = boxlib.intersect(boxlib.expand(rot_bbox, expansion_factor),
                                     full_box)
    return expanded_bbox
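get_expanded_crop_box packages the same margin logic that the crop functions above compute inline. The boxlib calls it relies on are not shown in this section; a rough sketch of what box_around and expand are assumed to do under the [x, y, w, h] convention (hypothetical re-implementations, not the project's code):

import numpy as np

def box_around(center, side):
    # An axis-aligned [x, y, w, h] box of the given side length(s), centered at `center`.
    size = np.broadcast_to(np.asarray(side, np.float32), (2,)).copy()
    center = np.asarray(center, np.float32)
    return np.concatenate([center - size / 2, size])

def expand(box, factor):
    # Scale the width and height by `factor` while keeping the box center fixed.
    box = np.asarray(box, np.float32)
    center = box[:2] + box[2:] / 2
    new_size = box[2:] * factor
    return np.concatenate([center - new_size / 2, new_size])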
Example #5
def make_efficient_example(ex, rect_id):
    """Make example by storing the image in a cropped and resized version for efficient loading"""

    # Determine which area of the image we will need, leaving room for rotation,
    # the usual padding around the box, scale (shrink) augmentation and shifting
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.1
    max_rotate = np.pi / 6
    rot_factor = np.sin(max_rotate) + np.cos(max_rotate)
    base_dst_side = 256

    scale_factor = min(base_dst_side / ex.bbox[3] * scale_up_factor, 1)
    hopeful_factor = 0.9
    expansion_factor = (
            rot_factor * padding_factor * shift_factor * scale_down_factor * hopeful_factor)

    expanded_bbox = boxlib.expand(boxlib.expand_to_square(ex.bbox), expansion_factor)
    imsize = improc.image_extents(ex.image_path)
    full_box = np.array([0, 0, imsize[0], imsize[1]])
    expanded_bbox = boxlib.intersect(expanded_bbox, full_box)

    old_camera = cameralib.Camera.create2D()
    new_camera = old_camera.copy()
    new_camera.shift_image(-expanded_bbox[:2])
    new_camera.scale_output(scale_factor)

    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])
    new_im_path = ex.image_path.replace('mpii', 'mpii_downscaled')
    without_ext, ext = os.path.splitext(new_im_path)
    new_im_path = f'{without_ext}_{rect_id:02d}{ext}'

    if not (util.is_file_newer(new_im_path, "2019-11-12T17:54:06") and
            improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(ex.image_path)
        new_im = cameralib.reproject_image(im, old_camera, new_camera, dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(ex.bbox[:2], old_camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])
    new_coords = cameralib.reproject_image_points(ex.coords, old_camera, new_camera)
    ex = Pose2DExample(os.path.relpath(new_im_path, paths.DATA_ROOT), new_coords, bbox=new_bbox)
    return ex
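For this 2D case, shifting the camera by the crop's top-left corner and scaling its output is assumed to act as a plain crop-and-rescale on image points, which is how new_coords and new_bbox are obtained above. A minimal sketch under that assumption (crop_and_rescale_points is a hypothetical name, not cameralib's API):

import numpy as np

def crop_and_rescale_points(points, crop_topleft, scale_factor):
    # Translate by the crop's top-left corner, then scale to the output resolution.
    points = np.asarray(points, np.float32)
    return (points - np.asarray(crop_topleft, np.float32)) * scale_factor

Under that reading, new_bbox_topleft above would simply equal (ex.bbox[:2] - expanded_bbox[:2]) * scale_factor.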
Example #6
def make_muco():
    joint_info, selected_joints = make_joint_info()

    root_3dhp = f'{paths.DATA_ROOT}/3dhp'
    root_muco = f'{paths.DATA_ROOT}/muco'
    sample_info = np.load(f'{root_muco}/composite_frame_origins.npy')
    n_all_joints = 28
    valid_indices = list(np.load(f'{root_muco}/valid_composite_frame_indices.npy'))
    all_detections = util.load_pickle(f'{root_muco}/yolov3_detections.pkl')
    all_detections = np.array([all_detections[k] for k in sorted(all_detections.keys())])
    all_visible_boxes = np.load(f'{root_muco}/visible_boxes.npy')
    matloader = functools.lru_cache(1024)(matlabfile.load)

    @functools.lru_cache(1024)
    def get_world_coords(i_subject, i_seq, i_cam, anno_name):
        seqpath = f'{root_3dhp}/S{i_subject}/Seq{i_seq}'
        anno_file = matloader(f'{seqpath}/annot.mat')
        camcoords = anno_file[anno_name][i_cam].reshape(
            [-1, n_all_joints, 3])[:, selected_joints]
        camera = load_cameras(f'{seqpath}/camera.calibration')[i_cam]
        world_coords = [camera.camera_to_world(c) for c in camcoords]
        return world_coords

    examples = []

    with util.BoundedPool(None, 120) as pool:
        for i_sample, people, detections, visible_boxes in zip(
                util.progressbar(valid_indices), sample_info[valid_indices],
                all_detections[valid_indices], all_visible_boxes[valid_indices]):

            detections = [box for box in detections if box[-1] > 0.1]  # confidence threshold
            if not detections:
                continue

            filename = f'{i_sample + 1:06d}.jpg'
            image_relpath = f'unaugmented_set_001/{filename[:2]}/{filename[:4]}/{filename}'

            gt_people = []
            for i_person, ((i_subject, i_seq, i_cam, i_frame), visible_box) in enumerate(
                    zip(people, visible_boxes)):
                seqpath = f'{root_3dhp}/S{i_subject}/Seq{i_seq}'
                world_coords = get_world_coords(i_subject, i_seq, i_cam, 'annot3')[i_frame]
                univ_world_coords = get_world_coords(
                    i_subject, i_seq, i_cam, 'univ_annot3')[i_frame]
                camera = load_cameras(f'{seqpath}/camera.calibration')[i_cam]

                im_coords = camera.world_to_image(world_coords)
                coord_bbox = boxlib.expand(boxlib.intersect(
                    boxlib.bb_of_points(im_coords),
                    boxlib.full_box([2048, 2048])), 1.05)
                bbox = boxlib.intersect_vertical(visible_box, coord_bbox)

                ex = p3ds.Pose3DExample(
                    image_relpath, world_coords, bbox, camera, mask=None,
                    univ_coords=univ_world_coords)
                gt_people.append(ex)

            if not gt_people:
                continue

            # Match ground-truth people to detector boxes by maximizing total IoU
            iou_matrix = np.array([[boxlib.iou(gt_person.bbox, box[:4])
                                    for box in detections]
                                   for gt_person in gt_people])
            gt_indices, det_indices = scipy.optimize.linear_sum_assignment(-iou_matrix)

            for i_gt, i_det in zip(gt_indices, det_indices):
                gt_box = gt_people[i_gt].bbox
                det_box = detections[i_det]
                if (iou_matrix[i_gt, i_det] > 0.1 and
                        boxlib.area(det_box) < 2 * boxlib.area(gt_box)):
                    ex = gt_people[i_gt]
                    ex.bbox = np.array(detections[i_det][:4])
                    pool.apply_async(make_efficient_example, (ex, root_muco, i_gt),
                                     callback=examples.append)

    examples.sort(key=lambda ex: ex.image_path)
    return p3ds.Pose3DDataset(joint_info, examples)
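make_muco assigns ground-truth people to detector boxes by maximizing total IoU: negating the IoU matrix turns scipy's cost-minimizing Hungarian solver into a maximizer. A self-contained toy version of that step (the IoU values are invented for illustration):

import numpy as np
import scipy.optimize

# Rows are ground-truth people, columns are detections.
iou_matrix = np.array([[0.05, 0.70, 0.10],
                       [0.60, 0.15, 0.05]])
gt_indices, det_indices = scipy.optimize.linear_sum_assignment(-iou_matrix)
for i_gt, i_det in zip(gt_indices, det_indices):
    if iou_matrix[i_gt, i_det] > 0.1:  # keep only sufficiently overlapping pairs
        print(f'ground truth {i_gt} -> detection {i_det} (IoU {iou_matrix[i_gt, i_det]:.2f})')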
Example #7
def make_efficient_example(ex):
    image_relpath = ex.image_path
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.2
    base_dst_side = 256

    box_center = boxlib.center(ex.bbox)
    s, c = np.sin(max_rotate), np.cos(max_rotate)
    w, h = ex.bbox[2:]
    rot_bbox_side = max(c * w + s * h, c * h + s * w)
    rot_bbox = boxlib.box_around(box_center, rot_bbox_side)

    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor,
                       1)
    expansion_factor = padding_factor * shift_factor * scale_down_factor
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox,
                                     np.array([0, 0, 2048, 2048]))

    new_camera = ex.camera.copy()
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()
    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])

    new_im_relpath = ex.image_path.replace('3dhp', '3dhp_downscaled')
    new_im_path = os.path.join(paths.DATA_ROOT, new_im_relpath)
    if not (util.is_file_newer(new_im_path, "2019-11-14T23:32:07")
            and improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(f'{paths.DATA_ROOT}/{image_relpath}')
        new_im = cameralib.reproject_image(im, ex.camera, new_camera,
                                           dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(
        ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])

    mask_rle_relpath = new_im_relpath.replace('Images', 'FGmaskImages').replace(
        '.jpg', '.pkl')
    mask_rle_path = os.path.join(paths.DATA_ROOT, mask_rle_relpath)
    if util.is_file_newer(mask_rle_path, "2020-03-11T20:46:46"):
        mask_runlength = util.load_pickle(mask_rle_path)
    else:
        mask_relpath = ex.image_path.replace('Images', 'FGmaskImages').replace(
            '.jpg', '.png')
        mask = imageio.imread(os.path.join(paths.DATA_ROOT, mask_relpath))
        mask_reproj = cameralib.reproject_image(mask, ex.camera, new_camera,
                                                dst_shape)
        mask_runlength = get_mask_with_highest_iou(mask_reproj, new_bbox)
        util.dump_pickle(mask_runlength, mask_rle_path)

    return p3ds.Pose3DExample(new_im_relpath,
                              ex.world_coords,
                              new_bbox,
                              new_camera,
                              mask=mask_runlength,
                              univ_coords=ex.univ_coords)
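Most of these crop functions regenerate the downscaled image or mask only when the cached file is missing or older than a hard-coded cutoff date. A rough sketch of what such a freshness check could look like (a hypothetical re-implementation, not util.is_file_newer itself; timezone handling is glossed over):

import datetime
import os

def is_file_newer_sketch(path, timestamp_iso):
    # True if `path` exists and was modified after the given ISO-8601 cutoff.
    if not os.path.exists(path):
        return False
    cutoff = datetime.datetime.fromisoformat(timestamp_iso).timestamp()
    return os.path.getmtime(path) > cutoff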
Example #8
def get_box(i_subject, i_seq, i_cam, i_frame):
    imcoords = get_coords(i_subject, i_seq, i_cam, 'annot3')[2][i_frame]
    box = boxlib.expand(boxlib.bb_of_points(imcoords), 1.05)
    return boxlib.intersect(boxlib.full_box((2048, 2048)),
                            box).astype(np.float32)
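get_box builds the person box from the projected joint coordinates, expands it by 5% and clips it to the 2048x2048 frame. The bounding-box-of-points operation it assumes from boxlib could look like this (hypothetical helper, same [x, y, w, h] convention):

import numpy as np

def bb_of_points(points):
    # Tight axis-aligned [x, y, w, h] box around a set of 2D points.
    points = np.asarray(points, np.float32)
    topleft = points.min(axis=0)
    return np.concatenate([topleft, points.max(axis=0) - topleft])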