Code example #1
def random_sized_crop(image, keypoints, bbox):
    image, param = transforms.random_sized_crop(
        image,
        scale_ratio_range=(0.5, 5),
        aspect_ratio_range=(3 / 4, 4 / 3),
        return_param=True
    )

    # Shift keypoints into the coordinate frame of the cropped image.
    keypoints = [
        transforms.translate_point(points,
                                   x_offset=-param['x_slice'].start,
                                   y_offset=-param['y_slice'].start
                                   )
        for points in keypoints
    ]

    _, cropped_H, cropped_W = image.shape

    # Shift the bounding boxes into the cropped frame as well.
    bbox = translate_bbox(
        bbox,
        size=(cropped_H, cropped_W),
        x_offset=-param['x_slice'].start,
        y_offset=-param['y_slice'].start,
    )

    return image, keypoints, bbox, {random_sized_crop.__name__: param}
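A minimal, self-contained sketch of the same crop-then-translate pattern, assuming ChainerCV is installed; the image and keypoint arrays below are dummy data, not taken from the project above.

import numpy as np
from chainercv import transforms

# Dummy CHW image and a list with one set of (y, x) keypoints.
image = np.zeros((3, 240, 320), dtype=np.float32)
keypoints = [np.random.uniform(0., 200., size=(17, 2))]

cropped, param = transforms.random_sized_crop(image, return_param=True)
# Shift every keypoint into the coordinate frame of the cropped image.
keypoints = transforms.translate_point(keypoints,
                                       y_offset=-param['y_slice'].start,
                                       x_offset=-param['x_slice'].start)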
Code example #2
def resize_contain(image, joint_zyx, camera, size, fill=0, return_param=False):
    _, inH, inW = image.shape
    resized, resize_param = transforms.resize_contain(
        image,
        size=size,
        return_param=True,
        fill=fill,
    )
    y_scale, x_scale = resize_param["scaled_size"] / np.array([inH, inW])

    # Project the joints, then move them into the resized frame step by step.
    vu = camera.zyx2vu(joint_zyx.copy())
    vu = np.expand_dims(vu, axis=0)
    vu = transforms.resize_point(vu,
                                 in_size=(inH, inW),
                                 out_size=resize_param["scaled_size"])
    vu = transforms.translate_point(vu,
                                    y_offset=resize_param["y_offset"],
                                    x_offset=resize_param["x_offset"])

    camera_scaled = camera.scale_camera(y_scale=y_scale, x_scale=x_scale)
    camera_resized = camera_scaled.translate_camera(
        y_offset=resize_param["y_offset"], x_offset=resize_param["x_offset"])
    # Equivalently, project with the adjusted camera; this overwrites the
    # value computed step by step above.
    vu = camera_resized.zyx2vu(joint_zyx)
    return resized, vu, camera_resized
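The camera object used above (zyx2vu, scale_camera, translate_camera) belongs to the surrounding project, not to ChainerCV. A hypothetical minimal pinhole camera, sketched only to show why projecting with the scaled and translated camera should match resizing and then translating the projected (v, u) points:

import numpy as np

class PinholeCamera:
    """Illustrative stand-in for the project's camera class (an assumption)."""

    def __init__(self, fy, fx, cy, cx):
        self.fy, self.fx, self.cy, self.cx = fy, fx, cy, cx

    def zyx2vu(self, zyx):
        # zyx: (K, 3) array of (z, y, x) camera coordinates.
        z, y, x = zyx[:, 0], zyx[:, 1], zyx[:, 2]
        v = self.fy * y / z + self.cy
        u = self.fx * x / z + self.cx
        return np.stack([v, u], axis=1)

    def scale_camera(self, y_scale, x_scale):
        # Resizing the image scales both the focal lengths and the principal
        # point, so projected points scale by the same factors.
        return PinholeCamera(self.fy * y_scale, self.fx * x_scale,
                             self.cy * y_scale, self.cx * x_scale)

    def translate_camera(self, y_offset, x_offset):
        # Padding or cropping only shifts the principal point, so projected
        # points shift by the same offsets.
        return PinholeCamera(self.fy, self.fx,
                             self.cy + y_offset, self.cx + x_offset)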
Code example #3
    def test_translate_point_ndarray(self):
        point = np.random.uniform(low=0., high=32., size=(3, 10, 2))

        out = translate_point(point, y_offset=3, x_offset=5)
        expected = np.empty_like(point)
        expected[:, :, 0] = point[:, :, 0] + 3
        expected[:, :, 1] = point[:, :, 1] + 5
        np.testing.assert_equal(out, expected)
Code example #4
    def test_translate_point_list(self):
        point = [
            np.random.uniform(low=0., high=32., size=(12, 2)),
            np.random.uniform(low=0., high=32., size=(10, 2))
        ]

        out = translate_point(point, y_offset=3, x_offset=5)
        for i, pnt in enumerate(point):
            expected = np.empty_like(pnt)
            expected[:, 0] = pnt[:, 0] + 3
            expected[:, 1] = pnt[:, 1] + 5
            np.testing.assert_equal(out[i], expected)
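The two tests above pin down the behavior of translate_point: the offsets are added to the y (axis 0) and x (axis 1) coordinates, for both ndarray and list inputs. A tiny usage sketch, assuming ChainerCV is installed:

import numpy as np
from chainercv import transforms

# One instance with two (y, x) points, shape (1, 2, 2).
point = np.array([[[10., 20.], [30., 40.]]])
print(transforms.translate_point(point, y_offset=3, x_offset=5))
# [[[13. 25.]
#   [33. 45.]]]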
Code example #5
File: dataset.py  Project: liuxingyuxx/ppn
    def transform(self, image, keypoints, bbox, is_labeled):
        _, H, W = image.shape
        # PCA Lighting
        image = transforms.pca_lighting(image, sigma=5)

        # Random rotate
        degree = np.random.uniform(-40, 40)
        image, keypoints, bbox = rotate(image, keypoints, bbox, degree)
        # Random flip
        image, param = transforms.random_flip(image,
                                              x_random=True,
                                              return_param=True)
        if param['x_flip']:
            keypoints = [
                transforms.flip_point(points, (H, W),
                                      x_flip=True)[self.flip_indices]
                for points in keypoints
            ]

            is_labeled = [label[self.flip_indices] for label in is_labeled]

            new_bbox = []
            for x, y, w, h in bbox:
                # Flip the right edge (y, x + w); in the mirrored image it
                # becomes the new left edge of the box.
                [[y, x]] = transforms.flip_point(np.array([[y, x + w]]),
                                                 (H, W),
                                                 x_flip=True)
                new_bbox.append([x, y, w, h])
            bbox = new_bbox

        # Random resize
        scalew, scaleh = np.random.uniform(1.0, 2.0, 2)
        resizeW, resizeH = int(W * scalew), int(H * scaleh)
        image, keypoints, bbox = self.resize(image, keypoints, bbox,
                                             (resizeH, resizeW))

        # Random crop
        image, param = transforms.random_sized_crop(image,
                                                    scale_ratio_range=(0.5, 5),
                                                    return_param=True)
        keypoints = [
            transforms.translate_point(points,
                                       x_offset=-param['x_slice'].start,
                                       y_offset=-param['y_slice'].start)
            for points in keypoints
        ]
        new_bbox = []
        for x, y, w, h in bbox:
            new_bbox.append(
                [x - param['x_slice'].start, y - param['y_slice'].start, w, h])
        bbox = new_bbox

        return image, keypoints, bbox, is_labeled
Code example #6
def crop_around_3d_center(subject_id, action, seq_idx, frame_id):
    global image
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    label_3d(ax3)
    ax3.view_init(-90, -90)
    example = get_example(subject_id, action, seq_idx, frame_id)
    joints_zyx = example["world_joints"][:, ::-1]
    vu, z_ = zyx2depth_vu(joints_zyx, return_z=True)
    vu_com, z_com = calc_com(vu, z_)
    zyx_com = depth_vu2zyx(vu_com[np.newaxis], z_com[np.newaxis]).squeeze()
    z_com, y_com, x_com = zyx_com
    [
        xmin,
        ymin,
        xmax,
        ymax,
    ] = [
        x_com-crop3dW/2,
        y_com-crop3dH/2,
        x_com+crop3dW/2,
        y_com+crop3dH/2,
    ]
    [
        [vmin, umin],
        [vmax, umax],
    ] = zyx2depth_vu(np.array([
        [z_com, ymin, xmin],
        [z_com, ymax, xmax],
    ])).astype(int)
    domain = [vmin, umin, vmax, umax]
    depth = example["depth"]
    cropped, crop_param = crop_domain(depth, domain)
    vu = np.expand_dims(vu, axis=0)
    vu = transforms.translate_point(
        vu,
        y_offset=crop_param["y_offset"],
        x_offset=crop_param["x_offset"]
    )
    _, inH, inW = cropped.shape

    if inH < crop2dH or inW < crop2dW:
        cropped = chainercv.transforms.scale(
            cropped, size=max(crop2dH, crop2dW), fit_short=True)
        vu = transforms.resize_point(
            vu,
            in_size=(inH, inW),
            out_size=cropped.shape[1:],
        )
        _, inH, inW = cropped.shape

    resized, resize_param = transforms.resize_contain(
        cropped,
        size=(crop2dH, crop2dW),
        return_param=True,
        fill=define_background(cropped),
    )
    vu = transforms.resize_point(
        vu,
        in_size=(inH, inW),
        out_size=resize_param["scaled_size"]
    )
    vu = transforms.translate_point(
        vu,
        y_offset=resize_param["y_offset"],
        x_offset=resize_param["x_offset"]
    )
    # visualize
    color = [COLOR_MAP[k] for k in KEYPOINT_NAMES]
    vis_image(resized, ax=ax1)
    print(z_com, z_com-crop3dD/2, z_com+crop3dD/2)
    normalized = normalize_depth(resized, z_com, z_size=crop3dD)
    vis_image(normalized, ax=ax2)
    vis_point(point=vu, ax=ax1, color=color)
    vis_point(point=vu, ax=ax2, color=color)
    cropped_zyx = joints_zyx-zyx_com
    vis_point(point=[cropped_zyx], ax=ax3, color=color)
    edge_color = [COLOR_MAP[s, t] for s, t in EDGES]
    vis_edges(point=vu, indices=EDGES, color=edge_color, ax=ax1)
    vis_edges(point=vu, indices=EDGES, color=edge_color, ax=ax2)
    vis_edges(point=[cropped_zyx], indices=EDGES, color=edge_color, ax=ax3)
Code example #7
def crop_around_3d_center(subject_id, action, seq_idx, frame_id):
    global image
    fig = plt.figure(figsize=(10, 5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122, projection="3d")
    label_3d(ax2)
    ax2.view_init(-90, -90)
    example = get_example(subject_id, action, seq_idx, frame_id)
    cam_joints_zyx = example["cam_joints"][:, ::-1]
    vu, z_ = zyx2vu(cam_joints_zyx, return_z=True)
    vu_com, z_com = calc_com(vu, z_)
    zyx_com = vu2zyx(vu_com[np.newaxis], z_com[np.newaxis]).squeeze()
    z_com, y_com, x_com = zyx_com
    [
        xmin,
        ymin,
        xmax,
        ymax,
    ] = [
        x_com-crop3dW/2,
        y_com-crop3dH/2,
        x_com+crop3dW/2,
        y_com+crop3dH/2,
    ]
    [
        [vmin, umin],
        [vmax, umax],
    ] = zyx2vu(np.array([
        [z_com, ymin, xmin],
        [z_com, ymax, xmax],
    ])).astype(int)
    domain = [vmin, umin, vmax, umax]
    img = example["image"]

    cropped, crop_param = crop_domain(img, domain)
    offset_vu = np.array([crop_param["y_offset"], crop_param["x_offset"]])
    vu = np.expand_dims(vu, axis=0)
    vu = transforms.translate_point(
        vu,
        y_offset=crop_param["y_offset"],
        x_offset=crop_param["x_offset"]
    )
    _, inH, inW = cropped.shape
    resized, resize_param = transforms.resize_contain(
        cropped,
        size=(crop2dH, crop2dW),
        return_param=True
    )
    vu = transforms.resize_point(vu, in_size=(
        inH, inW), out_size=resize_param["scaled_size"])
    vu = transforms.translate_point(
        vu,
        y_offset=resize_param["y_offset"],
        x_offset=resize_param["x_offset"]
    )
    # visualize
    color = [COLOR_MAP[k] for k in KEYPOINT_NAMES]
    chainercv.visualizations.vis_image(resized, ax=ax1)
    vis_point(point=vu, ax=ax1, color=color)
    cropped_zyx = cam_joints_zyx-zyx_com
    vis_point(point=[cropped_zyx], ax=ax2, color=color)
    edge_color = [COLOR_MAP[s, t] for s, t in EDGES]
    vis_edges(point=vu, indices=EDGES, color=edge_color, ax=ax1)
    vis_edges(point=[cropped_zyx], indices=EDGES, color=edge_color, ax=ax2)
Code example #8
def crop_all_humans(image, keypoints, bbox, is_labeled):
    _, H, W = image.shape
    aspect = W / H
    param = {}
    if len(keypoints) == 0:
        param['do_nothing'] = True
        return image, keypoints, bbox, param

    kymax = max([np.max(ks[l, 0]) for l, ks in zip(is_labeled, keypoints)])
    kxmax = max([np.max(ks[l, 1]) for l, ks in zip(is_labeled, keypoints)])
    kymin = min([np.min(ks[l, 0]) for l, ks in zip(is_labeled, keypoints)])
    kxmin = min([np.min(ks[l, 1]) for l, ks in zip(is_labeled, keypoints)])

    bxmax = max([b[0] + b[2] for b in bbox])
    bymax = max([b[1] + b[3] for b in bbox])
    bxmin = min([b[0] for b in bbox])
    bymin = min([b[1] for b in bbox])

    ymax = max(kymax, bymax)
    xmax = max(kxmax, bxmax)
    ymin = min(kymin, bymin)
    xmin = min(kxmin, bxmin)

    if (xmax + xmin) / 2 < W / 2:
        x_start = random.randint(0, max(0, int(xmin)))
        y_start = random.randint(0, max(0, int(ymin)))
        y_end = random.randint(min(H, int(ymax)), H)
        ylen = y_end - y_start
        xlen = aspect * ylen
        x_end = min(W, int(x_start + xlen))
        x_slice = slice(x_start, x_end, None)
        y_slice = slice(y_start, y_end, None)
    else:
        x_end = random.randint(min(int(xmax), W), W)
        y_end = random.randint(min(int(ymax), H), H)
        y_start = random.randint(0, max(0, int(ymin)))
        ylen = y_end - y_start
        xlen = aspect * ylen
        x_start = max(0, int(x_end - xlen))
        x_slice = slice(x_start, x_end, None)
        y_slice = slice(y_start, y_end, None)

    cropped = crop(image, y_slice=y_slice, x_slice=x_slice, copy=True)
    _, cropped_H, cropped_W = cropped.shape
    param['x_slice'] = x_slice
    param['y_slice'] = y_slice
    if cropped_H <= 50 or cropped_W <= 50:
        """
        This case, for example, cropped_H=0 will cause an error when try to resize image
        or resize small image to insize will cause low resolution human image.
        To avoid situations, we will stop crop image.
        """
        param['do_nothing'] = True
        return image, keypoints, bbox, param
    image = cropped

    keypoints = [
        transforms.translate_point(
            points, x_offset=-x_slice.start, y_offset=-y_slice.start)
        for points in keypoints
    ]

    bbox = translate_bbox(
        bbox,
        size=(cropped_H, cropped_W),
        x_offset=-x_slice.start,
        y_offset=-y_slice.start,
    )

    return image, keypoints, bbox, param