Code example #1
    def __call__(self, dense_keypoints0):
        dense_keypoints1 = self.affine(dense_keypoints0)

        mask = is_in_image_range(dense_keypoints1, self.image_shape)

        dense_keypoints1[mask] = self._tracker.optimize(dense_keypoints1[mask])
        return dense_keypoints1
Code example #2
def calc_pose_update(camera_model1, residuals, GX1, GY1, P1, weights):
    assert(GX1.shape == GY1.shape)
    us1 = camera_model1.unnormalize(pi(P1))
    mask = is_in_image_range(us1, GX1.shape) & (P1[:, 2] > 0)

    if not np.any(mask):
        # every warped point is out of the image range or behind the camera
        return None

    r = residuals[mask]
    p1 = P1[mask]
    gx1 = interpolation(GX1, us1[mask])
    gy1 = interpolation(GY1, us1[mask])

    J = calc_jacobian(camera_model1.camera_parameters.focal_length,
                      gx1, gy1, p1)

    if weights is None:
        return solve_linear_equation(J, r)
    if isinstance(weights, str):
        weights = compute_weights(weights, r)
        return solve_linear_equation(J, r, weights)

    weights = weights.flatten()[mask]
    return solve_linear_equation(J, r, weights)
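
calc_pose_update returns None when every warped point is invalid, and otherwise solves for a pose update with one of three weighting modes: no weights, a weighting scheme named by a string and passed to compute_weights, or a precomputed weight array. A hypothetical caller handling the None case (current_pose is a placeholder name, not part of the code above):

    xi = calc_pose_update(camera_model1, residuals, GX1, GY1, P1, weights=None)
    if xi is None:
        # every warped point was out of the image or behind the camera
        return current_pose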
Code example #3
def keypoints_from_new_area(image1, flow01):
    """Extract keypoints from newly observed image area"""
    keypoints1 = extract_dense_keypoints(image1)
    # out of image range after transforming from frame1 to frame0
    # we assume image1.shape == image0.shape
    mask = ~is_in_image_range(flow01.inverse(keypoints1), image1.shape)
    return keypoints1[mask]
Code example #4
File: test_utils.py  Project: IshitaTakeshi/Tadataka
def test_is_in_image_range():
    width, height = 20, 30
    image_shape = (height, width)

    keypoints = np.array([
        #     x   y
        [19, 29],
        [19, 0],
        [0, 29],
        [-1, 29],
        [19, -1],
        [20, 29],
        [19, 30],
        [20, 30]
    ])

    expected = np.array([True, True, True, False, False, False, False, False])

    assert_array_equal(is_in_image_range(keypoints, image_shape), expected)

    # case where keypoints are given as floats
    keypoints = np.array([
        #    x      y
        [19.00, 29.00],
        [19.01, 29.00],
        [19.00, 29.01],
        [19.01, 29.01],
        [00.00, 00.00],
        [00.00, -0.01],
        [-0.01, 00.00],
        [-0.01, -0.01],
    ])

    expected = np.array([True, False, False, False, True, False, False, False])

    assert_array_equal(is_in_image_range(keypoints, image_shape), expected)

    # case where a 1D array is passed
    assert (is_in_image_range([0, 29], image_shape))
    assert (not is_in_image_range([-1, 29], image_shape))
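
Judging from the assertions above, is_in_image_range treats an (x, y) coordinate as inside the image only when 0 <= x <= width - 1 and 0 <= y <= height - 1, so that interpolation at that coordinate never has to read past the last pixel. A minimal sketch that is consistent with this test, though not necessarily identical to the actual implementation in tadataka.utils:

    import numpy as np

    def is_in_image_range(keypoints, image_shape):
        # keypoints hold (x, y) coordinates; image_shape is (height, width)
        height, width = image_shape[:2]
        P = np.asarray(keypoints)
        xs, ys = P[..., 0], P[..., 1]
        return (0 <= xs) & (xs <= width - 1) & (0 <= ys) & (ys <= height - 1)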
Code example #5
File: metric.py  Project: IshitaTakeshi/Tadataka
def photometric_error(warp, gray_image0, depth_map0, gray_image1):
    # TODO change the argument order
    #    gray_image0, depth_map0, gray_image1
    # -> gray_image0, gray_image1, depth_map0
    # assert(isinstance(warp, LocalWarp2D))

    us0 = image_coordinates(depth_map0.shape)
    us1, depths1 = warp(us0, depth_map0.flatten())

    mask = is_in_image_range(us1, depth_map0.shape)

    i0 = get(gray_image0, us0[mask])
    i1 = interpolation(gray_image1, us1[mask])

    return calc_error_(i0, i1)
Code example #6
    def __call__(self, keypoints0):
        # track keypoints
        keypoints0_ = get_array(keypoints0)
        keypoints1_ = track_(keypoints0_,
                             self.image1, self.flow01, self.lambda_)
        mask1 = is_in_image_range(keypoints1_, self.image1.shape)
        ids0 = get_ids(keypoints0)
        keypoints1 = create_keypoint_frame_(ids0[mask1], keypoints1_[mask1])

        # keypoints extracted from the newly observed image area
        id_start = ids0[-1] + 1  # assign new indices
        new_keypoints1 = keypoints_from_new_area(self.image1, self.flow01)
        new_rows = create_keypoint_frame(id_start, new_keypoints1)

        return pd.concat([keypoints1, new_rows])
Code example #7
File: __init__.py  Project: IshitaTakeshi/Tadataka
def interpolation(image, C):
    """
    Args:
        image (np.ndarary): gray scale image
        coordinates (np.ndarray): coordinates of shape (n_coordinates, 2)
    """

    if not np.ndim(image) == 2:
        raise ValueError("Image have to be a two dimensional array")

    mask = is_in_image_range(C, image.shape)
    if not mask.all():
        raise ValueError(
            "Coordinates {} out of image range".format(C[~mask])
        )

    return interpolation_(image, C)
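
Since interpolation raises a ValueError as soon as any coordinate falls outside the image, callers filter their coordinates with is_in_image_range first, as code examples #2 and #5 do. A hypothetical minimal usage (image and coordinates are placeholder inputs):

    mask = is_in_image_range(coordinates, image.shape)
    intensities = interpolation(image, coordinates[mask])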
Code example #8
    def optimize(self, initial_coordinates):
        """
        Return corrected point coordinates
        """

        assert (np.ndim(initial_coordinates) == 2)
        assert (initial_coordinates.shape[1] == 2)

        coordinates = np.round(initial_coordinates)
        after_decimal = initial_coordinates - coordinates

        coordinates = coordinates.astype(np.int64)

        mask = is_in_image_range(coordinates, self.image_shape)

        P = coordinates[mask]
        P = self.maximizer(P)
        coordinates[mask] = P

        return coordinates + after_decimal
Code example #9
def plot_warp(warp2d, gray_image0, depth_map0, gray_image1):
    from tadataka.interpolation import interpolation
    from tadataka.coordinates import image_coordinates
    from tadataka.metric import photometric_error
    from tadataka.utils import is_in_image_range
    from matplotlib import pyplot as plt

    us0 = image_coordinates(depth_map0.shape)
    depths0 = depth_map0.flatten()
    us1, depths1 = warp2d(us0, depths0)
    mask = is_in_image_range(us1, depth_map0.shape)

    fig = plt.figure()
    E = photometric_error(warp2d, gray_image0, depth_map0, gray_image1)
    fig.suptitle("photometric error = {:.3f}".format(E))

    ax = fig.add_subplot(221)
    ax.set_title("t0 intensities")
    ax.imshow(gray_image0, cmap="gray")

    ax = fig.add_subplot(223)
    ax.set_title("t0 depth")
    ax.imshow(depth_map0, cmap="gray")

    ax = fig.add_subplot(222)
    ax.set_title("t1 intensities")
    ax.imshow(gray_image1, cmap="gray")

    ax = fig.add_subplot(224)
    ax.set_title("predicted t1 intensities")
    height, width = gray_image1.shape
    ax.scatter(us1[mask, 0], us1[mask, 1],
               c=gray_image0[us0[mask, 1], us0[mask, 0]],
               s=0.5,
               cmap="gray")
    ax.set_xlim(0, width)
    ax.set_ylim(height, 0)
    ax.set_aspect('equal')

    plt.show()
Code example #10
def dense_track_triangulation(frame0, frame1):
    # image0/image1 and pose0/pose1 are assumed to be provided by the frames
    image0, image1 = frame0.image, frame1.image
    pose0, pose1 = frame0.pose, frame1.pose

    features0 = extract_dense_features(image0)
    features1 = extract_dense_features(image1)
    matches01 = match(features0, features1)

    affine = estimate_affine_transform(features0.keypoints[matches01[:, 0]],
                                       features1.keypoints[matches01[:, 1]])

    dense_keypoints0 = extract_curvature_extrema(image0)
    dense_keypoints1 = affine.transform(dense_keypoints0)

    mask = is_in_image_range(dense_keypoints1, image1.shape)

    et = ExtremaTracker(compute_image_curvature(rgb2gray(image1)),
                        lambda_=10.0)
    dense_keypoints1[mask] = et.optimize(dense_keypoints1[mask])

    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.imshow(image0)
    ax.scatter(dense_keypoints0[mask, 0],
               dense_keypoints0[mask, 1],
               s=0.1,
               c='red')
    ax = fig.add_subplot(122)
    ax.imshow(image1)
    ax.scatter(dense_keypoints1[mask, 0],
               dense_keypoints1[mask, 1],
               s=0.1,
               c='red')
    plt.show()

    points, depth_mask = TwoViewTriangulation(pose0, pose1).triangulate(
        frame0.camera_model.undistort(dense_keypoints0[mask]),
        frame1.camera_model.undistort(dense_keypoints1[mask]))

    plot_map([pose0, pose1], points)
Code example #11
    def normalize(self, us):
        assert (is_in_image_range(us, self.image_shape).all())
        return _normalize(self._xs_map_0, self._xs_map_1, us)
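
The assertion above means callers are expected to mask out-of-range points before normalizing, so the same guard pattern applies here as well. A hypothetical sketch, where camera_model stands in for whatever object owns this method and image_shape is the attribute used above:

    mask = is_in_image_range(us, camera_model.image_shape)
    xs = camera_model.normalize(us[mask])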