Example #1
def test_euclidean_init():
    # init with implicit parameters
    rotation = 1
    translation = (1, 1)
    tform = EuclideanTransform(rotation=rotation, translation=translation)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)

    # init with transformation matrix
    tform2 = EuclideanTransform(tform.params)
    assert_almost_equal(tform2.rotation, rotation)
    assert_almost_equal(tform2.translation, translation)

    # test special case if rotation=0
    rotation = 0
    translation = (1, 1)
    tform = EuclideanTransform(rotation=rotation, translation=translation)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)

    # test special case if rotation=90deg
    rotation = np.pi / 2
    translation = (1, 1)
    tform = EuclideanTransform(rotation=rotation, translation=translation)
    assert_almost_equal(tform.rotation, rotation)
    assert_almost_equal(tform.translation, translation)
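For reference, a minimal sketch of the homogeneous matrix behind `tform.params` in the 2D case (values assume the same rotation=1, translation=(1, 1) as above):

import numpy as np
from skimage.transform import EuclideanTransform

tform = EuclideanTransform(rotation=1, translation=(1, 1))
# 2D Euclidean params form the 3x3 homogeneous matrix
# [[cos(r), -sin(r), tx],
#  [sin(r),  cos(r), ty],
#  [     0,       0,  1]]
print(np.round(tform.params, 3))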
Example #2
def test_euclidean_param_defaults():
    # 2D rotation is 0 when only translation is given
    tf = EuclideanTransform(translation=(5, 5))
    assert np.array(tf)[0, 1] == 0
    # off diagonals are 0 when only translation is given
    tf = EuclideanTransform(translation=(4, 5, 9), dimensionality=3)
    assert_equal(np.array(tf)[[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]], 0)
    with pytest.raises(ValueError):
        # specifying parameters for D>3 is not supported
        _ = EuclideanTransform(translation=(5, 6, 7, 8), dimensionality=4)
    with pytest.raises(ValueError):
        # incorrect number of angles for given dimensionality
        _ = EuclideanTransform(rotation=(4, 8), dimensionality=3)
    # translation is 0 when rotation is given
    tf = EuclideanTransform(rotation=np.pi * np.arange(3), dimensionality=3)
    assert_equal(np.array(tf)[:-1, 3], 0)
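As a quick illustration of the translation-only default the assertions above check, a sketch printing the full 3D matrix:

import numpy as np
from skimage.transform import EuclideanTransform

tf = EuclideanTransform(translation=(4, 5, 9), dimensionality=3)
print(np.array(tf))
# [[1. 0. 0. 4.]
#  [0. 1. 0. 5.]
#  [0. 0. 1. 9.]
#  [0. 0. 0. 1.]]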
Example #3
def test_euclidean_estimation():
    # exact solution
    tform = estimate_transform('euclidean', SRC[:2, :], SRC[:2, :] + 10)
    assert_almost_equal(tform(SRC[:2, :]), SRC[:2, :] + 10)
    assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
    assert_almost_equal(tform.params[0, 1], -tform.params[1, 0])

    # over-determined
    tform2 = estimate_transform('euclidean', SRC, DST)
    assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
    assert_almost_equal(tform2.params[0, 1], -tform2.params[1, 0])

    # via estimate method
    tform3 = EuclideanTransform()
    tform3.estimate(SRC, DST)
    assert_almost_equal(tform3.params, tform2.params)
Example #4
def circcentlikl(I, radius, scale=2, n0piAngles=8):
    """Accumulate circle-center evidence: for each of `n0piAngles` Gabor
    orientations, shift the odd (imaginary) filter response by `radius`
    along the orientation in both directions and sum the positive parts."""
    angles = np.arange(0, np.pi, np.pi / n0piAngles)
    A = np.zeros(I.shape)
    for angle in angles:
        K = gabor_kernel(1 / scale, angle).imag
        J = convolve(I, K)
        dx = -radius * np.cos(angle)
        dy = -radius * np.sin(angle)
        T = EuclideanTransform(translation=(-dx, -dy))
        L1 = warp(J, T)
        T = EuclideanTransform(translation=(dx, dy))
        L2 = warp(-J, T)

        # imshowlist([I, resize(K,J.shape), J, np.multiply(L1,L1 > 0), np.multiply(L2,L2 > 0)])
        A += np.multiply(L1, L1 > 0) + np.multiply(L2, L2 > 0)
    return A
Example #6
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = EuclideanTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    # See gh-3926 for discussion details
    tform = ProjectiveTransform()
    for i in range(20):
        # Some random coordinates
        src = np.random.rand(4, 2) * 100
        dst = np.random.rand(4, 2) * 100

        # Degenerate the case by arranging points on a single line
        src[:, 1] = np.random.rand()
        # Prior to gh-3926, under the above circumstances,
        # a transform could be returned with nan values.
        assert not tform.estimate(src, dst) or np.isfinite(tform.params).all()

    src = np.array([[0, 2, 0], [0, 2, 0], [0, 4, 0]])
    dst = np.array([[0, 1, 0], [0, 1, 0], [0, 3, 0]])
    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    # Prior to gh-6207, the above would set the parameters as the identity.
    assert np.all(np.isnan(tform.params))

    # The tessellation on the following points produces one degenerate affine
    # warp within PiecewiseAffineTransform.
    src = np.asarray([
        [0, 192, 256], [0, 256, 256], [5, 0, 192], [5, 64, 0], [5, 64, 64],
        [5, 64, 256], [5, 192, 192], [5, 256, 256], [0, 192, 256],
    ])

    dst = np.asarray([
        [0, 142, 206], [0, 206, 206], [5, -50, 142], [5, 14, 0], [5, 14, 64],
        [5, 14, 206], [5, 142, 142], [5, 206, 206], [0, 142, 206],
    ])
    tform = PiecewiseAffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.affines[4].params))  # degenerate affine
    for idx, affine in enumerate(tform.affines):
        if idx != 4:
            assert not np.all(np.isnan(affine.params))
    for affine in tform.inverse_affines:
        assert not np.all(np.isnan(affine.params))
Example #7
def absolute_orientation(p, q, no_scaling=False):
    """
    Returns R, t, s satisfying q = s * R * p + t
    
    p and q must be 3xN matrices.
    """

    if no_scaling:
        st = EuclideanTransform()
    else:
        st = SimilarityTransform()

    st.estimate(p.T, q.T)
    R = st.params[:3, :3]
    t = st.params[:3, 3]
    s = np.linalg.norm(R) / np.sqrt(3)
    R = R / s
    return R, t, s
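A minimal round-trip check of absolute_orientation (illustrative data; assumes numpy and the skimage transforms are imported as above):

rng = np.random.default_rng(0)
p = rng.random((3, 10))                    # 3xN points
t_true = np.array([[1.0], [2.0], [3.0]])
q = 2.0 * p + t_true                       # scale 2, identity rotation
R, t, s = absolute_orientation(p, q)
assert np.allclose(s, 2.0)
assert np.allclose(t, t_true.ravel())
assert np.allclose(R, np.eye(3))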
Example #8
def merge_small_img(image0, image1, model_robust, verbose=1):
    """
	stitch image0 and image1 based on model_robust
	"""
    #from skimage.transform import SimilarityTransform

    r, c = image1.shape[:2]

    # Note that transformations take coordinates in (x, y) format,
    # not (row, column), in order to be consistent with most literature
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])

    # Warp the image corners to their new positions
    warped_corners = model_robust(corners)  # also include rotation
    #offset = model_robust(np.zeros(2))  # only do translation move
    #warped_corners = corners + offset

    # Find the extents of both the reference image and the warped
    # target image
    all_corners = np.vstack((warped_corners, corners))

    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)

    output_shape = (corner_max - corner_min)
    output_shape = np.ceil(output_shape[::-1])

    offset = EuclideanTransform(translation=-corner_min)
    image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=0)
    image1_ = warp(image1, (model_robust + offset).inverse,
                   output_shape=output_shape,
                   cval=0)

    #image_merge = np.where(image0_ == 0, image1_,image0_)
    mask = (cv2.bitwise_and(image0_, image1_) > 0)
    #image_merge[mask>0] = image_merge[mask>0]/2
    ssim_value = ssim(image0_ * mask,
                      image1_ * mask,
                      data_range=np.amax(image1_))

    image_merge = image1_ + image0_
    image_merge[mask] = image_merge[mask] / 2
    if verbose:
        plt.figure(figsize=(5, 5))
        ax1 = plt.subplot(131)
        plt.imshow(image0_, cmap='gray')
        ax2 = plt.subplot(132)
        plt.imshow(image1_, cmap='gray')

        plt.subplot(133)
        plt.imshow(image_merge, cmap='gray')

    print('similarity in the overlapping area:', ssim_value)

    return (image_merge, ssim_value)
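As the in-code comment notes, skimage transforms take (x, y) coordinates while arrays are indexed (row, col). A tiny sketch of that convention on hypothetical data:

import numpy as np
from skimage.transform import EuclideanTransform, warp

img = np.zeros((10, 10))
img[0, 0] = 1.0
# translation=(5, 0) moves +5 along x, i.e. 5 columns, not 5 rows
shifted = warp(img, EuclideanTransform(translation=(5, 0)).inverse)
assert np.isclose(shifted[0, 5], 1.0)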
Example #9
def center(image):
    """Shift image so its center of mass is right in the middle.

    This improves accuracy because the network was trained on centered data.
    """
    (rcm, ccm) = center_of_mass(image)
    (rc, cc) = (image.shape[0] / 2, image.shape[1] / 2)
    dr = (rc - rcm)
    dc = (cc - ccm)
    transf = EuclideanTransform(translation=(-dc, -dr))
    translated = warp(image, transf)
    return translated
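A quick sanity check of center (a sketch; assumes numpy, scipy.ndimage.center_of_mass, and skimage.transform.warp as used above):

img = np.zeros((9, 9))
img[1, 2] = 1.0                            # off-center blob
centered = center(img)
# mass ends up at the geometric center (4.5, 4.5) of the 9x9 grid
assert np.allclose(center_of_mass(centered), (4.5, 4.5))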
Example #10
def test_invalid_input():
    with testing.raises(ValueError):
        ProjectiveTransform(np.zeros((2, 3)))
    with testing.raises(ValueError):
        AffineTransform(np.zeros((2, 3)))
    with testing.raises(ValueError):
        SimilarityTransform(np.zeros((2, 3)))
    with testing.raises(ValueError):
        EuclideanTransform(np.zeros((2, 3)))
    with testing.raises(ValueError):
        AffineTransform(matrix=np.zeros((2, 3)), scale=1)
    with testing.raises(ValueError):
        SimilarityTransform(matrix=np.zeros((2, 3)), scale=1)
    with testing.raises(ValueError):
        EuclideanTransform(
            matrix=np.zeros((2, 3)), translation=(0, 0))
    with testing.raises(ValueError):
        PolynomialTransform(np.zeros((3, 3)))
    with testing.raises(ValueError):
        FundamentalMatrixTransform(matrix=np.zeros((3, 2)))
    with testing.raises(ValueError):
        EssentialMatrixTransform(matrix=np.zeros((3, 2)))

    with testing.raises(ValueError):
        EssentialMatrixTransform(rotation=np.zeros((3, 2)))
    with testing.raises(ValueError):
        EssentialMatrixTransform(
            rotation=np.zeros((3, 3)))
    with testing.raises(ValueError):
        EssentialMatrixTransform(
            rotation=np.eye(3))
    with testing.raises(ValueError):
        EssentialMatrixTransform(rotation=np.eye(3),
                                 translation=np.zeros((2,)))
    with testing.raises(ValueError):
        EssentialMatrixTransform(
            rotation=np.eye(3), translation=np.zeros((3,)))
Example #11
def test_3d_euclidean_estimation():
    src_points = np.random.rand(1000, 3)

    # Random transformation for testing
    angles = np.random.random((3, )) * 2 * np.pi - np.pi
    rotation_matrix = _euler_rotation_matrix(angles)
    translation_vector = np.random.random((3, ))
    dst_points = []
    for pt in src_points:
        pt_r = pt.reshape(3, 1)
        dst = np.matmul(rotation_matrix, pt_r) + \
            translation_vector.reshape(3, 1)
        dst = dst.reshape(3)
        dst_points.append(dst)

    dst_points = np.array(dst_points)
    # estimating the transformation
    tform = EuclideanTransform(dimensionality=3)
    assert tform.estimate(src_points, dst_points)
    estimated_rotation = tform.rotation
    estimated_translation = tform.translation
    assert_almost_equal(estimated_rotation, rotation_matrix)
    assert_almost_equal(estimated_translation, translation_vector)
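The _euler_rotation_matrix helper above comes from the surrounding test module and is not shown here; a plausible sketch, assuming rotations about the x, y, and z axes composed as Rx @ Ry @ Rz (the real helper may use a different axis order):

def _euler_rotation_matrix(angles):
    ax, ay, az = angles
    rx = np.array([[1, 0, 0],
                   [0, np.cos(ax), -np.sin(ax)],
                   [0, np.sin(ax), np.cos(ax)]])
    ry = np.array([[np.cos(ay), 0, np.sin(ay)],
                   [0, 1, 0],
                   [-np.sin(ay), 0, np.cos(ay)]])
    rz = np.array([[np.cos(az), -np.sin(az), 0],
                   [np.sin(az), np.cos(az), 0],
                   [0, 0, 1]])
    return rx @ ry @ rz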
Example #12
def transform_euclidean(left_kps, right_kps, left_ds, right_ds):
    """Determine the Euclidean transform which best maps right kps to left kps."""
    bf = cv2.BFMatcher()
    log.info('Start matching Features.')
    raw_matches = bf.knnMatch(left_ds, right_ds, k=2)
    log.debug('Matches found: #raw_matches = {}'.format(len(raw_matches)))
    left_pts, right_pts, good_matches = helpers.get_points_n_matches(
        left_kps, right_kps, raw_matches)
    log.debug('# Filtered Features = {}'.format(len(good_matches)))
    if len(left_pts) > 3:
        log.info('Start finding homography.')
        left_pts = left_pts.reshape((len(left_pts), 2))
        right_pts = right_pts.reshape((len(right_pts), 2))
        model = EuclideanTransform()
        model.estimate(right_pts, left_pts)
        model_robust, inliers = ransac((right_pts, left_pts),
                                       EuclideanTransform,
                                       min_samples=50,
                                       residual_threshold=10,
                                       max_trials=3000)
        homography = model_robust.params
        mask_good = None
        return homography, mask_good, good_matches
    return None
Example #13
def test_image_align(self, image_temp_path, image_array, align_image):
    from scipy.ndimage import affine_transform
    from skimage.transform import EuclideanTransform

    transform = EuclideanTransform(rotation=90)
    imagefile = ImageFile(image_temp_path,
                          cache_image=True,
                          align_image=align_image)

    # The original image should be returned if no transform is available
    assert (imagefile.image == image_array).all()
    assert (imagefile.image_gray == array([[1.]])).all()

    # The original image should be returned unless align_image is True
    imagefile.alignment_transform = transform
    if align_image:
        #assert (imagefile.image == transform_img(image_array, 1, transform.rotation, transform.translation)).all()
        assert (imagefile.image == affine_transform(image_array,
                                                    transform.params,
                                                    order=3)).all()
    else:
        assert (imagefile.image == image_array).all()
Example #14
def data_aug(inpu, mask):
    '''inpu: tensor [bch, h, w, c]
       output: tensor [bch + 4*(bch//4), h, w, c] with cropped, rotated,
       translated, and flipped copies appended along the batch axis
    '''
    bch, h, w, c = inpu.shape
    # shuffle the data (identical seeds keep inpu and mask aligned)
    np.random.seed(100)
    np.random.shuffle(inpu)
    np.random.seed(100)
    np.random.shuffle(mask)

    # crop the data
    crop_size = [((5, 5), (8, 8)), ((10, 10), (15, 15)), ((15, 15), (20, 20)),
                 ((20, 20), (30, 30)), ((30, 30), (40, 40))]
    crop_inpu = np.zeros([bch // 4, h, w, c])
    crop_mask = np.zeros([bch // 4, h, w, c])
    for i in range(bch // 4):
        idx = np.random.choice(5)
        crop_inpu[i, :, :, 0] = resize(
            crop(inpu[i, :, :, 0], crop_size[idx], copy=True), [h, w])
        crop_mask[i, :, :, 0] = resize(
            crop(mask[i, :, :, 0], crop_size[idx], copy=True), [h, w])

    # rotate the data
    rota_inpu = np.zeros([bch // 4, h, w, c])
    rota_mask = np.zeros([bch // 4, h, w, c])
    for i in range(bch // 4):
        rota_inpu[i, :, :, 0] = rotate(inpu[i, :, :, 0], i % 360)
        rota_mask[i, :, :, 0] = rotate(mask[i, :, :, 0], i % 360)

    # translate data
    trans_size = [(1, 1), (2, 3), (3, 4), (4, 6)]
    trans_inpu = np.zeros([bch // 4, h, w, c])
    trans_mask = np.zeros([bch // 4, h, w, c])
    for i in range(bch // 4):
        idx = np.random.choice(4)
        trans_inpu[i, :, :, 0] = warp(
            inpu[i, :, :, 0],
            EuclideanTransform(translation=trans_size[idx]))
        trans_mask[i, :, :, 0] = warp(
            mask[i, :, :, 0],
            EuclideanTransform(translation=trans_size[idx]))

    # flip data
    flip_inpu = np.zeros([bch // 4, h, w, c])
    flip_mask = np.zeros([bch // 4, h, w, c])
    for i in range(bch // 4):
        idx = np.random.choice(2)
        flip_inpu[i, :, :, 0] = np.flip(inpu[i, :, :, 0], idx)
        flip_mask[i, :, :, 0] = np.flip(mask[i, :, :, 0], idx)

    #print(inpu.shape, crop_inpu.shape, rota_inpu.shape, trans_inpu.shape)
    inpu = np.concatenate([inpu, crop_inpu, rota_inpu, trans_inpu, flip_inpu],
                          axis=0)
    mask = np.concatenate([mask, crop_mask, rota_mask, trans_mask, flip_mask],
                          axis=0)

    # shuffle again
    np.random.seed(100)
    np.random.shuffle(inpu)
    np.random.seed(100)
    np.random.shuffle(mask)

    return inpu, mask
Example #15
# Fragment from a larger script: `sigmas`, `xs`, and `ys` are parameter
# grids defined earlier and not shown here. (register_translation is the
# old skimage name for phase_cross_correlation.)
best_x = None
best_y = None
for sig in sigmas:
    for x in xs:
        for y in ys:
            for rot in [8.2]:
                orig = gaussian(io.imread('BrainProtonDensitySlice.png'),
                                sigma=sig)
                moved = gaussian(
                    io.imread('BrainProtonDensitySliceR10X13Y17.png'),
                    sigma=sig)
                sft, idc, idk = register_translation(pad(orig, 20, 'edge'),
                                                     moved)
                unmoved = warp(
                    moved,
                    EuclideanTransform(translation=[-sft[1], -sft[0]]),
                    output_shape=(257, 221),
                    mode='constant',
                    cval=0)
                cropped = unmoved[20:-20, 20:-20]
                io.imsave('unmoved_cropped.png', cropped)
                #orig_tr = logpolar_fancy(np.fft.fftn(orig),x,y)#91,108
                #cropped_tr = logpolar_fancy(np.fft.fftn(cropped),x,y)#91,108
                #shifted_lpt,error_lpt,phased_lpt = register_translation(orig_tr,cropped_tr,space='fourier')
                #rot_rads = math.atan2(shifted_lpt[0],shifted_lpt[1])
                #rot_degs = math.degrees(rot_rads)
                rotated = rotate(cropped, rot)
                io.imsave('rotated.png', rotated)
Example #16
def test_euler_angle_consistency():
    angles = np.random.random((3, )) * 2 * np.pi - np.pi
    euclid = EuclideanTransform(rotation=angles, dimensionality=3)
    similar = SimilarityTransform(rotation=angles, dimensionality=3)
    assert_array_almost_equal(euclid, similar)
Example #17
def merge_full_image(data_left,
                     data_right,
                     left_index,
                     right_index,
                     model_robust,
                     verbose=1,
                     hist_match=1,
                     offset_columns=100):
    """
	merge the whole CT slice
	Parameters:
	--------------------------------------------
	data_left: to be stitched left CT volumn
	data_right: to be stitched right CT volumn
	left_index: index of CT 
	
	"""
    # get the slice of the left image and right image
    image_left = np.squeeze(data_left[:, :, left_index])
    image_right = np.squeeze(data_right[:, 1:, right_index])

    # match histogram
    if hist_match:
        image_left = histogram_matching(image_left, image_right, verbose=0)

    # get the size of the image
    size_right = np.shape(image_right)
    size_left = np.shape(image_left)

    #############################################################################
    ### calculate the output shape of the stitched image
    #############################################################################
    # number of columns for the stitched image
    n_col = np.add(size_right, size_left)[1]

    #     image0 = image_left
    #     image1 = image_right

    r, c = image_right.shape[:2]
    c = n_col
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])

    warped_corners = model_robust(corners)  # also include rotation
    all_corners = np.vstack((warped_corners, corners))

    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)

    output_shape = (corner_max - corner_min)
    if warped_corners[0][0] < 0:
        offset_box = -warped_corners[0][0] + offset_columns  # change from 102
    else:
        offset_box = warped_corners[0][0]
    output_shape[0] = n_col - offset_box
    output_shape = np.ceil(output_shape[::-1])
    #############################################################################
    ### merge the two image by shifting them to the correct positions
    #############################################################################
    offset = EuclideanTransform(translation=(0, 0))  # move the left image

    image0_ = warp(image_left,
                   offset.inverse,
                   output_shape=output_shape,
                   cval=0)
    # pad -1 to the left image to the same shape as output_shape

    offset = EuclideanTransform(translation=(size_left[1] -
                                             (offset_columns + 2), 0))
    # move the right image

    image1_ = warp(image_right, (model_robust + offset).inverse,
                   output_shape=output_shape,
                   cval=0)
    # use the image registration model (model_robust) plus the offset translation

    image_merge = image1_ + image0_
    #image_merge[np.where(image_merge>0)] = (image_merge[np.where(image_merge>0)] -2)/2 # average the overlap part
    #image_merge = np.where(image0_ == 0, image1_,image0_)
    mask = (cv2.bitwise_and(image0_, image1_) > 0)  # find the overlapping area
    image_merge[mask] = image_merge[mask] / 2

    ##############################################################################
    if verbose:
        plt.figure(figsize=(10, 5))
        plt.subplot(121)
        plt.imshow(image0_, cmap='gray')
        plt.title('left image', fontsize=20)
        plt.axis('off')

        plt.subplot(122)
        plt.imshow(image1_, cmap='gray')
        plt.title('right image', fontsize=20)
        plt.axis('off')

        plt.figure(figsize=(10, 5))
        plt.imshow(image_merge[10:-10, :], cmap='gray')
        plt.title('stitched image', fontsize=20)
        plt.axis('off')

    return image_merge