Example No. 1
def normalize_slice_projection(slice_pixel_array, crop_bounds, desired_width,
                               desired_height):
    result = np.zeros((desired_height, desired_width))
    # Crop bounds must be converted from (x, y) points to (y, x) points
    source_bounds = np.asarray([[0, 0], [0, desired_width],
                                [desired_height, desired_width],
                                [desired_height, 0]])
    destination_bounds = np.asarray([[crop_bounds[0][1], crop_bounds[0][0]],
                                     [crop_bounds[1][1], crop_bounds[1][0]],
                                     [crop_bounds[2][1], crop_bounds[2][0]],
                                     [crop_bounds[3][1], crop_bounds[3][0]]])
    projective_transform = ProjectiveTransform()
    if not projective_transform.estimate(source_bounds, destination_bounds):
        print("Cannot project from crop bounds to desired image dimensions")
    else:
        for x in range(0, desired_width):
            for y in range(0, desired_height):
                normalized_point = [y, x]
                transform = projective_transform(normalized_point)
                slice_point = transform[0]
                value = MathUtil.sample_image_bilinear(slice_pixel_array,
                                                       slice_point[1],
                                                       slice_point[0])
                result[y][x] = value

    return result
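
The (x, y) / (y, x) swaps above reflect a scikit-image convention: transforms operate on (x, y) = (column, row) points, while NumPy arrays are indexed as [row, column]. A minimal standalone sketch of that convention, separate from the example:

import numpy as np
from skimage.transform import SimilarityTransform

# skimage transforms take points as (x, y) = (column, row).
t = SimilarityTransform(translation=(3, 0))  # shift 3 pixels along x (columns)
print(t(np.array([[0.0, 0.0]])))             # [[3. 0.]] -- x moved, y unchanged

# To run (row, col) indices through the same transform, swap axes both ways:
rc = np.array([[5.0, 2.0]])   # one point as (row, col)
xy = rc[:, ::-1]              # -> (x, y)
print(t(xy)[:, ::-1])         # [[5. 5.]] back in (row, col)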
Example No. 2
    def apply_projection_transform(self, Xb, batch_size, image_size):
        d = image_size * 0.3 * self.intensity
        for i in np.random.choice(batch_size,
                                  int(batch_size * self.p),
                                  replace=False):
            tl_top = random.uniform(-d, d)
            tl_left = random.uniform(-d, d)
            bl_bottom = random.uniform(-d, d)
            bl_left = random.uniform(-d, d)
            tr_top = random.uniform(-d, d)
            tr_right = random.uniform(-d, d)
            br_bottom = random.uniform(-d, d)
            br_right = random.uniform(-d, d)

            transform = ProjectiveTransform()
            transform.estimate(
                np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                          (image_size - br_right, image_size - br_bottom),
                          (image_size - tr_right, tr_top))),
                np.array(((0, 0), (0, image_size), (image_size, image_size),
                          (image_size, 0))))
            Xb[i] = warp(Xb[i],
                         transform,
                         output_shape=(image_size, image_size),
                         order=1,
                         mode='edge')
        return Xb
Example No. 3
def test_fast_homography():
    img = rgb2gray(data.astronaut()).astype(np.uint8)
    img = img[:, :100]

    theta = np.deg2rad(30)
    scale = 0.5
    tx, ty = 50, 50

    H = np.eye(3)
    S = scale * np.sin(theta)
    C = scale * np.cos(theta)

    H[:2, :2] = [[C, -S], [S, C]]
    H[:2, 2] = [tx, ty]

    tform = ProjectiveTransform(H)
    coords = warp_coords(tform.inverse, (img.shape[0], img.shape[1]))

    for order in range(4):
        for mode in ('constant', 'reflect', 'wrap', 'nearest'):
            p0 = map_coordinates(img, coords, mode=mode, order=order)
            p1 = warp(img, tform, mode=mode, order=order)

            # import matplotlib.pyplot as plt
            # f, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4)
            # ax0.imshow(img)
            # ax1.imshow(p0, cmap=plt.cm.gray)
            # ax2.imshow(p1, cmap=plt.cm.gray)
            # ax3.imshow(np.abs(p0 - p1), cmap=plt.cm.gray)
            # plt.show()

            d = np.mean(np.abs(p0 - p1))
            assert d < 0.001
Example No. 4
def apply_projection_transform_(X, intensity):
    image_size = X.shape[1]
    d = image_size * 0.3 * intensity
    for i in range(X.shape[0]):
        tl_top = random.uniform(-d, d)     # Top left corner, top margin
        tl_left = random.uniform(-d, d)    # Top left corner, left margin
        bl_bottom = random.uniform(-d, d)  # Bottom left corner, bottom margin
        bl_left = random.uniform(-d, d)    # Bottom left corner, left margin
        tr_top = random.uniform(-d, d)     # Top right corner, top margin
        tr_right = random.uniform(-d, d)   # Top right corner, right margin
        br_bottom = random.uniform(-d, d)  # Bottom right corner, bottom margin
        br_right = random.uniform(-d, d)   # Bottom right corner, right margin

        transform = ProjectiveTransform()
        transform.estimate(np.array((
                (tl_left, tl_top),
                (bl_left, image_size - bl_bottom),
                (image_size - br_right, image_size - br_bottom),
                (image_size - tr_right, tr_top)
            )), np.array((
                (0, 0),
                (0, image_size),
                (image_size, image_size),
                (image_size, 0)
            )))
        X[i] = warp(X[i], transform, output_shape=(image_size, image_size), order=1, mode='edge')

    return X
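
A minimal usage sketch for the function above, assuming it is in scope along with the numpy/scikit-image imports it relies on; the batch is synthetic:

import numpy as np

X = np.random.rand(4, 32, 32)  # synthetic batch of 4 square grayscale images
X_aug = apply_projection_transform_(X.copy(), intensity=0.75)
assert X_aug.shape == X.shape  # shape preserved; each image randomly warped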
Example No. 5
    def apply_projection_transform(self, Xb, batch_size, image_size):
        """
        Applies projection transform to a random subset of images. Projection margins are randomised in a range
        depending on the size of the image. Range itself is subject to scaling depending on augmentation intensity.
        """
        d = image_size * 0.3 * self.intensity
        for i in np.random.choice(batch_size,
                                  int(batch_size * self.p),
                                  replace=False):
            tl_top = random.uniform(-d, d)  # Top left corner, top margin
            tl_left = random.uniform(-d, d)  # Top left corner, left margin
            bl_bottom = random.uniform(-d,
                                       d)  # Bottom left corner, bottom margin
            bl_left = random.uniform(-d, d)  # Bottom left corner, left margin
            tr_top = random.uniform(-d, d)  # Top right corner, top margin
            tr_right = random.uniform(-d, d)  # Top right corner, right margin
            br_bottom = random.uniform(-d,
                                       d)  # Bottom right corner, bottom margin
            br_right = random.uniform(-d,
                                      d)  # Bottom right corner, right margin

            transform = ProjectiveTransform()
            transform.estimate(
                np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                          (image_size - br_right, image_size - br_bottom),
                          (image_size - tr_right, tr_top))),
                np.array(((0, 0), (0, image_size), (image_size, image_size),
                          (image_size, 0))))
            Xb[i] = warp(Xb[i],
                         transform,
                         output_shape=(image_size, image_size),
                         order=1,
                         mode='edge')

        return Xb
Example No. 6
def get_final_center_warps(image_collection, simple_center_warps):
    """Find final transformations.

    Args:
        image_collection (Tuple[N]): list of all images
        simple_center_warps (Tuple[N]): transformations unadjusted for shift

    Returns:
        Tuple[N]: final transformations
    """
    all_corners = list(get_corners(image_collection, simple_center_warps))
    arr = np.array(all_corners)
    corners = get_min_max_coords(arr)
    height = corners[1][1] - corners[0][1]
    width = corners[1][0] - corners[0][0]
    dest = [[0, 0], [height, 0], [0, width], [height, width]]
    src = [[corners[0][1], corners[0][0]],
           [corners[1][1], corners[0][0]],
           [corners[0][1], corners[1][0]],
           [corners[1][1], corners[1][0]]]
    src = np.array(src)
    dest = np.array(dest)
    transform = find_homography(src, dest)
    result = []
    for center_warp in simple_center_warps:
        result.append(center_warp + ProjectiveTransform(transform))
    return tuple(result), (int(round(height)), int(round(width)))
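
The `+` in the loop above is scikit-image's transform composition: tform1 + tform2 yields a transform that applies tform1 first, then tform2. A quick standalone check:

from skimage.transform import SimilarityTransform

t1 = SimilarityTransform(translation=(1, 0))  # shift x by 1
t2 = SimilarityTransform(scale=2)             # then scale by 2
combined = t1 + t2
print(combined([[0, 0]]))  # [[2. 0.]]: (0, 0) -> (1, 0) -> (2, 0)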
Example No. 7
def apply_projection_transform(X, intensity):
    image_size = X.shape[1]
    d = image_size * 0.3 * intensity

    tl_top = random.uniform(-d, d)
    tl_left = random.uniform(-d, d)
    bl_bottom = random.uniform(-d, d)
    bl_left = random.uniform(-d, d)
    tr_top = random.uniform(-d, d)
    tr_right = random.uniform(-d, d)
    br_bottom = random.uniform(-d, d)
    br_right = random.uniform(-d, d)

    transform = ProjectiveTransform()
    transform.estimate(
        np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                  (image_size - br_right,
                   image_size - br_bottom), (image_size - tr_right, tr_top))),
        np.array(((0, 0), (0, image_size), (image_size, image_size),
                  (image_size, 0))))

    X = warp(X,
             transform,
             output_shape=(image_size, image_size),
             order=1,
             mode='edge')

    return X
Example No. 8
def randomPerspective(im):
    '''
    Wrapper for a projective (perspective) transform estimated from 4 random
    points, one picked near each corner of the image within a defined region.
    '''
    region = 1 / 4
    A = pl.array([[0, 0], [0, im.shape[0]], [im.shape[1], im.shape[0]],
                  [im.shape[1], 0]])
    B = pl.array([
        [
            int(randRange(0, im.shape[1] * region)),
            int(randRange(0, im.shape[0] * region))
        ],
        [
            int(randRange(0, im.shape[1] * region)),
            int(randRange(im.shape[0] * (1 - region), im.shape[0]))
        ],
        [
            int(randRange(im.shape[1] * (1 - region), im.shape[1])),
            int(randRange(im.shape[0] * (1 - region), im.shape[0]))
        ],
        [
            int(randRange(im.shape[1] * (1 - region), im.shape[1])),
            int(randRange(0, im.shape[0] * region))
        ],
    ])

    pt = ProjectiveTransform()
    pt.estimate(A, B)
    return warp(im, pt, output_shape=im.shape[:2])
Example No. 9
def test_geometric_tform():
    tform = GeometricTransform()
    with testing.raises(NotImplementedError):
        tform(0)
    with testing.raises(NotImplementedError):
        tform.inverse(0)
    with testing.raises(NotImplementedError):
        tform.__add__(0)

    # See gh-3926 for discussion details
    for i in range(20):
        # Generate random Homography
        H = np.random.rand(3, 3) * 100
        H[2, H[2] == 0] += np.finfo(float).eps
        H /= H[2, 2]

        # Craft some src coords
        src = np.array([
            [(H[2, 1] + 1) / -H[2, 0], 1],
            [1, (H[2, 0] + 1) / -H[2, 1]],
            [1, 1],
        ])
        # Prior to gh-3926, under the above circumstances,
        # destination coordinates could be returned with nan/inf values.
        tform = ProjectiveTransform(H)  # Construct the transform
        dst = tform(src)  # Obtain the dst coords
        # Ensure dst coords are finite numeric values
        assert np.isfinite(dst).all()
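
The crafted src points target the projective division: for a point (x, y), ProjectiveTransform computes x' = (h00*x + h01*y + h02) / (h20*x + h21*y + h22), and with h22 normalized to 1 each crafted point zeroes that denominator. A standalone check of the first point:

import numpy as np

H = np.random.rand(3, 3) * 100
H[2, H[2] == 0] += np.finfo(float).eps
H /= H[2, 2]

x, y = (H[2, 1] + 1) / -H[2, 0], 1.0
denom = H[2, 0] * x + H[2, 1] * y + H[2, 2]
print(denom)  # ~0.0: the point would map to infinity without the gh-3926 fix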
Example No. 10
    def _get_rand_transform_matrix(self, image_size, d, batch_size):

        M = np.zeros((batch_size, 8))

        for i in range(batch_size):
            tl_top = random.uniform(-d, d)  # Top left corner, top
            tl_left = random.uniform(-d, d)  # Top left corner, left
            bl_bottom = random.uniform(-d, d)  # Bot left corner, bot
            bl_left = random.uniform(-d, d)  # Bot left corner, left
            tr_top = random.uniform(-d, d)  # Top right corner, top
            tr_right = random.uniform(-d, d)  # Top right corner, right
            br_bottom = random.uniform(-d, d)  # Bot right corner, bot
            br_right = random.uniform(-d, d)  # Bot right corner, right

            transform = ProjectiveTransform()
            transform.estimate(
                np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                          (image_size - br_right, image_size - br_bottom),
                          (image_size - tr_right, tr_top))),
                np.array(((0, 0), (0, image_size), (image_size, image_size),
                          (image_size, 0))))

            M[i] = transform.params.flatten()[:8]

        return M
Example No. 11
    def __init__(self, config):
        '''
        Initialization for the Registration class.

        Parameters
        -------------
        config: Config
            configuration class for the traffic intersection

        '''

        self.config = config

        # Four corners of the intersection, hard-coded in camera space
        self.corners = self.config.street_corners
        # Four corners of the intersection, hard-coded in transformed space
        self.st_corners = self.config.simulator_corners

        # Computes the projective transform going from the camera to the simulator coordinate frame
        self.tf_mat = ProjectiveTransform()

        self.sc, self.cc = self.load_homography_data()
        self.tf_mat.estimate(self.sc, self.cc)

        self.af = AddOffset()
Example No. 12
def get_sprites(image, ctrs, debug=False):
    """
    This function computes a projective transform from the source (mnist image) to
    the destination (contour) and extracts the warped sprite.
    """
    # We make sure that we work on a local copy of the image
    img = image.copy()

    # We loop through the sprites
    sprts = []

    for contour in ctrs:

        # We compute the projective transform
        source_points = np.array([[28, 28], [0, 28], [0, 0], [28, 0]])
        destination_points = np.array(contour)
        transform = ProjectiveTransform()
        transform.estimate(source_points, destination_points)

        # We transform the image
        warped = warp(img, transform, output_shape=(28, 28))

        if debug:
            _, axis = plt.subplots(nrows=2, figsize=(8, 3))
            axis[0].imshow(img, cmap='gray')
            axis[0].plot(destination_points[:, 0], destination_points[:, 1], '.r')
            axis[0].set_axis_off()
            axis[1].imshow(warped, cmap='gray')
            axis[1].set_axis_off()
            plt.tight_layout()
            plt.show()

        sprts.append(warped)

    return sprts
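
Note the estimation direction: warp interprets the given transform as a map from output coordinates to input coordinates, so estimating from the 28x28 sprite corners to the contour corners is exactly what lets warp pull the sprite out of the larger image. A standalone sketch of the same idea on synthetic data (the corner values are illustrative only):

import numpy as np
from skimage.transform import ProjectiveTransform, warp

big = np.zeros((100, 100))
big[40:60, 40:60] = 1.0  # a bright patch to extract

# Map output (28x28) corners onto the patch corners, both in (x, y) order.
src = np.array([[0, 0], [28, 0], [28, 28], [0, 28]])
dst = np.array([[40, 40], [60, 40], [60, 60], [40, 60]])
tform = ProjectiveTransform()
tform.estimate(src, dst)

sprite = warp(big, tform, output_shape=(28, 28))
print(sprite.mean())  # close to 1.0: the output is filled by the bright patch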
Example No. 13
def normalize_image(image, image_colored, rescale_param=0.5):
    image = image.copy()
    image_colored = image_colored.copy()
    image_scaled = rescale(image, rescale_param)
    edges = canny(image_scaled)

    selem = disk(1)
    edges = dilation(edges, selem)

    edges = edges.astype(np.uint8)
    img, ext_contours, hierarchy = cv2.findContours(edges.copy(),
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
    contour = max(ext_contours, key=cv2.contourArea)
    contour = contour.squeeze()

    epsilon = 0.05 * cv2.arcLength(contour, True)
    corners = cv2.approxPolyDP(contour, epsilon, True).squeeze()
    corners = perspective.order_points(corners)
    corners = corners / rescale_param

    size_square = min(image_scaled.shape)
    tform = ProjectiveTransform()
    tform.estimate(np.array([[0, 0], [1020, 0], [1020, 720], [0, 720]]),
                   corners)
    image_warped = warp(image_colored, tform)[:720, :1020]

    img = img_as_ubyte(image_warped)
    img = adjusting_brightness(img[30:-5, 15:-15], a=1.7, b=2)
    return img, tform
Example No. 14
def projection_transform(image, max_warp=0.8, height=128, width=128):
    # Warp location
    d = height * 0.3 * np.random.uniform(0, max_warp)

    # Warp coordinates
    tl_top = np.random.uniform(-d, d)  # Top left corner, top margin
    tl_left = np.random.uniform(-d, d)  # Top left corner, left margin
    bl_bottom = np.random.uniform(-d, d)  # Bottom left corner, bottom margin
    bl_left = np.random.uniform(-d, d)  # Bottom left corner, left margin
    tr_top = np.random.uniform(-d, d)  # Top right corner, top margin
    tr_right = np.random.uniform(-d, d)  # Top right corner, right margin
    br_bottom = np.random.uniform(-d, d)  # Bottom right corner, bottom margin
    br_right = np.random.uniform(-d, d)  # Bottom right corner, right margin

    # Apply projection
    transform = ProjectiveTransform()
    transform.estimate(
        np.array(((tl_left, tl_top), (bl_left, height - bl_bottom),
                  (height - br_right, height - br_bottom), (height - tr_right,
                                                            tr_top))),
        np.array(((0, 0), (0, height), (height, height), (height, 0))))
    output_image = warp(image,
                        transform,
                        output_shape=(height, width),
                        order=1,
                        mode='edge')
    return output_image
Example No. 15
    def projection(self, tile: HipsTile) -> ProjectiveTransform:
        """Estimate projective transformation on a HiPS tile."""
        corners = tile.meta.skycoord_corners.to_pixel(self.geometry.wcs)
        src = np.array(corners).T.reshape((4, 2))
        dst = tile_corner_pixel_coordinates(tile.meta.width)
        pt = ProjectiveTransform()
        pt.estimate(src, dst)
        return pt
Example No. 16
def preprocess(homographies):
    inv_homos = []
    transforms = []
    for H in homographies:
        inv_homos.append(np.linalg.inv(H))
        transforms.append(ProjectiveTransform(H))
    return inv_homos, transforms
Example No. 17
def source_to_projection(src: List, verbose: bool = False):
    if verbose:
        for point in src:
            print(round(point[0], 3), round(point[1], 3))
    src = np.asarray(src)  # bottom_left, top_left, top_right, bottom_right
    dst = np.asarray([[0, 0], [0, 1], [1, 1], [1, 0]])
    pt = ProjectiveTransform()
    pt.estimate(src, dst)
    return pt
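
A usage sketch, assuming the function above is in scope; the quad is made up. The estimated transform maps points from the quad into the unit square:

# Hypothetical quad, ordered bottom_left, top_left, top_right, bottom_right
quad = [[0.1, 0.9], [0.2, 0.1], [0.8, 0.15], [0.9, 0.95]]
pt = source_to_projection(quad)

print(pt([[0.2, 0.1]]))   # ~[[0. 1.]]: top_left lands on the unit square's (0, 1)
print(pt([[0.9, 0.95]]))  # ~[[1. 0.]]: bottom_right lands on (1, 0)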
Example No. 18
def make_homography(shifts, horizon_line, resolution):
    horizon_line = resolution * horizon_line
    left_top_dx, left_top_dy = shifts[0][0]
    right_top_dx, right_top_dy = shifts[0][1]
    left_hor_dx, left_hor_dy = shifts[1][0]
    right_hor_dx, right_hor_dy = shifts[1][1]
    points_src = np.array(
        [
            [0, 0],                      # left top
            [resolution, 0],             # right top
            [0, horizon_line],           # left bottom
            [resolution, horizon_line],  # right bottom
        ],
        dtype='float32')
    points_tgt = np.array(
        [
            [left_top_dx, left_top_dy],                                # left top
            [resolution + right_top_dx, right_top_dy],                 # right top
            [left_hor_dx, horizon_line + left_hor_dy],                 # left bottom
            [resolution + right_hor_dx, horizon_line + right_hor_dy],  # right bottom
        ],
        dtype='float32')
    sky_transform = ProjectiveTransform()
    sky_transform.estimate(points_src, points_tgt)

    points_src = np.array(
        [
            [0, horizon_line],           # left horizon
            [resolution, horizon_line],  # right horizon
            [0, resolution],             # left bottom
            [resolution, resolution],    # right bottom
        ],
        dtype='float32')
    points_tgt = np.array(
        [
            [left_hor_dx, horizon_line - left_hor_dy],                 # left horizon
            [resolution + right_hor_dx, horizon_line - right_hor_dy],  # right horizon
            [left_top_dx, resolution - left_top_dy],                   # left bottom
            [resolution + right_top_dx, resolution - right_top_dy],    # right bottom
        ],
        dtype='float32')
    earth_transform = ProjectiveTransform()
    earth_transform.estimate(points_src, points_tgt)

    return sky_transform, earth_transform
Example No. 19
def create_img_mask(r, c, n, transforms, priorities):
    pro_idx = priorities[0]  # [0, 1, 2, 3]
    print(r, c, n)
    stitch_masks = np.array([pow(2, i) * np.ones((r, c)) for i in range(2)])
    return_masks = []
    corners = np.array([[0, 0], [0, r], [c, r], [c, 0]]).astype(float)
    for index in range(n):
        # create mask for index-th image

        if index == pro_idx:
            return_masks.append(None)
            continue
        else:
            al_corners = corners
            warped_corners = transforms[pro_idx](corners)
            al_corners = np.vstack((al_corners, warped_corners))
            warped_corners = transforms[index](corners)
            al_corners = np.vstack((al_corners, warped_corners))

            corner_min = np.min(al_corners, axis=0)
            corner_max = np.max(al_corners, axis=0)
            output_shape = (corner_max - corner_min)

            output_shape = np.ceil(output_shape[::-1])
            offset = SimilarityTransform(translation=-corner_min)
            offset_inv = SimilarityTransform(translation=corner_min)

            total_masks = []
            total_masks.append(
                warp(stitch_masks[pro_idx, :, :],
                     (transforms[pro_idx] + offset).inverse,
                     output_shape=output_shape,
                     cval=0))
            total_masks.append(
                warp(stitch_masks[1, :, :], (transforms[1] + offset).inverse,
                     output_shape=output_shape,
                     cval=0))
            total_masks = np.sum(np.array(total_masks), axis=0)

            # return val
            transform_inv = ProjectiveTransform(np.linalg.inv(transforms[index].params))
            return_masks.append(
                warp(total_masks, (offset_inv + transform_inv).inverse,
                     output_shape=[r, c],
                     cval=0))
            return_masks[index][(return_masks[index] % 1.0 != 0)] = pow(
                2, 1)  # pow(2,i)

            ret_masks = return_masks[index]
            # now the image that has to be bitwise-and. so the background image has to be 255
            # the mask[i]. the overlap_label will be 2^len - 1 = 3, overlap label
            overlap_value = pow(2, 2) - 1  # 3
            # print ((ret_masks==3.0).sum())
            ret_masks[(ret_masks != overlap_value)] = 255  # white
            ret_masks[(ret_masks == overlap_value)] = 0  # reverse mask
            ret_masks = ret_masks.astype('uint8')
            # print((ret_masks[index] == 255).sum())
    return return_masks
Example No. 20
def test_projective_init(array_like_input):
    tform = estimate_transform('projective', SRC, DST)
    # init with transformation matrix
    if array_like_input:
        params = [list(p) for p in tform.params]
    else:
        params = tform.params
    tform2 = ProjectiveTransform(params)
    assert_almost_equal(tform2.params, tform.params)
Example No. 21
def test_homography():
    x = np.zeros((5, 5), dtype=np.double)
    x[1, 1] = 1
    theta = -np.pi / 2
    M = np.array([[np.cos(theta), -np.sin(theta), 0],
                  [np.sin(theta), np.cos(theta), 4], [0, 0, 1]])

    x90 = warp(x, inverse_map=ProjectiveTransform(M).inverse, order=1)
    assert_almost_equal(x90, np.rot90(x))
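
Passing .inverse here is deliberate: warp expects the map from output to input coordinates, so supplying the inverse of M applies M itself. A standalone check that M sends the lit pixel where np.rot90 puts it:

import numpy as np

theta = -np.pi / 2
M = np.array([[np.cos(theta), -np.sin(theta), 0],
              [np.sin(theta), np.cos(theta), 4],
              [0, 0, 1]])

# Forward-map the lit pixel (x, y) = (1, 1) in homogeneous coordinates.
p = M @ np.array([1, 1, 1])
print(p[:2] / p[2])  # [1. 3.]: column 1, row 3 -- where np.rot90 places x[1, 1]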
Example No. 22
def stitching(homography, pano0, I0, I1):
    # Shape registration target
    r, c = pano0.shape[:2]

    # Note that transformations take coordinates in (x, y) format,
    # not (row, column), in order to be consistent with most literature
    corners = np.array([[0, 0, 1],
                        [0, r, 1],
                        [c, 0, 1],
                        [c, r, 1]])

    # Warp the image corners to their new positions
    warped_corners01 = np.dot(homography, corners.T)
    warped_corners01 = warped_corners01[:2, :].T

    # Find the extents of both the reference image and the warped
    # target image
    all_corners = np.vstack((warped_corners01, corners[:, :2]))

    # The overall output shape will be max - min
    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)
    output_shape = (corner_max - corner_min)

    # Ensure integer shape with np.ceil and dtype conversion
    output_shape = np.ceil(output_shape[::-1]).astype(int)

    # This in-plane offset is the only necessary transformation for the middle image
    offset1 = SimilarityTransform(translation=-corner_min)
    tform = ProjectiveTransform(homography)

    # Warp pano1 to pano0 using 3rd order interpolation
    transform01 = (tform + offset1).inverse
    I1_warped = warp(I1, transform01, order=3,
                     output_shape=output_shape, cval=-1)

    I1_mask = (I1_warped != -1)  # Mask == 1 inside image
    I1_warped[~I1_mask] = 0  # Return background values to 0

    # Translate pano0 into place
    I0_warped = warp(I0, offset1.inverse, order=3,
                     output_shape=output_shape, cval=-1)

    I0_mask = (I0_warped != -1)  # Mask == 1 inside image
    I0_warped[~I0_mask] = 0  # Return background values to 0

    # Add the images together. This could create dtype overflows!
    # We know they are floating point images after warping, so it's OK.
    merged = (I0_warped + I1_warped)

    # Track the overlap by adding the masks together
    overlap = (I0_mask * 1.0 +  # Multiply by 1.0 for bool -> float conversion
               I1_mask)

    # Normalize through division by `overlap` - but ensure the minimum is 1
    normalized = merged / np.maximum(overlap, 1)
    return normalized
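
The closing normalization is an element-wise average wherever both images contributed, and a pass-through where only one did. A toy check of that step in isolation:

import numpy as np

I0_warped = np.array([[2.0, 2.0, 0.0]])
I1_warped = np.array([[0.0, 4.0, 4.0]])
I0_mask = I0_warped != 0
I1_mask = I1_warped != 0

merged = I0_warped + I1_warped
overlap = I0_mask * 1.0 + I1_mask
print(merged / np.maximum(overlap, 1))  # [[2. 3. 4.]]: averaged only in the overlap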
Example No. 23
    def estimar_valores_q(self, rgb=None):
        """
        Estimate the reward result from position of the objects

        The input to the CNN is 2 x 214 x 214
        The output is 112 by 112

        ponto a # 0.16, -0.22
        ponto b # 0.16, 0.22
        ponto c # 0.44, -0.22
        ponto d # 0.44, 0.22

        """
        obj = self.supervisor.service('n')
        n = obj.n
        # print(n)
        blocks = []
        for i in range(1, n, 1):
            pose = self.supervisor.get_position(str(i))
            blocks.append(pose)

        from skimage.transform import ProjectiveTransform
        h = 112
        w = 112
        t = ProjectiveTransform()
        src = np.asarray([[0.16, -0.14], [0.16, 0.14], [0.44, -0.14],
                          [0.44, 0.14]])
        dst = np.asarray([[0, 0], [0, w], [h, 0], [h, w]])
        if not t.estimate(src, dst):
            raise Exception("estimate failed")

        q_map = np.zeros((h, w))
        curve = np.zeros((h, w))
        for block in blocks:
            # print(block)
            x = block.pose.position[0]
            y = block.pose.position[1]
            a = t((x, y))
            # print(a)
            u = int(a[0][0])
            v = int(a[0][1])

            for i in range(h):
                for j in range(w):
                    curve[i, j] = np.sqrt((u - i)**2 + (v - j)**2)
            curve = np.clip(curve, 0, 20)
            curve /= curve.max()
            curve = 1 - curve

            u = min(max(u, 0), h - 1)
            v = min(max(v, 0), w - 1)
            q_map[u, v] = 1
            q_map = np.maximum(q_map, curve)

        return q_map
Example No. 24
def visualize_transformation(im1, im2, A):
    tform = ProjectiveTransform(A)
    out = warp(im2, tform)

    f, ax = plt.subplots(1, 3, figsize=(20, 10))
    ax[0].imshow(im1.squeeze(), cmap='gray')
    ax[1].imshow(im2.squeeze(), cmap='gray')
    ax[2].imshow(out.squeeze(), cmap='gray')
    plt.show()
    return out
Example No. 25
def test_homography_2():
    src_keypoints = array([[1, 20], [5, 5], [15, 3], [7, 14]])

    dest_keypoints = array([[11, 2], [25, 3], [16, 13], [8, 2]])

    H = find_homography(src_keypoints, dest_keypoints)

    assert mean(
        norm(dest_keypoints - ProjectiveTransform(H)(src_keypoints),
             axis=1)) < threshold
Example No. 26
    def get_block_transform(self, iy, ix) -> ProjectiveTransform:
        tr0 = self.data.attrs['transform']
        iy0 = iy * dataManager.spatial.block_shape[0]
        ix0 = ix * dataManager.spatial.block_shape[1]
        y0, x0 = tr0[5] + iy0 * tr0[4], tr0[2] + ix0 * tr0[0]
        tr1 = [tr0[0], tr0[1], x0, tr0[3], tr0[4], y0, 0, 0, 1]
        print(
            f"Tile transform: {tr0}, Block transform: {tr1}, tile indices = [{dataManager.config.value('tile/indices')}], block indices = [ {iy}, {ix} ]"
        )
        return ProjectiveTransform(np.array(tr1).reshape(3, 3))
Example No. 27
    def applyTransformationOnIma(self, ima2):
        if self.transfMat is None:
            self._findTransformationMatrix()
        alignedIma = warp(ima2,
                          ProjectiveTransform(matrix=self.transfMat),
                          output_shape=ima2.shape,
                          order=3,
                          mode='constant',
                          cval=np.median(ima2.data))
        return CCDData(alignedIma, unit=ima2.unit)
Example No. 28
def transform(coords, warp_matrix, out):
    indices = np.array(pd.read_csv(coords, delimiter="\t"))
    a_matrix = np.array(pd.read_csv(warp_matrix, delimiter=",", header=None))

    trans = ProjectiveTransform(matrix=a_matrix)
    warped_coords = warp_coords_batch(trans, indices)

    df = pd.DataFrame()
    df['x'] = warped_coords[:, 0]
    df['y'] = warped_coords[:, 1]
    df.to_csv(out, index=False, sep="\t")
Example No. 29
    def _make_transform_to_crop(self, points, center_points):
        dst_points = self._get_points_to_transform(points, center_points)
        src_points = array([
            (0, 0),
            (0, self.HEIGHT),
            (self.WIDTH, self.HEIGHT),
            (self.WIDTH, 0),
        ])
        tform = ProjectiveTransform()
        tform.estimate(src_points, dst_points)
        return tform
Example No. 30
def affine_registration(params, moving, fixed):
    tmat = np.eye(3)
    tmat[0, :] = params.take([0, 1, 2])
    tmat[1, :] = params.take([3, 4, 5])

    trans = ProjectiveTransform(matrix=tmat)
    warped_coords = warp_coords_batch(trans, fixed.shape)
    t = map_coordinates(moving, warped_coords, mode='reflect')

    eI = (t - fixed)**2
    return eI.flatten()