def apply_warp_augmentation_on_an_image(self, image):
        image_size = image.shape[0]
        #aff_tform = AffineTransform(scale=(1, 1/1.2), rotation=1, shear=0.7, translation=(210, 50))
        aff_tform = AffineTransform(scale=(1, 1 / 1.2))
        image = warp(image,
                     aff_tform,
                     output_shape=(image_size, image_size),
                     order=1,
                     mode='edge')

        x = image_size * 0.2
        # top_left - x,y
        # bottom_left - x,y
        # top_right - x,y
        # bottom_right - x,y
        tl_x, tl_y, bl_x, bl_y, tr_x, tr_y, br_x, br_y = np.random.uniform(
            -x, x, size=8)

        src = np.array([[tl_y, tl_x], [bl_y, image_size - bl_x],
                        [image_size - br_y, image_size - br_x],
                        [image_size - tr_y, tr_x]])
        dst = np.array([[0, 0], [0, image_size], [image_size, image_size],
                        [image_size, 0]])

        proj_tform = ProjectiveTransform()
        proj_tform.estimate(src, dst)

        image = warp(image,
                     proj_tform,
                     output_shape=(image_size, image_size),
                     order=1,
                     mode='edge')

        return image
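A minimal usage sketch for the augmentation above (assumes the imports below; note the method body never touches `self`, so as listed here it can be exercised standalone with a placeholder):

import numpy as np
from skimage.transform import AffineTransform, ProjectiveTransform, warp

image = np.random.rand(32, 32)  # square test image
augmented = apply_warp_augmentation_on_an_image(None, image)  # `self` is unused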
Example #2
def warp_image_by_corner_points_projection(corner_points, image):
    """Given corner points of a Sudoku, warps original selection to a square image.

    :param corner_points:
    :type: corner_points: list
    :param image:
    :type image:
    :return:
    :rtype:

    """
    # Clarify by storing in named variables.
    top_left, top_right, bottom_left, bottom_right = np.array(corner_points)

    top_edge = np.linalg.norm(top_right - top_left)
    bottom_edge = np.linalg.norm(bottom_right - bottom_left)
    left_edge = np.linalg.norm(top_left - bottom_left)
    right_edge = np.linalg.norm(top_right - bottom_right)

    L = int(np.ceil(max([top_edge, bottom_edge, left_edge, right_edge])))
    src = np.array([top_left, top_right, bottom_left, bottom_right])
    dst = np.array([[0, 0], [L - 1, 0], [0, L - 1], [L - 1, L - 1]])

    tr = ProjectiveTransform()
    tr.estimate(dst, src)
    warped_image = warp(image, tr, output_shape=(L, L))
    out = resize(warped_image, (500, 500))

    return out
Example #3
 def prep_image(self):
   """Takes the solved coordinate system and makes a piecewise \
   transform on the origin image to the target image"""
   transform = ProjectiveTransform()
   self.coord_solver.coordinates = self.coord_solver.min_coords.copy()
   self.new_image = np.zeros(self.coord_solver.image.shape)
   coords = np.array([self.coord_solver.coordinates[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(self.coord_solver.coordinates.shape[0]) for y in range(self.coord_solver.coordinates.shape[1]) \
     if (self.coord_solver.coordinates[x:x+2, y:y+2, :].shape == (2, 2, 2))])
   canonical_coords = np.indices((self.coord_solver.width, self.coord_solver.height)).T.astype('float32')
   flattened_canonical = np.array([canonical_coords[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(canonical_coords.shape[0]-1) for y in range(canonical_coords.shape[1]-1)])
   mesh_size = self.coord_solver.mesh_factor
   print "needs %s calcs" % coords.shape[0]
   for k in range(coords.shape[0]):
     src = mesh_size*coords[k, :, :]
     canon_coord = mesh_size*flattened_canonical[k, :, :]
     des = mesh_size*flattened_canonical[k, :, :]
     if not transform.estimate(src, des):
       raise Exception("estimate failed at %s" % str(k))
     area_in_question_x = canon_coord[0, 0].astype(int)
     area_in_question_y = canon_coord[0, 1].astype(int)
     scaled_area = tf.warp(self.coord_solver.image, transform)
     self.new_image[area_in_question_y:area_in_question_y+mesh_size, \
       area_in_question_x:area_in_question_x+mesh_size] += scaled_area[area_in_question_y:\
       area_in_question_y+mesh_size, area_in_question_x:area_in_question_x+mesh_size]
Example #4
def coord(contours, r, g, b, box_img):
    t = ProjectiveTransform()  # Initiate Projective Transform

    # Use the Image and World Reference Points to Generate Source and Destination NumPy arrays for the transform
    src = np.asarray([i_bl, i_tl, i_tr, i_br])
    dst = np.asarray([w_bl, w_tl, w_tr, w_br])

    t.estimate(src, dst)  # Prepare the transform model
    i_nos = 0  # Count number of objects of the color
    objects = []  # Start with an empty array for image coordinates
    for k_coord in contours:  # Check each contour for a valid object
        y_k, x_k, w_k, h_k = cv2.boundingRect(k_coord)
        # Bound the individual contour with a rectangle
        if w_k > 40 and h_k > 40:  # Only select objects bigger than a threshold - to filter out disturbances
            cv2.rectangle(box_img, (y_k, x_k), (y_k + w_k, x_k + h_k),
                          (b, g, r), 2)
            # Draw a box on the image - to be subsequently printed on screen
            objects.append([x_k + h_k / 2, y_k + w_k / 2])
            # Add the object to the array (image coordinates)
            i_nos += 1  # Increase the count of number of objects
    if i_nos == 0:  # If there are no valid objects in the image
        world = []  # return empty array for world coordinates
    else:  # If there are valid objects in the image
        world = t(
            objects
        )  # Carry out Projective Transform to populate the world coordinates array
    return i_nos, objects, world  # return the no. of objects, image coordinates and world coordinates
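A self-contained sketch of the image-to-world mapping this function relies on; the four reference-point pairs below are made-up stand-ins for the module-level i_* and w_* globals:

import numpy as np
from skimage.transform import ProjectiveTransform

# Image-space corners of a calibration rectangle (pixels)...
src = np.asarray([[0, 480], [0, 0], [640, 0], [640, 480]])
# ...and the same corners in world space (e.g. centimetres).
dst = np.asarray([[0, 100], [0, 0], [200, 0], [200, 100]])

t = ProjectiveTransform()
t.estimate(src, dst)
print(t([[320, 240]]))  # world coordinates of the image centre, roughly [[100, 50]]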
Example #5
    def apply_projection_transform(self, Xb, batch_size, image_size):
        d = image_size * 0.3 * self.intensity
        for i in np.random.choice(batch_size,
                                  int(batch_size * self.p),
                                  replace=False):
            tl_top = random.uniform(-d, d)
            tl_left = random.uniform(-d, d)
            bl_bottom = random.uniform(-d, d)
            bl_left = random.uniform(-d, d)
            tr_top = random.uniform(-d, d)
            tr_right = random.uniform(-d, d)
            br_bottom = random.uniform(-d, d)
            br_right = random.uniform(-d, d)

            transform = ProjectiveTransform()
            transform.estimate(
                np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                          (image_size - br_right, image_size - br_bottom),
                          (image_size - tr_right, tr_top))),
                np.array(((0, 0), (0, image_size), (image_size, image_size),
                          (image_size, 0))))
            Xb[i] = warp(Xb[i],
                         transform,
                         output_shape=(image_size, image_size),
                         order=1,
                         mode='edge')
        return Xb
Example #6
def get_sprites(image, ctrs, debug=False):
    """
    This function computes a projective transform from the source (mnist image) to
    the destination (contour) and extracts the warped sprite.
    """
    # We make sure that we work on a local copy of the image
    img = image.copy()

    # We loop through the sprites
    sprts = []

    for contour in ctrs:

        # We compute the projective transform
        source_points = np.array([[28, 28], [0, 28], [0, 0], [28, 0]])
        destination_points = np.array(contour)
        transform = ProjectiveTransform()
        transform.estimate(source_points, destination_points)

        # We transform the image
        warped = warp(img, transform, output_shape=(28, 28))

        if debug:
            _, axis = plt.subplots(nrows=2, figsize=(8, 3))
            axis[0].imshow(img, cmap='gray')
            axis[0].plot(destination_points[:, 0], destination_points[:, 1], '.r')
            axis[0].set_axis_off()
            axis[1].imshow(warped, cmap='gray')
            axis[1].set_axis_off()
            plt.tight_layout()
            plt.show()

        sprts.append(warped)

    return sprts
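A quick sketch exercising get_sprites on a synthetic image; the contour corners are ordered to match source_points, i.e. bottom-right, bottom-left, top-left, top-right in (x, y) pixel coordinates:

import numpy as np

image = np.zeros((100, 100))
image[30:58, 40:68] = 1.0  # a bright 28x28 patch standing in for a digit
ctrs = [np.array([[68, 58], [40, 58], [40, 30], [68, 30]])]
sprites = get_sprites(image, ctrs)
print(sprites[0].shape)  # (28, 28)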
Example #7
def normalize_image(image, image_colored, rescale_param=0.5):
    image = image.copy()
    image_colored = image_colored.copy()
    image_scaled = rescale(image, rescale_param)
    edges = canny(image_scaled)

    selem = disk(1)
    edges = dilation(edges, selem)

    edges = edges.astype(np.uint8)
    img, ext_contours, hierarchy = cv2.findContours(edges.copy(),
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
    contour = max(ext_contours, key=cv2.contourArea)
    contour = contour.squeeze()

    epsilon = 0.05 * cv2.arcLength(contour, True)
    corners = cv2.approxPolyDP(contour, epsilon, True).squeeze()
    corners = perspective.order_points(corners)
    corners = corners / rescale_param

    size_square = min(image_scaled.shape)
    tform = ProjectiveTransform()
    tform.estimate(np.array([[0, 0], [1020, 0], [1020, 720], [0, 720]]),
                   corners)
    image_warped = warp(image_colored, tform)[:720, :1020]

    img = img_as_ubyte(image_warped)
    img = adjusting_brightness(img[30:-5, 15:-15], a=1.7, b=2)
    return img, tform
Example #8
def robustEstimate(ptsA, ptsB):
	"""
	Perform robust estimation on the given
	correspondences using RANSAC.

	Args:
	----
		ptsA: A 2 x N matrix of points.
		ptsB: A 2 x N matrix of points.

	Returns:
	-------
		The number of inliers within the points.
	"""
	src, dst, N = [], [], ptsA.shape[1]
	for i in range(N):
		src.append((ptsA[0, i], ptsA[1, i]))
		dst.append((ptsB[0, i], ptsB[1, i]))

	src, dst = np.asarray(src), np.asarray(dst)

	model = ProjectiveTransform()
	model.estimate(src, dst)  # direct least-squares fit over all points (kept for reference)
	# A projective transform needs at least 4 correspondences per RANSAC sample.
	model_robust, inliers = ransac((src, dst), ProjectiveTransform, min_samples=4, residual_threshold=2, max_trials=100)

	return inliers
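A hedged sanity check on synthetic correspondences related by a pure translation, with two gross outliers injected (assumes the imports the snippet itself needs):

import numpy as np
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform

ptsA = np.random.rand(2, 50) * 100
ptsB = ptsA + np.array([[5.0], [3.0]])  # translated copies
ptsB[:, :2] += 40.0                     # two outliers
inliers = robustEstimate(ptsA, ptsB)
print(inliers.sum())  # close to 48 of the 50 points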
Example #9
def preprocess_image(im):

    im = resize_im(im, MAX_SIDE_LENGTH)
    gray = rgb2gray(im)

    # Get the edges of the receipt
    top_line, right_line, bottom_line, left_line = get_receipt_edges(gray)

    # Intersect to get corners
    TR = line_intersection(top_line, right_line)
    TL = line_intersection(top_line, left_line)
    BR = line_intersection(bottom_line, right_line)
    BL = line_intersection(bottom_line, left_line)

    # Warp so receipt corners are image corners
    transform = ProjectiveTransform()
    height = max([BL[1] - TL[1], BR[1] - TR[1]])
    width = max([TR[0] - TL[0], BR[0] - BL[0]])
    src_pts = np.array([TL, TR, BL, BR])
    dest_pts = np.array([[0, 0], [width, 0], [0, height], [width, height]])
    if not transform.estimate(src_pts, dest_pts):
        raise ValueError("Could not estimate projective transform from receipt corners")
    warped_im = warp(gray, transform.inverse)[:int(height), :int(width)]

    # warp() was applied to the already-grayscale image, so no further conversion is needed
    enhanced_gray = img_as_ubyte(adjust_log(warped_im))

    return enhanced_gray
Example #10
    def random_transform(cls, img):
        image_size = img.shape[0]
        d = image_size * 0.2
        tl_top, tl_left, bl_bottom, bl_left, tr_top, tr_right, br_bottom, br_right = np.random.uniform(
            -d, d, size=8)  # random margins for all four corners
        aft = AffineTransform(scale=(1, 1 / 1.2))
        img = warp(img,
                   aft,
                   output_shape=(image_size, image_size),
                   order=1,
                   mode='edge')
        transform = ProjectiveTransform()
        transform.estimate(
            np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                      (image_size - br_right, image_size - br_bottom),
                      (image_size - tr_right, tr_top))),
            np.array(((0, 0), (0, image_size), (image_size, image_size),
                      (image_size, 0))))

        img = warp(img,
                   transform,
                   output_shape=(image_size, image_size),
                   order=1,
                   mode='edge')
        return img
Example #11
def apply_projection_transform_(X, intensity):
    image_size = X.shape[1]
    d = image_size * 0.3 * intensity
    for i in range(X.shape[0]):
        tl_top = random.uniform(-d, d)     # Top left corner, top margin
        tl_left = random.uniform(-d, d)    # Top left corner, left margin
        bl_bottom = random.uniform(-d, d)  # Bottom left corner, bottom margin
        bl_left = random.uniform(-d, d)    # Bottom left corner, left margin
        tr_top = random.uniform(-d, d)     # Top right corner, top margin
        tr_right = random.uniform(-d, d)   # Top right corner, right margin
        br_bottom = random.uniform(-d, d)  # Bottom right corner, bottom margin
        br_right = random.uniform(-d, d)   # Bottom right corner, right margin

        transform = ProjectiveTransform()
        transform.estimate(np.array((
                (tl_left, tl_top),
                (bl_left, image_size - bl_bottom),
                (image_size - br_right, image_size - br_bottom),
                (image_size - tr_right, tr_top)
            )), np.array((
                (0, 0),
                (0, image_size),
                (image_size, image_size),
                (image_size, 0)
            )))
        X[i] = warp(X[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')

    return X
Example #12
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    # See gh-3926 for discussion details
    tform = ProjectiveTransform()
    for i in range(20):
        # Some random coordinates
        src = np.random.rand(4, 2) * 100
        dst = np.random.rand(4, 2) * 100

        # Degenerate the case by arranging points on a single line
        src[:, 1] = np.random.rand()
        # Prior to gh-3926, under the above circumstances,
        # a transform could be returned with nan values.
        assert (not tform.estimate(src, dst)
                or np.isfinite(tform.params).all())
Example #13
    def _get_rand_transform_matrix(self, image_size, d, batch_size):

        M = np.zeros((batch_size, 8))

        for i in range(batch_size):
            tl_top = random.uniform(-d, d)  # Top left corner, top
            tl_left = random.uniform(-d, d)  # Top left corner, left
            bl_bottom = random.uniform(-d, d)  # Bot left corner, bot
            bl_left = random.uniform(-d, d)  # Bot left corner, left
            tr_top = random.uniform(-d, d)  # Top right corner, top
            tr_right = random.uniform(-d, d)  # Top right corner, right
            br_bottom = random.uniform(-d, d)  # Bot right corner, bot
            br_right = random.uniform(-d, d)  # Bot right corner, right

            transform = ProjectiveTransform()
            transform.estimate(
                np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                          (image_size - br_right, image_size - br_bottom),
                          (image_size - tr_right, tr_top))),
                np.array(((0, 0), (0, image_size), (image_size, image_size),
                          (image_size, 0))))

            M[i] = transform.params.flatten()[:8]

        return M
Example #14
def apply_projection_transform(X, intensity=0.75, depth=1):
    no_samples=X.shape[0]
    image_size=X.shape[1]
    no_channels=X.shape[3]
    d = image_size * 0.3 * intensity
    indices_project = np.random.choice(
        X.shape[0], math.ceil(X.shape[0]*depth*0.5), replace=False)
    X_ = []
    for i in indices_project:
        tl_top = uniform(-d, d)     # Top left corner, top margin
        tl_left = uniform(-d, d)    # Top left corner, left margin
        bl_bottom = uniform(-d, d)  # Bottom left corner, bottom margin
        bl_left = uniform(-d, d)    # Bottom left corner, left margin
        tr_top = uniform(-d, d)     # Top right corner, top margin
        tr_right = uniform(-d, d)   # Top right corner, right margin
        br_bottom = uniform(-d, d)  # Bottom right corner, bottom margin
        br_right = uniform(-d, d)   # Bottom right corner, right margin

        transform = ProjectiveTransform()
        transform.estimate(np.array((
                (tl_left, tl_top),
                (bl_left, image_size - bl_bottom),
                (image_size - br_right, image_size - br_bottom),
                (image_size - tr_right, tr_top)
            )), np.array((
                (0, 0),
                (0, image_size),
                (image_size, image_size),
                (image_size, 0)
            )))

        X_.append(warp(X[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge'))
        X_.append(warp(X[i], transform.inverse, output_shape=(image_size, image_size), order = 1, mode = 'edge'))
        
    return np.asarray(X_)  
Example #15
    def match_features(self):
        self.tforms = [ProjectiveTransform()]
        self.new_corners = np.copy(self.corners)

        for i in range(1, self.num_imgs):
            # Find correspondences between I(n) and I(n-1).
            matches = match_descriptors(self.descriptors[i - 1],
                                        self.descriptors[i],
                                        cross_check=True)

            # Estimate the transformation between I(n) and I(n-1).
            src = self.keypoints[i][matches[:, 1]][:, ::-1]
            dst = self.keypoints[i - 1][matches[:, 0]][:, ::-1]

            model, _ = ransac((src, dst),
                              ProjectiveTransform,
                              4,
                              residual_threshold=2,
                              max_trials=2000)
            self.tforms.append(
                ProjectiveTransform(model.params @ self.tforms[-1].params))

            # Compute new corners transformed by models
            self.new_corners[i] = self.tforms[-1](self.corners[i])

        corners_min = np.min(self.new_corners, axis=1)
        corners_max = np.max(self.new_corners, axis=1)

        self.xLim = corners_max[:, 0] - corners_min[:, 0]
        self.yLim = corners_max[:, 1] - corners_min[:, 1]
Example #16
def randomPerspective(im):
    '''
    Wrapper for a projective (perspective) transform: maps the four image corners
    to four random points, each drawn from a region around its corresponding corner.
    '''
    region = 1 / 4
    A = pl.array([[0, 0], [0, im.shape[0]], [im.shape[1], im.shape[0]],
                  [im.shape[1], 0]])
    B = pl.array([
        [
            int(randRange(0, im.shape[1] * region)),
            int(randRange(0, im.shape[0] * region))
        ],
        [
            int(randRange(0, im.shape[1] * region)),
            int(randRange(im.shape[0] * (1 - region), im.shape[0]))
        ],
        [
            int(randRange(im.shape[1] * (1 - region), im.shape[1])),
            int(randRange(im.shape[0] * (1 - region), im.shape[0]))
        ],
        [
            int(randRange(im.shape[1] * (1 - region), im.shape[1])),
            int(randRange(0, im.shape[0] * region))
        ],
    ])

    pt = ProjectiveTransform()
    pt.estimate(A, B)
    return warp(im, pt, output_shape=im.shape[:2])
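randomPerspective depends on a randRange helper that is not shown; a minimal stand-in (assumed semantics: uniform float in [a, b)) plus a call:

import numpy as np
import pylab as pl
from skimage.transform import ProjectiveTransform, warp

def randRange(a, b):
    # assumed helper: uniform random float in [a, b)
    return np.random.uniform(a, b)

im = np.random.rand(64, 64)
out = randomPerspective(im)
print(out.shape)  # (64, 64)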
Example #17
def apply_projection_transform(Xb, batch_size, image_size, intensity=1.0, p=1.0):
    """
    Applies a projection transform to a random subset of images. Projection margins are randomised in a
    range depending on the size of the image; the range itself is scaled by the augmentation intensity,
    and p is the fraction of the batch to transform.
    """
    d = image_size * 0.3 * intensity
    for i in np.random.choice(batch_size, int(batch_size * p), replace = False):        
        tl_top = random.uniform(-d, d)     # Top left corner, top margin
        tl_left = random.uniform(-d, d)    # Top left corner, left margin
        bl_bottom = random.uniform(-d, d)  # Bottom left corner, bottom margin
        bl_left = random.uniform(-d, d)    # Bottom left corner, left margin
        tr_top = random.uniform(-d, d)     # Top right corner, top margin
        tr_right = random.uniform(-d, d)   # Top right corner, right margin
        br_bottom = random.uniform(-d, d)  # Bottom right corner, bottom margin
        br_right = random.uniform(-d, d)   # Bottom right corner, right margin

        transform = ProjectiveTransform()
        transform.estimate(np.array((
                (tl_left, tl_top),
                (bl_left, image_size - bl_bottom),
                (image_size - br_right, image_size - br_bottom),
                (image_size - tr_right, tr_top)
            )), np.array((
                (0, 0),
                (0, image_size),
                (image_size, image_size),
                (image_size, 0)
            )))
        Xb[i] = warp(Xb[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')

    return Xb
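A quick usage sketch (a batch of square grayscale images; the values for intensity and p are illustrative):

import numpy as np

Xb = np.random.rand(16, 32, 32)
Xb = apply_projection_transform(Xb, batch_size=16, image_size=32, intensity=0.5, p=0.5)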
Example #18
def normalize_slice_projection(slice_pixel_array, crop_bounds, desired_width,
                               desired_height):
    result = np.zeros((desired_height, desired_width))
    #Crop bounds must be converted from (x, y) points to (y, x) points
    source_bounds = np.asarray([[0, 0], [0, desired_width],
                                [desired_height, desired_width],
                                [desired_height, 0]])
    destination_bounds = np.asarray([[crop_bounds[0][1], crop_bounds[0][0]],
                                     [crop_bounds[1][1], crop_bounds[1][0]],
                                     [crop_bounds[2][1], crop_bounds[2][0]],
                                     [crop_bounds[3][1], crop_bounds[3][0]]])
    projective_transform = ProjectiveTransform()
    if not projective_transform.estimate(source_bounds, destination_bounds):
        print("Cannot project from crop bounds to desired image dimensions")
    else:
        for x in range(0, desired_width):
            for y in range(0, desired_height):
                normalized_point = [y, x]
                transform = projective_transform(normalized_point)
                slice_point = transform[0]
                value = MathUtil.sample_image_bilinear(slice_pixel_array,
                                                       slice_point[1],
                                                       slice_point[0])
                result[y][x] = value

    return result
Example #19
def projection_transform(image, max_warp=0.8, height=32, width=32):
    # Warp magnitude
    d = height * 0.3 * np.random.uniform(0, max_warp)

    #Warp co-ordinates
    tl_top = np.random.uniform(-d, d)  # Top left corner, top margin
    tl_left = np.random.uniform(-d, d)  # Top left corner, left margin
    bl_bottom = np.random.uniform(-d, d)  # Bottom left corner, bottom margin
    bl_left = np.random.uniform(-d, d)  # Bottom left corner, left margin
    tr_top = np.random.uniform(-d, d)  # Top right corner, top margin
    tr_right = np.random.uniform(-d, d)  # Top right corner, right margin
    br_bottom = np.random.uniform(-d, d)  # Bottom right corner, bottom margin
    br_right = np.random.uniform(-d, d)  # Bottom right corner, right margin

    # Apply the projection
    transform = ProjectiveTransform()
    transform.estimate(
        np.array(((tl_left, tl_top), (bl_left, height - bl_bottom),
                  (height - br_right, height - br_bottom), (height - tr_right,
                                                            tr_top))),
        np.array(((0, 0), (0, height), (height, height), (height, 0))))
    output_image = warp(image,
                        transform,
                        output_shape=(height, width),  # corner geometry above assumes height == width
                        order=1,
                        mode='edge')
    return output_image
Example #20
def apply_projection_transform(X, intensity):
    image_size = X.shape[1]
    d = image_size * 0.3 * intensity

    tl_top = random.uniform(-d, d)
    tl_left = random.uniform(-d, d)
    bl_bottom = random.uniform(-d, d)
    bl_left = random.uniform(-d, d)
    tr_top = random.uniform(-d, d)
    tr_right = random.uniform(-d, d)
    br_bottom = random.uniform(-d, d)
    br_right = random.uniform(-d, d)

    transform = ProjectiveTransform()
    transform.estimate(
        np.array(((tl_left, tl_top), (bl_left, image_size - bl_bottom),
                  (image_size - br_right,
                   image_size - br_bottom), (image_size - tr_right, tr_top))),
        np.array(((0, 0), (0, image_size), (image_size, image_size),
                  (image_size, 0))))

    X = warp(X,
             transform,
             output_shape=(image_size, image_size),
             order=1,
             mode='edge')

    return X
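Note that despite the batch-style name, this variant warps a single image. A minimal call (assumes the random/numpy/skimage imports used throughout these examples):

import numpy as np

X = np.random.rand(32, 32)
X_warped = apply_projection_transform(X, intensity=0.5)
print(X_warped.shape)  # (32, 32)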
Example #21
    def __init__(self, config):
        '''
        Initialization for the Registration class.

        Parameters
        -------------
        config: Config
        configuration class for the traffic intersection

        '''

        self.config = config

        # Four corners of the intersection, hard-coded in camera space
        self.corners = self.config.street_corners
        # Four corners of the intersection, hard-coded in transformed space
        self.st_corners = self.config.simulator_corners

        # Computes the projective transform going from the camera to the simulator coordinate frame
        self.tf_mat = ProjectiveTransform()

        self.sc, self.cc = self.load_homography_data()
        self.tf_mat.estimate(self.sc, self.cc)

        self.af = AddOffset()
Example #22
def find_simple_center_warps(forward_transforms):
    """Find transformations that transform each image to plane of the central image.

    forward_transforms (Tuple[N]) : pairwise transformations

    Returns:
        Tuple[N + 1] : transformations to the plane of central image
    """
    image_count = len(forward_transforms) + 1
    center_index = (image_count - 1) // 2

    result = [None] * image_count
    result[center_index] = DEFAULT_TRANSFORM()
    cur_h = DEFAULT_TRANSFORM().params
    for i in reversed(range(center_index)):
        cur_h = np.matmul(
            inv(forward_transforms[i].params), cur_h
        )  # for some reason the calculation via inversions is more accurate
        result[i] = ProjectiveTransform(inv(cur_h))

    cur_h = DEFAULT_TRANSFORM().params
    for i in range(center_index, image_count - 1):
        cur_h = np.matmul(cur_h, inv(forward_transforms[i].params))
        result[i + 1] = ProjectiveTransform(cur_h)

    return tuple(result)
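A tiny sketch of the chaining, assuming DEFAULT_TRANSFORM = ProjectiveTransform and inv = numpy.linalg.inv (matching the names used above); the identity transforms are purely illustrative:

import numpy as np
from numpy.linalg import inv
from skimage.transform import ProjectiveTransform

DEFAULT_TRANSFORM = ProjectiveTransform
forward = (ProjectiveTransform(np.eye(3)), ProjectiveTransform(np.eye(3)))
center_warps = find_simple_center_warps(forward)
print(len(center_warps))  # 3: one transform per image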
Example #23
 def prep_image(self):
   """Takes the solved coordinate system and makes a piecewise \
   transform on the origin image to the target image"""
   transform = ProjectiveTransform()
   self.coord_solver.coordinates = self.coord_solver.min_coords.copy()
   self.new_image = np.zeros(self.coord_solver.image.shape)
   coords = np.array([self.coord_solver.coordinates[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(self.coord_solver.coordinates.shape[0]) for y in range(self.coord_solver.coordinates.shape[1]) \
     if (self.coord_solver.coordinates[x:x+2, y:y+2, :].shape == (2, 2, 2))])
   canonical_coords = np.indices((self.coord_solver.width, self.coord_solver.height)).T.astype('float32')
   flattened_canonical = np.array([canonical_coords[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(canonical_coords.shape[0]-1) for y in range(canonical_coords.shape[1]-1)])
   mesh_size = self.coord_solver.mesh_factor
   print "needs %s calcs" % coords.shape[0]
   coord_grid = np.indices(self.coord_solver.image.shape[:-1]).T.astype('float32').reshape(-1,2)
   for k in range(coords.shape[0]):
     des = mesh_size*coords[k, :, :]
     canon_coord = mesh_size*flattened_canonical[k, :, :]
     src = mesh_size*flattened_canonical[0, :, :]
     if not transform.estimate(des, canon_coord):
       raise Exception("estimate failed at %s" % str(k))
     area_in_question_x = canon_coord[0, 0].astype(int)
     area_in_question_y = canon_coord[0, 1].astype(int)
     scaled_area = tf.warp(self.coord_solver.image, transform)
     area_path = path.Path([des[0],des[1],des[3],des[2],des[0]])
     points_in_area = area_path.contains_points(coord_grid,radius=0.00001).reshape(self.coord_solver.image.shape[:-1])
     self.new_image += scaled_area*points_in_area[:,:,np.newaxis]
Example #24
    def projection_transform(self, x):
        image_size = x.shape[1]

        change = image_size * 0.3 * self.intensity

        x_return = x.copy()  # keep the originals so untransformed images pass through unchanged

        indices = np.random.choice(x.shape[0], int(x.shape[0] * self.ratio),
                                   replace=False)
        for i in indices:
            changes = []
            for _ in range(8):
                changes.append(random.uniform(-change, change))

            transform = ProjectiveTransform()
            transform.estimate(np.array(
                (
                    (changes[0], changes[1]),  # top left
                    (changes[2], image_size - changes[3]),  # bottom left
                    (image_size - changes[4], changes[5]),  # top right
                    (image_size - changes[6], image_size - changes[7])  # bottom right
                )), np.array(
                (
                    (0, 0),
                    (0, image_size),
                    (image_size, 0),
                    (image_size, image_size)
                ))
            )

            x_return[i] = warp(x[i], transform,
                               output_shape=(image_size, image_size),
                               order=1, mode="edge")

        return x_return
Example #25
def source_to_projection(src: List, verbose: bool = False):
    # for point in src:
    # print(round(point[0],3), round(point[1], 3))
    src = np.asarray(src)  # bottom_left, top_left, top_right, bottom_right
    dst = np.asarray([[0, 0], [0, 1], [1, 1], [1, 0]])
    pt = ProjectiveTransform()
    pt.estimate(src, dst)
    return pt
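A minimal call (corner values are illustrative; note the expected source order is bottom_left, top_left, top_right, bottom_right):

pt = source_to_projection([[0, 0], [0, 480], [640, 480], [640, 0]])
print(pt([[320, 240]]))  # maps the image centre into the unit square, roughly [[0.5, 0.5]]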
Example #26
 def projection(self, tile: HipsTile) -> ProjectiveTransform:
     """Estimate projective transformation on a HiPS tile."""
     corners = tile.meta.skycoord_corners.to_pixel(self.geometry.wcs)
     src = np.array(corners).T.reshape((4, 2))
     dst = tile_corner_pixel_coordinates(tile.meta.width)
     pt = ProjectiveTransform()
     pt.estimate(src, dst)
     return pt
Example #27
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = EuclideanTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    # See gh-3926 for discussion details
    tform = ProjectiveTransform()
    for i in range(20):
        # Some random coordinates
        src = np.random.rand(4, 2) * 100
        dst = np.random.rand(4, 2) * 100

        # Degenerate the case by arranging points on a single line
        src[:, 1] = np.random.rand()
        # Prior to gh-3926, under the above circumstances,
        # a transform could be returned with nan values.
        assert(not tform.estimate(src, dst) or np.isfinite(tform.params).all())

    src = np.array([[0, 2, 0], [0, 2, 0], [0, 4, 0]])
    dst = np.array([[0, 1, 0], [0, 1, 0], [0, 3, 0]])
    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    # Prior to gh-6207, the above would set the parameters as the identity.
    assert np.all(np.isnan(tform.params))

    # The tessellation on the following points produces one degenerate affine
    # warp within PiecewiseAffineTransform.
    src = np.asarray([
        [0, 192, 256], [0, 256, 256], [5, 0, 192], [5, 64, 0], [5, 64, 64],
        [5, 64, 256], [5, 192, 192], [5, 256, 256], [0, 192, 256],
    ])

    dst = np.asarray([
        [0, 142, 206], [0, 206, 206], [5, -50, 142], [5, 14, 0], [5, 14, 64],
        [5, 14, 206], [5, 142, 142], [5, 206, 206], [0, 142, 206],
    ])
    tform = PiecewiseAffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.affines[4].params))  # degenerate affine
    for idx, affine in enumerate(tform.affines):
        if idx != 4:
            assert not np.all(np.isnan(affine.params))
    for affine in tform.inverse_affines:
        assert not np.all(np.isnan(affine.params))
Example #28
def ransac_transform(src_keypoints,
                     src_descriptors,
                     dest_keypoints,
                     dest_descriptors,
                     max_trials=N_TRIALS,
                     residual_threshold=1,
                     return_matches=False):
    """Match keypoints of 2 images and find ProjectiveTransform using RANSAC algorithm.

    src_keypoints ((N, 2) np.ndarray) : source coordinates
    src_descriptors ((N, 256) np.ndarray) : source descriptors
    dest_keypoints ((N, 2) np.ndarray) : destination coordinates
    dest_descriptors ((N, 256) np.ndarray) : destination descriptors
    max_trials (int) : maximum number of iterations for random sample selection.
    residual_threshold (float) : maximum distance for a data point to be classified as an inlier.
    return_matches (bool) : if True function returns matches

    Returns:
        skimage.transform.ProjectiveTransform : transform of source image to destination image
        (Optional)(N, 2) np.ndarray : inliers' indexes of source and destination images
    """

    # your code here
    matches = match_descriptors(src_descriptors, dest_descriptors)
    n = matches.shape[0]
    res_inds = [-1, -1, -1, -1]
    res_kol = 0
    for trial in range(max_trials):
        inds = random.sample(range(n), 4)
        h = find_homography(src_keypoints[matches[inds, 0]],
                            dest_keypoints[matches[inds, 1]])
        projected = ProjectiveTransform(h)(src_keypoints[matches[:, 0]])
        dist = np.sqrt(
            np.power(projected[:, 0] - dest_keypoints[matches[:, 1], 0], 2) +
            np.power(projected[:, 1] - dest_keypoints[matches[:, 1], 1], 2))
        kol = np.sum(dist < residual_threshold)
        if kol > res_kol:
            print("trial: {}, kol: {}".format(trial, kol))
            res_kol = kol
            res_inds = inds
            if res_kol > len(src_keypoints) / 10 and trial > max_trials / 10:
                break
    h = find_homography(src_keypoints[matches[res_inds, 0]],
                        dest_keypoints[matches[res_inds, 1]])
    transform = ProjectiveTransform(h)
    projected = transform(src_keypoints[matches[:, 0]])
    dist = np.sqrt(
        np.power(projected[:, 0] - dest_keypoints[matches[:, 1], 0], 2) +
        np.power(projected[:, 1] - dest_keypoints[matches[:, 1], 1], 2))
    inliers = matches[dist < residual_threshold]
    print("{} inliers matched".format(inliers.shape[0]))
    transform = ProjectiveTransform(
        find_homography(src_keypoints[inliers[:, 0]],
                        dest_keypoints[inliers[:, 1]]))
    if return_matches:
        return transform, inliers
    else:
        return transform
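The hand-rolled loop above reimplements what skimage.measure.ransac provides; an equivalent hedged sketch using the library routine on synthetic inputs (identical descriptors guarantee perfect matches):

import numpy as np
from skimage.feature import match_descriptors
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform

src_kp = np.random.rand(100, 2) * 200
dest_kp = src_kp + [10, 5]             # translated copies
desc = np.random.rand(100, 256) > 0.5  # shared binary descriptors
matches = match_descriptors(desc, desc, cross_check=True)
model, inliers = ransac((src_kp[matches[:, 0]], dest_kp[matches[:, 1]]),
                        ProjectiveTransform, min_samples=4,
                        residual_threshold=1, max_trials=100)
print(inliers.sum())  # all 100 matches are inliers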
Example #29
    def estimar_valores_q(self, rgb=None):
        """
        Estimate the reward result from position of the objects

        The input to the CNN is 2 x 214 x 214
        The output is 112 by 112

        point a # 0.16, -0.22
        point b # 0.16, 0.22
        point c # 0.44, -0.22
        point d # 0.44, 0.22

        """
        obj = self.supervisor.service('n')
        n = obj.n
        # print(n)
        blocks = []
        for i in range(1, n, 1):
            pose = self.supervisor.get_position(str(i))
            blocks.append(pose)

        from skimage.transform import ProjectiveTransform
        h = 112
        w = 112
        t = ProjectiveTransform()
        src = np.asarray([[0.16, -0.14], [0.16, 0.14], [0.44, -0.14],
                          [0.44, 0.14]])
        dst = np.asarray([[0, 0], [0, w], [h, 0], [h, w]])
        if not t.estimate(src, dst): raise Exception("estimate failed")

        max = np.zeros((h, w))
        curve = np.zeros((h, w))
        for block in blocks:
            # print(block)
            x = block.pose.position[0]
            y = block.pose.position[1]
            a = t((x, y))
            # print(a)
            u = int(a[0][0])
            v = int(a[0][1])

            for i in range(h):
                for j in range(w):
                    curve[i, j] = np.sqrt((u - i)**2 + (v - j)**2)
            curve = np.clip(curve, 0, 20)
            curve /= curve.max()
            curve = 1 - curve

            if u < 0: u = 0
            if v < 0: v = 0
            if u > h - 1: u = h - 1
            if v > w - 1: v = w - 1
            max[u, v] = 1
            max = np.maximum(max, curve)

        return max
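The per-pixel double loop above can be vectorized; a sketch of the same clipped-distance reward curve (the u, v block position is illustrative):

import numpy as np

h = w = 112
u, v = 40, 60  # projected block position
ii, jj = np.indices((h, w))
curve = np.clip(np.sqrt((u - ii) ** 2 + (v - jj) ** 2), 0, 20)
curve = 1 - curve / curve.max()  # 1 at the block, falling to 0 beyond 20 px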
Example #30
 def _make_transform_to_crop(self, points, center_points):
     dst_points = self._get_points_to_transform(points, center_points)
     src_points = array([
         (0, 0),
         (0, self.HEIGHT),
         (self.WIDTH, self.HEIGHT),
         (self.WIDTH, 0),
     ])
     tform = ProjectiveTransform()
     tform.estimate(src_points, dst_points)
     return tform
Example #31
def ransac_transform(src_keypoints, src_descriptors, dest_keypoints, dest_descriptors, max_trials=25,
                     residual_threshold=10,
                     return_matches=False):
    """Match keypoints of 2 images and find ProjectiveTransform using RANSAC algorithm.

    src_keypoints ((N, 2) np.ndarray) : source coordinates
    src_descriptors ((N, 256) np.ndarray) : source descriptors
    dest_keypoints ((N, 2) np.ndarray) : destination coordinates
    dest_descriptors ((N, 256) np.ndarray) : destination descriptors
    max_trials (int) : maximum number of iterations for random sample selection.
    residual_threshold (float) : maximum distance for a data point to be classified as an inlier.
    return_matches (bool) : if True function returns matches

    Returns:
        skimage.transform.ProjectiveTransform : transform of source image to destination image
        (Optional)(N, 2) np.ndarray : inliers' indexes of source and destination images
    """

    # your code here
    matches = match_descriptors(src_descriptors, dest_descriptors, cross_check=True)
    srt_matches = src_keypoints[matches[..., 0]]
    dest_matches = dest_keypoints[matches[..., 1]]
    size = matches.shape[0]
    assert (size > 20)
    random.seed(97)
    best_pen = 1e18
    best_H = np.zeros((3, 3))

    def penalty(H):
        res = np.linalg.norm(dest_matches - ProjectiveTransform(H)(srt_matches), axis=1)
        res[res > residual_threshold] = residual_threshold
        return res.sum()

    for i in range(max_trials):
        subset = random.choices(np.arange(size), k=4)
        while np.unique(subset).size < 4:
            subset = random.choices(np.arange(size), k=4)
        H = find_homography(srt_matches[subset], dest_matches[subset])
        pen = penalty(H)
        if pen < best_pen:
            best_pen = pen
            best_H = H

    res = np.linalg.norm(dest_matches - ProjectiveTransform(best_H)(srt_matches), axis=1)
    mask = res <= residual_threshold
    H = find_homography(srt_matches[mask], dest_matches[mask])
    print("match=", mask.sum())
    if return_matches:
        return ProjectiveTransform(H), matches[mask]
    else:
        return ProjectiveTransform(H)
Example #32
 def correct_warping(self):
     self.warpcorrected_keypoints = []
     coordinates = np.array(self.coordinates[:4])
     target_coordinates = np.asarray([[0, 0], [0, 1], [1, 1], [1, 0]])
     t = ProjectiveTransform()
     t.estimate(coordinates,target_coordinates)
     for frame_count, keypoint_t in enumerate(self.keypoints):
         if np.size(keypoint_t) != 0:
             corrected_kp_t = t(keypoint_t[:,:2])
             self.warpcorrected_keypoints.append(corrected_kp_t)
         else:
             self.warpcorrected_keypoints.append(np.array([]))
     return self.warpcorrected_keypoints
Example #33
def test_projective_estimation():
    # exact solution
    tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
    assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :])

    # over-determined
    tform2 = estimate_transform('projective', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)

    # via estimate method
    tform3 = ProjectiveTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3._matrix, tform2._matrix)
Example #34
    def _redraw(self):
        # Clear the canvas
        self._plt.clear()

        if len(self._points) == 2 and len(self._lastPoints) == 4:
            # Get points
            src = self._pointsToVector(self._lastPoints)
            dest = self._pointsToVector(self._rectangle())

            # Compute Transformation
            self._projective = ProjectiveTransform()
            self._projective.estimate(src, dest)

            # Prepare output image
            self._render = warp(self._image, self._projective.inverse)

        # Plot the image
        if self._render is not None:
            self._plt.autoscale(True)
            self._plt.imshow(self._render)
            self._plt.autoscale(False)

        # Plot the points
        if len(self._points) > 0:
            xs = [x for (x, _) in self._rectangle()]
            ys = [y for (_, y) in self._rectangle()]
            self._plt.plot(xs + [xs[0]], ys + [ys[0]], '-', color='green')

            xs = [x for (x, _) in self._points]
            ys = [y for (_, y) in self._points]
            self._plt.plot(xs + [xs[0]], ys + [ys[0]], 'o', color='blue')

        # Draw the canvas
        self._canvas.draw()
Example #35
class HomographyWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(HomographyWidget, self).__init__(parent)
        self._initUI()

    # Initialize the UI
    def _initUI(self):
        # Widget parameters
        self.setMinimumWidth(300)

        # Create the figure
        self._fig = Figure()

        # Canvas configuration
        self._canvas = FigureCanvas(self._fig)
        self._canvas.setParent(self)
        self._canvas.mpl_connect('button_press_event', self._onPick)

        # Plot configuration
        self._plt = self._fig.add_subplot(111)
        self._plt.xaxis.set_visible(False)
        self._plt.yaxis.set_visible(False)

        # Finalize figure
        self._fig.subplots_adjust(wspace=0, hspace=0)

        # Reset the variables
        self.reset()

        # Create the layout
        vbox = QtGui.QVBoxLayout()

        # Add Canvas to the layout
        vbox.addWidget(self._canvas)

        # Set the layout
        self.setLayout(vbox)

        zp = ZoomPan()
        figZoom = zp.zoom_factory(self._plt)
        figPan = zp.pan_factory(self._plt)

    # Reset the variables to original state
    def reset(self):
        self._image = None
        self._render = None
        self._points = []
        self._lastPoints = []
        self._canvas.hide()

    # Set an image to the widget
    def setImage(self, image):
        self._image = image
        self._render = image
        self._canvas.show()
        self._redraw()

    # Get the image of the widget
    def getImage(self):
        pass

    def setHomography(self, points):
        # Save points
        self._lastPoints = points

        # Redraw canvas
        self._redraw()

    # Redraw the image and points
    def _redraw(self):
        # Clear the canvas
        self._plt.clear()

        if len(self._points) == 2 and len(self._lastPoints) == 4:
            # Get points
            src = self._pointsToVector(self._lastPoints)
            dest = self._pointsToVector(self._rectangle())

            # Compute Transformation
            self._projective = ProjectiveTransform()
            self._projective.estimate(src, dest)

            # Prepare output image
            self._render = warp(self._image, self._projective.inverse)

        # Plot the image
        if self._render is not None:
            self._plt.autoscale(True)
            self._plt.imshow(self._render)
            self._plt.autoscale(False)

        # Plot the points
        if len(self._points) > 0:
            xs = [x for (x, _) in self._rectangle()]
            ys = [y for (_, y) in self._rectangle()]
            self._plt.plot(xs + [xs[0]], ys + [ys[0]], '-', color='green')

            xs = [x for (x, _) in self._points]
            ys = [y for (_, y) in self._points]
            self._plt.plot(xs + [xs[0]], ys + [ys[0]], 'o', color='blue')

        # Draw the canvas
        self._canvas.draw()

    # Handle click events
    def _onPick(self, event):

        if event.button == 3:
            self._redraw()
        elif event.button != 1:
            return

        # Get point position
        x = event.xdata
        y = event.ydata

        if x is None or y is None:
            return

        # For each existing points
        for px, py in self._points:

            # Compute distance to current point
            dst = np.sqrt((px - x) ** 2 + (py - y) ** 2)

            # If the distance is small remove it
            if dst < 10:
                self._removePoint(px, py)
                self._redraw()
                return

        # Delegate to add the point
        self._addPoint(x, y)

        # Redraw the image
        self._redraw()

    # Add a new point
    def _addPoint(self, x, y):
        # Count points
        n = len(self._points)

        # If fewer than 2 points, just add it
        if n < 2:
            self._points.append((x, y))
            return

    # Remove an existing point
    def _removePoint(self, x, y):
        # Remove the point
        self._points = list(filter(lambda v: v != (x, y), self._points))

    def _rectangle(self):
        # Get xs and ys
        xs = [x for (x, _) in self._points]
        ys = [y for (_, y) in self._points]

        # Compute ranges
        xmax = max(xs)
        xmin = min(xs)
        ymax = max(ys)
        ymin = min(ys)

        # Return rectangle
        return [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]

    def _pointsToVector(self, points):
        # Get points values
        x1, y1 = points[0]
        x2, y2 = points[1]
        x3, y3 = points[2]
        x4, y4 = points[3]

        # Return the vector
        return np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])