def fit_image(f1, f2, P):
    """Warp f2 by homography P onto a canvas large enough to hold both
    images, paste f1 at the origin, and display the stitched result."""
    tx = 0
    ty = 0

    # Project the four corners of f2 through the inverse homography to
    # find the extent of the warped image.
    P_inv = np.linalg.inv(P)
    xy1 = matrix_transform([0, 0], P_inv)
    xy2 = matrix_transform([0, f2.shape[0]], P_inv)
    xy3 = matrix_transform([f2.shape[1], f2.shape[0]], P_inv)
    xy4 = matrix_transform([f2.shape[1], 0], P_inv)

    x_min = min(xy1[0][0], xy2[0][0], xy3[0][0], xy4[0][0], 0)
    x_max = max(xy1[0][0], xy2[0][0], xy3[0][0], xy4[0][0], f1.shape[1])
    y_min = min(xy1[0][1], xy2[0][1], xy3[0][1], xy4[0][1], 0)
    y_max = max(xy1[0][1], xy2[0][1], xy3[0][1], xy4[0][1], f1.shape[0])

    # output canvas size covering both images
    x = int(x_max - x_min)
    y = int(y_max - y_min)

    # if warped corners fall at negative coordinates, remember the shift
    # needed to bring them into view
    if y_min < 0:
        ty = y_min
    if x_min < 0:
        tx = x_min

    f_stitched = warp(f2, P, output_shape=(y, x))
    M, N = f1.shape[:2]
    f_stitched[0:M, 0:N, :] = f1
    tform = SimilarityTransform(translation=(tx, ty))
    warped = warp(f_stitched, tform)
    plt.imshow(warped)
    plt.axis('off')
    plt.show()
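# Usage sketch (assumptions: the file names are hypothetical, and `P` would
# normally come from feature matching + RANSAC rather than the identity
# stand-in used here):
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.transform import matrix_transform, warp, SimilarityTransform

img1 = imread('left.png')   # reference image (assumed file name)
img2 = imread('right.png')  # image to warp into img1's frame
P = np.eye(3)               # stand-in homography
fit_image(img1, img2, P)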
def rotate_coordinates(coords, angle, rotation_centre, imsize, resize=False):
    """Rotate coordinates in the image"""
    rot_centre = np.asanyarray(rotation_centre)
    angle = math.radians(angle)
    rot_matrix = np.array([
        [math.cos(angle), math.sin(angle), 0],
        [-math.sin(angle), math.cos(angle), 0],
        [0, 0, 1],
    ])
    coords = transform.matrix_transform(coords - rot_centre,
                                        rot_matrix) + rot_centre

    if resize:
        rows, cols = imsize[0], imsize[1]
        corners = np.array(
            [[0, 0], [0, rows - 1], [cols - 1, rows - 1], [cols - 1, 0]],
            dtype=np.float32)
        if rotation_centre is not None:
            corners = (
                transform.matrix_transform(corners - rot_centre, rot_matrix) +
                rot_centre)

        x_shift = min(corners[:, 0])
        y_shift = min(corners[:, 1])
        coords -= np.array([x_shift, y_shift])

    return coords
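# Usage sketch (assumption: the module imports `math`, `numpy as np`, and
# `skimage.transform as transform`, which rotate_coordinates relies on):
import numpy as np

pts = np.array([[10.0, 5.0], [20.0, 5.0]])
# rotate 90 degrees about the centre (x, y) of a 100-row, 200-column image
rotated = rotate_coordinates(pts, 90, rotation_centre=(100, 50),
                             imsize=(100, 200), resize=True)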
def get_error(P, h_inliers, h_model, b_error, b_model):
    # RANSAC scoring step: `t` (inlier distance threshold) and `d`
    # (minimum inlier count) are module-level parameters.
    inliers = np.array([])

    for i in h_model:
        new_pos = matrix_transform([i[0], i[1]], P)
        error = np.linalg.norm([i[2], i[3]] - new_pos)

        if error < t:
            inliers = np.append(inliers, i)

    if (inliers.shape[0] / 4) > d:
        inliers = np.append(inliers, h_inliers)
        inliers = inliers.reshape((int(inliers.shape[0] / 4), 4))
        # refit on all inliers, then score the refit model
        P2 = perspectiveTransformMatrix(inliers)

        error = 0.0
        for i in h_model:
            new_pos = matrix_transform([i[0], i[1]], P2)
            error += np.linalg.norm([i[2], i[3]] - new_pos)
        error /= inliers.shape[0]

        if error < b_error:
            print("Found better match")
            b_error = error
            b_model = P2

    return b_model, b_error
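# A minimal sketch of the outer RANSAC loop this helper appears to serve
# (assumptions: `matches` is an (N, 4) array of [x1, y1, x2, y2]
# correspondences, and `perspectiveTransformMatrix`, `t`, `d` are the
# module's own model fitter and thresholds):
def ransac_homography(matches, n_iter=1000, sample_size=4):
    b_model, b_error = None, np.inf
    for _ in range(n_iter):
        # sample a minimal subset and fit a candidate homography
        idx = np.random.choice(len(matches), sample_size, replace=False)
        sample = matches[idx]
        P = perspectiveTransformMatrix(sample)
        b_model, b_error = get_error(P, sample, matches, b_error, b_model)
    return b_model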
def transform_point(point_list, transform_parameters):
    scale_x, scale_y = transform_parameters['zy'], transform_parameters['zx']
    translate_x_px, translate_y_px = transform_parameters[
        'ty'], transform_parameters['tx']
    rotate = transform_parameters['theta']
    shear = transform_parameters['shear']
    flip_horizontal = transform_parameters.get('flip_horizontal', False)
    flip_vertical = transform_parameters.get('flip_vertical', False)
    if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 \
            or shear != 0:
        matrix_to_topleft = skimage_tf.SimilarityTransform(
            translation=[-0.5, -0.5])
        matrix_transforms = skimage_tf.AffineTransform(
            scale=(scale_x, scale_y),
            translation=(translate_x_px, translate_y_px),
            rotation=math.radians(rotate),
            shear=math.radians(shear))
        matrix_to_center = skimage_tf.SimilarityTransform(
            translation=[0.5, 0.5])
        matrix = (matrix_to_topleft + matrix_transforms + matrix_to_center)
        point_list = skimage_tf.matrix_transform(point_list, matrix.params)
    if flip_horizontal or flip_vertical:
        matrix_to_topleft = skimage_tf.SimilarityTransform(
            translation=[-0.5, -0.5])
        point_list = skimage_tf.matrix_transform(point_list,
                                                 matrix_to_topleft.params)
        if flip_horizontal:
            point_list = [(-x, y) for x, y in point_list]
        if flip_vertical:
            point_list = [(x, -y) for x, y in point_list]
        matrix_to_center = skimage_tf.SimilarityTransform(
            translation=[0.5, 0.5])
        point_list = skimage_tf.matrix_transform(point_list,
                                                 matrix_to_center.params)
    return point_list
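# Usage sketch (assumptions: points are normalized to [0, 1], matching the
# 0.5-centred shifts above, and skimage.transform is imported as
# `skimage_tf`; parameter names follow the Keras ImageDataGenerator
# convention):
params = {'zx': 1.0, 'zy': 1.0, 'tx': 0.0, 'ty': 0.0,
          'theta': 90, 'shear': 0, 'flip_horizontal': True}
pts_aug = transform_point([(0.25, 0.25), (0.75, 0.5)], params)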
Example #5
def infer(edge_image, edge_lengths, mu, phi, sigma2,
          update_slice=slice(None),
          scale_estimate=None,
          rotation=0,
          translation=(0, 0)):
    # edge_points = np.array(np.where(edge_image)).T
    # edge_points[:, [0, 1]] = edge_points[:, [1, 0]]
    # edge_score = edge_image.shape[0] * np.exp(-edge_lengths[edge_image] / (0.25 * edge_image.shape[0])).reshape(-1, 1)
    # edge_points = np.concatenate((edge_points, edge_score), axis=1)
    #
    # edge_nn = NearestNeighbors(n_neighbors=1).fit(edge_points)

    edge_near = scipy.ndimage.distance_transform_edt(~edge_image)
    edge_near_blur = gaussian(edge_near, 2)
    Gy, Gx = np.gradient(edge_near_blur)
    mag = np.sqrt(np.power(Gy, 2) + np.power(Gx, 2))

    if scale_estimate is None:
        scale_estimate = min(edge_image.shape) * 4

    mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)
    average_distance = np.sqrt(np.power(mu.reshape(-1, 2), 2).sum(axis=1)).mean()
    scale_estimate /= average_distance * np.sqrt(2)

    h = np.zeros((phi.shape[1], 1))

    psi = SimilarityTransform(scale=scale_estimate, rotation=rotation, translation=translation)

    while True:
        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)

        # closest_edge_point_indices = edge_nn.kneighbors(image_points)[1].flatten()
        # closest_edge_points = edge_points[closest_edge_point_indices, :2]

        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        w = mu.reshape(-1, 2)
        psi = estimate_transform('similarity', w[update_slice, :], closest_edge_points)

        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)

        # closest_edge_point_indices = edge_nn.kneighbors(image_points)[1].flatten()
        # closest_edge_points = edge_points[closest_edge_point_indices, :2]

        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        mu_slice = mu.reshape(-1, 2)[update_slice, :].reshape(-1, 1)
        K = phi.shape[-1]
        phi_full = phi.reshape(-1, 2, K)
        phi_slice = phi_full[update_slice, :].reshape(-1, K)
        h = update_h(sigma2, phi_slice, closest_edge_points, mu_slice, psi)

        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)

        update_slice = yield image_points, closest_edge_points
Example #6
def infer(edge_image,
          edge_lengths,
          mu,
          phi,
          sigma2,
          update_slice=slice(None),
          scale_estimate=None,
          rotation=0,
          translation=(0, 0)):
    edge_near = scipy.ndimage.distance_transform_edt(~edge_image)
    edge_near_blur = gaussian(edge_near, 2)
    Gy, Gx = np.gradient(edge_near_blur)
    mag = np.sqrt(np.power(Gy, 2) + np.power(Gx, 2))

    if scale_estimate is None:
        scale_estimate = min(edge_image.shape) * 4

    mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)
    average_distance = np.sqrt(np.power(mu.reshape(-1, 2),
                                        2).sum(axis=1)).mean()
    scale_estimate /= average_distance * np.sqrt(2)

    h = np.zeros((phi.shape[1], 1))

    psi = SimilarityTransform(scale=scale_estimate,
                              rotation=rotation,
                              translation=translation)

    while True:
        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate(
            (image_points, np.zeros((image_points.shape[0], 1))), axis=1)

        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        w = mu.reshape(-1, 2)
        psi = estimate_transform('similarity', w[update_slice, :],
                                 closest_edge_points)

        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate(
            (image_points, np.zeros((image_points.shape[0], 1))), axis=1)

        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        mu_slice = mu.reshape(-1, 2)[update_slice, :].reshape(-1, 1)
        K = phi.shape[-1]
        phi_full = phi.reshape(-1, 2, K)
        phi_slice = phi_full[update_slice, :].reshape(-1, K)
        h = update_h(sigma2, phi_slice, closest_edge_points, mu_slice, psi)

        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)

        update_slice = yield image_points, closest_edge_points, h, psi
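# A minimal driver for the generator above (assumptions: `edge_image` is a
# boolean edge map and `edge_lengths`, `mu`, `phi`, `sigma2` come from a
# trained shape model; all names are stand-ins):
gen = infer(edge_image, edge_lengths, mu, phi, sigma2)
image_points, closest, h, psi = next(gen)  # prime the generator
for _ in range(20):
    # send() supplies the next update_slice and runs one more iteration
    image_points, closest, h, psi = gen.send(slice(None))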
Example #7
    def crop_img(self, fullimg, depth, orig_coords, orig_labels):
        import numpy as np
        import skimage.transform as T
        [crop_height, crop_width, mat] = self.transform_matrix(depth)
        transform_mat = np.linalg.inv(mat)

        L.debug('Final transform matrix\n%s', transform_mat)
        cropped_img = T.warp(fullimg, transform_mat, output_shape=np.round((crop_height, crop_width)), order=3)
        # Bicubic, default is 1, bilinear

        cropped_coords = T.matrix_transform(orig_coords, mat)

        L.debug('Original coords %s', orig_coords)
        L.debug('Transformed coords %s', cropped_coords)
        # T.matrix_transform()  # use this to transform coordinates

        index_mapping = self.get_joint_mapping(orig_labels)
        # index_mapping = np.array([6, 1, 3, 2, 0, 5, 8, 10, 12, 11, 9, 7, 13, 4])
        # The order for LSP dataset
        # PC format
        # The order is defined in here: http://www.comp.leeds.ac.uk/mat4saj/lsp.html
        L.debug('Transformed labels %s', [orig_labels[v] for v in index_mapping])

        resorted_cropped_coords = cropped_coords[index_mapping, :]
        L.debug('Transformed LSP coords\n%s', resorted_cropped_coords)
        # L.debug('Transformed joint labels %s', [labels[i] for i in index_mapping])
        return [cropped_img, resorted_cropped_coords]
Example #8
def transform_to_template(shape, template, transformation_type='similarity'):
    """Returns a transformed shape that is as close as possible to the given
    template under a certain type of transformation.

    Args:
        shape (ndarray): An n x 2 array where each row represents a vertex in
                         the shape. The first column is the x-coordinate and
                         the second column is the y-coordinate.

        template (ndarray): Another shape corresponding to the first input.
                            It must have the same array shape and type, and
                            corresponding rows must represent corresponding
                            vertices. For example, the vertex represented by
                            row **i** in the *input* will try to match as
                            closely as possible to the vertex represented by
                            row **i** in the *template*.

        transformation_type (str): The type of transformation to use when
                                   fitting the shape to the template. The
                                   string must be one of the ones specified by
                                   `skimage.transform.estimate_transform`_.

    Returns:
        ndarray: Transformed shape of the same type and array shape as the
        input shape.

    ..  _skimage.transform.estimate_transform: http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.estimate_transform
    """
    transformation = estimate_transform(transformation_type, shape, template)
    return matrix_transform(shape, transformation.params)
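# Usage sketch: fit a rotated and shifted unit square back onto its template.
import numpy as np

template = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
theta = np.deg2rad(30)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta), np.cos(theta)]])
shape = template @ R.T + [2.0, 3.0]  # rotated + translated copy
aligned = transform_to_template(shape, template)  # ~= template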
def video_to_court(xs, ys, homography):
    '''
    Input: list of x coordinates, list of y coordinates, homography
    Output: transformed x and y
    '''
    # zip() must be materialized: matrix_transform expects an array-like of
    # (x, y) pairs, not a one-shot iterator.
    xst, yst = zip(*tf.matrix_transform(list(zip(xs, ys)), homography))
    return xst, yst
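# Usage sketch (assumption: `H` is a 3x3 homography, e.g.
# estimate_transform('projective', video_pts, court_pts).params):
xs_court, ys_court = video_to_court([10, 20, 30], [5, 15, 25], H)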
Example #10
    def test_find_transform_givensources(self):
        from skimage.transform import estimate_transform, matrix_transform

        source = np.array([
            [1.4, 2.2],
            [5.3, 1.0],
            [3.7, 1.5],
            [10.1, 9.6],
            [1.3, 10.2],
            [7.1, 2.0],
        ])
        nsrc = source.shape[0]
        scale = 1.5  # scaling parameter
        alpha = np.pi / 8.0  # rotation angle
        mm = scale * np.array([[np.cos(alpha), -np.sin(alpha)],
                               [np.sin(alpha), np.cos(alpha)]])
        tx, ty = 2.0, 1.0  # translation parameters
        transl = np.array([nsrc * [tx], nsrc * [ty]])
        dest = (mm.dot(source.T) + transl).T
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(t_true.scale - t.scale, 1e-10)
        self.assertLess(t_true.rotation - t.rotation, 1e-10)
        self.assertLess(np.linalg.norm(t_true.translation - t.translation),
                        1e-10)
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1e-10)
Example #11
    def align(self, sub, centroids):
        # Align sub to current keystars, updating its status.

        min_inliers = 4

        # choose warp model to project existing keystars so they lie closer to centroids (in theory)
        if self.warp_model:
            keys = matrix_transform(self.keystars, self.warp_model.params)
        else:
            keys = self.keystars

        # find closest matching star to each transformed keystar
        nstars = min(len(keys), len(centroids))

        matched_stars = np.zeros((nstars, 2))
        for i, (x1, y1) in enumerate(keys):
            if i < nstars:
                matched_stars[i, :] = centroids[np.argmin([
                    (x1 - x2)**2 + (y1 - y2)**2 for x2, y2 in centroids
                ])]

        # do we have enough matched stars?
        if len(matched_stars) < self.min_stars:
            sub.status = 'nalign'
        else:

            # apply RANSAC to find the matching pairs that best fit a Euclidean model;
            # it can warn when there are no inliers (surely a bug), which we ignore
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                warp_model, inliers = ransac(
                    (np.array(self.keystars[:nstars]), matched_stars),
                    EuclideanTransform,
                    4,
                    .5,
                    max_trials=100)

            # managed to align
            if (inliers is not None) and (sum(inliers) >= min_inliers):
                # update warp model
                self.warp_model = warp_model
                sub.image = warp(sub.image,
                                 self.warp_model,
                                 order=3,
                                 preserve_range=True)
                self.align_count += 1

                # change to select if it was nalign before; if reject before, leave it as reject
                if sub.status == 'nalign':
                    sub.status = 'select'
            else:
                sub.status = 'nalign'

            # return inverse transform of centroids (for platesolver)
            if self.warp_model:
                return self.warp_model.inverse(centroids)
            return None
Example #12
    def _augment_keypoints(self, keypoints_on_images, random_state, parents,
                           hooks):
        result = []
        nb_images = len(keypoints_on_images)
        scale_samples, translate_samples, rotate_samples, shear_samples, cval_samples, mode_samples, order_samples = self._draw_samples(
            nb_images, random_state)

        for i, keypoints_on_image in enumerate(keypoints_on_images):
            height, width = keypoints_on_image.height, keypoints_on_image.width
            shift_x = int(width / 2.0)
            shift_y = int(height / 2.0)
            scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
            translate_x, translate_y = translate_samples[0][
                i], translate_samples[1][i]
            #assert isinstance(translate_x, (float, int))
            #assert isinstance(translate_y, (float, int))
            if ia.is_single_float(translate_y):
                translate_y_px = int(
                    round(translate_y * keypoints_on_image.shape[0]))
            else:
                translate_y_px = translate_y
            if ia.is_single_float(translate_x):
                translate_x_px = int(
                    round(translate_x * keypoints_on_image.shape[1]))
            else:
                translate_x_px = translate_x
            rotate = rotate_samples[i]
            shear = shear_samples[i]
            #cval = cval_samples[i]
            #mode = mode_samples[i]
            #order = order_samples[i]
            if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 or shear != 0:
                matrix_to_topleft = tf.SimilarityTransform(
                    translation=[-shift_x, -shift_y])
                matrix_transforms = tf.AffineTransform(
                    scale=(scale_x, scale_y),
                    translation=(translate_x_px, translate_y_px),
                    rotation=math.radians(rotate),
                    shear=math.radians(shear))
                matrix_to_center = tf.SimilarityTransform(
                    translation=[shift_x, shift_y])
                matrix = (matrix_to_topleft + matrix_transforms +
                          matrix_to_center)

                coords = keypoints_on_image.get_coords_array()
                #print("coords", coords)
                #print("matrix", matrix.params)
                coords_aug = tf.matrix_transform(coords, matrix.params)
                #print("coords before", coords)
                #print("coordsa ftre", coords_aug, np.around(coords_aug).astype(np.int32))
                result.append(
                    ia.KeypointsOnImage.from_coords_array(
                        np.around(coords_aug).astype(np.int32),
                        shape=keypoints_on_image.shape))
            else:
                result.append(keypoints_on_image)
        return result
Example #13
def similarity(edge_image, mu, phi, sigma2, h, psi):
    height, width = edge_image.shape
    edge_distance = scipy.ndimage.distance_transform_edt(~edge_image)
    w = (mu + phi @ h).reshape(-1, 2)
    image_points = matrix_transform(w, psi.params)
    closest_distances = scipy.interpolate.interp2d(range(width), range(height), edge_distance)
    K = h.size
    noise = scipy.stats.multivariate_normal(mean=np.zeros(K), cov=np.eye(K))
    if noise.pdf(h.flatten()) == 0:
        print(h.flatten())
    noise = np.log(noise.pdf(h.flatten()))
    return -closest_distances(image_points[:, 0], image_points[:, 1]).sum() / sigma2 + noise
Example #14
def apply_random_transformation(background_size, segmented_box, config_params):
    """apply a random transformation to 2D coordinates nomalized to image size"""
    # translate object coordinates to the object center's frame, i.e. whitens
    whitened_coords_norm = segmented_box.segmented_coords_norm - (
        segmented_box.x_center_norm, segmented_box.y_center_norm)

    # then generate a random rotation around the z-axis (perpendicular to the image plane), and limit the object scale
    # to maximum (default) 50% of the background image, i.e. the normalized largest dimension of the object must be at
    # most 0.5. To put it simply, scale the objects down if they're too big.
    # TODO(minhnh) add shear
    max_scale = config_params.max_scale
    if segmented_box.max_dimension_norm > config_params.max_obj_size_in_bg:
        max_scale = config_params.max_obj_size_in_bg / segmented_box.max_dimension_norm
    random_rot_angle = np.random.uniform(0, np.pi)
    # sample within the capped max_scale computed above, so oversized
    # objects stay within max_obj_size_in_bg
    rand_scale = np.random.uniform(config_params.min_scale, max_scale)

    # generate a random translation within the image boundaries for whitened, normalized coordinates, taking into
    # account the maximum allowed object dimension. After this translation, the normalized coordinates should
    # stay within [margin, 1-margin] for each dimension
    scaled_max_dimension = segmented_box.max_dimension_norm * max_scale
    low_norm_bound, high_norm_bound = ((scaled_max_dimension / 2) +
                                       config_params.margin,
                                       1 - config_params.margin -
                                       (scaled_max_dimension / 2))
    random_translation_x = np.random.uniform(
        low_norm_bound, high_norm_bound) * background_size[1]
    random_translation_y = np.random.uniform(
        low_norm_bound, high_norm_bound) * background_size[0]

    # create the transformation matrix for the generated rotation, translation and scale
    if np.random.uniform() < config_params.prob_rand_rotation:
        tf_matrix = SimilarityTransform(
            rotation=random_rot_angle,
            scale=rand_scale * min(background_size),
            translation=(random_translation_x, random_translation_y)).params
    else:
        tf_matrix = SimilarityTransform(
            scale=rand_scale * min(background_size),
            translation=(random_translation_x, random_translation_y)).params

    # apply transformation
    transformed_coords = matrix_transform(whitened_coords_norm, tf_matrix)

    # we clip the object coordinates so that they are within the image boundaries
    transformed_coords[np.where(transformed_coords[:, 0] < 0), 0] = 0
    transformed_coords[np.where(transformed_coords[:, 0] > background_size[1]),
                       0] = background_size[1] - 1

    transformed_coords[np.where(transformed_coords[:, 1] < 0), 1] = 0
    transformed_coords[np.where(transformed_coords[:, 1] > background_size[0]),
                       1] = background_size[0] - 1
    return transformed_coords
Example #15
    def transform(self, T):
        """
        Applies a skimage Transformation to self.x and self.y
        The original coords are stored at self.x_orig and self.y_orig
        The new coordinates can also be found at self.x_t and self.y_t
        Arguments:
            T -- A skimage Transformation to apply
        """
        self.x_orig = self.x
        self.y_orig = self.y
        self.x, self.y = matrix_transform([self.x, self.y], T.params)[0]
        self.x_t, self.y_t = self.x, self.y
        self.pos = [self.x, self.y]
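# Usage sketch (assumption: `obj` is an instance of the surrounding class,
# carrying scalar .x and .y coordinates):
from skimage.transform import SimilarityTransform

obj.transform(SimilarityTransform(rotation=0.1, translation=(5, -3)))
print(obj.pos)  # new [x, y]; originals remain in obj.x_orig / obj.y_orig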
Example #16
def similarity(edge_image, mu, phi, sigma2, h, psi):
    height, width = edge_image.shape
    edge_distance = scipy.ndimage.distance_transform_edt(~edge_image)
    w = (mu + phi @ h).reshape(-1, 2)
    image_points = matrix_transform(w, psi.params)
    closest_distances = scipy.interpolate.interp2d(range(width), range(height),
                                                   edge_distance)
    K = h.size
    noise = scipy.stats.multivariate_normal(mean=np.zeros(K), cov=np.eye(K))
    if noise.pdf(h.flatten()) == 0:
        print(h.flatten())
    noise = np.log(noise.pdf(h.flatten()))
    # data term plus the Gaussian log-prior on h
    return -closest_distances(image_points[:, 0],
                              image_points[:, 1]).sum() / sigma2 + noise
Example #17
    def check_if_findtransform_ok(self, numstars):
        """Helper function to test find_transform with common test code
        for 3, 4, 5, and 6 stars"""
        from skimage.transform import estimate_transform, matrix_transform

        if numstars > 6:
            raise NotImplementedError

        # x and y of stars in the ref frame (int's)
        self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
        self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
        self.num_stars = numstars
        # Fluxes of stars
        self.star_f = np.array(numstars * [700.0])

        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            num_stars=self.num_stars,
            star_refx=self.star_refx,
            star_refy=self.star_refy,
            star_flux=self.star_f,
        )

        source = self.star_ref_pos
        dest = self.star_new_pos.copy()
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(t_true.scale - t.scale, 1e-10)
        self.assertLess(t_true.rotation - t.rotation, 1e-10)
        self.assertLess(np.linalg.norm(t_true.translation - t.translation),
                        1.0)
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertLessEqual(src_pts.shape[0], source.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1.0)
Example #18
def transform_coords(t, coords_in, centre_patch):
    def tform_centred_rec(radian, translation, center):
        tform1 = skt.SimilarityTransform(translation=translation)
        tform2 = skt.SimilarityTransform(translation=center)
        tform3 = skt.SimilarityTransform(rotation=radian)
        tform4 = skt.SimilarityTransform(translation=-center)
        tform = tform4 + tform3 + tform2 + tform1
        return tform

    param = t.get_params()
    rot_radian = -param[0]
    tx = param[2]
    ty = param[1]
    # transform the transformed patch coordinates back
    tform_patch_rec = tform_centred_rec(radian=rot_radian,
                                        translation=(tx, ty),
                                        center=centre_patch)
    coords_out = skt.matrix_transform(coords_in, tform_patch_rec.params)
    return coords_out
Example #19
def transform(postX, postY, transX, transY, rot):
    """Coordinate transform function.

    Args:
        postX (list): X coordinates to transform (1D).
        postY (list): Y coordinates to transform (1D).
        transX (float): Translation value in X dimension.
        transY (float): Translation value in Y dimension.
        rot (float): Rotation value in radians (passed directly to
            SimilarityTransform).

    Returns:
        transformed (ndarray): Transformed coordinates, shape (N, 2).

    """
    tform = tf.SimilarityTransform(scale=1,
                                   rotation=rot,
                                   translation=(transX, transY))
    transformed = tf.matrix_transform(np.transpose([postX, postY]),
                                      tform.params)
    return transformed
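# Usage sketch: shift three points by (5, 10) and rotate a quarter turn
# (the rotation is handed straight to SimilarityTransform, hence radians):
import numpy as np

pts = transform([0, 1, 2], [0, 0, 0], transX=5, transY=10, rot=np.pi / 2)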
Example #20
def get_im_coor(path, json):
    import cv2
    import numpy as np
    from skimage.transform import matrix_transform

    img = cv2.imread(path)
    rows = img.shape[0]     # height (y)
    columns = img.shape[1]  # width (x)
    # get the head and tail coordinates
    if np.size(json.annotations) == 0:
        return None, None, None, None
    x1 = json.annotations[0][u'x']
    x2 = json.annotations[1][u'x']
    y1 = json.annotations[0][u'y']
    y2 = json.annotations[1][u'y']
    # find appropriate placement
    x1_ = np.maximum(0, np.minimum(x1, x2) - 50)
    x2_ = np.minimum(columns, np.maximum(x1, x2) + 50)
    y1_ = np.maximum(0, np.minimum(y1, y2) - 50)
    y2_ = np.minimum(rows, np.maximum(y1, y2) + 50)
    target = 139
    # cv2.resize takes interpolation as a keyword argument; passing it
    # positionally would set the unused `dst` parameter instead
    resized = cv2.resize(img, (target, target), interpolation=cv2.INTER_LINEAR)
    height, width = img.shape[:2]
    x_ratio = float(target) / float(width)
    y_ratio = float(target) / float(height)
    x1_r = x1_ * x_ratio
    y1_r = y1_ * y_ratio
    x2_r = x2_ * x_ratio
    y2_r = y2_ * y_ratio
    # data augmentation: fixed affine (slight scale/shear plus translation)
    M = np.array([[0.85, -0.15, 18.], [-0.15, 0.85, 18.]])
    M1 = np.concatenate((M, np.array([[0, 0, 1]])), axis=0)
    # get the transformed image
    dst = cv2.warpAffine(resized, M, (target, target))
    coord = np.array([[x1_r, y1_r], [x2_r, y2_r]])
    coord1 = matrix_transform(coord, M1)
    # resized x0, y0, w, h
    resized_array = np.array([x1_r, y1_r, x2_r - x1_r, y2_r - y1_r])
    dst_array = np.array([coord1[0][0], coord1[0][1],
                          coord1[1][0] - coord1[0][0],
                          coord1[1][1] - coord1[0][1]])
    return resized, resized_array, dst, dst_array
def apply_random_transformation(background_size,
                                segmented_box,
                                margin=0.1,
                                max_obj_size_in_bg=0.4):
    """apply a random transformation to 2D coordinates nomalized to image size"""
    orig_coords_norm = segmented_box.segmented_coords_homog_norm[:, :2]
    # translate object coordinates to the object center's frame, i.e. whitens
    whitened_coords_norm = orig_coords_norm - (segmented_box.x_center_norm,
                                               segmented_box.y_center_norm)

    # then generate a random rotation around the z-axis (perpendicular to the image plane), and limit the object scale
    # to maximum (default) 50% of the background image, i.e. the normalized largest dimension of the object must be at
    # most 0.5. To put it simply, scale the objects down if they're too big.
    # TODO(minhnh) add shear
    max_scale = 1.
    if segmented_box.max_dimension_norm > max_obj_size_in_bg:
        max_scale = max_obj_size_in_bg / segmented_box.max_dimension_norm
    random_rot_angle = np.random.uniform(0, np.pi)
    rand_scale = np.random.uniform(0.5 * max_scale, max_scale)

    # generate a random translation within the image boundaries for whitened, normalized coordinates, taking into
    # account the maximum allowed object dimension. After this translation, the normalized coordinates should
    # stay within [margin, 1-margin] for each dimension
    scaled_max_dimension = segmented_box.max_dimension_norm * max_scale
    low_norm_bound, high_norm_bound = ((scaled_max_dimension / 2) + margin,
                                       1 - margin - (scaled_max_dimension / 2))
    random_translation_x = np.random.uniform(low_norm_bound, high_norm_bound)
    random_translation_y = np.random.uniform(low_norm_bound, high_norm_bound)

    # create the transformation matrix for the generated rotation, translation and scale
    tf_matrix = SimilarityTransform(rotation=random_rot_angle,
                                    scale=rand_scale,
                                    translation=(random_translation_x,
                                                 random_translation_y)).params

    # apply transformation
    transformed_coords_norm = matrix_transform(whitened_coords_norm, tf_matrix)
    return transformed_coords_norm
def find_transform(source,
                   target,
                   max_control_points=50,
                   detection_sigma=5,
                   min_area=5):
    """Estimate the transform between ``source`` and ``target``.

    Return a SimilarityTransform object ``T`` that maps pixel x, y indices from
    the source image s = (x, y) into the target (destination) image t = (x, y).
    T contains the parameters of the transformation: ``T.rotation``,
    ``T.translation``, ``T.scale``, ``T.params``.

    Args:
        source (array-like): Either a numpy array of the source image to be
            transformed or an iterable of (x, y) coordinates of the source
            control points.
        target (array-like): Either a numpy array of the target (destination)
            image or an iterable of (x, y) coordinates of the target
            control points.
        max_control_points: The maximum number of control-point sources to use
            when finding the transformation.
        detection_sigma: Factor of the background standard deviation above
            which a pixel is considered a detection. Ignored if the inputs are
            not images.
        min_area: Minimum number of connected pixels to be considered a source.
            Ignored if the inputs are not images.

    Returns:
        The transformation object and a tuple of corresponding star positions
        in source and target.::

            T, (source_pos_array, target_pos_array)

    Raises:
        TypeError: If input type of ``source`` or ``target`` is not supported.
        ValueError: If it cannot find more than 3 stars on any input.
    """
    from scipy.spatial import KDTree

    try:
        if len(source[0]) == 2:
            # Assume it's a list of (x, y) pairs
            source_controlp = _np.array(source)[:max_control_points]
        else:
            # Assume it's a 2D image
            source_controlp = _find_sources(
                source, detection_sigma=detection_sigma,
                min_area=min_area)[:max_control_points]
    except Exception:
        raise TypeError("Input type for source not supported.")

    try:
        if len(target[0]) == 2:
            # Assume it's a list of (x, y) pairs
            target_controlp = _np.array(target)[:max_control_points]
        else:
            # Assume it's a 2D image
            target_controlp = _find_sources(
                target, detection_sigma=detection_sigma,
                min_area=min_area)[:max_control_points]
    except Exception:
        raise TypeError("Input type for target not supported.")

    # Check for low number of reference points
    if len(source_controlp) < 3:
        raise ValueError("Reference stars in source image are less than the "
                         "minimum value (3).")
    if len(target_controlp) < 3:
        raise ValueError("Reference stars in target image are less than the "
                         "minimum value (3).")

    source_invariants, source_asterisms = _generate_invariants(source_controlp)
    source_invariant_tree = KDTree(source_invariants)

    target_invariants, target_asterisms = _generate_invariants(target_controlp)
    target_invariant_tree = KDTree(target_invariants)

    # r = 0.1 is the maximum search distance; 0.1 is an empirical value that
    # returns about the same number of matches as inputs.
    # matches_list is a list of lists such that for each element
    # source_invariant_tree.data[i], matches_list[i] is a list of the indices
    # of its neighbors in target_invariant_tree.data
    matches_list = source_invariant_tree.query_ball_tree(target_invariant_tree,
                                                         r=0.1)

    # matches unravels the previous list of matches into pairs of source and
    # target control point matches.
    # matches is a (N, 3, 2) array. N sets of similar corresponding triangles.
    # 3 indices for a triangle in ref
    # and the 3 indices for the corresponding triangle in target;
    matches = []
    # t1 is an asterism in source, t2 in target
    for t1, t2_list in zip(source_asterisms, matches_list):
        for t2 in target_asterisms[t2_list]:
            matches.append(list(zip(t1, t2)))
    matches = _np.array(matches)

    inv_model = _MatchTransform(source_controlp, target_controlp)
    n_invariants = len(matches)
    max_iter = n_invariants
    # Set the minimum matches to be between 1 and 10 asterisms
    min_matches = max(1, min(10, int(n_invariants * MIN_MATCHES_FRACTION)))
    if (len(source_controlp) == 3
            or len(target_controlp) == 3) and len(matches) == 1:
        best_t = inv_model.fit(matches)
        inlier_ind = _np.arange(len(matches))  # All of the indices
    else:
        best_t, inlier_ind = _ransac(matches, inv_model, 1, max_iter,
                                     PIXEL_TOL, min_matches)
    triangle_inliers = matches[inlier_ind]
    d1, d2, d3 = triangle_inliers.shape
    inl_arr = triangle_inliers.reshape(d1 * d2, d3)
    inl_unique = set(tuple(pair) for pair in inl_arr)
    # Next, multiple assignments to the same source point s are removed;
    # we keep the pair (s, t) with the lowest reprojection error.
    inl_dict = {}
    for s_i, t_i in inl_unique:
        # calculate error
        s_vertex = source_controlp[s_i]
        t_vertex = target_controlp[t_i]
        t_vertex_pred = matrix_transform(s_vertex, best_t.params)
        error = _np.linalg.norm(t_vertex_pred - t_vertex)

        # if s_i not in dict, or if its error is smaller than previous error
        if s_i not in inl_dict or (error < inl_dict[s_i][1]):
            inl_dict[s_i] = (t_i, error)
    inl_arr_unique = _np.array([[s_i, t_i]
                                for s_i, (t_i, e) in inl_dict.items()])
    s, d = inl_arr_unique.T

    return best_t, (source_controlp[s], target_controlp[d])
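# Usage sketch: recover a known similarity transform from two point lists
# (point lists bypass the source-detection step entirely):
import numpy as np

src = np.random.uniform(0, 512, (20, 2))
ang = 0.2
R = np.array([[np.cos(ang), -np.sin(ang)], [np.sin(ang), np.cos(ang)]])
dst = 1.1 * src @ R.T + [15.0, -7.0]
T, (s_pts, t_pts) = find_transform(src, dst)
print(T.scale, T.rotation, T.translation)  # ~1.1, ~0.2, ~(15, -7)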
Example #23
def align_crop_5pts_skimage(img,
                            src_landmarks,
                            mean_landmarks=_DEFAULT_MEAN_LANDMARKS,
                            crop_size=512,
                            face_factor=0.7,
                            landmark_factor=0.35,
                            align_type='similarity',
                            order=3,
                            mode='edge'):
    """Align and crop a face image by 5 landmarks.

    Arguments:
        img             : Face image to be aligned and cropped.
        src_landmarks   : 5 landmarks:
                              [[left_eye_x, left_eye_y],
                               [right_eye_x, right_eye_y],
                               [nose_x, nose_y],
                               [left_mouth_x, left_mouth_y],
                               [right_mouth_x, right_mouth_y]].
        mean_landmarks  : Mean shape, should be normalized in [-0.5, 0.5].
        crop_size       : Output image size.
        face_factor     : The factor of face area relative to the output image.
        landmark_factor : The factor of landmarks' area relative to the face.
        align_type      : 'similarity' or 'affine'.
        order           : The order of interpolation. The order has to be in the range 0-5:
                              - 0: Nearest-neighbor
                              - 1: Bi-linear
                              - 2: Bi-quadratic
                              - 3: Bi-cubic
                              - 4: Bi-quartic
                              - 5: Bi-quintic
        mode            : One of ['constant', 'edge', 'symmetric', 'reflect', 'wrap'].
                          Points outside the boundaries of the input are filled according
                          to the given mode.
    """
    import skimage.transform as transform

    # check
    assert align_type in [
        'affine', 'similarity'
    ], 'Invalid `align_type`! Allowed: %s!' % ['affine', 'similarity']
    assert order in [0, 1, 2, 3, 4,
                     5], 'Invalid `order`! Allowed: %s!' % [0, 1, 2, 3, 4, 5]
    assert mode in ['constant', 'edge', 'symmetric', 'reflect',
                    'wrap'], 'Invalid `mode`! Allowed: %s!' % [
                        'constant', 'edge', 'symmetric', 'reflect', 'wrap'
                    ]

    # move
    move = np.array([img.shape[1] // 2, img.shape[0] // 2])

    # pad border
    v_border = img.shape[0] - crop_size
    w_border = img.shape[1] - crop_size
    if v_border < 0:
        v_half = (-v_border + 1) // 2
        img = np.pad(img, ((v_half, v_half), (0, 0), (0, 0)), mode=mode)
        src_landmarks += np.array([0, v_half])
        move += np.array([0, v_half])
    if w_border < 0:
        w_half = (-w_border + 1) // 2
        img = np.pad(img, ((0, 0), (w_half, w_half), (0, 0)), mode=mode)
        src_landmarks += np.array([w_half, 0])
        move += np.array([w_half, 0])

    # estimate transform matrix
    mean_landmarks -= np.array([mean_landmarks[0, :] + mean_landmarks[1, :]
                                ]) / 2.0  # middle point of eyes as center
    trg_landmarks = mean_landmarks * (crop_size * face_factor *
                                      landmark_factor) + move
    tform = transform.estimate_transform(align_type, trg_landmarks,
                                         src_landmarks)

    # fix the translation to match the middle point of eyes
    trg_mid = (trg_landmarks[0, :] + trg_landmarks[1, :]) / 2.0
    src_mid = (src_landmarks[0, :] + src_landmarks[1, :]) / 2.0
    new_trg_mid = transform.matrix_transform([trg_mid], tform.params)[0]
    tform.params[:2, 2] += src_mid - new_trg_mid

    # warp image by given transform
    output_shape = (crop_size // 2 + move[1] + 1, crop_size // 2 + move[0] + 1)
    img_align = transform.warp(img,
                               tform,
                               output_shape=output_shape,
                               order=order,
                               mode=mode)

    # crop
    img_crop = img_align[-crop_size:, -crop_size:]

    return img_crop
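# Usage sketch (assumptions: `img` is an RGB face image and `landmarks` is
# a (5, 2) float array of [left eye, right eye, nose, left mouth, right
# mouth] in (x, y) pixel order, e.g. from a detector such as MTCNN):
face = align_crop_5pts_skimage(img, landmarks, crop_size=256,
                               align_type='similarity')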
Example #24
def test_matrix_transform():
    tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
    assert_equal(tform(SRC), matrix_transform(SRC, tform.params))
def getNewMarkers(model_robust, markers):
    mat = model_robust.params
    markers1 = transform.matrix_transform(markers, mat)
    return markers1
def evaluate_model(data_root, model_name, i_model='0500', display=None):
    #    data_root='../Datasets/Balvan_patches/fold1/patch_tlevel4/'
    #    model_name='balvan_fold1_us_b2a'
    #    i_model='1500'

    # dataset-specific variables
    if 'eliceiri' in model_name:
        img_root = '../Datasets/HighRes_Splits/WSI'
        w = 834
        o = 608
    elif 'balvan' in model_name:
        img_root = '../Datasets/Balvan_1to4tiles'
        w = 300  # patch width
        o = w // 2  # offset: upper-left corner of patch

    method = 'VXM'
    gan_name = ''
    mode = model_name.split('_')[-1]

    coords_ref = np.array(([0, 0], [0, w], [w, w], [w, 0]))
    centre_patch = np.array((w, w)) / 2. - 0.5

    dir_A = data_root + 'A/test/'
    dir_B = data_root + 'B/test/'

    supervision = model_name.split('_')[-2]
    assert supervision in ['su', 'us'], "supervision must be in ['su', 'us']"
    supervision_dict = {'su': 'supervised', 'us': 'unsupervised'}

    assert mode in ['a2b', 'b2a', 'a2a',
                    'b2b'], "mode must be in ['a2b', 'b2a', 'a2a', 'b2b']"
    if mode == 'a2b':
        dir_src = dir_A
        dir_tar = dir_B
    elif mode == 'b2a':
        dir_src = dir_B
        dir_tar = dir_A
    elif mode == 'a2a':
        dir_src = dir_A
        dir_tar = dir_A
    elif mode == 'b2b':
        dir_src = dir_B
        dir_tar = dir_B

    load_model_file = glob(f'./models/{model_name}/{i_model}*')[-1]

    # device handling
    if args.gpu and (args.gpu != '-1'):
        device = '/gpu:' + args.gpu
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        tf.keras.backend.set_session(tf.Session(config=config))
    else:
        device = '/cpu:0'
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # load the affine model
    with tf.device(device):
        model = vxm.networks.VxmAffine.load(load_model_file)

    suffix_src = '_' + os.listdir(dir_src)[0].split('_')[-1]
    name_srcs = set([name[:-len(suffix_src)] for name in os.listdir(dir_src)])
    suffix_tar = '_' + os.listdir(dir_tar)[0].split('_')[-1]
    name_tars = set([name[:-len(suffix_tar)] for name in os.listdir(dir_tar)])
    f_names = name_srcs & name_tars
    f_names = list(f_names)
    f_names.sort()

    df = pd.read_csv(data_root + 'info_test.csv', index_col='Filename')

    cnt_disp = 0
    for f_name in tqdm(f_names):
        # extract transformed patch coordinates
        coords_trans = df.loc[f_name, [
            'X1_Trans', 'Y1_Trans', 'X2_Trans', 'Y2_Trans', 'X3_Trans',
            'Y3_Trans', 'X4_Trans', 'Y4_Trans'
        ]].to_numpy().reshape((4, 2))

        # load test pair
        img_src = load_image(dir_src +
                             f"{f_name}_T.{suffix_src.split('.')[-1]}")
        img_tar = load_image(dir_tar +
                             f"{f_name}_R.{suffix_tar.split('.')[-1]}")
        if img_src.ndim == 2:
            img_src = img_src[np.newaxis, ..., np.newaxis]
        if img_tar.ndim == 2:
            img_tar = img_tar[np.newaxis, ..., np.newaxis]
        img_src = img_src.astype('float') / 255
        img_tar = img_tar.astype('float') / 255

        # register
        with tf.device(device):
            affine = model.register(img_src, img_tar)
        affine_matrix = np.concatenate(
            [affine.squeeze().reshape(
                (2, 3)), np.zeros((1, 3))], 0) + np.eye(3)
        #        coords_rec = skt.matrix_transform(coords_trans, affine_matrix)
        tform = skt.SimilarityTransform(affine_matrix)
        tform_patch_rec = tform_centred_rec(radian=tform.rotation,
                                            translation=(tform.translation[0],
                                                         tform.translation[1]),
                                            center=centre_patch)
        coords_rec = skt.matrix_transform(coords_trans, tform_patch_rec.params)

        #        img_rec = vxm.tf.utils.transform(img_src, affine, rescale=1.0)

        # calculate error
        disp_error = dist_coords(coords_rec, coords_ref)

        result = {
            'X1_Recover': coords_rec[0][0],
            'Y1_Recover': coords_rec[0][1],
            'X2_Recover': coords_rec[1][0],
            'Y2_Recover': coords_rec[1][1],
            'X3_Recover': coords_rec[2][0],
            'Y3_Recover': coords_rec[2][1],
            'X4_Recover': coords_rec[3][0],
            'Y4_Recover': coords_rec[3][1],
            'Error': np.mean(disp_error)
        }
        # update result
        df.loc[f_name, [
            'X1_Recover', 'Y1_Recover', 'X2_Recover', 'Y2_Recover',
            'X3_Recover', 'Y3_Recover', 'X4_Recover', 'Y4_Recover', 'Error'
        ]] = result

        # display patch outline in original image
        if display:
            if cnt_disp < display:
                suffix = os.path.basename(
                    os.listdir(f'{img_root}/A/')[0]).split('.')[-1]
                imgB = skio.imread(f"{img_root}/B/{f_name}.{suffix}")
                dispdirB = f'{data_root}/display/B/test'
                if not os.path.exists(dispdirB):
                    os.makedirs(dispdirB)
                if 'eliceiri' in model_name:
                    imgB_disp = imgB
                elif 'balvan' in model_name:
                    imgB_disp = np.pad(imgB, w // 2, mode='reflect')
                if len(imgB_disp.shape) == 2:
                    imgB_disp = np.repeat(imgB_disp.reshape(
                        imgB_disp.shape[0], imgB_disp.shape[1], 1),
                                          3,
                                          axis=-1)
                imgB_disp = cv2.polylines(imgB_disp,
                                          pts=[(o + coords_ref).reshape(
                                              (-1, 1, 2))],
                                          isClosed=True,
                                          color=(0, 255, 0),
                                          thickness=2)
                imgB_disp = cv2.polylines(
                    imgB_disp,
                    pts=[np.int32(o + coords_trans).reshape((-1, 1, 2))],
                    isClosed=True,
                    color=(0, 0, 255),
                    thickness=2)
                imgB_disp = cv2.polylines(
                    imgB_disp,
                    pts=[np.int32(o + coords_rec).reshape((-1, 1, 2))],
                    isClosed=True,
                    color=(255, 0, 0),
                    thickness=2)
                #                skio.imshow(imgB_disp)
                skio.imsave(
                    f'{dispdirB}/{f_name}_{method+gan_name}_{mode}_{supervision}.{suffix}',
                    imgB_disp)
                cnt_disp += 1
#                img_rec = vxm.tf.utils.transform(img_src, affine, rescale=1.0)
#                skio.imshow(img_rec[0, ..., 0])
            else:
                return

    df.to_csv(data_root + f'results_{method}_{mode}_{supervision}.csv')

    return
Example #27
def transform(postX, postY,transX, transY, rot):
    tform = tf.SimilarityTransform(scale=1, rotation=rot, translation=(transX, transY))
    transformed = tf.matrix_transform(np.transpose([postX, postY]), tform.params)
    return transformed
def compute_homography_and_warp(image,
                                vp1,
                                vp2,
                                trajectories,
                                corners,
                                method="posture",
                                output_dir=""):
    height, width, _ = image.shape

    # Find Projective Transform
    vanishing_line = np.cross(vp1, vp2)
    H = np.eye(3)
    H[2] = vanishing_line / vanishing_line[2]
    H = H / H[2, 2]  # As h32 needs to be 1 in projection

    final_homography = H

    # If trajectories are not provided, we cannot rely on them for affine correction
    if trajectories is not None:
        # Determine a and b for the affine component of the homography
        intersections, mean_intersection = extract_circular_points(
            trajectories, H, method, output_dir)

        # for mean_intersection in intersections:
        A = np.eye(3, 3)

        if len(mean_intersection) > 0:
            a = mean_intersection[0]
            b = mean_intersection[1]

            A[0, 0] = 1 / b
            A[0, 1] = -a / b

        final_homography = np.dot(A, H)

    # region Translation and scaling operations

    # The image corners are transformed by the current matrix to determine the
    # extent of the resulting warped image. Each column is a corner in
    # homogeneous coordinates.

    image_corners = np.array([[0, 0, width, width], [0, height, 0, height],
                              [1, 1, 1, 1]])

    # Apply the current transformation
    cords = np.dot(final_homography, image_corners)

    # Normalize the points
    cords = cords[:2] / cords[2]

    # The rows now hold the x and y coordinates of all transformed corners.
    # The smallest ones are translated to the 0-0 border by taking the min.
    tx = min(0, cords[0].min())
    ty = min(0, cords[1].min())

    # Considering the applied transformation, determine the farthest points
    # from the upper-left corner
    max_x = int(cords[0].max() - tx)
    max_y = int(cords[1].max() - ty)

    T = np.array([[1, 0, -tx], [0, 1, -ty], [0, 0, 1]])

    final_homography = np.dot(T, final_homography)

    S = np.array([[width / max_x, 0, 0], [0, height / max_y, 0], [0, 0, 1]])

    final_homography = np.dot(S, final_homography)

    # We end up with an image that has the same size but is perspective
    # corrected. We don't clamp the result, as losing information is not an option.

    # endregion

    warped_img = transform.warp(image,
                                np.linalg.inv(final_homography),
                                clip=False,
                                output_shape=(height, width))

    transformed_corners = transform.matrix_transform(corners, final_homography)

    return warped_img, transformed_corners, final_homography
def make_patches(
        img_root,
        target_root,
        fold=None,
        t_level=1,
        #                 trans_min=0, trans_max=20, rot_min=0, rot_max=5,
        mode='train',
        display=None):
    #    img_root='../Datasets/HighRes_Splits/WSI'
    #    target_root='../Datasets/Eliceiri_patches'
    #    trans_min=0
    #    trans_max=20
    #    rot_min=0
    #    rot_max=0
    #    mode='train'

    w = 834
    o = 608

    coords_ref = np.array(([0, 0], [0, w], [w, w], [w, 0]))
    centre_patch = np.array((w, w)) / 2. - 0.5

    step_trans = 20
    step_rot = 5
    trans_min = step_trans * (t_level - 1)
    trans_max = step_trans * t_level
    rot_min = step_rot * (t_level - 1)
    rot_max = step_rot * t_level

    #modalities = {'MI':'SHG', 'WB':'BF'}
    tardirA = f'{target_root}/patch_tlevel{t_level}/A/{mode}'
    tardirB = f'{target_root}/patch_tlevel{t_level}/B/{mode}'
    if not os.path.exists(tardirA):
        os.makedirs(tardirA)
    if not os.path.exists(tardirB):
        os.makedirs(tardirB)

    f_names = set([
        '_'.join(name.split('_')[:-1])
        for name in os.listdir(f'{img_root}/{mode}')
    ])
    f_names = list(f_names)
    f_names.sort()

    # csv information
    header = [
        'ReferenceImage', 'Method', 'X1_Ref', 'Y1_Ref', 'X2_Ref', 'Y2_Ref',
        'X3_Ref', 'Y3_Ref', 'X4_Ref', 'Y4_Ref', 'X1_Trans', 'Y1_Trans',
        'X2_Trans', 'Y2_Trans', 'X3_Trans', 'Y3_Trans', 'X4_Trans', 'Y4_Trans',
        'X1_Recover', 'Y1_Recover', 'X2_Recover', 'Y2_Recover', 'X3_Recover',
        'Y3_Recover', 'X4_Recover', 'Y4_Recover', 'Displacement', 'Tx', 'Ty',
        'AngleDegree', 'AngleRad', 'Error', 'DisplacementCategory'
    ]
    df = pd.DataFrame(index=f_names, columns=header)
    df.index.set_names('Filename', inplace=True)

    if display is not None:
        cnt_disp = 0
    for f_name in tqdm(f_names):
        # load WSI
        if mode == 'train':
            f_nameA = f_name + '_MI.tif'
            f_nameB = f_name + '_WB.tif'
        else:
            f_nameA = f_name + '_SHG.tif'
            f_nameB = f_name + '_BF.tif'
        imgA = skio.imread(f'{img_root}/{mode}/{f_nameA}')
        imgB = skio.imread(f'{img_root}/{mode}/{f_nameB}')

        # random transformation parameters
        rot_degree = random.choice(
            (random.uniform(-rot_max,
                            -rot_min), random.uniform(rot_min, rot_max)))
        tx = random.choice(
            (random.uniform(-trans_max,
                            -trans_min), random.uniform(trans_min, trans_max)))
        ty = random.choice(
            (random.uniform(-trans_max,
                            -trans_min), random.uniform(trans_min, trans_max)))
        rot_radian = np.deg2rad(rot_degree)

        # transform WSI
        centre_img = np.array((imgA.shape[0], imgA.shape[1])) / 2. - 0.5
        tform_img = tform_centred(radian=rot_radian,
                                  translation=(tx, ty),
                                  center=centre_img)
        imgA_trans = np.asarray(skt.warp(imgA, tform_img, preserve_range=True),
                                dtype=np.uint8)
        imgB_trans = np.asarray(skt.warp(imgB, tform_img, preserve_range=True),
                                dtype=np.uint8)

        # crop patches
        patchA_ref = imgA[o:o + w, o:o + w]
        patchB_ref = imgB[o:o + w, o:o + w]
        patchA_trans = imgA_trans[o:o + w, o:o + w]
        patchB_trans = imgB_trans[o:o + w, o:o + w]

        # transform patch coordinates
        tform_patch = tform_centred(radian=rot_radian,
                                    translation=(tx, ty),
                                    center=centre_patch)
        coords_trans = skt.matrix_transform(coords_ref, tform_patch.params)

        # calculate distance
        dist = dist_coords(coords_trans, coords_ref)

        # write csv line
        line = {
            'X1_Ref': coords_ref[0][0],
            'Y1_Ref': coords_ref[0][1],
            'X2_Ref': coords_ref[1][0],
            'Y2_Ref': coords_ref[1][1],
            'X3_Ref': coords_ref[2][0],
            'Y3_Ref': coords_ref[2][1],
            'X4_Ref': coords_ref[3][0],
            'Y4_Ref': coords_ref[3][1],
            'X1_Trans': coords_trans[0][0],
            'Y1_Trans': coords_trans[0][1],
            'X2_Trans': coords_trans[1][0],
            'Y2_Trans': coords_trans[1][1],
            'X3_Trans': coords_trans[2][0],
            'Y3_Trans': coords_trans[2][1],
            'X4_Trans': coords_trans[3][0],
            'Y4_Trans': coords_trans[3][1],
            'Displacement': np.mean(dist),
            'Tx': tx,
            'Ty': ty,
            'AngleDegree': rot_degree,
            'AngleRad': rot_radian
        }
        df.loc[f_name] = line

        # save patches
        skio.imsave(f'{tardirA}/{f_name}_R.tif', patchA_ref)
        skio.imsave(f'{tardirA}/{f_name}_T.tif', patchA_trans)
        skio.imsave(f'{tardirB}/{f_name}_R.tif', patchB_ref)
        skio.imsave(f'{tardirB}/{f_name}_T.tif', patchB_trans)

        # display patch outline in original image
        if display is not None and cnt_disp < display:
            dispdirA = f'{target_root}/patch_tlevel{t_level}/display/A/{mode}'
            dispdirB = f'{target_root}/patch_tlevel{t_level}/display/B/{mode}'
            os.makedirs(dispdirA, exist_ok=True)
            os.makedirs(dispdirB, exist_ok=True)
            imgA = np.repeat(imgA.reshape(imgA.shape[0], imgA.shape[1], 1),
                             3,
                             axis=-1)
            # cv2.polylines requires int32 point arrays
            imgA = cv2.polylines(imgA,
                                 pts=[np.int32(o + coords_ref).reshape(
                                     (-1, 1, 2))],
                                 isClosed=True,
                                 color=(0, 255, 0),
                                 thickness=3)
            imgA = cv2.polylines(
                imgA,
                pts=[np.int32(o + coords_trans).reshape((-1, 1, 2))],
                isClosed=True,
                color=(0, 0, 255),
                thickness=3)
            skio.imsave(f'{dispdirA}/{f_name}_display.tif', imgA)
            imgB = cv2.polylines(imgB,
                                 pts=[np.int32(o + coords_ref).reshape(
                                     (-1, 1, 2))],
                                 isClosed=True,
                                 color=(0, 255, 0),
                                 thickness=3)
            imgB = cv2.polylines(
                imgB,
                pts=[np.int32(o + coords_trans).reshape((-1, 1, 2))],
                isClosed=True,
                color=(0, 0, 255),
                thickness=3)
            #            imgB = cv2.polylines(imgB, pts=[np.int32(o+coords_rec).reshape((-1,1,2))], isClosed=True, color=(255,0,0), thickness=3)
            skio.imsave(f'{dispdirB}/{f_name}_display.tif', imgB)
            cnt_disp += 1

    df.to_csv(f'{target_root}/patch_tlevel{t_level}/info_{mode}.csv')
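The `tform_centred` helper used above (and again in `make_patches` below) is not defined in this excerpt. A minimal sketch of what it plausibly does, assuming it composes scikit-image `SimilarityTransform`s into a rotation about `center` followed by a translation (the name and signature are taken from the call sites; the composition order is an assumption):

import skimage.transform as skt

def tform_centred(radian, translation, center):
    # Assumed behaviour: rotate about `center` (a NumPy (x, y) array), then
    # translate. skimage composes left-to-right: (a + b)(x) == b(a(x)).
    to_origin = skt.SimilarityTransform(translation=-center)
    rotate = skt.SimilarityTransform(rotation=radian)
    back = skt.SimilarityTransform(translation=center)
    shift = skt.SimilarityTransform(translation=translation)
    return to_origin + rotate + back + shift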
def make_patches(img_root,
                 target_root,
                 fold=1,
                 t_level=1,
                 mode='train',
                 display=None):
    # Example invocation:
    #   make_patches(img_root='./Datasets/Zurich_tiles',
    #                target_root='./Datasets/Zurich_patches',
    #                fold=1, t_level=2, mode='test')

    w = 300  # patch width
    o = 0  # upper-left corner of patch

    coords_ref = np.array(([0, 0], [0, w], [w, w], [w, 0]))
    centre_patch = np.array((w, w)) / 2. - 0.5

    # Difficulty grows with t_level: translation magnitude is sampled from
    # [step_trans*(t_level-1), step_trans*t_level] px and rotation from
    # [step_rot*(t_level-1), step_rot*t_level] degrees, each with random sign
    # (e.g. t_level=2 gives translations of 7-14 px, rotations of 5-10 deg).
    step_trans = 7
    step_rot = 5
    trans_min = step_trans * (t_level - 1)
    trans_max = step_trans * t_level
    rot_min = step_rot * (t_level - 1)
    rot_max = step_rot * t_level

    #modalities = {'IR':'A', 'RGB':'B'}
    tardirA = f'{target_root}/fold{fold}/patch_tlevel{t_level}/A/{mode}'
    tardirB = f'{target_root}/fold{fold}/patch_tlevel{t_level}/B/{mode}'
    os.makedirs(tardirA, exist_ok=True)
    os.makedirs(tardirB, exist_ok=True)

    ids_train, ids_test = split_zurich_data(fold)

    if mode == 'train':
        f_names = [
            os.path.basename(f_path).split('.')[0] for id_img in ids_train
            for f_path in glob(f'{img_root}/A/zh{id_img}_*')
        ]
    elif mode == 'test':
        f_names = [
            os.path.basename(f_path).split('.')[0] for id_img in ids_test
            for f_path in glob(f'{img_root}/A/zh{id_img}_*')
        ]
    else:
        raise ValueError(f'Unknown mode: {mode!r}')

    f_names = sorted(f_names)

    # csv information
    header = [
        'ReferenceImage', 'Method', 'X1_Ref', 'Y1_Ref', 'X2_Ref', 'Y2_Ref',
        'X3_Ref', 'Y3_Ref', 'X4_Ref', 'Y4_Ref', 'X1_Trans', 'Y1_Trans',
        'X2_Trans', 'Y2_Trans', 'X3_Trans', 'Y3_Trans', 'X4_Trans', 'Y4_Trans',
        'X1_Recover', 'Y1_Recover', 'X2_Recover', 'Y2_Recover', 'X3_Recover',
        'Y3_Recover', 'X4_Recover', 'Y4_Recover', 'Displacement',
        'RelativeDisplacement', 'Tx', 'Ty', 'AngleDegree', 'AngleRad', 'Error',
        'DisplacementCategory'
    ]
    df = pd.DataFrame(index=f_names, columns=header)
    df.index.set_names('Filename', inplace=True)

    if display is not None:
        cnt_disp = 0
    for f_name in tqdm(f_names):
        # load original images
        suffix = os.path.basename(
            os.listdir(f'{img_root}/A/')[0]).split('.')[-1]
        imgA = skio.imread(f"{img_root}/A/{f_name}.{suffix}")
        imgB = skio.imread(f"{img_root}/B/{f_name}.{suffix}")

        # random transformation parameters: magnitudes drawn uniformly from
        # [min, max], with the sign of each chosen at random
        rot_degree = random.choice(
            (random.uniform(-rot_max,
                            -rot_min), random.uniform(rot_min, rot_max)))
        tx = random.choice(
            (random.uniform(-trans_max,
                            -trans_min), random.uniform(trans_min, trans_max)))
        ty = random.choice(
            (random.uniform(-trans_max,
                            -trans_min), random.uniform(trans_min, trans_max)))
        rot_radian = np.deg2rad(rot_degree)

        # transform original images
        centre_img = np.array((imgA.shape[0], imgA.shape[1])) / 2. - 0.5
        tform_img = tform_centred(radian=rot_radian,
                                  translation=(tx, ty),
                                  center=centre_img)
        imgA_trans = np.asarray(skt.warp(imgA,
                                         tform_img,
                                         mode='reflect',
                                         preserve_range=True),
                                dtype=np.uint8)
        imgB_trans = np.asarray(skt.warp(imgB,
                                         tform_img,
                                         mode='reflect',
                                         preserve_range=True),
                                dtype=np.uint8)

        # crop patches
        patchA_ref = imgA[o:o + w, o:o + w]
        patchB_ref = imgB[o:o + w, o:o + w]
        patchA_trans = imgA_trans[o:o + w, o:o + w]
        patchB_trans = imgB_trans[o:o + w, o:o + w]

        # transform patch coordinates
        tform_patch = tform_centred(radian=rot_radian,
                                    translation=(tx, ty),
                                    center=centre_patch)
        coords_trans = skt.matrix_transform(coords_ref, tform_patch.params)

        # calculate distance
        dist_array = dist_coords(coords_trans, coords_ref)
        dist = np.mean(dist_array)

        # write csv line
        line = {
            'X1_Ref': coords_ref[0][0],
            'Y1_Ref': coords_ref[0][1],
            'X2_Ref': coords_ref[1][0],
            'Y2_Ref': coords_ref[1][1],
            'X3_Ref': coords_ref[2][0],
            'Y3_Ref': coords_ref[2][1],
            'X4_Ref': coords_ref[3][0],
            'Y4_Ref': coords_ref[3][1],
            'X1_Trans': coords_trans[0][0],
            'Y1_Trans': coords_trans[0][1],
            'X2_Trans': coords_trans[1][0],
            'Y2_Trans': coords_trans[1][1],
            'X3_Trans': coords_trans[2][0],
            'Y3_Trans': coords_trans[2][1],
            'X4_Trans': coords_trans[3][0],
            'Y4_Trans': coords_trans[3][1],
            'Displacement': dist,
            'RelativeDisplacement': dist / w,
            'Tx': tx,
            'Ty': ty,
            'AngleDegree': rot_degree,
            'AngleRad': rot_radian
        }
        df.loc[f_name] = line

        # save patches
        skio.imsave(f'{tardirA}/{f_name}_R.{suffix}', patchA_ref)
        skio.imsave(f'{tardirA}/{f_name}_T.{suffix}', patchA_trans)
        skio.imsave(f'{tardirB}/{f_name}_R.{suffix}', patchB_ref)
        skio.imsave(f'{tardirB}/{f_name}_T.{suffix}', patchB_trans)

        # display patch outline in original image
        if display is not None and cnt_disp < display:
            dispdirA = f'{target_root}/fold{fold}/patch_tlevel{t_level}/display/A/{mode}'
            dispdirB = f'{target_root}/fold{fold}/patch_tlevel{t_level}/display/B/{mode}'
            os.makedirs(dispdirA, exist_ok=True)
            os.makedirs(dispdirB, exist_ok=True)
            if len(imgA.shape) == 2:
                imgA_disp = np.pad(imgA, w // 2, mode='reflect')
                imgA_disp = np.repeat(imgA_disp.reshape(
                    imgA_disp.shape[0], imgA_disp.shape[1], 1),
                                      3,
                                      axis=-1)
            else:
                imgA_disp = np.pad(imgA, ((w // 2, w // 2), (w // 2, w // 2),
                                          (0, 0)),
                                   mode='reflect')
            # cv2.polylines requires int32 point arrays
            imgA_disp = cv2.polylines(
                imgA_disp,
                pts=[np.int32(w // 2 + coords_ref).reshape((-1, 1, 2))],
                isClosed=True,
                color=(0, 255, 0),
                thickness=2)
            imgA_disp = cv2.polylines(
                imgA_disp,
                pts=[np.int32(w // 2 + coords_trans).reshape((-1, 1, 2))],
                isClosed=True,
                color=(0, 0, 255),
                thickness=2)
            skio.imsave(f'{dispdirA}/{f_name}_display.{suffix}', imgA_disp)
            if len(imgB.shape) == 2:
                imgB_disp = np.pad(imgB, w // 2, mode='reflect')
                imgB_disp = np.repeat(imgB_disp.reshape(
                    imgB_disp.shape[0], imgB_disp.shape[1], 1),
                                      3,
                                      axis=-1)
            else:
                imgB_disp = np.pad(imgB, ((w // 2, w // 2), (w // 2, w // 2),
                                          (0, 0)),
                                   mode='reflect')
            imgB_disp = cv2.polylines(
                imgB_disp,
                pts=[np.int32(w // 2 + coords_ref).reshape((-1, 1, 2))],
                isClosed=True,
                color=(0, 255, 0),
                thickness=2)
            imgB_disp = cv2.polylines(
                imgB_disp,
                pts=[np.int32(w // 2 + coords_trans).reshape((-1, 1, 2))],
                isClosed=True,
                color=(0, 0, 255),
                thickness=2)
            #            imgB_disp = cv2.polylines(imgB_disp, pts=[np.int32(w//2+coords_rec).reshape((-1,1,2))], isClosed=True, color=(255,0,0), thickness=2)
            skio.imsave(f'{dispdirB}/{f_name}_display.{suffix}', imgB_disp)
            cnt_disp += 1

    df.to_csv(
        f'{target_root}/fold{fold}/patch_tlevel{t_level}/info_{mode}.csv')
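`dist_coords` is likewise defined outside this excerpt. Given how it is used (per-corner displacement between two 4x2 corner arrays, averaged into the `Displacement` column), a plausible stand-in is:

import numpy as np

def dist_coords(coords1, coords2):
    # Euclidean distance between corresponding rows of two (N, 2) arrays.
    return np.linalg.norm(np.asarray(coords1) - np.asarray(coords2), axis=1)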
Example #31
def align_ecc(img,
              img_ref,
              method='ecc',
              mode='affine',
              coords=None,
              rescale=False,
              use_gradient=True):

    try:
        import cv2
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            'It seems OpenCV is not installed. Please install it by running: '
            'pip install opencv-python-headless')

    if rescale:
        img0 = rescale_intensity(img_ref,
                                 in_range='image',
                                 out_range='float32').astype('float32')
        img1 = rescale_intensity(img, in_range='image',
                                 out_range='float32').astype('float32')
    else:
        img0 = img_ref.astype('float32')
        img1 = img.astype('float32')

    if use_gradient:

        def get_gradient(im):
            # Calculate the x and y gradients using Sobel operator
            grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=1)
            grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=1)

            # Combine the two gradients
            grad = cv2.addWeighted(np.absolute(grad_x), 0.5,
                                   np.absolute(grad_y), 0.5, 0)
            return grad

        img0 = get_gradient(img0)
        img1 = get_gradient(img1)

    # initial translation guess via phase correlation (register_translation
    # was renamed phase_cross_correlation in later scikit-image releases)
    shift = register_translation(img0, img1, 10)
    print('Found init shift: {}'.format(shift[0]))

    warp_matrix = np.eye(2, 3, dtype=np.float32)
    warp_matrix[:, 2] = -shift[0][::-1]
    # warp_matrix[:,2] = -shift[0]
    number_of_iterations = 1000000
    termination_eps = 1e-6
    ecc_mode = {
        'affine': cv2.MOTION_AFFINE,
        'translation': cv2.MOTION_TRANSLATION
    }
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                number_of_iterations, termination_eps)
    # print(warp_matrix)
    (cc, warp_matrix) = cv2.findTransformECC(img0, img1, warp_matrix,
                                             ecc_mode[mode], criteria)
    # print(warp_matrix)
    # cv2.warpAffine expects dsize as (width, height), hence the reversed shape
    img_x = cv2.warpAffine(img1, cv2.invertAffineTransform(warp_matrix),
                           img1.shape[::-1])
    imgref_x = cv2.warpAffine(img0, warp_matrix, img0.shape[::-1])

    # make a scikit-image/ndimage-compatible output transform matrix: OpenCV
    # works in (x, y) order while scikit-image uses (row, col), so the 2x2
    # block is reversed along both axes (rot90 by 180) and tx/ty are swapped
    trans_matrix = np.vstack((np.hstack(
        (np.rot90(warp_matrix[:2, :2],
                  2), np.flipud(warp_matrix[:, 2:]))), [0, 0, 1]))
    if coords is not None:
        coords_x = matrix_transform(coords, trans_matrix)
    else:
        coords_x = []

    return trans_matrix, coords_x, img_x, imgref_x
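A hypothetical smoke test for `align_ecc`, assuming its module-level dependencies (`register_translation` from an older scikit-image, `matrix_transform`, `rescale_intensity`) are in scope; the recovered matrix should reflect the synthetic shift, up to the sign and axis conventions noted above:

import numpy as np
from scipy import ndimage
from skimage import data

fixed = data.camera().astype('float32')
moving = ndimage.shift(fixed, (5, -3))  # shift content by +5 rows, -3 columns

tmat, coords_x, img_x, imgref_x = align_ecc(moving, fixed,
                                            mode='translation',
                                            coords=[[100, 100]],
                                            use_gradient=False)
print(np.round(tmat, 2))  # translation entries should match the (5, -3) shift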
def test_matrix_transform():
    tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
    assert_equal(tform(SRC), matrix_transform(SRC, tform.params))
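The test above depends on a module-level `SRC` fixture. A self-contained version of the same equivalence check, with a stand-in `SRC`:

import numpy as np
from skimage.transform import AffineTransform, matrix_transform

SRC = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
assert np.allclose(tform(SRC), matrix_transform(SRC, tform.params))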
Example #33
# transform coordinates
response = requests.get(
    "https://d24h2xsgaj29mf.cloudfront.net/raw/spatial_transcriptomics_stahl_2016/"
    "Layer1_BC_transformation.txt")
transform = np.array([
    float(v) for v in response.content.decode().strip().split()
]).reshape(3, 3).T

x, y = zip(*[map(float, v.split('x')) for v in data.index])

xy = np.hstack([
    np.array(x)[:, None],
    np.array(y)[:, None],
])

transformed = matrix_transform(xy, transform)
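# Note: matrix_transform treats each (x, y) row as the homogeneous vector
# [x, y, 1] and applies the 3x3 matrix: [x', y', s] = M @ [x, y, 1],
# returning (x'/s, y'/s).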

dims = (MATRIX_AXES.REGIONS.value, MATRIX_AXES.FEATURES.value)
coords = {
    MATRIX_REQUIRED_REGIONS.REGION_ID:
    (MATRIX_AXES.REGIONS, np.arange(data.shape[0])),
    MATRIX_REQUIRED_REGIONS.X_REGION: (MATRIX_AXES.REGIONS, transformed[:, 0]),
    MATRIX_REQUIRED_REGIONS.Y_REGION: (MATRIX_AXES.REGIONS, transformed[:, 1]),
    MATRIX_REQUIRED_FEATURES.GENE_NAME: (MATRIX_AXES.FEATURES, data.columns)
}
data = da.from_array(data.values, chunks=MATRIX_CHUNK_SIZE)

matrix = starspace.Matrix.from_expression_data(data=data,
                                               coords=coords,
                                               dims=dims,
                                               name="matrix")
Example #34
    # find the transformation matrix
    # we only use rigid body, gives: x shift, y shift, rotation
    sreg = StackReg(StackReg.RIGID_BODY)
    tmat = sreg.register(ref=ref_img, mov=mov_img)

    # if average_over_experiments:
    #     out_img = sreg.transform(mov=mov_img, tmat=tmat)
    #     ref_img = np.mean([ref_img, out_img], axis=0)

    # apply the matrix to the old ROIs
    # this is essentially just:
    # src = np.vstack((x, y, np.ones_like(x)))
    # dst = src.T @ matrix.T
    mov_points = np.copy(ref_points)
    mov_points[:, 0:2] = tf.matrix_transform(coords=ref_points[:, 0:2],
                                             matrix=tmat)

    # save in netcals image format. import via "load roi (legacy)"
    x, y, i = mov_points[:].T
    ut.save_rois(fname=mov_roi_saveto_list[idx],
                 roi_id=i,
                 x=x,
                 y=y,
                 roi_width=roi_width)

    # plot to check if alignment worked and compare
    if plt is not None:
        mov_mask = np.zeros(shape=(width, height, 4), dtype="uint8")
        for point in mov_points:
            ut.paint_roi(point[0], point[1], roi_width, mov_mask, channel=2)