Code Example #1
def align(img, landmarks, image_size=(112, 112)):
	'''
	Takes the original image and detected landmarks in numpy format
	:param img: original image
	:param landmarks: detected landmarks, [[x1, y1], [x2, y2], [x3, y3], [x4, y4], [x5, y5]]
	:param image_size: output image_size
	:return warped_landmarks:
	:return warped_image:
	'''
	assert isinstance(img, np.ndarray)
	assert isinstance(landmarks, np.ndarray)
	assert landmarks.shape == (5, 2)

	src = np.array([
		[30.2946, 51.6963],
		[65.5318, 51.5014],
		[48.0252, 71.7366],
		[33.5493, 92.3655],
		[62.7299, 92.2041]], dtype=np.float32)
	if image_size[1] == 112:
		src[:, 0] += 8.0
	dst = landmarks.astype(np.float32)
	tform = SimilarityTransform()
	tform.estimate(dst, src)
	M = tform.params[0:2, :]
	warped_image = cv2.warpAffine(img, M, image_size, borderValue=0.0)
	# use the float32 copy (dst) so OpenCV accepts the point array
	warped_landmarks = cv2.perspectiveTransform(dst[np.newaxis, ...], tform.params)[0]
	return warped_landmarks, warped_image
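
A minimal usage sketch for the align() helper above (hypothetical values, not from the original source; assumes the numpy/cv2/skimage imports the function relies on):

import numpy as np

# five (x, y) landmarks in the order the docstring describes
landmarks = np.array([[38.0, 52.0], [74.0, 52.0], [56.0, 72.0],
                      [41.0, 92.0], [71.0, 92.0]], dtype=np.float32)
img = np.zeros((160, 160, 3), dtype=np.uint8)  # stand-in for a real photo
warped_landmarks, warped_image = align(img, landmarks)
assert warped_image.shape[:2] == (112, 112)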
Code Example #2
def warp_images(image, transform, translation=None):
    r, c = image.shape[:2]
    # Note that transformations take coordinates in (x, y) format,
    # not (row, column), in order to be consistent with most literature
    corners = np.array([[0, 0], [0, r], [c, r], [c, 0]]).astype(float)  # np.float was removed in NumPy 1.24

    # Warp the image corners to their new positions
    if translation is not None:
        corners -= translation
    warped_corners = transform(corners)

    # Find the extents of the warped target image
    all_corners = warped_corners

    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)

    output_shape = (corner_max - corner_min)
    output_shape = np.ceil(output_shape[::-1]).astype(int)  # (rows, cols) as integers

    translation_0 = np.array([0.0, 0.0])
    if translation is not None:
        translation_0 += translation

    offset_0 = SimilarityTransform(translation=-translation_0)
    offset = SimilarityTransform(translation=-corner_min)

    image = warp(image, (offset_0 + transform + offset).inverse,
                 output_shape=output_shape,
                 cval=0)

    im = Image.fromarray((image * 255).astype('uint8'))  # warp returns floats in [0, 1]

    return im, offset.translation
Code Example #3
def test_3d_similarity_estimation():
    src_points = np.random.rand(1000, 3)

    # Random transformation for testing
    angles = np.random.random((3, )) * 2 * np.pi - np.pi
    scale = np.random.randint(1, 20)  # lower bound 1 avoids a degenerate zero scale
    rotation_matrix = _euler_rotation_matrix(angles) * scale
    translation_vector = np.random.random((3, ))
    dst_points = []
    for pt in src_points:
        pt_r = pt.reshape(3, 1)
        dst = np.matmul(rotation_matrix, pt_r) + \
            translation_vector.reshape(3, 1)
        dst = dst.reshape(3)
        dst_points.append(dst)

    dst_points = np.array(dst_points)
    # estimating the transformation
    tform = SimilarityTransform(dimensionality=3)
    assert tform.estimate(src_points, dst_points)
    estimated_rotation = tform.rotation
    estimated_translation = tform.translation
    estimated_scale = tform.scale
    assert_almost_equal(estimated_translation, translation_vector)
    assert_almost_equal(estimated_scale, scale)
    assert_almost_equal(estimated_rotation, rotation_matrix)
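
The same n-dimensional API can be exercised outside the test with a hand-built transform; an illustrative sketch (pure scale plus translation, so no rotation matrix is needed; assumes a scikit-image version that supports dimensionality=3):

import numpy as np
from skimage.transform import SimilarityTransform

src = np.random.rand(20, 3)
dst = src * 2.0 + np.array([1.0, -2.0, 0.5])  # scale by 2, then translate
t = SimilarityTransform(dimensionality=3)
assert t.estimate(src, dst)
# t.scale is close to 2.0, t.translation close to [1, -2, 0.5]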
Code Example #4
def _rotate(image, angle, center, scale, cval=0):
    """
    Rotate function taken mostly from scikit image. Main difference is that
    this one allows dimensional scaling and records the final translation
    to ensure no image content is lost. This is needed to rotate the seam
    back into the original image.
    """
    rows, cols = image.shape[0], image.shape[1]
    tform1 = SimilarityTransform(translation=center)
    tform2 = SimilarityTransform(rotation=angle)
    tform3 = SimilarityTransform(translation=-center)
    tform4 = AffineTransform(scale=(1 / scale, 1))
    tform = tform4 + tform3 + tform2 + tform1
    corners = np.array([[0, 0], [0, rows - 1], [cols - 1, rows - 1],
                        [cols - 1, 0]])
    corners = tform.inverse(corners)
    minc = corners[:, 0].min()
    minr = corners[:, 1].min()
    maxc = corners[:, 0].max()
    maxr = corners[:, 1].max()
    out_rows = maxr - minr + 1
    out_cols = maxc - minc + 1
    output_shape = np.around((out_rows, out_cols)).astype(int)
    # fit output image in new shape
    translation = (minc, minr)
    tform5 = SimilarityTransform(translation=translation)
    tform = tform5 + tform
    tform.params[2] = (0, 0, 1)
    return tform, warp(image,
                       tform,
                       output_shape=output_shape,
                       order=0,
                       cval=cval,
                       clip=False,
                       preserve_range=True)
Code Example #5
    def rotate_selection(self, event):
        selection = self.get_selection()
        sel_pos = np.array([self.canvas.coords(s) for s in selection])
        sel_pos = sel_pos[..., :2] + (sel_pos[..., 2:] - sel_pos[..., :2]) / 2

        sel_center = np.mean(sel_pos, axis=0)
        if self.rotation_center is not None:
            sel_center = self.rotation_center

        pos_old = np.array([self._drag_data["x"], self._drag_data["y"]])
        if np.all(pos_old == np.array([0, 0])):
            pos_old = np.array([event.x, event.y])
        direction_old = pos_old - sel_center

        pos_new = np.array([event.x, event.y])
        direction_new = pos_new - sel_center

        X = np.array([direction_new[0], direction_old[0]])
        Y = np.array([direction_new[1], direction_old[1]])

        angle = np.arctan2(Y, X)
        angle = angle[1] - angle[0]

        rot_trans = SimilarityTransform(rotation=angle)
        new_coords = rot_trans.inverse(sel_pos - sel_center) + sel_center

        for s, old, new in zip(selection, sel_pos, new_coords):
            delta_x = new[0] - old[0]
            delta_y = new[1] - old[1]
            self.canvas.move(s, delta_x, delta_y)

        self._drag_data["x"] = event.x
        self._drag_data["y"] = event.y
Code Example #6
def check_ruler_points():
    points = pd.read_csv('../output/ruler_points.csv')
    for _, row in points.iterrows():
        video_id = row.video_id
        # scipy.misc.imread was removed in SciPy 1.2; imageio.imread is a drop-in
        img = imageio.imread(os.path.join(IMAGES_DIR, video_id, "0001.jpg"))

        dst_w = 720
        dst_h = 360
        ruler_points = np.array([[row.ruler_x0, row.ruler_y0],
                                 [row.ruler_x1, row.ruler_y1]])
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])

        tform = SimilarityTransform()
        tform.estimate(dst=ruler_points, src=img_points)
        crop = skimage.transform.warp(img,
                                      tform,
                                      mode='edge',
                                      order=3,
                                      output_shape=(dst_h, dst_w))

        print('ruler:\n', ruler_points)
        print('img:\n', img_points)

        print('ruler from img:\n', tform(img_points))
        print('img from ruler:\n', tform.inverse(ruler_points))
        print('scale', tform.scale)

        plt.subplot(2, 1, 1)
        plt.imshow(img)
        plt.plot([row.ruler_x0, row.ruler_x1], [row.ruler_y0, row.ruler_y1])

        plt.subplot(2, 1, 2)
        plt.imshow(crop)
        plt.show()
Code Example #7
File: roi_transform.py Project: openem-team/openem
    def transform_for_clip(self,
                           video_id,
                           dst_w=720,
                           dst_h=360,
                           points_random_shift=0):
        """Finds transform to crop around ruler.

        # Arguments
            video_id: Video ID.
            dst_w: Width of cropped image.
            dst_h: Height of cropped image.
            points_random_shift: Maximum random shift, in pixels, applied to the image points.
        """
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])
        if video_id not in self.ruler_points:
            return None

        points = self.ruler_points[video_id]

        ruler_points = np.array([[points.x1, points.y1],
                                 [points.x2, points.y2]])

        if points_random_shift > 0:
            img_points += np.random.uniform(-points_random_shift,
                                            points_random_shift, (2, 2))

        tform = SimilarityTransform()
        tform.estimate(dst=ruler_points, src=img_points)

        return tform
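
A hypothetical follow-up (not part of the original project) showing how the returned transform would be applied, mirroring Code Example #6 above; roi, img, and the video id are made-up placeholders:

tform = roi.transform_for_clip('video_123')  # 'roi' is an instance of this class
if tform is not None:
    crop = skimage.transform.warp(img, tform, mode='edge',
                                  output_shape=(360, 720))  # (dst_h, dst_w)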
Code Example #8
File: tools.py Project: Martlgap/FaceIDLight
    def get_face(img, dst, target_size=(112, 112)):
        """
        :param img: input image as a numpy array
        :param dst: detected 5 x 2 landmarks (eyes, nose, mouth corners)
        :param target_size: size of the aligned output crop
        :return: aligned face crop of target_size
        """
        # MS1M
        # [38.128662 51.516567]
        # [74.21549  51.55989 ]
        # [56.056564 72.434525]
        # [40.48149  90.873665]
        # [71.38436  90.78255 ]

        # LFW
        # [38.411846 52.59001 ]
        # [73.68209  52.300644]
        # [56.092415 72.949585]
        # [40.763634 90.94648 ]
        # [71.64599  90.62956 ]

        src = np.array(
            [
                [38.2946, 51.6963],
                [73.5318, 51.5014],
                [56.0252, 71.7366],
                [41.5493, 92.3655],
                [70.7299, 92.2041],
            ],
            dtype=np.float32,
        )
        tform = SimilarityTransform()
        tform.estimate(dst, src)
        tmatrix = tform.params[0:2, :]
        return cv2.warpAffine(img, tmatrix, target_size, borderValue=0.0)
Code Example #9
def test_similarity_init():
    # init with implicit parameters
    scale = 0.1
    rotation = 1
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale,
                                rotation=rotation,
                                translation=translation)
    assert_array_almost_equal(tform.scale, scale)
    assert_array_almost_equal(tform.rotation, rotation)
    assert_array_almost_equal(tform.translation, translation)

    # init with transformation matrix
    tform2 = SimilarityTransform(tform._matrix)
    assert_array_almost_equal(tform2.scale, scale)
    assert_array_almost_equal(tform2.rotation, rotation)
    assert_array_almost_equal(tform2.translation, translation)

    # test special case for scale if rotation=0
    scale = 0.1
    rotation = 0
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale,
                                rotation=rotation,
                                translation=translation)
    assert_array_almost_equal(tform.scale, scale)
    assert_array_almost_equal(tform.rotation, rotation)
    assert_array_almost_equal(tform.translation, translation)
Code Example #10
File: facedataset.py Project: sweetcocoa/challenge
    def align_face(self, img, src_img_size, landmark, origin):
        dst_size = 112
        new_dst_size = 224
        dst = np.array([
            [30.2946 + 8, 51.6963],
            [65.5318 + 8, 51.5014],
            [48.0252 + 8, 71.7366],
            [33.5493 + 8, 92.3655],
            [62.7299 + 8, 92.2041]], dtype=np.float32) * new_dst_size / dst_size
        p = src_img_size / new_dst_size
        dst = dst * p

        src = landmark - np.array(origin)
        # print("landmark2", src)
        #dst = np.transpose(landmark).reshape(1,5,2)
        #src = src.reshape(1,5,2)
        # print(src)
        # print(dst)
        # transmat = cv2.estimateRigidTransform(dst.astype(np.float32),
        #                                       src.astype(np.float32), False)
        # out = cv2.warpAffine(img, transmat, (dst_img_size, dst_img_size))
        tform = SimilarityTransform()
        tform.estimate(src, dst)
        M = tform.params[0:2, :]
        out = cv2.warpAffine(img, M, (src_img_size, src_img_size), borderValue=0.0)
        return out
Code Example #11
def doOpticalFlow(prevOutput, targetPoints, target_frame, prev_target_frame,
                  frame_no):
    p0 = np.asarray(targetPoints).astype(np.float32)[:, :, None]
    p0 = np.transpose(p0, (0, 2, 1))
    old_gray = cv2.cvtColor(prev_target_frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.cvtColor(target_frame, cv2.COLOR_BGR2GRAY)

    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    newOutput = np.copy(prevOutput)

    transform = SimilarityTransform()
    if transform.estimate(good_old, good_new):
        newOutput = transform_image(good_old, good_new, prevOutput,
                                    target_frame, frame_no)
        # make a convex fill mask inside this hull
        # for each point in the new image, copy from the old image/prevOutput (use transformation/float points..)
        # use the above mask to only keep the masked part of the above copy
        # seamless cloning will not be required.

    return newOutput, listOfListToTuples(good_new.tolist())
Code Example #12
File: iterators.py Project: msbektas/VirtualWardrobe
def im_affine_transform(img, scale, rotation, shear, translation_y, translation_x, return_tform=False):
    # Assumed img in c01. Convert to 01c for skimage
    img = img.transpose(1, 2, 0)
    # Normalize so that the param acts more like im_rotate, im_translate etc
    scale = 1 / scale
    translation_x = - translation_x
    translation_y = - translation_y

    # shift to center first so that image is rotated around center
    center_shift = np.array((img.shape[0], img.shape[1])) / 2. - 0.5
    tform_center = SimilarityTransform(translation=-center_shift)
    tform_uncenter = SimilarityTransform(translation=center_shift)

    rotation = np.deg2rad(rotation)
    tform = AffineTransform(scale=(scale, scale), rotation=rotation,
                            shear=shear,
                            translation=(translation_x, translation_y))
    tform = tform_center + tform + tform_uncenter

    warped_img = warp(img, tform)

    # Convert back from 01c to c01
    warped_img = warped_img.transpose(2, 0, 1)
    warped_img = warped_img.astype(img.dtype)
    if return_tform:
        return warped_img, tform
    else:
        return warped_img
Code Example #13
def merge_two_images(image0, offset0, image1, offset1):
    cmin_0, cmax_0 = get_inplace_images_conners(image0, offset0)
    cmin_1, cmax_1 = get_inplace_images_conners(image1, offset1)

    out_min = np.min(np.vstack((cmin_0, cmin_1, cmax_0, cmax_1)), axis=0)
    out_max = np.max(np.vstack((cmin_0, cmin_1, cmax_0, cmax_1)), axis=0)

    print(out_min, out_max)

    output_shape = (out_max - out_min)
    output_shape = np.ceil(output_shape[::-1]).astype(int)

    im0_translation = SimilarityTransform(translation=-offset0)  # params are offset0/offset1
    im1_translation = SimilarityTransform(translation=-offset1)
    all_translation = SimilarityTransform(translation=-out_min)

    image0 = warp(image0, (im0_translation + all_translation).inverse,
                  output_shape=output_shape,
                  cval=0)
    image1 = warp(image1, (im1_translation + all_translation).inverse,
                  output_shape=output_shape,
                  cval=0)

    # warp returns floats in [0, 1], so rescale before converting to uint8
    im0 = Image.fromarray((image0 * 255).astype('uint8'))
    im1 = Image.fromarray((image1 * 255).astype('uint8'))

    im0.show()
    im1.show()
Code Example #14
def jitter(img):
    ''' Jitter the image as described in the paper referenced here:
        http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf'''

    # knobs
    max_rot = 15 * math.pi / 180.0
    max_scale = 0.1
    max_delta = 2
    max_gamma = 0.7

    # randomize
    rot = random.uniform(-1, 1) * max_rot
    scl = random.uniform(-1, 1) * max_scale + 1.0
    xd = random.randint(-max_delta, max_delta)
    yd = random.randint(-max_delta, max_delta)
    gamma = random.uniform(-1, 1) * max_gamma + 1.0

    # scale, rotation, and translation
    tform = SimilarityTransform(rotation=rot, scale=scl, translation=(xd, yd))
    offx, offy = np.array(img.shape[:2]) / 2
    recenter = SimilarityTransform(translation=(offx, offy))
    recenter_inv = SimilarityTransform(translation=(-offx, -offy))
    img = warp(img, (recenter_inv + (tform + recenter)).inverse, mode='edge')

    # gamma
    img = adjust_gamma(img, gamma)

    # convert back to RGB [0-255] and ignore the silly precision warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return img_as_ubyte(img)
Code Example #15
File: gcnet.py Project: RomanJuranek/photo-rectify
    def __call__(self, image):
        """
        """
        downscaled = self.downscale(image) / 256
        h, w = downscaled.shape[:2]
        u, v = self.gcnet_shape
        half_size = (min(h, w) - 1) // 2

        scale = min(image.shape[:2]) / u

        #print(half_size, u, v, h, w, scale)
        src = np.array([
            (w / 2 - half_size, h / 2 - half_size),  # A
            (w / 2 + half_size, h / 2 - half_size),  # B
            (w / 2 - half_size, h / 2 + half_size),  # C
        ])
        #print(src)
        dst = np.array([
            (0, 0),  # A
            (u, 0),  # B
            (0, v)  # C
        ])

        T = SimilarityTransform()
        T.estimate(src, dst)
        X = warp(downscaled, T.inverse, output_shape=self.gcnet_shape,
                 order=2).astype(np.float32)

        T_pred, R_pred = self.gcnet.predict(X[None, ...])

        return theta_from_soft_label(T_pred), scale * rho_from_soft_label(
            R_pred, K_range=100)
Code Example #16
 def upgrade(self, mult):
     # Augments the training set by concatenating `mult` randomly rotated and
     # slightly shifted copies of the initial images.
     # Returns a tuple of (augmented data set, labels vector).
     X_upg = np.array([
         warp(
             item,
             SimilarityTransform(translation=(np.random.randint(-1, 2),
                                              np.random.randint(-1, 2))))  # upper bound is exclusive
         for item in
         [rotate(item, np.random.randint(-15, 15)) for item in self.X]
     ])
     y_upg = self.y
     for i in range(mult - 1):
         X_upg = np.concatenate(
             (X_upg,
              np.array([
                  warp(
                      item,
                       SimilarityTransform(
                           translation=(np.random.randint(-1, 2),
                                        np.random.randint(-1, 2))))
                  for item in [
                      rotate(item, np.random.randint(-15, 15))
                      for item in self.X
                  ]
              ])))
         y_upg = np.concatenate((y_upg, self.y))
         print('iteration {0} is successfully done'.format(i))
     return X_upg, y_upg
Code Example #17
File: algo_utils.py Project: arnaudmarcoux/histo_mri
def register_histo(histo_img, preprocessed_mr_images, ref='t1', n_points=7):

    grayscale_histo = np.mean(histo_img, axis=2)
    reference = nib.load(preprocessed_mr_images.file_paths[ref]).get_fdata()  # get_data() was removed in nibabel 5

    myfig = plt.figure(4)
    myfig.suptitle("Select corresponding landmarks (" + str(n_points) + ") for similarity transform !")

    plt.subplot(1, 2, 1)
    plt.imshow(grayscale_histo, cmap="gray")
    x_histo = plt.ginput(n_points, timeout=0)
    print("Selected points for histological cut:")
    x_histo = np.array(x_histo)
    print(x_histo)

    plt.subplot(1, 2, 2)
    plt.imshow(reference, cmap="gray")
    x_mr = plt.ginput(n_points, timeout=0)
    print("Selected points for " + ref + ":")
    x_mr = np.array(x_mr)
    print(x_mr)

    sim_transform = SimilarityTransform()
    sim_transform.estimate(x_mr, x_histo)
    print(Fore.GREEN + "Parameter estimation - transformation matrix: " + Fore.RESET)
    print(sim_transform.params)
    warped = warp(grayscale_histo,
                  sim_transform,
                  output_shape=reference.shape)
    # grayscale_histo = grayscale_histo.astype('float32')
    return sim_transform.params, warped
Code Example #18
 def __image_transform__(self, theImage, scaleFactor, rotationAngle, txFactor, tyFactor):
     centerY, centerX = np.array(theImage.shape[:2]) / 2.
     theRotation = SimilarityTransform(rotation=np.deg2rad(rotationAngle))
     theZoom = SimilarityTransform(scale=scaleFactor)
     theShift = SimilarityTransform(translation=[-centerX, -centerY])
     theShiftInv = SimilarityTransform(translation=[centerX, centerY])
     theTranslation = SimilarityTransform(translation=[txFactor * 2 * centerX,
                                                       tyFactor * 2 * centerY])
     return warp(theImage,
                 (theShift + (theRotation + theShiftInv)) +
                 (theShift + (theZoom + theShiftInv)) + theTranslation,
                 mode='reflect')
Code Example #19
def test_union():
    tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)

    tform = tform1 + tform2

    assert_array_almost_equal(tform._matrix, tform3._matrix)
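
The composition rule this test asserts can also be checked directly; a small illustrative snippet (scikit-image applies the left operand first, so (a + b)(x) equals b(a(x))):

import numpy as np
from skimage.transform import SimilarityTransform

pts = np.array([[0.0, 0.0], [1.0, 2.0], [-3.0, 0.5]])
a = SimilarityTransform(scale=0.5, rotation=0.2)
b = SimilarityTransform(scale=2.0, rotation=-0.2, translation=(1, 1))
np.testing.assert_allclose((a + b)(pts), b(a(pts)))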
Code Example #20
File: masks.py Project: xyu335/SemanticSeg
def create_img_mask(r, c, n, transforms, priorities):
    pro_idx = priorities[0]  # [0, 1, 2, 3]
    print(r, c, n)
    stitch_masks = np.array([pow(2, i) * np.ones((r, c)) for i in range(2)])
    return_masks = []
    corners = np.array([[0, 0], [0, r], [c, r], [c, 0]]).astype(float)  # np.float was removed in NumPy 1.24
    for index in range(n):
        # create mask for index-th image

        if index == pro_idx:
            return_masks.append(None)
            continue
        else:
            al_corners = corners
            warped_corners = transforms[pro_idx](corners)
            al_corners = np.vstack((al_corners, warped_corners))
            warped_corners = transforms[index](corners)
            al_corners = np.vstack((al_corners, warped_corners))

            corner_min = np.min(al_corners, axis=0)
            corner_max = np.max(al_corners, axis=0)
            output_shape = (corner_max - corner_min)

            output_shape = np.ceil(output_shape[::-1]).astype(int)
            offset = SimilarityTransform(translation=-corner_min)
            offset_inv = SimilarityTransform(translation=corner_min)

            total_masks = []
            total_masks.append(
                warp(stitch_masks[pro_idx, :, :],
                     (transforms[pro_idx] + offset).inverse,
                     output_shape=output_shape,
                     cval=0))
            total_masks.append(
                warp(stitch_masks[1, :, :], (transforms[1] + offset).inverse,
                     output_shape=output_shape,
                     cval=0))
            total_masks = np.sum(np.array(total_masks), axis=0)

            # return val
            transform_inv = ProjectiveTransform(transforms[index]._inv_matrix)
            return_masks.append(
                warp(total_masks, (offset_inv + transform_inv).inverse,
                     output_shape=[r, c],
                     cval=0))
            return_masks[index][(return_masks[index] % 1.0 != 0)] = pow(
                2, 1)  # pow(2,i)

            ret_masks = return_masks[index]
            # now the image that has to be bitwise-and. so the background image has to be 255
            # the mask[i]. the overlap_label will be 2^len - 1 = 3, overlap label
            overlap_value = pow(2, 2) - 1  # 3
            # print ((ret_masks==3.0).sum())
            ret_masks[(ret_masks != overlap_value)] = 255  # white
            ret_masks[(ret_masks == overlap_value)] = 0  # reverse mask
            ret_masks = ret_masks.astype('uint8')
            # print((ret_masks[index] == 255).sum())
    return return_masks
Code Example #21
def get_pairs(imgs, unique_pairs, offsets, subsample_factor, overlap_pixels,
              n_kp):
    """Create inlier keypoint pairs."""

    orb = ORB(n_keypoints=n_kp, fast_threshold=0.05)
    k = gaussian(offsets * 2 + 1, 1, sym=True)
    tf = SimilarityTransform  # tf = RigidTransform
    tf0 = SimilarityTransform()
    # FIXME: is there no rigid model in scikit-image???
    # this is needed for input to RANSAC
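    # (newer scikit-image does provide one: skimage.transform.EuclideanTransform)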

    pairs = []
    init_tfs = np.empty([n_slcs, n_tiles, 3])
    for p in unique_pairs:
        pair_tstart = time()

        full_im1, full_im2 = subsample_images(p, imgs, subsample_factor)

        part_im1, part_im2 = select_imregions(p[2], full_im1, full_im2,
                                              overlap_pixels)
        keyp_im1, desc_im1 = get_keypoints(orb, part_im1)
        keyp_im2, desc_im2 = get_keypoints(orb, part_im2)
        keyp_im1, keyp_im2 = reset_imregions(p[2], keyp_im1, keyp_im2,
                                             overlap_pixels, full_im1.shape)

        matches = match_descriptors(desc_im1, desc_im2, cross_check=True)
        dst = keyp_im1[matches[:, 0]][:, ::-1]
        src = keyp_im2[matches[:, 1]][:, ::-1]
        model, inliers = ransac((src, dst),
                                tf,
                                min_samples=4,
                                residual_threshold=2,
                                max_trials=300)

        w = k[offsets - (p[1][0] - p[0][0])]

        pairs.append((p, src[inliers], dst[inliers], model, w))

        if (p[0][1] == 0) & (p[0][0] == p[1][0]
                             ):  # referenced to tile 0 within the same slice
            tf1 = tf0.__add__(model)
            itf = [
                math.acos(min(tf1.params[0, 0],
                              1)),  # FIXME!!! with RigidTransform
                tf1.params[0, 2],
                tf1.params[1, 2]
            ]
            init_tfs[p[1][0], p[1][1], :] = np.array(itf)
        if (p[0][1] == p[1][1] == 0) & (
                p[1][0] - p[0][0] == 1):  # if [slcX,tile0] to [slcX-1,tile0]
            tf0 = tf0.__add__(model)

        plot_pair_ransac(p, full_im1, full_im2, keyp_im1, keyp_im2, matches,
                         inliers)
        print('Pair done in: %.2f s' % (time() - pair_tstart, ))

    return pairs, init_tfs
Code Example #22
File: test_warps.py Project: Aakalpa/Thrift
def test_zero_image_size():
    with testing.raises(ValueError):
        warp(np.zeros(0), SimilarityTransform())
    with testing.raises(ValueError):
        warp(np.zeros((0, 10)), SimilarityTransform())
    with testing.raises(ValueError):
        warp(np.zeros((10, 0)), SimilarityTransform())
    with testing.raises(ValueError):
        warp(np.zeros((10, 10, 0)), SimilarityTransform())
Code Example #23
File: transform.py Project: Keesiu/meta-kaggle
def build_center_uncenter_transforms(image_shape):
    """
    These are used to ensure that zooming and rotation happens around the center of the image.
    Use these transforms to center and uncenter the image around such a transform.
    Copied this function from Kaggle NDSB winners: https://github.com/benanne/kaggle-ndsb
    """
    center_shift = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5 # need to swap rows and cols here apparently! confusing!
    tform_uncenter = SimilarityTransform(translation=-center_shift)
    tform_center = SimilarityTransform(translation=center_shift)
    return tform_center, tform_uncenter
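
An illustrative way to use these helpers (assumed context, not from the repository): sandwich a rotation between the two shifts so it pivots about the image center, in the same pattern as Code Example #25:

import numpy as np
from skimage.transform import SimilarityTransform, warp

image = np.random.rand(64, 48)
tform_center, tform_uncenter = build_center_uncenter_transforms(image.shape)
rot = SimilarityTransform(rotation=np.deg2rad(30))
# uncenter first, rotate, then re-center; passed to warp as the inverse map
rotated = warp(image, tform_uncenter + rot + tform_center)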
Code Example #24
File: utils.py Project: JanMigon/Diffusion-mri
def transform_image(image, rotation=0):
    transform = AffineTransform(rotation=np.deg2rad(rotation))
    shift_y, shift_x = (np.array(image.shape) - 1) / 2.
    shift_fwd = SimilarityTransform(translation=[-shift_x, -shift_y])
    shift_back = SimilarityTransform(translation=[shift_x, shift_y])
    transformed = warp(image, (shift_fwd + (transform + shift_back)).inverse,
                       order=1,
                       preserve_range=True,
                       mode='constant')
    return transformed
Code Example #25
File: test_rotate.py Project: coldgemini/python-all
def rotate(image, angle, cval):
    center = np.array((image.shape[0], image.shape[1])) / 2. - 0.5
    tform1 = SimilarityTransform(translation=center)
    tform2 = SimilarityTransform(rotation=angle * np.pi / 180)
    tform3 = SimilarityTransform(translation=-center)
    tform = tform3 + tform2 + tform1

    result = []
    for i in range(image.shape[2]):
        result.append(warp(image[:, :, i], tform, cval=cval))
    return np.transpose(result, (1, 2, 0))
Code Example #26
def scale_image(img, scaling_factor):
    """Scale the image foreground

    This function scales the image foreground by a factor.
    The background of the image is assumed to be black (0 grayscale).

    .. versionadded:: 0.7

    Parameters
    ----------
    img : ndarray
        A 2D NumPy array representing a (height, width) grayscale image, or a
        3D NumPy array representing a (height, width, channels) RGB image
    scaling_factor : float
        Factor by which to scale the image

    Returns
    -------
    img : ndarray
        A copy of the scaled image

    """
    if img.ndim < 2 or img.ndim > 3:
        raise ValueError("Only 2D and 3D images are allowed, not "
                         "%dD." % img.ndim)
    if img.ndim == 3 and img.shape[-1] > 3:
        raise ValueError("Only RGB and grayscale images are allowed, not "
                         "%d-channel images." % img.shape[-1])
    if scaling_factor <= 0:
        raise ValueError("Scaling factor must be greater than zero")
    # Calculate center of mass:
    m = moments(img, order=1)
    # No area found:
    if isclose(m[0, 0], 0):
        return img
    # Shift the phosphene to (0, 0):
    center_mass = np.array([m[0, 1] / m[0, 0], m[1, 0] / m[0, 0]])
    tf_shift = SimilarityTransform(translation=-center_mass)
    # Scale the phosphene:
    tf_scale = SimilarityTransform(scale=scaling_factor)
    # Shift the phosphene back to where it was:
    tf_shift_inv = SimilarityTransform(translation=center_mass)
    # Combine all three transforms:
    tf = tf_shift + tf_scale + tf_shift_inv
    img_warped = warp(img, tf.inverse)
    # Warp automatically converts to double, so we need to convert the image
    # back to its original format:
    if img.dtype == bool:
        return img_as_bool(img_warped)
    if img.dtype == np.uint8:
        return img_as_ubyte(img_warped)
    if img.dtype == np.float32:
        return img_as_float32(img_warped)
    return img_warped
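
An illustrative call with toy data (hypothetical; relies on the module-level imports the function already uses):

import numpy as np

img = np.zeros((100, 100), dtype=np.uint8)
img[40:60, 40:60] = 255       # a bright square on a black background
half = scale_image(img, 0.5)  # foreground shrinks around its center of mass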
Code Example #27
def distort(img):
    shift_y, shift_x = np.array(img.shape[:2]) / 2.
    
    shift = SimilarityTransform(translation=[-shift_x, -shift_y])
    tf = SimilarityTransform(
        rotation=np.deg2rad(random.uniform(-5.0, 5.0)), 
        scale=random.uniform(0.9, 1.1),
        translation=(random.uniform(-0.1, 0.1)*img.shape[0], random.uniform(-0.1, 0.1)*img.shape[1])
    )
    shift_inv = SimilarityTransform(translation=[shift_x, shift_y])
    
    return warp(img, (shift + (tf + shift_inv)).inverse, mode='edge')
Code Example #28
def align_face(img, landmark, destination_size=(96, 112)):
    # TODO :: align to an image of the desired size
    """
    Aligns the image around the facial landmark coordinates (eyes, nose, mouth).

    :param img:  np array image (H, W, C)
    :param landmark:  np array coordinates 5 x 2: eye 1, eye 2, nose, both mouth corners.

    if len(landmark) == 68:
        It will be converted to 5x2

    Based on the 68-landmark indexing:
    [mean of 36:42,
    mean of 42:48,
    30,
    48,
    54]

    :return:

    """

    if len(landmark) == 68:
        landmark = np.array(landmark)
        landmark = np.array([
            landmark[36:42, :2].mean(axis=0), landmark[42:48, :2].mean(axis=0),
            landmark[30, :2], landmark[48, :2], landmark[54, :2]
        ])

    ref_size = np.array([96, 112])

    dst = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                    [33.5493, 92.3655], [62.7299, 92.2041]],
                   dtype=np.float32)

    dst_size = np.array(destination_size)

    p = dst_size / ref_size
    dst = dst * p

    src = landmark
    tform = SimilarityTransform()
    tform.estimate(src, dst)
    M = tform.params[0:2, :]
    out = cv2.warpAffine(img, M, destination_size, borderValue=0.0)
    return out
Code Example #29
def apply_random_transformation(background_size, segmented_box, config_params):
    """apply a random transformation to 2D coordinates nomalized to image size"""
    # translate object coordinates to the object center's frame, i.e. whitens
    whitened_coords_norm = segmented_box.segmented_coords_norm - (
        segmented_box.x_center_norm, segmented_box.y_center_norm)

    # then generate a random rotation around the z-axis (perpendicular to the image plane), and limit the object scale
    # to maximum (default) 50% of the background image, i.e. the normalized largest dimension of the object must be at
    # most 0.5. To put it simply, scale the objects down if they're too big.
    # TODO(minhnh) add shear
    max_scale = config_params.max_scale
    if segmented_box.max_dimension_norm > config_params.max_obj_size_in_bg:
        max_scale = config_params.max_obj_size_in_bg / segmented_box.max_dimension_norm
    random_rot_angle = np.random.uniform(0, np.pi)
    rand_scale = np.random.uniform(config_params.min_scale,
                                   max_scale)  # use the clamped bound computed above

    # generate a random translation within the image boundaries for whitened, normalized coordinates, taking into
    # account the maximum allowed object dimension. After this translation, the normalized coordinates should
    # stay within [margin, 1-margin] for each dimension
    scaled_max_dimension = segmented_box.max_dimension_norm * max_scale
    low_norm_bound, high_norm_bound = ((scaled_max_dimension / 2) +
                                       config_params.margin,
                                       1 - config_params.margin -
                                       (scaled_max_dimension / 2))
    random_translation_x = np.random.uniform(
        low_norm_bound, high_norm_bound) * background_size[1]
    random_translation_y = np.random.uniform(
        low_norm_bound, high_norm_bound) * background_size[0]

    # create the transformation matrix for the generated rotation, translation and scale
    if np.random.uniform() < config_params.prob_rand_rotation:
        tf_matrix = SimilarityTransform(
            rotation=random_rot_angle,
            scale=rand_scale * min(background_size),
            translation=(random_translation_x, random_translation_y)).params
    else:
        tf_matrix = SimilarityTransform(
            scale=rand_scale * min(background_size),
            translation=(random_translation_x, random_translation_y)).params

    # apply transformation
    transformed_coords = matrix_transform(whitened_coords_norm, tf_matrix)

    # we clip the object coordinates so that they are within the image boundaries
    transformed_coords[np.where(transformed_coords[:, 0] < 0), 0] = 0
    transformed_coords[np.where(transformed_coords[:, 0] > background_size[1]),
                       0] = background_size[1] - 1

    transformed_coords[np.where(transformed_coords[:, 1] < 0), 1] = 0
    transformed_coords[np.where(transformed_coords[:, 1] > background_size[0]),
                       1] = background_size[0] - 1
    return transformed_coords
Code Example #30
def augmentation_image_ms(image,
                          rotation_range=0,
                          shear_range=0,
                          scale_range=1,
                          transform_range=0,
                          horizontal_flip=False,
                          vertical_flip=False,
                          warp_mode='edge'):
    from skimage.transform import AffineTransform, SimilarityTransform, warp
    from numpy import deg2rad, flipud, fliplr
    from numpy.random import uniform, randint  # random_integers was removed from NumPy
    from random import choice

    image_shape = image.shape
    # Generate image transformation parameters
    rotation_angle = uniform(low=-abs(rotation_range),
                             high=abs(rotation_range))
    shear_angle = uniform(low=-abs(shear_range), high=abs(shear_range))
    scale_value = uniform(low=abs(1 / scale_range), high=abs(scale_range))
    translation_values = (randint(-abs(transform_range),
                                  abs(transform_range) + 1),
                          randint(-abs(transform_range),
                                  abs(transform_range) + 1))  # +1: upper bound is exclusive

    # Horizontal and vertical flips
    if horizontal_flip:
        # randomly flip image up/down
        if choice([True, False]):
            image = flipud(image)
    if vertical_flip:
        # randomly flip image left/right
        if choice([True, False]):
            image = fliplr(image)

    # Generate transformation object
    # scale must be a scalar for SimilarityTransform
    transform_toorigin = SimilarityTransform(scale=1,
                                             rotation=0,
                                             translation=(-image_shape[0],
                                                          -image_shape[1]))
    transform_revert = SimilarityTransform(scale=1,
                                           rotation=0,
                                           translation=(image_shape[0],
                                                        image_shape[1]))
    transform = AffineTransform(scale=(scale_value, scale_value),
                                rotation=deg2rad(rotation_angle),
                                shear=deg2rad(shear_angle),
                                translation=translation_values)
    # Apply transform
    image = warp(image, ((transform_toorigin) + transform) + transform_revert,
                 mode=warp_mode,
                 preserve_range=True)
    return image
Code Example #31
def test_similarity_estimation():
    # exact solution
    tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
    assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
    assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
    assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])

    # over-determined
    tform2 = estimate_transform('similarity', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
    assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])

    # via estimate method
    tform3 = SimilarityTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3._matrix, tform2._matrix)
Code Example #32
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))
Code Example #33
File: test_geometric.py Project: Cadair/scikit-image
def test_union_differing_types():
    tform1 = SimilarityTransform()
    tform2 = PolynomialTransform()
    with testing.raises(TypeError):
        tform1.__add__(tform2)