Example #1
    def transform_for_clip(self,
                           video_id,
                           dst_w=720,
                           dst_h=360,
                           points_random_shift=0):
        """Finds transform to crop around ruler.

        # Arguments
            video_id: Video ID.
            dst_w: Width of cropped image.
            dst_h: Height of cropped image.
            points_random_shift: Maximum random shift, in pixels, applied to the image points.
        """
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])
        if video_id not in self.ruler_points:
            return None

        points = self.ruler_points[video_id]

        ruler_points = np.array([[points.x1, points.y1],
                                 [points.x2, points.y2]])

        if points_random_shift > 0:
            img_points += np.random.uniform(-points_random_shift,
                                            points_random_shift, (2, 2))

        tform = SimilarityTransform()
        tform.estimate(dst=ruler_points, src=img_points)

        return tform
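A minimal usage sketch (assuming a `dataset` object exposing this method and a frame `img` from the corresponding clip; the warp call mirrors the check_ruler_points example below):

from skimage.transform import warp

# Hypothetical usage: `dataset`, `video_id` and `img` are assumed to exist.
tform = dataset.transform_for_clip(video_id, dst_w=720, dst_h=360)
if tform is not None:
    crop = warp(img, tform, mode='edge', order=3, output_shape=(360, 720))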
Example #2
    def __call__(self, image):
        """
        """
        downscaled = self.downscale(image) / 256
        h, w = downscaled.shape[:2]
        u, v = self.gcnet_shape
        half_size = (min(h, w) - 1) // 2

        scale = min(image.shape[:2]) / u

        src = np.array([
            (w / 2 - half_size, h / 2 - half_size),  # A
            (w / 2 + half_size, h / 2 - half_size),  # B
            (w / 2 - half_size, h / 2 + half_size),  # C
        ])
        dst = np.array([
            (0, 0),  # A
            (u, 0),  # B
            (0, v)  # C
        ])

        T = SimilarityTransform()
        T.estimate(src, dst)
        X = warp(downscaled, T.inverse, output_shape=self.gcnet_shape,
                 order=2).astype(np.float32)

        T_pred, R_pred = self.gcnet.predict(X[None, ...])

        return theta_from_soft_label(T_pred), scale * rho_from_soft_label(
            R_pred, K_range=100)
Example #3
def align(img, landmarks, image_size=(112, 112)):
	'''
	Takes the original image and detected landmarks in numpy format.
	:param img: original image
	:param landmarks: detected landmarks, [[x1, y1], [x2, y2], [x3, y3], [x4, y4], [x5, y5]]
	:param image_size: output image size
	:return warped_landmarks:
	:return warped_image:
	'''
	assert isinstance(img, np.ndarray)
	assert isinstance(landmarks, np.ndarray)
	assert landmarks.shape == (5, 2)

	src = np.array([
		[30.2946, 51.6963],
		[65.5318, 51.5014],
		[48.0252, 71.7366],
		[33.5493, 92.3655],
		[62.7299, 92.2041]], dtype=np.float32)
	if image_size[1] == 112:
		src[:, 0] += 8.0
	dst = landmarks.astype(np.float32)
	tform = SimilarityTransform()
	tform.estimate(dst, src)
	M = tform.params[0:2, :]
	warped_image = cv2.warpAffine(img, M, image_size, borderValue=0.0)
	warped_landmarks = cv2.perspectiveTransform(dst[np.newaxis, ...], tform.params)[0]
	return warped_landmarks, warped_image
Example #4
    def align_face(self, img, src_img_size, landmark, origin):
        dst_size = 112
        new_dst_size = 224
        dst = np.array([
            [30.2946 + 8, 51.6963],
            [65.5318 + 8, 51.5014],
            [48.0252 + 8, 71.7366],
            [33.5493 + 8, 92.3655],
            [62.7299 + 8, 92.2041]], dtype=np.float32) * new_dst_size / dst_size
        p = src_img_size / new_dst_size
        dst = dst * p

        src = landmark - np.array(origin)
        # print("landmark2", src)
        #dst = np.transpose(landmark).reshape(1,5,2)
        #src = src.reshape(1,5,2)
        # print(src)
        # print(dst)
        # transmat = cv2.estimateRigidTransform(dst.astype(np.float32),
        #                                       src.astype(np.float32), False)
        # out = cv2.warpAffine(img, transmat, (dst_img_size, dst_img_size))
        tform = SimilarityTransform()
        tform.estimate(src, dst)
        M = tform.params[0:2, :]
        out = cv2.warpAffine(img, M, (src_img_size, src_img_size), borderValue=0.0)
        return out
Example #5
def register_histo(histo_img, preprocessed_mr_images, ref='t1', n_points=7):

    grayscale_histo = np.mean(histo_img, axis=2)
    reference = nib.load(preprocessed_mr_images.file_paths[ref]).get_data()

    myfig = plt.figure(4)
    myfig.suptitle("Select corresponding landmarks (" + str(n_points) + ") for similarity transform !")

    plt.subplot(1, 2, 1)
    plt.imshow(grayscale_histo, cmap="gray")
    x_histo = plt.ginput(n_points, timeout=0)
    print("Selected points for histological cut:")
    x_histo = np.array(x_histo)
    print(x_histo)

    plt.subplot(1, 2, 2)
    plt.imshow(reference, cmap="gray")
    x_mr = plt.ginput(n_points, timeout=0)
    print("Selected points for " + ref + ":")
    x_mr = np.array(x_mr)
    print(x_mr)

    sim_transform = SimilarityTransform()
    sim_transform.estimate(x_mr, x_histo)
    print(Fore.GREEN + "Parameter estimation - transformation matrix: " + Fore.RESET)
    print(sim_transform.params)
    warped = warp(grayscale_histo,
                  sim_transform,
                  output_shape=reference.shape)
    return sim_transform.params, warped
Example #6
def check_ruler_points():
    points = pd.read_csv('../output/ruler_points.csv')
    for _, row in points.iterrows():
        video_id = row.video_id
        img = scipy.misc.imread(os.path.join(IMAGES_DIR, video_id, "0001.jpg"))

        dst_w = 720
        dst_h = 360
        ruler_points = np.array([[row.ruler_x0, row.ruler_y0],
                                 [row.ruler_x1, row.ruler_y1]])
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])

        tform = SimilarityTransform()
        tform.estimate(dst=ruler_points, src=img_points)
        crop = skimage.transform.warp(img,
                                      tform,
                                      mode='edge',
                                      order=3,
                                      output_shape=(dst_h, dst_w))

        print('ruler:\n', ruler_points)
        print('img:\n', img_points)

        print('ruler from img:\n', tform(img_points))
        print('img from ruler:\n', tform.inverse(ruler_points))
        print('scale', tform.scale)

        plt.subplot(2, 1, 1)
        plt.imshow(img)
        plt.plot([row.ruler_x0, row.ruler_x1], [row.ruler_y0, row.ruler_y1])

        plt.subplot(2, 1, 2)
        plt.imshow(crop)
        plt.show()
Example #7
    def get_face(img, dst, target_size=(112, 112)):
        """
        :param img: image
        :param dst:
        :param target_size:
        :return:
        """
        # MS1M
        # [38.128662 51.516567]
        # [74.21549  51.55989 ]
        # [56.056564 72.434525]
        # [40.48149  90.873665]
        # [71.38436  90.78255 ]

        # LFW
        # [38.411846 52.59001 ]
        # [73.68209  52.300644]
        # [56.092415 72.949585]
        # [40.763634 90.94648 ]
        # [71.64599  90.62956 ]

        src = np.array(
            [
                [38.2946, 51.6963],
                [73.5318, 51.5014],
                [56.0252, 71.7366],
                [41.5493, 92.3655],
                [70.7299, 92.2041],
            ],
            dtype=np.float32,
        )
        tform = SimilarityTransform()
        tform.estimate(dst, src)
        tmatrix = tform.params[0:2, :]
        return cv2.warpAffine(img, tmatrix, target_size, borderValue=0.0)
Example #8
def align_face(img, landmark, destination_size=(96, 112)):
    # TODO: support aligning to an image of the desired size
    """
    Aligns the image around the facial landmark coordinates (eyes, nose, mouth).

    :param img:  np array image (H, W, C)
    :param landmark:  np array of 5 x 2 coordinates: eye1, eye2, nose, and the two mouth corners.

    if len(landmark) == 68:
        It will be converted to 5x2

    Based on the 68-point landmarks:
    [mean of 36:42,
    mean of 42:48,
    30,
    48,
    54]

    :return:

    """

    if len(landmark) == 68:
        landmark = np.array(landmark)
        landmark = np.array([
            landmark[36:42, :2].mean(axis=0), landmark[42:48, :2].mean(axis=0),
            landmark[30, :2], landmark[48, :2], landmark[54, :2]
        ])

    ref_size = np.array([96, 112])

    dst = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                    [33.5493, 92.3655], [62.7299, 92.2041]],
                   dtype=np.float32)

    dst_size = np.array(destination_size)

    p = dst_size / ref_size
    dst = dst * p

    src = landmark
    tform = SimilarityTransform()
    tform.estimate(src, dst)
    M = tform.params[0:2, :]
    out = cv2.warpAffine(img, M, destination_size, borderValue=0.0)
    return out
Example #9
def estimate_similarity_transform(ref, points):
    '''
    
    ref = np.vstack([landmarks_im['x'],landmarks_im['y']]).T
    match = point_stream.data    
    cor = np.vstack([match['x'],match['y']]).T
    
    M = estimate_similarity_transform(ref, cor)
    '''
    from skimage.transform import SimilarityTransform
    M = SimilarityTransform()
    M.estimate(ref, points)
    return M
Example #10
    def similarity_transform(image, landmarks):
        # Anchor coordinates are based on the 240x320 resolution and need to
        # be scaled accordingly for images of a different size.
        anchor_scale = 320 / image.shape[1]
        anchor = np.array([[110, 71], [210, 71], [160, 170]],
                          np.float32) / anchor_scale
        idx = [36, 45, 57]
        tform = SimilarityTransform()
        tform.estimate(landmarks[idx, :], anchor)
        sim_mat = tform.params[:2, :]
        dst = cv2.warpAffine(image, sim_mat, (image.shape[1], image.shape[0]))
        dst_lmks = np.matmul(
            np.concatenate((landmarks, np.ones((landmarks.shape[0], 1))), 1),
            sim_mat.T)[:, :2]
        return dst, dst_lmks
Example #11
def doOpticalFlow(prevOutput, targetPoints, target_frame, prev_target_frame,
                  frame_no):
    p0 = np.asarray(targetPoints).astype(np.float32)[:, :, None]
    p0 = np.transpose(p0, (0, 2, 1))
    old_gray = cv2.cvtColor(prev_target_frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.cvtColor(target_frame, cv2.COLOR_BGR2GRAY)

    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    newOutput = np.copy(prevOutput)

    transform = SimilarityTransform()
    if transform.estimate(good_old, good_new):
        newOutput = transform_image(good_old, good_new, prevOutput,
                                    target_frame, frame_no)
        # make a convex fill mask inside this hull
        # for each point in the new image, copy from the old image/prevOutput (use transformation/float points..)
        # use the above mask to only keep the masked part of the above copy
        # seamless cloning will not be required.

    return newOutput, listOfListToTuples(good_new.tolist())
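Both this example and the get_optical_flow example below splat a module-level `lk_params` dict into cv2.calcOpticalFlowPyrLK. Neither definition is shown; a minimal sketch using the values from the OpenCV tutorials (an assumption, not taken from either repo):

import cv2

# Assumed Lucas-Kanade tracker parameters for cv2.calcOpticalFlowPyrLK.
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))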
Example #12
def test_3d_similarity_estimation():
    src_points = np.random.rand(1000, 3)

    # Random transformation for testing
    angles = np.random.random((3, )) * 2 * np.pi - np.pi
    scale = np.random.randint(0, 20)
    rotation_matrix = _euler_rotation_matrix(angles) * scale
    translation_vector = np.random.random((3, ))
    dst_points = []
    for pt in src_points:
        pt_r = pt.reshape(3, 1)
        dst = np.matmul(rotation_matrix, pt_r) + \
            translation_vector.reshape(3, 1)
        dst = dst.reshape(3)
        dst_points.append(dst)

    dst_points = np.array(dst_points)
    # estimating the transformation
    tform = SimilarityTransform(dimensionality=3)
    assert tform.estimate(src_points, dst_points)
    estimated_rotation = tform.rotation
    estimated_translation = tform.translation
    estimated_scale = tform.scale
    assert_almost_equal(estimated_translation, translation_vector)
    assert_almost_equal(estimated_scale, scale)
    assert_almost_equal(estimated_rotation, rotation_matrix)
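The helper `_euler_rotation_matrix` is not shown; a minimal sketch, assuming the usual composition of per-axis rotations as Rz @ Ry @ Rx (the exact convention is an assumption):

import numpy as np

def _euler_rotation_matrix(angles):
    # Rotations about the x, y and z axes, composed as Rz @ Ry @ Rx.
    ax, ay, az = angles
    rx = np.array([[1, 0, 0],
                   [0, np.cos(ax), -np.sin(ax)],
                   [0, np.sin(ax), np.cos(ax)]])
    ry = np.array([[np.cos(ay), 0, np.sin(ay)],
                   [0, 1, 0],
                   [-np.sin(ay), 0, np.cos(ay)]])
    rz = np.array([[np.cos(az), -np.sin(az), 0],
                   [np.sin(az), np.cos(az), 0],
                   [0, 0, 1]])
    return rz @ ry @ rx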
Example #13
def preprocess_face(image, landmark, image_size=(112, 112)):
    """Prepares the face image for the recognition model.

    Uses the detected landmarks from the face detection stage, then aligns
    the image to the `image_size` template (padding with zeros where needed)
    and turns it into the BGR CxHxW format.

    Parameters
    ----------
    image : np.ndarray of size HxWxC.
        Image containing a face to preprocess.
    landmark : np.ndarray of size (5, 2).
        Landmark coordinates.
    image_size : tuple of int, optional
        Output (height, width) of the aligned face.

    """

    # Target location of the facial landmarks.
    src = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                    [33.5493, 92.3655], [62.7299, 92.2041]],
                   dtype=np.float32)

    if image_size[1] == 112:
        src[:, 0] += 8.0

    dst = landmark.astype(np.float32)

    t_form = SimilarityTransform()
    t_form.estimate(dst, src)

    #
    # Do align using landmark.
    #
    # The Image.transform method requires the inverted transformation matrix,
    # without the last row, and flattened
    #
    t_matrix = np.linalg.inv(t_form.params)[0:-1, :].flatten()

    warped = Image.fromarray(image).transform(
        size=(image_size[1], image_size[0]),
        method=Image.AFFINE,
        data=t_matrix,
        resample=Image.BILINEAR,
        fillcolor=0,
    )

    warped = np.array(warped)
    return warped.transpose([2, 0, 1])[::-1, ...]
Example #14
def alignment(src_img, landmarks):
    ref_pts = [
        REF_LEFT_EYE, REF_RIGHT_EYE, REF_NOSE, REF_LEFT_MOUTH_CORNER,
        REF_RIGHT_MOUTH_CORNER
    ]
    crop_size = (TARGET_IMG_WIDTH, TARGET_IMG_HEIGHT)

    s = np.array(ref_pts).astype(np.float32)
    r = np.array(landmarks).astype(np.float32)

    tfm = SimilarityTransform()
    tfm.estimate(r, s)
    M = tfm.params[0:2, :]

    face_img = cv2.warpAffine(src_img, M, crop_size)

    return face_img
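The REF_* constants and target dimensions are defined elsewhere in the repo; a sketch, assuming the 96x112 five-point template that recurs in the examples above:

# Assumed reference landmarks (96x112 template) and crop size.
REF_LEFT_EYE = (30.2946, 51.6963)
REF_RIGHT_EYE = (65.5318, 51.5014)
REF_NOSE = (48.0252, 71.7366)
REF_LEFT_MOUTH_CORNER = (33.5493, 92.3655)
REF_RIGHT_MOUTH_CORNER = (62.7299, 92.2041)
TARGET_IMG_WIDTH, TARGET_IMG_HEIGHT = 96, 112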
Example #15
def test_similarity_estimation():
    # exact solution
    tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
    assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
    assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
    assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])

    # over-determined
    tform2 = estimate_transform('similarity', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
    assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])

    # via estimate method
    tform3 = SimilarityTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3._matrix, tform2._matrix)
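SRC and DST are module-level fixtures from the test suite; any non-degenerate matched point sets make the test self-contained, e.g. (hypothetical values):

import numpy as np

# Hypothetical stand-ins for the fixtures: DST is an exact similarity
# transform of SRC (scale 1.5 plus a translation).
SRC = np.array([[0, 0], [0, 10], [10, 10], [10, 0], [5, 5]], dtype=float)
DST = SRC * 1.5 + np.array([2.0, 3.0])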
Example #16
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    # See gh-3926 for discussion details
    tform = ProjectiveTransform()
    for i in range(20):
        # Some random coordinates
        src = np.random.rand(4, 2) * 100
        dst = np.random.rand(4, 2) * 100

        # Degenerate the case by arranging points on a single line
        src[:, 1] = np.random.rand()
        # Prior to gh-3926, under the above circumstances,
        # a transform could be returned with nan values.
        assert (not tform.estimate(src, dst)
                or np.isfinite(tform.params).all())
Example #17
def compute_transformation_matrix(img, landmark, normalize, target_face_scale=1.0):

    std_pts = _standard_face_pts()  # [-1,1]
    target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0

    h, w, c = img.shape
    if normalize:
        landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
        landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0

    affine = SimilarityTransform()

    affine.estimate(target_pts, landmark)

    return affine.params
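`_standard_face_pts` is not shown in either compute_transformation_matrix example; a sketch of a plausible implementation (the coordinates are an assumption, normalized to [-1, 1] as the comment above indicates):

import numpy as np

def _standard_face_pts():
    # Five canonical landmarks on a 256x256 face, normalized to [-1, 1].
    pts = np.array([196.0, 226.0, 316.0, 226.0, 256.0, 286.0,
                    220.0, 360.4, 292.0, 360.4], np.float32) / 256.0 - 1.0
    return np.reshape(pts, (5, 2))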
Example #18
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = EuclideanTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    # See gh-3926 for discussion details
    tform = ProjectiveTransform()
    for i in range(20):
        # Some random coordinates
        src = np.random.rand(4, 2) * 100
        dst = np.random.rand(4, 2) * 100

        # Degenerate the case by arranging points on a single line
        src[:, 1] = np.random.rand()
        # Prior to gh-3926, under the above circumstances,
        # a transform could be returned with nan values.
        assert(not tform.estimate(src, dst) or np.isfinite(tform.params).all())

    src = np.array([[0, 2, 0], [0, 2, 0], [0, 4, 0]])
    dst = np.array([[0, 1, 0], [0, 1, 0], [0, 3, 0]])
    tform = AffineTransform()
    assert not tform.estimate(src, dst)
    # Prior to gh-6207, the above would set the parameters as the identity.
    assert np.all(np.isnan(tform.params))

    # The tesselation on the following points produces one degenerate affine
    # warp within PiecewiseAffineTransform.
    src = np.asarray([
        [0, 192, 256], [0, 256, 256], [5, 0, 192], [5, 64, 0], [5, 64, 64],
        [5, 64, 256], [5, 192, 192], [5, 256, 256], [0, 192, 256],
    ])

    dst = np.asarray([
        [0, 142, 206], [0, 206, 206], [5, -50, 142], [5, 14, 0], [5, 14, 64],
        [5, 14, 206], [5, 142, 142], [5, 206, 206], [0, 142, 206],
    ])
    tform = PiecewiseAffineTransform()
    assert not tform.estimate(src, dst)
    assert np.all(np.isnan(tform.affines[4].params))  # degenerate affine
    for idx, affine in enumerate(tform.affines):
        if idx != 4:
            assert not np.all(np.isnan(affine.params))
    for affine in tform.inverse_affines:
        assert not np.all(np.isnan(affine.params))
Example #19
    def transform_for_clip(self,
                           video_id,
                           dst_w=720,
                           dst_h=360,
                           points_random_shift=0):
        points = self.ruler_points[video_id]

        ruler_points = np.array([[points.x1, points.y1],
                                 [points.x2, points.y2]])
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])

        if points_random_shift > 0:
            img_points += np.random.uniform(-points_random_shift,
                                            points_random_shift, (2, 2))

        tform = SimilarityTransform()
        tform.estimate(dst=ruler_points, src=img_points)

        return tform
Example #20
def compute_transformation_matrix(img,
                                  landmark,
                                  normalize,
                                  target_face_scale=1.0,
                                  inverse=False):
    std_pts = _standard_face_pts()  # [-1,1]
    target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0

    h, w, c = img.shape
    if normalize:
        landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
        landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0

    affine = SimilarityTransform()
    if inverse:
        affine.estimate(landmark, target_pts)
    else:
        affine.estimate(target_pts, landmark)

    return affine
Example #21
    def compute_transformation(points: np.ndarray,
                               reference: np.ndarray) -> SimilarityTransform:
        """Obtain a transformation for aligning key points to
        reference positions.

        Arguments
        ---------
        points:
            A sequence of points to be mapped onto the reference points,
            given as (x,y) coordinates
        reference:
            A sequence with the same number of points serving as reference
            points to which `points` should be moved.

        """
        transformation = SimilarityTransform()
        transformation.estimate(reference, points)
        # transform.params: 3x3 matrix, projective coordinates,
        # last row [0,0,1]
        return transformation
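Because estimate is called with `reference` as src, the returned transform maps reference coordinates onto the detected points, which is exactly the direction skimage.transform.warp expects; a usage sketch (all names assumed):

from skimage.transform import warp

# Hypothetical usage: warp pulls pixels from the detected-point frame into
# the reference frame, producing the aligned image.
tform = compute_transformation(detected_points, reference_points)
aligned = warp(image, tform, output_shape=(112, 112))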
Example #22
    def get_face(img, dst, target_size=(112, 112)):
        """
        :param img: image to align
        :param dst: detected landmarks, shape (5, 2)
        :param target_size: output image size
        :return: aligned face crop
        """
        src = np.array(
            [
                [38.2946, 51.6963],
                [73.5318, 51.5014],
                [56.0252, 71.7366],
                [41.5493, 92.3655],
                [70.7299, 92.2041],
            ],
            dtype=np.float32,
        )
        tform = SimilarityTransform()
        tform.estimate(dst, src)
        tmatrix = tform.params[0:2, :]
        return cv2.warpAffine(img, tmatrix, target_size, borderValue=0.0)
Example #23
    def estimate_norm(lmk):
        assert lmk.shape == (5, 2)

        tform = SimilarityTransform()
        lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
        min_M = []
        min_index = []
        min_error = np.inf
        src = ARCFACE_SRC

        for i in np.arange(src.shape[0]):
            tform.estimate(lmk, src[i])
            M = tform.params[0:2, :]
            results = np.dot(M, lmk_tran.T)
            results = results.T
            error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))
            if error < min_error:
                min_error = error
                min_M = M
                min_index = i

        return min_M, min_index
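`ARCFACE_SRC` is not defined in this snippet; a minimal sketch, assuming a single 112x112 ArcFace template (the same five points used in Examples #7 and #22), stacked to shape (1, 5, 2) so the loop above has at least one candidate:

import numpy as np

# Assumed reference templates; insightface ships several, one suffices here.
ARCFACE_SRC = np.array([[[38.2946, 51.6963],
                         [73.5318, 51.5014],
                         [56.0252, 71.7366],
                         [41.5493, 92.3655],
                         [70.7299, 92.2041]]], dtype=np.float32)

Example #24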
    def align(self,
              image: np.ndarray) -> Tuple[List[Any], List[Any], List[Any]]:
        ret = self.model.detect_face(image, det_type=0)
        if ret is None:
            return [], [], []
        bounding_boxes, landmarks = ret
        if bounding_boxes.shape[0] == 0:
            return [], [], []
        reference_facial_points = np.array(
            [[30.29459953, 51.69630051], [65.53179932, 51.50139999],
             [48.02519989, 71.73660278], [33.54930115, 92.3655014],
             [62.72990036, 92.20410156]],
            dtype=np.float32)
        reference_facial_points[:, 0] += 8.
        transform = SimilarityTransform()
        faces = []
        for landmark in landmarks:
            tmp_landmark = np.array(landmark, dtype=np.float32).reshape(
                (2, 5)).T
            transform.estimate(tmp_landmark, reference_facial_points)
            M = transform.params[0:2, :]
            warped_face = cv2.warpAffine(image, M, (112, 112), borderValue=0.0)
            faces.append(warped_face)
        return bounding_boxes, landmarks, faces
Example #25
def forceAlignment(sources, si, ti):

    #Select bright sources
    s = sources[(sources.IMG == si) & (sources.MAG_BEST < -10)][[
        "X_IMAGE", "Y_IMAGE"
    ]]
    t = sources[(sources.IMG == ti) & (sources.MAG_BEST < -10)][[
        "X_IMAGE", "Y_IMAGE"
    ]]

    #Match sources
    sa, ta = matchSources(s, t)

    tr = SimilarityTransform()
    status = tr.estimate(sa, ta)
    return tr, (sa, ta)
Example #26
def test_degenerate():
    src = dst = np.zeros((10, 2))

    tform = SimilarityTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = AffineTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))

    tform = ProjectiveTransform()
    tform.estimate(src, dst)
    assert np.all(np.isnan(tform.params))
Example #27
def get_optical_flow(prevOutput, targetPoints, target_frame, prev_target_frame,
                     frame_no):
    p0 = np.asarray(targetPoints).astype(np.float32)[:, :, None]
    p0 = np.transpose(p0, (0, 2, 1))
    old_gray = cv2.cvtColor(prev_target_frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.cvtColor(target_frame, cv2.COLOR_BGR2GRAY)

    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    newOutput = np.copy(prevOutput)

    transform = SimilarityTransform()
    if transform.estimate(good_old, good_new):
        newOutput = transform_image(good_old, good_new, prevOutput,
                                    target_frame, frame_no)
    return newOutput, tuplify(good_new.tolist())
Example #28
def get_transform_matrix(facial_pts,
                         reference_pts=None,
                         crop_size=(112, 112),
                         align_type='similarity'):
    """
    Function:
    ----------
        get the affine transform matrix 'tfm' that maps facial_pts onto reference_pts (uv)
    Parameters:
    ----------
        @facial_pts: could be
            1) a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
                    by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform
    Returns:
    ----------
        @tfm: transform matrix [2x3] for affine transformation
    """

    if reference_pts is None:
        reference_pts = get_reference_points(output_size=crop_size)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException(
            'reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException(
            'facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException(
            'facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':  # 3 points
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    elif align_type == 'similarity':  # all points
        tform = SimilarityTransform()
        tform.estimate(src_pts, ref_pts)
        tfm = tform.params[0:2, :]
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    return tfm
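`get_reference_points` is not shown; a sketch, assuming it scales the canonical 96x112 five-point template (seen in several examples above) to the requested crop size:

import numpy as np

def get_reference_points(output_size=(112, 112)):
    # Canonical 5-point template defined on a 96x112 crop, scaled to
    # `output_size`; this scaling scheme is an assumption.
    ref = np.array([[30.2946, 51.6963], [65.5318, 51.5014],
                    [48.0252, 71.7366], [33.5493, 92.3655],
                    [62.7299, 92.2041]], dtype=np.float32)
    w, h = output_size
    return ref * np.array([w / 96.0, h / 112.0], dtype=np.float32)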
Example #29
def applyGeometricTransformation(startXs, startYs, newXs, newYs, bbox):
  #find the number of faces and number of features on each face
  [numFeatures, numFaces] = startXs.shape

  #instantiate the outputs
  Xs = []
  Ys = []
  newbbox = np.zeros((numFaces,4,2))

  #loop over the number of faces
  for face in range(0,numFaces):
    #find the distances between the points
    #distances = np.linalg.norm((startXs[:,face] - newXs[:,face]) + (startYs[:,face] - newYs[:,face]))
    distances = ((startXs[:,face] - newXs[:,face])**2 + (startYs[:,face] - newYs[:,face])**2)**.5

    #set the maxDistance beyond which a feature is an outlier
    maxDistance = 4

    #Remove all the outlier points with distance between original and correspondence greater than maxDistance
    newXofFace = newXs[:, face]
    newXsWithoutOutliers = newXofFace[distances < maxDistance]
    newYofFace = newYs[:, face]
    newYsWithoutOutliers = newYofFace[distances < maxDistance]
    startXofFace = startXs[:, face]
    startXsWithoutOutliers = startXofFace[distances < maxDistance]
    startYofFace = startYs[:, face]
    startYsWithoutOutliers = startYofFace[distances < maxDistance]

    #get the current bounding box
    currentBbox = bbox[face, :, :]

    #find the similarity transform
    transform = SimilarityTransform()
    src = np.column_stack((startXsWithoutOutliers,startYsWithoutOutliers))
    dest = np.column_stack((newXsWithoutOutliers,newYsWithoutOutliers))
    transformationWorked = transform.estimate(src,dest)

    currentNewBbox = []

    #if the transformation was successful
    if (transformationWorked):
        #get the transformation matrix
        homoMatrix = transform.params
        #do the transform
        if (homoMatrix.shape==(3,3)):
            currentNewBbox = matrix_transform(currentBbox, homoMatrix)
    else:
        #set the old bbox to the current one
        currentNewBbox = currentBbox

    #add to newbbox
    newbbox[face,:,:] = currentNewBbox

    #HERE we need to modify Xs and Ys based on the newbbox that we just found
    #need to do
    #Xs = Xs[Xs > xstart and Xs < xend]
    #Ys = Ys[Xs > xstart and Xs < xend]
    #Xs = Xs[Ys > ystart and Ys < yend]
    #Ys = Ys[Ys > ystart and Ys < yend]
    #all 4 of these need to be done in order to delete Xs and Ys that are out of bounds, but I wasn't
    #sure how to access the start and end boundaries from bbox nor how to index into Xs and Ys if there are multiple faces

    #add the new Xs and Ys to final Xs and Ys
    #only one face or the first face
    if (len(Xs) == 0):
        Xs = newXsWithoutOutliers
    else: #multiple faces
        Xs = np.append(Xs, newXsWithoutOutliers)

    # only one face or the first face
    if (len(Ys) == 0):
        Ys = newYsWithoutOutliers
    else: #multiple faces
        Ys = np.append(Ys, newYsWithoutOutliers)

  return np.asarray(Xs), np.asarray(Ys), newbbox
Example #30
  def per_sensor_analysis(self): # hardcoded Jungfrau 16M geometry
    for isensor in range(32):
      print ("Panel Sensor  <Δx>(μm)     <Δy>(μm)      Nrefl  RMS Δx(μm)  RMS Δy(μm) ")

      if len(self.cumCALC[isensor]) < 2: continue

      for ipanel in range(8*isensor, 8*(1+isensor)):
        if len(self.panel_deltax[ipanel])<2: continue
        Sx = flex.mean_and_variance(1000.*self.panel_deltax[ipanel])
        Sy = flex.mean_and_variance(1000.*self.panel_deltay[ipanel])
        RMSDx = 1000.*math.sqrt(flex.mean(self.panel_deltax[ipanel]*self.panel_deltax[ipanel]))
        RMSDy = 1000.*math.sqrt(flex.mean(self.panel_deltay[ipanel]*self.panel_deltay[ipanel]))
        print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(self.panel_deltax[ipanel])),
            "    %5.1f   %5.1f"%(RMSDx,RMSDy),
        )
      print("")
      cumD = (self.cumCALC[isensor]-self.cumOBS[isensor]).parts()
      print ( "All  %3d %7.2f        %7.2f        %6d"%(isensor,1000.*flex.mean(cumD[0]), 1000.*flex.mean(cumD[1]), len(cumD[0])))
      print("")

  # Now we'll do a linear least squares refinement over sensors:
  #Method 1. Simple rectilinear translation.
      if self.params.verbose:
        veclength = len(self.cumCALC[isensor])
        correction = flex.vec3_double( veclength, (flex.mean(cumD[0]), flex.mean(cumD[1]), flex.mean(cumD[2])) )

        new_delta = (self.cumCALC[isensor]-correction ) -self.cumOBS[isensor]
        for ipanel in range(8*isensor, 8*(1+isensor)):
          panel_delta = new_delta.select(self.cumPANNO[isensor]==ipanel)
          if len(panel_delta)<2: continue
          deltax_part, deltay_part = panel_delta.parts()[0:2]
          RMSDx = 1000.*math.sqrt( flex.mean(deltax_part * deltax_part) )
          RMSDy = 1000.*math.sqrt( flex.mean(deltay_part * deltay_part) )
          Sx = flex.mean_and_variance(1000.*deltax_part)
          Sy = flex.mean_and_variance(1000.*deltay_part)
          print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(deltax_part)),
          "    %5.1f   %5.1f"%(RMSDx,RMSDy),
          )
        print()
  # Method 2. Translation + rotation.
      src = []
      dst = []
      for icoord in range(len(self.cumCALC[isensor])):
        src.append(self.cumCALC[isensor][icoord][0:2])
        dst.append(self.cumOBS[isensor][icoord][0:2])
      src = np.array(src)
      dst = np.array(dst)

      # estimate similarity transform model using all coordinates
      model = SimilarityTransform()
      model.estimate(src, dst)

      # robustly estimate similarity transform model with RANSAC
      model_robust, inliers = ransac((src, dst), SimilarityTransform, min_samples=3,
                               residual_threshold=2, max_trials=10)
      outliers = flex.bool(inliers == False)

      # compare "true" and estimated transform parameters
      if self.params.verbose:
        print("Similarity transform:")
        print("%2d"%isensor, "Scale: %.5f,"%(model.scale),
        "Translation(μm): (%7.2f,"%(1000.*model.translation[0]),
        "%7.2f),"%(1000.*model.translation[1]),
        "Rotation (°): %7.4f"%((180./math.pi)*model.rotation))
      print("RANSAC:")
      print("%2d"%isensor, "Scale: %.5f,"%(model_robust.scale),
      "Translation(μm): (%7.2f,"%(1000.*model_robust.translation[0]),
      "%7.2f),"%(1000.*model_robust.translation[1]),
      "Rotation (°): %7.4f,"%((180./math.pi)*model_robust.rotation),
      "Outliers:",outliers.count(True)
      )
      """from documentation:
      X = a0 * x - b0 * y + a1 = s * x * cos(rotation) - s * y * sin(rotation) + a1
      Y = b0 * x + a0 * y + b1 = s * x * sin(rotation) + s * y * cos(rotation) + b1"""

      oldCALC = self.cumCALC[isensor].parts()

      ransacCALC = flex.vec3_double(
               (float(model_robust.scale) * oldCALC[0] * math.cos(model_robust.rotation) -
               float(model_robust.scale) * oldCALC[1] * math.sin(model_robust.rotation) +
               float(model_robust.translation[0])),
               (float(model_robust.scale) * oldCALC[0] * math.sin(model_robust.rotation) +
               float(model_robust.scale) * oldCALC[1] * math.cos(model_robust.rotation) +
               float(model_robust.translation[1])),
               oldCALC[2]
               )
      new_delta = ransacCALC - self.cumOBS[isensor]
      inlier_delta = new_delta.select(~outliers)
      inlier_panno = self.cumPANNO[isensor].select(~outliers)

      for ipanel in range(8*isensor, 8*(1+isensor)):
        panel_delta = inlier_delta.select(inlier_panno==ipanel)
        if len(panel_delta)<2: continue
        deltax_part, deltay_part = panel_delta.parts()[0:2]
        RMSDx = 1000.*math.sqrt( flex.mean(deltax_part * deltax_part) )
        RMSDy = 1000.*math.sqrt( flex.mean(deltay_part * deltay_part) )
        Sx = flex.mean_and_variance(1000.*deltax_part)
        Sy = flex.mean_and_variance(1000.*deltay_part)
        print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(deltax_part)),
        "    %5.1f   %5.1f"%(RMSDx,RMSDy),
        )

      if self.params.verbose:
        print("")
        cumD = (inlier_delta).parts()
        print ( "     %3d %7.2f        %7.2f        %6d\n"%(isensor,1000.*flex.mean(cumD[0]), 1000.*flex.mean(cumD[1]), len(cumD[0])))
      print("----\n")