Example #1
def cv2_estimateRigidTransform(from_pts, to_pts, full=False):
    """Estimate transforms in OpenCV 3 or OpenCV 4"""
    if not from_pts.shape[0] or not to_pts.shape[0]:
        return None

    if imutils.is_cv4():
        # in OpenCV 4, estimateAffinePartial2D covers the similarity case
        # and estimateAffine2D covers the fullAffine case
        if full:
            transform = cv2.estimateAffine2D(from_pts, to_pts)[0]
        else:
            transform = cv2.estimateAffinePartial2D(from_pts, to_pts)[0]
    else:
        transform = cv2.estimateRigidTransform(from_pts, to_pts, full)

    return transform
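A minimal sketch of exercising the wrapper above: build two point sets related by a known similarity transform and recover it (synthetic values, for illustration only; imutils and OpenCV are assumed to be installed):

import numpy as np
import cv2
import imutils

# points related by a known similarity: 30 degree rotation, 1.5x scale, (10, 20) shift
theta, s = np.deg2rad(30), 1.5
R = s * np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta),  np.cos(theta)]], dtype=np.float32)
from_pts = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], dtype=np.float32)
to_pts = from_pts @ R.T + np.float32([10, 20])

M = cv2_estimateRigidTransform(from_pts, to_pts)
print(M)  # 2x3 matrix close to [R | t]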
Example #2
def align(img, landmark):
    image_size = (112, 112)
    src = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                    [33.5493, 92.3655], [62.7299, 92.2041]],
                   dtype=np.float32)
    dst = landmark.astype(np.float32)
    M = cv2.estimateAffinePartial2D(dst, src)[0]
    warped = cv2.warpAffine(img,
                            M, (image_size[1], image_size[0]),
                            borderValue=0.0)
    return warped
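The hard-coded src points appear to be the standard 5-landmark template (left eye, right eye, nose tip, left and right mouth corners) commonly used for 112x112 face crops in ArcFace-style pipelines. A usage sketch, with a made-up landmark array and an assumed image path:

import numpy as np
import cv2

# hypothetical 5-point landmark set detected on the input face
landmark = np.array([[38.0, 52.0], [74.0, 50.0], [56.0, 72.0],
                     [41.0, 92.0], [69.0, 93.0]], dtype=np.float32)
img = cv2.imread("face.jpg")    # assumed path to a BGR face image
warped = align(img, landmark)   # 112x112 aligned crop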
Example #3
def rectify(image, endpoints):
    """ Rectifies an image such that the ruler(in endpoints) is flat
        image: array
               Represents an image or image mask
        endpoints: array
                   Represents 2 pair of endpoints for a ruler
    """
    dst = np.array([[image.shape[1] * .1, image.shape[0] / 2],
                    [image.shape[1] * .9, image.shape[0] / 2]])
    rt_matrix, _ = cv2.estimateAffinePartial2D(endpoints, dst)
    return cv2.warpAffine(image, rt_matrix, (image.shape[1], image.shape[0]))
Example #4
def get_initial_pos(thumbnail_fixed, thumbnail_float, thumbnail_down_rate):
    brisk = cv2.BRISK_create()
    (kps_fixed, descs_fixed) = brisk.detectAndCompute(np.array(thumbnail_fixed), None)
    (kps_float, descs_float) = brisk.detectAndCompute(np.array(thumbnail_float), None)
    if (descs_fixed is None) or (descs_float is None):
        reg_status = 0
        init_reg_offset = (0, 0)
        return init_reg_offset, reg_status
    if len(kps_fixed) < 3 or len(kps_float) < 3:
        reg_status = 0
        init_reg_offset = (0, 0)
        return init_reg_offset, reg_status
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    try:
        matches = bf.match(descs_fixed, descs_float)
        matches = sorted(matches, key=lambda x: x.distance)
        if len(matches) < 3:  # fewer than 3 matches
            reg_status = 0
            init_reg_offset = (0, 0)
            return init_reg_offset, reg_status
        # keep at most the 10 best matches
        selected_matches = matches[:10]
        selected_kps_fixed = []
        selected_kps_float = []
        for m in selected_matches:
            selected_kps_float.append(kps_float[m.trainIdx].pt)
            selected_kps_fixed.append(kps_fixed[m.queryIdx].pt)
        reprojThresh = 3
        confidence_ratio = 0.86
        (E, status) = cv2.estimateAffinePartial2D(np.float32(selected_kps_fixed), np.float32(selected_kps_float),
                                                  ransacReprojThreshold=reprojThresh, confidence=confidence_ratio)
        if 0 not in status:
            theta = -math.atan2(E[0, 1], E[0, 0]) * 180 / math.pi
            if abs(theta) > 1:
                reg_status = 0
                init_reg_offset = (0, 0)
            else:
                reg_status = 1
                init_reg_offset = (E[0, 2] * thumbnail_down_rate, E[1, 2] * thumbnail_down_rate)
        else:
            counts = np.count_nonzero(status == 0)
            if counts > 5:  # if over 50% of the matches are outliers
                reg_status = 0
                init_reg_offset = (0, 0)
            else:
                init_reg_offset = (E[0, 2] * thumbnail_down_rate, E[1, 2] * thumbnail_down_rate)
                reg_status = 1
        return init_reg_offset, reg_status
    except Exception:
        reg_status = 0
        init_reg_offset = (0, 0)
        return init_reg_offset, reg_status
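The rotation check above reads the angle straight off the 2x3 similarity matrix: estimateAffinePartial2D returns a matrix of the form [[s*cos(t), -s*sin(t), tx], [s*sin(t), s*cos(t), ty]], so scale, rotation, and translation can be recovered directly. A small sketch with a synthetic matrix (illustrative values only):

import math
import numpy as np

E = np.array([[0.98, -0.17, 12.0],
              [0.17,  0.98, -3.5]])     # as returned by cv2.estimateAffinePartial2D

scale = math.hypot(E[0, 0], E[1, 0])                 # norm of the first column
theta = math.degrees(math.atan2(E[1, 0], E[0, 0]))   # rotation angle in degrees
tx, ty = E[0, 2], E[1, 2]                            # translation
print(scale, theta, (tx, ty))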
Example #5
def AlignImages_Affine(im1_path, im2_path, factor, MAX_FEATURES=500, GOOD_MATCH_PERCENT=0.1):

    im1 = Image.open(im1_path)
    im2 = Image.open(im2_path)

    im1array = np.array(im1)
    im2array = np.array(im2)


    # Detect ORB features and compute descriptors.
    imip_1 = cv2.resize(im1array, dsize = None, fx = factor, fy = factor, interpolation=cv2.INTER_CUBIC)
    imip_2 = cv2.resize(im2array, dsize = None, fx = factor, fy = factor, interpolation=cv2.INTER_CUBIC)
    
    orb = cv2.ORB_create(nfeatures = MAX_FEATURES, patchSize = 100)
    keypoints1, descriptors1 = orb.detectAndCompute(imip_1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(imip_2, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)
    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    good = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(imip_1, keypoints1, imip_2, keypoints2, good, None)
    # cv2.imwrite("matches.jpg", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(good), 2), dtype=np.float32)
    points2 = np.zeros((len(good), 2), dtype=np.float32)

    for i, match in enumerate(good):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Estimate a partial (similarity) affine transform with RANSAC
    h, mask = cv2.estimateAffinePartial2D(points2, points1, method=cv2.RANSAC)

    # correct the translation part of the matrix for the scale factor
    new_h = h.copy()
    new_h[0, 2] = h[0, 2] / factor
    new_h[1, 2] = h[1, 2] / factor
    print(new_h)
    
    # Use homography
    height, width = im1array.shape[:2]
    # im2Reg = cv2.warpPerspective(im2array, new_h, (width, height))
    im2Reg = cv2.warpAffine(im2array, new_h, (width, height))

    return im1array, im2array, im2Reg, new_h, imMatches
Example #6
def findAffine(src, dst, fullAffine=False):
    #print("src:", src)
    #print("dst:", dst)
    if len(src) >= affine_minpts:
        # affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
        affine, status = \
            cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
                                        np.array([dst]).astype(np.float32))
    else:
        affine = None
    #print str(affine)
    return affine
Example #7
def track_frame(src, dst, pointsSrcInput): 
	pointsDstOutput = pointsSrcInput
	try:
		pointsDstOutput, st, err = cv2.calcOpticalFlowPyrLK(src, dst, pointsSrcInput, None, **lk_params)
		trs, inliers = cv2.estimateAffinePartial2D(pointsSrcInput, pointsDstOutput)
	except Exception:
		trs = transforms_to_matrix([0,0,0])
		err = np.array([[100],[100]])
	if cv2.mean(err)[0] > 5:
		print(cv2.mean(err)[0])
		trs = transforms_to_matrix([0,0,0])
	return(trs, pointsDstOutput, err)
Example #8
def partialaffine(from_data, to_data, target_data):
    """Compute the partial 2D affine transformation between the data sets via opencv"""
    # calculate an approximate affine
    affine_matrix, inliers = cv2.estimateAffinePartial2D(from_data, to_data, ransacReprojThreshold=1,
                                                         maxIters=20000, confidence=0.95,
                                                         refineIters=100, method=cv2.LMEDS)
    assert affine_matrix is not None, "Affine transform was not possible"
    # print('Percentage inliers used:' + str(np.sum(inliers)*100/from_data.shape[0]))
    # make the transformed data homogeneous for multiplication with the affine
    transformed_data = np.squeeze(cv2.convertPointsToHomogeneous(target_data))
    # apply the affine matrix
    return np.matmul(transformed_data, affine_matrix.T)
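The homogeneous multiplication above can also be written with cv2.transform, which applies a 2x3 matrix to a point array directly; a quick equivalence check with made-up points:

import numpy as np
import cv2

pts = np.float32([[10, 20], [30, 40], [50, 60]])
M = np.float32([[0.9, -0.1, 5.0],
                [0.1,  0.9, -2.0]])    # any 2x3 affine

# manual route: homogeneous coordinates times the transposed matrix
manual = np.matmul(np.hstack([pts, np.ones((3, 1), np.float32)]), M.T)
# cv2 route: transform expects points of shape (N, 1, 2)
via_cv2 = cv2.transform(pts.reshape(-1, 1, 2), M).reshape(-1, 2)
assert np.allclose(manual, via_cv2)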
Example #9
def track_frame(src, dst, pointsSrcInput, findFeatures):
	if findFeatures:
		pointsSrcInput = find_features(src)
	pointsDstOutput = pointsSrcInput
	mtrx = np.array([[1, 0, 0], [0, 1, 0]], np.float64)
	try:
		pointsDstOutput, st, err = cv2.calcOpticalFlowPyrLK(src, dst, pointsSrcInput, None, **lk_params)
		mtrx, inliers = cv2.estimateAffinePartial2D(pointsSrcInput, pointsDstOutput)
	except Exception:
		mtrx = np.array([[1, 0, 0], [0, 1, 0]], np.float64)
		err = np.array([[100],[100]])
	return(mtrx, pointsDstOutput, err)
Example #10
def detect_and_align_face(img):
    mtcnn = MTCNN(post_process=False, device="cpu")
    _, _, landmarks = mtcnn.detect(img, landmarks=True)
    assert landmarks is not None and len(landmarks), "no face detected"
    lmk = landmarks[0]
    # print(lmk)
    src = np.array([lmk[0], lmk[1]], dtype=np.float32)
    dst = np.array([(68, 112), (108, 112)], dtype=np.float32)
    M, _ = cv2.estimateAffinePartial2D(src, dst)
    return cv2.warpAffine(
        img, M, (178, 218), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE
    )
Example #11
def face_algin_by_landmark(face_img: np.ndarray,
                           face_landmark: np.ndarray,
                           template: np.ndarray = TEMPLATE) -> np.ndarray:
    img_dim = face_img.shape[:2][::-1]
    M, _ = cv2.estimateAffinePartial2D(face_landmark, img_dim * template)
    warped_img = cv2.warpAffine(face_img, M, img_dim)
    h_ratio = img_dim[0]
    w_ratio = int(h_ratio * FACE_DIMENSION[0] / FACE_DIMENSION[1])
    resized = cv2.resize(
        warped_img[:,
                   int((h_ratio - w_ratio) / 2):int((h_ratio + w_ratio) / 2)],
        tuple(FACE_DIMENSION))
    return resized
Example #12
    def estimate_rigid_transform(self, image1_pts, image2_pts, use_full):
        if int(self.OPENCV_MAJOR) < 4:
            affine = cv2.estimateRigidTransform(image1_pts, image2_pts, fullAffine=use_full)
        else:
            # new in OpenCV 4: the replacement functions also return a vector
            # indicating which points are inliers, so keep only the matrix
            if use_full:
                # noinspection PyUnresolvedReferences
                affine = cv2.estimateAffine2D(image1_pts, image2_pts)[0]
            else:
                # noinspection PyUnresolvedReferences
                affine = cv2.estimateAffinePartial2D(image1_pts, image2_pts)[0]

        return affine
Example #13
def align_crop(img,
               src_landmarks,
               mean_landmarks,
               crop_size=256,
               face_factor=0.7,
               landmark_factor=0.35):

    # move
    move = np.array([img.shape[1] // 2, img.shape[0] // 2])

    # pad border
    v_border = img.shape[0] - crop_size
    w_border = img.shape[1] - crop_size
    if v_border < 0:
        v_half = (-v_border + 1) // 2
        img = np.pad(img, ((v_half, v_half), (0, 0), (0, 0)), mode='edge')
        src_landmarks += np.array([0, v_half])
        move += np.array([0, v_half])
    if w_border < 0:
        w_half = (-w_border + 1) // 2
        img = np.pad(img, ((0, 0), (w_half, w_half), (0, 0)), mode='edge')
        src_landmarks += np.array([w_half, 0])
        move += np.array([w_half, 0])

    # estimate transform matrix
    # use the middle point of the eyes as the center
    mean_landmarks -= (mean_landmarks[0, :] + mean_landmarks[1, :]) / 2.0
    trg_landmarks = mean_landmarks * (crop_size * face_factor *
                                      landmark_factor) + move
    tform = cv2.estimateAffinePartial2D(trg_landmarks,
                                        src_landmarks,
                                        ransacReprojThreshold=np.inf)[0]

    # fix the translation to match the middle point of eyes
    trg_mid = (trg_landmarks[0, :] + trg_landmarks[1, :]) / 2.0
    src_mid = (src_landmarks[0, :] + src_landmarks[1, :]) / 2.0
    new_trg_mid = cv2.transform(np.array([[trg_mid]]), tform)[0, 0]
    tform[:, 2] += src_mid - new_trg_mid

    # warp image by given transform
    output_shape = (crop_size // 2 + move[1] + 1, crop_size // 2 + move[0] + 1)
    img_align = cv2.warpAffine(img,
                               tform,
                               output_shape[::-1],
                               flags=cv2.WARP_INVERSE_MAP + cv2.INTER_CUBIC,
                               borderMode=cv2.BORDER_REPLICATE)

    # crop
    img_crop = img_align[-crop_size:, -crop_size:]

    return img_crop
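align_crop estimates the target-to-source transform and warps with cv2.WARP_INVERSE_MAP, which tells warpAffine to treat the matrix as the destination-to-source mapping instead of inverting it first. A toy demonstration of the flag (synthetic image and translation):

import numpy as np
import cv2

img = np.zeros((100, 100), np.uint8)
img[40:60, 40:60] = 255
M = np.float32([[1, 0, 10], [0, 1, 5]])   # translation by (10, 5)

fwd = cv2.warpAffine(img, M, (100, 100))
inv = cv2.warpAffine(img, M, (100, 100), flags=cv2.WARP_INVERSE_MAP)
# fwd shifts the square by (+10, +5); inv shifts it by (-10, -5)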
Example #14
def match_feature(templ,
                  haystack,
                  *,
                  min_match=10,
                  templ_mask=None,
                  haystack_mask=None,
                  limited_transform=False) -> FeatureMatchingResult:
    templ = np.asarray(templ.convert('L'))
    haystack = np.asarray(haystack.convert('L'))

    detector = cv.SIFT_create()
    kp1, des1 = detector.detectAndCompute(templ, templ_mask)
    kp2, des2 = detector.detectAndCompute(haystack, haystack_mask)

    # index_params = dict(algorithm=6,
    #                     table_number=6,
    #                     key_size=12,
    #                     multi_probe_level=2)
    index_params = dict(algorithm=1, trees=5)  # algorithm=FLANN_INDEX_KDTREE
    search_params = {}
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    for group in matches:
        if len(group) >= 2 and group[0].distance < 0.75 * group[1].distance:
            good.append(group[0])

    result = FeatureMatchingResult(len(kp1), len(good))

    if len(good) >= min_match:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        if limited_transform:
            M, _ = cv.estimateAffinePartial2D(src_pts, dst_pts)
        else:
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 4.0)

        h, w = templ.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        if limited_transform:
            dst = cv.transform(pts, M)
        else:
            dst = cv.perspectiveTransform(pts, M)
        result.M = M
        result.template_corners = dst.reshape(-1, 2)
        # img2 = cv.polylines(haystack, [np.int32(dst)], True, 0, 2, cv.LINE_AA)
    return result
Example #15
    def compute_transformation(points: np.ndarray,
                               reference: np.ndarray) -> np.ndarray:
        """Obtain a tranformation for aligning key points to
        reference positions

        Arguments
        ---------
        points:
            A sequence of points to be mapped onto the reference points,
            given as (x,y) coordinates
        reference:
            A sequence with the same number of points serving as reference
            points to which `points` should be moved.

        """

        # Bring point and reference coordinates into a format suitable
        # for OpenCV: numpy arrays of shape (1, N, 2), with N being the
        # number of points and each point described by 2-dimensional
        # cartesian coordinates (x, y).
        dst = points.reshape(1, -1, 2)
        src = reference.reshape(1, -1, 2)

        if src.shape != dst.shape:
            raise ValueError("Incompatible shapes: "
                             f"points {src.shape} vs. reference {dst.shape}")

        # fullAffine=False
        transformation, _ = cv2.estimateAffinePartial2D(dst, src)

        # fullAffine=True
        # transformation, _ = cv2.estimateAffine2D(dst, src)

        # There are also functions to estimate a similarity transform
        # (not a general affine transformation):
        #  * opencv 3.2 and newer (including 4.x):
        #    cv2.estimateAffinePartial2D()
        #
        #  * before opencv 3.2:
        #    cv2.estimateRigidTransform()
        #    https://docs.opencv.org/3.4/dc/d6b/group__video__track.html#
        #      ga762cbe5efd52cf078950196f3c616d48
        #    transformation = \
        #        cv2.estimateRigidTransform(dst, src, fullAffine=False)
        #
        #  M, inliers = cv2.estimateAffinePartial2D(pts1, pts2)
        #
        # There is also cv2.getAffineTransform for the case of exactly
        # three points, which allows an exact solution to be
        # calculated.
        return transformation
Example #16
    def callback2(self, msg):
        timestamp = time()
        # skip the first frame: just store it for the next callback
        if len(self.prev_gray) == 0:
            curr_img = self.bridge.imgmsg_to_cv2(
                msg, desired_encoding='passthrough')
            curr_gray = cv.cvtColor(curr_img, cv.COLOR_BGR2GRAY)
            self.prev_gray = curr_gray
        else:
            # Detect features to track
            prev_pts = cv.goodFeaturesToTrack(self.prev_gray,
                                              maxCorners=1000,
                                              qualityLevel=0.1,
                                              minDistance=30)
            # Get the current img
            curr_img = self.bridge.imgmsg_to_cv2(
                msg, desired_encoding='passthrough')
            # Convert to gray scales
            curr_gray = cv.cvtColor(curr_img, cv.COLOR_BGR2GRAY)
            # Track feature points
            curr_pts, status, err = cv.calcOpticalFlowPyrLK(
                self.prev_gray, curr_gray, prev_pts, None)
            # Sanity check
            assert prev_pts.shape == curr_pts.shape
            # Filter only valid points
            idx = np.where(status == 1)[0]
            prev_pts = prev_pts[idx]
            curr_pts = curr_pts[idx]

            # Save raw image
            cv.imwrite(
                '../raw/' + self.session_name + '/' + str(self.j).zfill(10) +
                '.jpg', curr_img)
            for i in curr_pts:
                x, y = i.ravel()
                # OpenCV expects integer pixel coordinates for drawing
                cv.circle(curr_img, (int(x), int(y)), 3, 255, -1)
            self.j += 1
            # Save annotated image
            cv.imwrite(
                '../temp/' + self.session_name + '/' + str(self.j).zfill(10) +
                '.jpg', curr_img)

            m, _ = cv.estimateAffinePartial2D(prev_pts, curr_pts)
            dx = m[0][2]
            dy = m[1][2]
            # Rotation angle
            da = np.arctan2(m[1][0], m[0][0])
            # Store transformation
            self.transforms.append([timestamp, dx, dy, da])

            self.prev_gray = curr_gray
Example #17
def get_image_homography_SIFT(im1, im2, mask=None, filename=''):
    # Convert images to grayscale
    if len(im1.shape) == 3:
        im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect SIFT features and compute descriptors.
    feature_detector = cv2.SIFT_create(nfeatures=MAX_FEATURES,
                                       nOctaveLayers=3,
                                       contrastThreshold=0.04,
                                       edgeThreshold=5,
                                       sigma=1.6)
    keypoints1, descriptors1 = feature_detector.detectAndCompute(im1, mask)
    keypoints2, descriptors2 = feature_detector.detectAndCompute(im2, mask)
    # Match features with a brute-force matcher (k=2 for the ratio test).
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(descriptors1, descriptors2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    points1 = np.float32([keypoints1[m.queryIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    points2 = np.float32([keypoints2[m.trainIdx].pt
                          for m in good]).reshape(-1, 1, 2)

    # Estimate the transform
    if len(points1) == 0 or len(points2) == 0:
        print("ERROR: no points found!!!")
        return None, None
    #print(points1, points2)
    #h, mask = cv2.findHomography(points1, points2, cv2.RANSAC, 5.0)  # 5.0 is the RANSAC reprojection threshold
    h, mask = cv2.estimateAffinePartial2D(points1, points2)

    if not homography_is_translation(h) and filename != '':
        print("ERROR: the homography found is no translation!", h)
        # Draw top matches
        #imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
        #cv2.imwrite(os.path.dirname(os.path.realpath(__file__)) + os.sep + filename + "_matches.tif", imMatches)

    return h, mask
Example #18
def find_drift_poi(points1, points2):
    """ Find the best fit for the rotation and translation that map points1 to points2 using 
    the RANSAC algorithm (fitting robust to outliers).

    Args:
        points1 (numpy.ndarray, int): list of points (n x 2)
        points2 (numpy.ndarray, int): list of points (n x 2)
    Returns:
        array: x and y translation
    """
    # Find affine transformation matrix from points1 to points2 (i.e. Ax1 = x2)
    h, _ = cv2.estimateAffinePartial2D(points1, points2)
    # Extract x and y translation
    return np.array([h[0,2], h[1,2]])
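A quick sanity check for the helper above (synthetic points, made-up drift): shift one point set by a known offset and confirm the recovered translation matches.

import numpy as np

pts1 = np.float32([[10, 10], [50, 12], [30, 40], [80, 70]])
pts2 = pts1 + np.float32([3.0, -1.5])    # known drift of (3, -1.5)
print(find_drift_poi(pts1, pts2))        # approximately [ 3.  -1.5]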
Example #19
    def stabilize_frame(self, previousBwFrame, currentBwFrame):

        # Detect feature points in previous frame
        t0 = time.perf_counter()
        if not self.flagPts:
            self.prev_pts = cv2.goodFeaturesToTrack(
                previousBwFrame,
                maxCorners=self.maxGoodPoints,
                qualityLevel=0.01,
                minDistance=30,
                blockSize=3)
            self.flagPts = True
        t1 = time.perf_counter()

        t2 = time.perf_counter()
        # Calculate optical flow (i.e. track feature points) using the same pts, as they should not have moved after image stabilization.
        curr_pts, status, err = cv2.calcOpticalFlowPyrLK(
            previousBwFrame,
            currentBwFrame,
            self.prev_pts,
            None,
            maxLevel=self.maxLevel,
            winSize=(self.winSize, self.winSize))
        t3 = time.perf_counter()

        # Sanity check
        assert self.prev_pts.shape == curr_pts.shape
        #print(status)

        # Keep only pts for which the flow was found
        idx = np.where(status == 1)[0]
        self.prev_pts = self.prev_pts[idx]
        curr_pts = curr_pts[idx]
        if len(self.prev_pts) < self.maxGoodPoints / 2:
            self.flagPts = False  # We have lost too many points, need to redo

        #Find transformation matrix to go from curr_pts -> prev_pts
        t4 = time.perf_counter()
        m, _ = cv2.estimateAffinePartial2D(curr_pts, self.prev_pts)
        t5 = time.perf_counter()

        if m is not None:
            # stabilize the frame
            t6 = time.perf_counter()
            frameStabilized = cv2.warpAffine(
                currentBwFrame, m, (self.frameWidth, self.frameHeight))
            t7 = time.perf_counter()
            #frameStabilized = self.fixBorder(frameStabilized)
            self.timing.append([t1 - t0, t3 - t2, t5 - t4, t7 - t6])
            return m, frameStabilized
Example #20
def findAffine(src, dst, fullAffine=False):
    #print("src:", src)
    #print("dst:", dst)
    if len(src) >= affine_minpts:
        # affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
        affine, status = \
            cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
                                        np.array([dst]).astype(np.float32))
    else:
        affine = None
        status = None
    if status is not None:
        print("num pts:", len(src), "used:", np.count_nonzero(status), "affine:\n", affine)
    #print str(affine)
    return affine, status
Example #21
def get_alignment_img(img, kp2, des2):

    height, width = img.shape[:2]
    # search for corresponding points
    apt1, apt2 = get_matcher(img, kp2, des2)

    # estimate the affine matrix
    mtx = cv2.estimateAffinePartial2D(apt1, apt2)[0]

    # apply the affine transform
    if mtx is not None:
        return cv2.warpAffine(img, mtx, (width, height))
    else:
        return None
Example #22
    def align(self, image: np.ndarray, marks: np.ndarray):
        """Aligns a face image.

        Parameters
        ----------
        image : array_like
            The image face to be aligned.

        marks : array_like of shape = [5, 2]
            The landmark set of the input image face. Each entry in `marks`
            must contain the (x, y) locations of the face landmarks in the
            following order: (0) left eye center, (1) right eye center, (2)
            nose center, (3) mouth left corner and (4) mouth right corner.

        Returns
        -------
        aligned_image : array_like of shape = [out_size, out_size, 3]
            The resulting aligned face image. Alignment is made by computing
            the optimal limited affine transformation with 4 degrees of freedom
            between the set of input face landmarks and a set of reference
            points that represents a reference front-aligned face.
        nose_deviation : tuple of length = 2
            The nose deviation of the input face image. The nose deviation is a
            tuple whose first and second elements represent the horizontal
            and vertical deviation of the nose landmark, respectively. For a
            deviation lower than (0.4, 0.3), the face is approximately frontal
            in both planes.
        """

        if marks.dtype != np.float32:
            marks = np.float32(marks)

        tfm, _ = cv.estimateAffinePartial2D(marks,
                                            self._reference.copy(),
                                            method=cv.LMEDS)
        aligned_image = cv.warpAffine(image, tfm,
                                      (self.out_size, self.out_size))
        aligned_marks = (np.matmul(tfm[:, 0:2], marks.T) + tfm[:, 2].reshape(
            (-1, 1))).T

        face_center = np.mean(aligned_marks[[0, 1, 3, 4]], axis=0)
        eye_dist = abs(aligned_marks[0][0] - aligned_marks[1][0])
        nose_x, nose_y = aligned_marks[2]
        nose_deviation = (
            abs(face_center[0] - nose_x) / eye_dist,
            abs(face_center[1] - nose_y) / eye_dist,
        )

        return aligned_image, nose_deviation
Example #23
    def PerformFeatureMatching(grayBlur1, grayBlur2):
        # create orb object
        orb = cv.ORB_create()
        try:
            # find key points and descriptors
            kp1, des1 = orb.detectAndCompute(grayBlur1, None)
            kp2, des2 = orb.detectAndCompute(grayBlur2, None)
            # initialise brute force matcher and match descriptors
            matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

            rawMatches = matcher.match(des1, des2)
            # sort matches based on distance
            dmatches = sorted(rawMatches, key=lambda x: x.distance)
            # draw matches
            matches = cv.drawMatches(grayBlur1,
                                     kp1,
                                     grayBlur2,
                                     kp2,
                                     dmatches[:10],
                                     None,
                                     flags=2)
            rsmatches = cv.resize(matches, (1280, 720))

            cv.imshow('matches', rsmatches)
            # find key point matrixes
            p1 = np.float32([kp1[m.queryIdx].pt
                             for m in dmatches]).reshape(-1, 1, 2)
            p2 = np.float32([kp2[m.trainIdx].pt
                             for m in dmatches]).reshape(-1, 1, 2)
            # if key point matrixes are not null fetch and estimate affine transformation matrix
            if (p1.size != 0 and p2.size != 0):
                H, _ = cv.estimateAffinePartial2D(p2,
                                                  p1,
                                                  cv.RANSAC,
                                                  ransacReprojThreshold=5.0)
                # slice to find translation matrix only
                H2 = H[:, 2]
                M = np.float32([[1, 0, H2[0]], [0, 1, H2[1]]])
                # return translation matrix
                return M
            else:
                print(
                    "Please check that the input video exists within the project."
                )
                return None
        except Exception:
            print(
                "Please check that the input video exists within the project.")
            return None
Example #24
    def _getAffine(self, old_gray, frame_gray, p0):
        # === calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **self.lk_params)

        # Select good points
        good_old = p0[st == 1].reshape((-1, 1, 2))
        good_new = p1[st == 1].reshape((-1, 1, 2))
        nCor = len(p1)
        self.corDetected.append(nCor)
        self.qLvlList.append(self.qLvl)
        self.pid_qLvl()  #pid
        retval, inliers = cv2.estimateAffinePartial2D(good_new, good_old)

        return retval
Example #25
    def add(self, image, mask, name, frame_idx):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        key_points, descriptors = self.orb.detectAndCompute(gray, mask)
        if self.key_points is not None:
            matches = self.matcher.match(self.desc, descriptors, None)

            matches.sort(key=lambda x: x.distance, reverse=False)
            count = int(len(matches) * GOOD_MATCH_PERCENT)
            matches = matches[:count]

            # matches_image = cv2.drawMatches(self.image, self.key_points, image, key_points, matches, None)
            # cv2.imshow("matches", matches_image)

            points1 = np.zeros((len(matches), 2), dtype=np.float32)
            points2 = np.zeros((len(matches), 2), dtype=np.float32)
            for i, match in enumerate(matches):
                points1[i, :] = self.key_points[match.queryIdx].pt
                points2[i, :] = key_points[match.trainIdx].pt
            # mat = cv2.estimateRigidTransform(points2, points1, False)
            h, m = cv2.estimateAffinePartial2D(points2, points1)
            # h, m = cv2.findHomography(points2, points1, cv2.RANSAC)
            self.mat = h if self.mat is None else self.mul_2d(h, self.mat)
            with open(self.file_name, 'a', newline='') as file:
                writer = csv.writer(file)
                writer.writerow([name, frame_idx, ' '.join(map(str, h.flatten().tolist()))])
            if self.with_gui:
                height, width, channels = image.shape
                image = np.copy(image)
                # image[~mask.astype(bool)] = 0
                mask[0] = 0
                mask[-1] = 0
                mask[:, 0] = 0
                mask[:, -1] = 0
                mask = cv2.warpAffine(mask, self.mat, (width, height * 2)).astype(bool)
                res = cv2.warpAffine(image, self.mat, (width, height * 2))  # ,flags=cv2.WARP_INVERSE_MAP)
                # mask = cv2.warpPerspective(mask, self.mat, (width, height * 2)).astype(bool)
                # res = cv2.warpPerspective(image, self.mat, (width, height * 2))  # ,flags=cv2.WARP_INVERSE_MAP)
                self.image[mask] = res[mask]
                # cv2.addWeighted(self.image, 0.5, res, 0.5, 0.0)
                cv2.imshow('res', self.image)
        else:
            height, width, channels = image.shape
            self.image = np.zeros((height * 2, width, channels), dtype=image.dtype)
            self.image[:height] = image
            self.image[:height][~mask.astype(bool)] = 0

        self.key_points = key_points
        self.desc = descriptors
Example #26
    def matchMaps(feat1,
                  feat2,
                  cent1,
                  cent2,
                  dir1,
                  dir2,
                  shape1,
                  lowes_ratio=0.85,
                  threshold=200):

        (h, w) = shape1

        src_pts_list, dst_pts_list = [], []
        scale_ratio_vector, rotation_vector = [], []

        # Match keypoints
        for i in range(4):
            for j in range(4):
                src_pts, dst_pts, dir1_, dir2_ = matchKeypoints(
                    feat1[i], feat2[j], cent1, cent2, dir1[i].copy(),
                    dir2[j].copy(), lowes_ratio)

                src_pts_list.append(src_pts)
                dst_pts_list.append(dst_pts)
                scale_ratio_vector.append(
                    (np.ones(len(src_pts)) * i - j).astype('int8'))
                rotation_vector.append(dir1_ - dir2_)

        # Merge both and compute transformation
        src_pts = np.concatenate(src_pts_list)
        dst_pts = np.concatenate(dst_pts_list)

        M, mask = cv2.estimateAffinePartial2D(src_pts,
                                              dst_pts,
                                              method=cv2.RANSAC,
                                              ransacReprojThreshold=threshold)

        mask = np.ravel(mask)

        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        # points are row vectors, so apply the transpose of the linear part
        dst = (pts @ M[:, :2].T) + M[:, 2:3].transpose()

        coherence_score, scale_coherence, rot_coherence, regularizer, rotation_vector, angle = computeMatchScore(
            rotation_vector, scale_ratio_vector, mask, dst, h, w,
            feat1.shape[-1])

        return coherence_score, scale_coherence, rot_coherence, regularizer, dst, M, src_pts, dst_pts, rotation_vector, angle
Example #27
def alignImages(im1, im2):
    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                                None)
    cv2.imwrite("matches.jpg", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Estimate the transform
    # h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # M = cv2.getAffineTransform(points1, points2)    # needs exactly 3 point pairs
    M = cv2.estimateAffinePartial2D(points1, points2)

    # Use homography
    height, width, channels = im2.shape
    # im1Reg = cv2.warpPerspective(im1, h, (width, height))
    im1Reg = cv2.warpAffine(im1, M[0], (width, height))

    # return im1Reg, h
    return im1Reg, M
Example #28
def similarity_transform(inPoints, outPoints):
    """ similarity_transform takes in a set of input points
    and a set of output points and finds an affine transformation
    between the two with cv2.estimateAffinePartial2D. To keep the
    estimate well conditioned, similarity_transform takes two sets
    of two coordinates and finds the corresponding third coordinate
    for each set by constructing the point that forms an equilateral
    triangle with the original two points.

    For more information look at:
        https://docs.opencv.org/3.4/d4/d61/tutorial_warp_affine.html

    Source:
        https://github.com/spmallick/learnopencv/tree/master/FaceAverage

    **Parameters**

    inPoints: list
        A list of tuples, in this case corresponding to specific
        image coordinate pairs
    outPoints: list
        A list of tuples, in this case corresponding to specific
        image coordinate pairs

    **Returns**

    tform[0]: array
        A 2 x 3 matrix corresponding to the optimal affine
        transformation between the two sets of points.
    """
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)
    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()
    xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] -
                                                     inPts[1][1]) + inPts[1][0]
    yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] -
                                                     inPts[1][1]) + inPts[1][1]
    inPts.append([int(xin), int(yin)])
    xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (
        outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (
        outPts[0][1] - outPts[1][1]) + outPts[1][1]
    outPts.append([int(xout), int(yout)])
    tform = cv2.estimateAffinePartial2D(np.array([inPts]), np.array([outPts]))
    return tform[0]
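The synthesized third point is the first point rotated by 60 degrees about the second, so all three side lengths come out equal; a small check with made-up coordinates:

import math
import numpy as np

p0, p1 = np.array([0.0, 0.0]), np.array([2.0, 0.0])
s60, c60 = math.sin(math.pi / 3), math.cos(math.pi / 3)
d = p0 - p1
p2 = np.array([c60 * d[0] - s60 * d[1],
               s60 * d[0] + c60 * d[1]]) + p1   # rotate (p0 - p1) by 60 deg about p1
assert math.isclose(np.linalg.norm(p2 - p0), np.linalg.norm(p1 - p0))
assert math.isclose(np.linalg.norm(p2 - p1), np.linalg.norm(p1 - p0))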
Example #29
def getHomography(matches, kp1, kp2):
    # Allocate empty arrays of shape (len(matches), 2)
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        # queryIdx: index of the descriptor in the list of query descriptors
        points1[i, :] = kp1[match.queryIdx].pt
        # trainIdx: index of the descriptor in the list of train descriptors
        points2[i, :] = kp2[match.trainIdx].pt

    #h, mask = cv2.findHomography(points2, points1, cv2.RANSAC)
    h, mask = cv2.estimateAffinePartial2D(points2, points1)
    return h
Example #30
def get_image_homography_ORB(im1, im2, mask=None, filename=''):
    # Convert images to grayscale
    if len(im1.shape) == 3:
        im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect features and compute descriptors.
    #feature_detector = cv2.SURF_create()
    feature_detector = cv2.ORB_create(nfeatures=MAX_FEATURES)
    keypoints1, descriptors1 = feature_detector.detectAndCompute(im1, mask)
    keypoints2, descriptors2 = feature_detector.detectAndCompute(im2, mask)
    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score

    matches = sorted(matches, key=lambda x: x.distance, reverse=False)
    #matches.sort(key=lambda x: x.distance, reverse=False)
    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Estimate the transform
    if len(points1) == 0 or len(points2) == 0:
        print("ERROR: no points found!!!")
        return None, None
    #h, mask = cv2.findHomography(points1, points2, cv2.RANSAC, 5.0)  # 5.0 is the RANSAC reprojection threshold
    h, mask = cv2.estimateAffinePartial2D(points1, points2)

    if not homography_is_translation(h) and filename != '':
        print("ERROR: the homography found is no translation!", h)
        # Draw top matches
        #imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
        #cv2.imwrite(os.path.dirname(os.path.realpath(__file__)) + os.sep + filename + "_matches.tif", imMatches)

    return h, mask
Example #31
def similarityTransform(inPoints, outPoints):
    s60 = math.sin(60*math.pi/180)
    c60 = math.cos(60*math.pi/180)  
  
    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()
    
    xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0]
    yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1]
    
    inPts.append([int(xin), int(yin)])

    xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1]
    
    outPts.append([int(xout), int(yout)])
    
    tform = cv2.estimateAffinePartial2D(np.array([inPts]), np.array([outPts]))
    
    return tform[0]