Example #1
def generate_affine_transform(video='12'):
    if video == '12':
        gps, pixels = cam.video_12()
    elif video == '13':
        gps, pixels = cam.video_13()
    else:
        gps, pixels = cam.vid_both()

    gps_to_vid = cv2.estimateAffine2D(gps, pixels)
    vid_to_gps = cv2.estimateAffine2D(pixels, gps)

    return gps_to_vid[0], vid_to_gps[0]
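A minimal sketch of how the two returned 2x3 matrices might be applied (cv2.transform expects float points shaped (N, 1, 2); the coordinate values below are hypothetical):

gps_to_vid, vid_to_gps = generate_affine_transform('12')
gps_pts = np.float32([[[55.75, 37.62]]])         # (1, 1, 2) GPS point, hypothetical values
pixel_pts = cv2.transform(gps_pts, gps_to_vid)   # GPS -> image pixels
gps_back = cv2.transform(pixel_pts, vid_to_gps)  # image pixels -> GPS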
Example #2
    def compute_homography(self, physical_object_image, cropped_image):
        # If the image of the physical object is too small,
        # scale it up to improve feature detection
        physical_object_image, cropped_image = self.checkAndScaleImages(
            physical_object_image, cropped_image)
        # I tried SIFT, SURF, and ORB too. AKAZE gives the best and fastest result.
        physicalObjectKeyPoints, physicalObjectDescriptors, croppedImageKeyPoints, croppedImageDescriptors \
            = self.extract_feature_points(physical_object_image, cropped_image, algorithm='AKAZE')
        # I also tried BRUTE_FORCE_L1, BRUTE_FORCE_HAMMING with ratio test, and FLANN.
        matches = self.find_matches(physicalObjectDescriptors,
                                    croppedImageDescriptors,
                                    algorithm='BRUTE_FORCE_HAMMING',
                                    ratio_test=False)
        if debug:
            # Draw top matches
            im_matches = cv2.drawMatches(physical_object_image,
                                         physicalObjectKeyPoints,
                                         cropped_image, croppedImageKeyPoints,
                                         matches, None)
            cv2.imwrite(str(os.path.join(temp_folder_path, "matches.jpg")),
                        im_matches)
        points1, points2 = self.find_points_from_matches(
            physicalObjectKeyPoints, croppedImageKeyPoints, matches)
        try:
            h1, mask = cv2.estimateAffine2D(
                points1,
                points2,
                method=cv2.RANSAC,
                ransacReprojThreshold=self.ransac_reprojection_threshold)
        except:
            h1 = DEFAULT_HOMOGRAPHY

        homography_result = PoseEstimationOutput(None, h1, None)

        if self.recompute_homography_using_only_inliers:
            # get the inlier matches (list of tuples of points)
            # run the estimation again using only the inliers, this time with LMEDS or RHO instead of RANSAC
            boolean_inliers_mask = (mask > 0)
            inlier_points1 = points1[boolean_inliers_mask.repeat(
                2, axis=1)].reshape((-1, 2))
            inlier_points2 = points2[boolean_inliers_mask.repeat(
                2, axis=1)].reshape((-1, 2))
            # h2, mask = cv2.findHomography(inlier_points1, inlier_points2, cv2.LMEDS)
            h2, mask = cv2.estimateAffine2D(inlier_points1, inlier_points2,
                                            method=cv2.LMEDS)
            # prosac_reprojection_error = 2.5
            # h2, mask = cv2.findHomography(inlier_points1, inlier_points2, cv2.RHO, prosac_reprojection_error)
            if h2 is not None:
                homography_result.homography = h2

        logger.debug("Homography: %s", homography_result.homography)
        return homography_result
Example #3
    def transferExpression(self, lmarkSeq, meanShape):
        exptransSeq = copy.deepcopy(lmarkSeq)
        firstFlmark = exptransSeq[0, :, :]
        indexes = np.array([60, 64, 62, 67])

        tformMS, _ = cv2.estimateAffine2D(firstFlmark[:, :],
                                          np.float32(meanShape[:, :]))

        # recover per-axis scale from the affine matrix; the sign preserves reflections
        sx = np.sign(tformMS[0, 0]) * np.sqrt(tformMS[0, 0]**2 + tformMS[0, 1]**2)
        sy = np.sign(tformMS[1, 0]) * np.sqrt(tformMS[1, 0]**2 + tformMS[1, 1]**2)
        print(sx, sy)
        prevLmark = copy.deepcopy(firstFlmark)
        prevExpTransFlmark = copy.deepcopy(meanShape)

        zeroVecD = np.zeros((1, 68, 2))
        diff = np.cumsum(np.insert(np.diff(exptransSeq, n=1, axis=0),
                                   0,
                                   zeroVecD,
                                   axis=0),
                         axis=0)
        msSeq = np.tile(np.reshape(meanShape, (1, 68, 2)),
                        [lmarkSeq.shape[0], 1, 1])

        diff[:, :, 0] = abs(sx) * diff[:, :, 0]
        diff[:, :, 1] = abs(sy) * diff[:, :, 1]

        exptransSeq = diff + msSeq

        return exptransSeq
Example #4
def sample_rotated_patch(img, pos, angle, patch_size):
    rotate = np.asarray([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)],
    ])
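    # corners of the output patch centered at the origin (rotated below to build the source quad)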
    _dst = np.asarray([[-patch_size // 2, -patch_size // 2],
                       [-patch_size // 2, patch_size // 2],
                       [patch_size // 2, patch_size // 2],
                       [patch_size // 2, -patch_size // 2]],
                      dtype=np.float32)

    src = np.asarray([[pos[0], pos[1]], [pos[0], pos[1] + patch_size],
                      [pos[0] + patch_size, pos[1] + patch_size],
                      [pos[0] + patch_size, pos[1]]])

    tran = src[0] - _dst[0]

    src = np.asarray([rotate.dot(_dst[i]) + tran for i in range(src.shape[0])],
                     dtype=np.float32)

    dst = np.asarray([[0, 0], [0, patch_size - 1],
                      [patch_size - 1, patch_size - 1], [patch_size - 1, 0]],
                     dtype=np.float32)

    theta, _ = cv2.estimateAffine2D(src, dst)

    patch = cv2.warpAffine(img, theta, (patch_size, patch_size))

    return patch
Example #5
def findAffine(image_1_kp, image_2_kp, matches):
    image_1_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    image_2_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    # WRITE YOUR CODE HERE.

    # initiate two empty lists
    image_1_pts, image_2_pts = [], []

    for match in matches:
        # Get the matching keypoints for each of the images
        pt1 = image_1_kp[match.queryIdx].pt
        pt2 = image_2_kp[match.trainIdx].pt

        image_1_pts.append(pt1)
        image_2_pts.append(pt2)

    image_1_points = np.float64(image_1_pts).reshape(-1, 1, 2)
    image_2_points = np.float64(image_2_pts).reshape(-1, 1, 2)

    # Compute Affine
    M, _ = cv2.estimateAffine2D(image_1_points, image_2_points, method=cv2.RANSAC)

    M_aff = np.eye(3)

    M_aff[:2,:] = M

    return M_aff
Example #6
def similarityTransform(inPoints, outPoints):
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)

    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()

    # The third point is calculated so that the three points make an equilateral triangle
    xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] -
                                                     inPts[1][1]) + inPts[1][0]
    yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] -
                                                     inPts[1][1]) + inPts[1][1]

    inPts.append([int(xin), int(yin)])

    xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (
        outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (
        outPts[0][1] - outPts[1][1]) + outPts[1][1]

    outPts.append([int(xout), int(yout)])

    # Now we can use estimateRigidTransform for calculating the similarity transform.
    #tform = cv2.estimateRigidTransform(np.array([inPts]), np.array([outPts]), False)  # this function exists in OpenCV 3.4 but was removed in version 4 onward
    #return tform

    # This is for cv2 version 4 onward
    # (cv2.estimateAffinePartial2D would restrict the result to a true similarity transform)
    tform = cv2.estimateAffine2D(np.array([inPts]), np.array([outPts]))
    return tform[0]
Example #7
def find_affine_w2s_random(dataset_name):
    with open("./results/%s_with_spline_clusters.vd" % (dataset_name),
              'rb') as f:
        vd = pkl.load(f)
    plane = vd.plane
    rt_c2w = find_rt_c2w(vd)

    landmark_sate_path = "../visual_opengl/%s/bird_view.txt" % (dataset_name)
    with open(landmark_sate_path, 'r') as f:
        lines = f.readlines()
    landmark_sate = []
    for line in lines:
        line_split = line.split(",")
        landmark_2d = [float(line_split[0]), float(line_split[1])]
        landmark_sate.append(landmark_2d)
    landmark_sate = np.array(landmark_sate)

    # landmark in cam coordinate
    landmark_perspective_path = "../visual_opengl/%s/cam_view.pklcor" % (
        dataset_name)
    with open(landmark_perspective_path, 'rb') as f:
        landmark_perspective = pkl.load(f)
    offset = np.random.randint(-40, 40, tuple(
        landmark_perspective.shape))  # best for IMG_0121
    landmark_perspective += offset
    landmark_cam = find_landmark_cam(plane, landmark_perspective,
                                     focal_dict[dataset_name])
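    # lift camera-frame landmarks to homogeneous coordinates, move them into the world frame, then back to cartesian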
    landmark_world = cart((rt_c2w @ (homo(landmark_cam).T)).T)

    affine_w2s, _ = cv2.estimateAffine2D(landmark_world[:, 0:2], landmark_sate)
    print(affine_w2s)
    return affine_w2s
Example #8
def compute_timewise_homographies(frames, features, outputMatches=False):
    print("finding matches between frames...")
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
    timewise_homographies = []
    for i in range(len(frames) - 1):
        matches = bf.match(features[i][1], features[i + 1][1])
        if outputMatches:
            img3 = cv.drawMatches(frames[i],
                                  features[i][0],
                                  frames[i + 1],
                                  features[i + 1][0],
                                  matches,
                                  None,
                                  flags=2)
            cv.imwrite(
                'data/matches/matches_' + str(i) + "-" + str(i + 1) + ".jpg",
                img3)
        src_pts = np.float32([features[i][0][m.queryIdx].pt
                              for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([
            features[i + 1][0][m.trainIdx].pt for m in matches
        ]).reshape(-1, 1, 2)
        M, _ = cv.estimateAffine2D(src_pts, dst_pts, method=cv.RANSAC)
        if M is None:
            return timewise_homographies, i
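        # promote the 2x3 affine to a 3x3 homography by appending the row [0, 0, 1]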
        H = np.append(M, np.array([0, 0, 1]).reshape((1, 3)), axis=0)
        timewise_homographies.append(H)
    return timewise_homographies, len(frames) - 1
Example #9
def solveQ3Part1():
    src = cv.imread("Q3/Dylan.jpg")
    target = cv.imread("Q3/frames.jpg")
    srcTri = np.array([[0, 0], [640, 0], [640, 480]]).astype(np.float32)

    ######################Affine transform#####################################
    dstTriAffin = np.array([[551, 220], [844, 66], [901, 299]]).astype(np.float32)
    warp_mat = cv.estimateAffine2D(srcTri, dstTriAffin)[0]
    warp_dst_affin = cv.warpAffine(src, warp_mat,
                                   (target.shape[1], target.shape[0]))
    ###########################################################################

    ######################Proj transform#####################################
    srcTri = np.array([[0, 0], [640, 0], [640, 480], [0, 480]]).astype(np.float32)
    dstTriProj = np.array([[195, 55], [495, 159], [431, 498],
                           [37, 182]]).astype(np.float32)
    warp_mat_proj = cv.getPerspectiveTransform(srcTri, dstTriProj)
    warp_dst_affin_proj = cv.warpPerspective(
        src, warp_mat_proj, (target.shape[1], target.shape[0]))

    cv.imwrite('warpedImageOverFrames.jpg',
               target + warp_dst_affin + warp_dst_affin_proj)
    cv.imwrite('warpedImageOverBlackBackground.jpg',
               warp_dst_affin + warp_dst_affin_proj)

    plt.imshow(warp_dst_affin + warp_dst_affin_proj)
    plt.show()
    plt.imshow(target + warp_dst_affin + warp_dst_affin_proj)
    plt.show()
    print(
        "Results saved in warpedImageOverBlackBackground.jpg and warpedImageOverFrames.jpg\n"
    )
Example #10
 def to_refine(self, image, pts, scale=3.0):
     """
     refine (rectify) the image using the input points.
     :param image: input image
     :param pts: points
     """
     x1, y1, x2, y2, x3, y3, x4, y4 = pts.ravel()
     cx, cy = int(128 // 2), int(48 // 2)
     cw = 64
     ch = 24
     tx1 = cx - cw // 2
     ty1 = cy - ch // 2
     tx2 = cx + cw // 2
     ty2 = cy - ch // 2
     tx3 = cx + cw // 2
     ty3 = cy + ch // 2
     tx4 = cx - cw // 2
     ty4 = cy + ch // 2
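     # destination rectangle (cw x ch) centered on (cx, cy), scaled up to the output size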
     target_pts = np.array([[tx1, ty1], [tx2, ty2], [tx3, ty3], [tx4, ty4]
                            ]).astype(np.float32) * scale
     org_pts = np.array([[x1, y1], [x2, y2], [x3, y3],
                         [x4, y4]]).astype(np.float32)
     # mat_ = cv2.estimateRigidTransform(org_pts, target_pts, True)
     mat_ = cv2.estimateAffine2D(org_pts, target_pts)[0]
     dsize = (int(120 * scale), int(48 * scale))
     warped = cv2.warpAffine(image, mat_, dsize)
     return warped
Example #11
def getTransform(src, dst, method='affine'):
    pts1, pts2 = feature_matching(src, dst)

    # x, y = zip(*pts1)
    # plt.subplot(121)
    # plt.scatter(x, y, 1, c='r', marker='x', lw=4)
    # plt.imshow(img1, cmap='gray')
    # x, y = zip(*pts2)
    # plt.subplot(122)
    # plt.scatter(x,y, 1, c='r', marker='x', lw=4)
    # plt.imshow(img2, cmap='gray')
    # plt.show()

    src_pts = np.float32(pts1).reshape(-1, 1, 2)
    dst_pts = np.float32(pts2).reshape(-1, 1, 2)

    if method == 'affine':
        M, mask = cv2.estimateAffine2D(src_pts,
                                       dst_pts,
                                       method=cv2.RANSAC,
                                       ransacReprojThreshold=5.0)
        #M = np.append(M, [[0,0,1]], axis=0)

    if method == 'homography':
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    matchesMask = mask.ravel().tolist()

    return (M, pts1, pts2, mask)
Example #12
def CorrectImage(img, result_dict, boundboxes, width, height):

    # get the corresponding reference-point pairs and compute the optimal affine transformation matrix
    points_from = []
    points_to = []
    for k, v in boundboxes.items():
        if k[:3] != 'key':
            continue
        a = process.extractOne(k[3:],
                               result_dict.keys(),
                               scorer=fuzz.ratio,
                               score_cutoff=90)
        if a:
            points_from.append([result_dict[a[0]][0], result_dict[a[0]][1]])
            points_to.append([v[0], v[1]])
            # print(points_from)
            # print(points_to)
    affine_transform_matrix = cv2.estimateAffine2D(np.array(points_from),
                                                   np.array(points_to))
    # print(affine_transform_matrix[0])
    dstImg = cv2.warpAffine(img,
                            affine_transform_matrix[0], (width, height),
                            flags=cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_REPLICATE)
    return dstImg
Example #13
    def __get_homography(self,
                         matches,
                         dst_kps,
                         src_kps,
                         reproj_thresh,
                         is_affine=False):
        """Find homography from one photo to another

        :param matches: matches between both photos
        :param dst_kps: Destination frame of reference key-points
        :param src_kps: Source frame of reference key-points
        :param reproj_thresh: RANSAC threshold
        :param is_affine: Boolean to find an affine transformation instead of a homography
        :return: (Homography/Affine matrix, status)
        """

        kps_new = np.float32([kp.pt for kp in src_kps])
        kps_panorama = np.float32([kp.pt for kp in dst_kps])

        if len(matches) > 4:
            # construct the two sets of points
            pts_new = np.float32([kps_new[m.queryIdx] for m in matches])
            pts_panorama = np.float32(
                [kps_panorama[m.trainIdx] for m in matches])

            # estimate the homography between the sets of points
            if is_affine:
                return cv2.estimateAffine2D(pts_new, pts_panorama,
                                            method=cv2.RANSAC,
                                            ransacReprojThreshold=reproj_thresh)
            else:
                return cv2.findHomography(pts_new, pts_panorama, cv2.RANSAC,
                                          reproj_thresh)
        else:
            return None
Example #14
def translateImage(im1,im2):
    try:
        im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
        
        orb = cv2.ORB_create(MAX_FEATURES)
        keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
        keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
        
        matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
        matches = matcher.match(descriptors1, descriptors2, None)
        
        matches.sort(key=lambda x: x.distance, reverse=False)
        
        numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
        matches = matches[:numGoodMatches]
        
        points1 = np.zeros((len(matches), 2), dtype=np.float32)
        points2 = np.zeros((len(matches), 2), dtype=np.float32)
        for i, match in enumerate(matches):
            points1[i, :] = keypoints1[match.queryIdx].pt
            points2[i, :] = keypoints2[match.trainIdx].pt
      
        h,_ = cv2.estimateAffine2D(points1,points2)
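        # force the linear part to identity so only the estimated translation is kept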
        h[0][0]=1
        h[0][1]=0
        h[1][0]=0
        h[1][1]=1
        
        height, width, _ = im2.shape
        im1Reg = cv2.warpAffine(im1,h,(width,height),borderValue=(255,255,255))
        return im1Reg, h
    except:
        h="error"
        return im1 , None
Example #15
def get_rating_of_affine_transform(kp1, kp2):
    a = np.asarray([p.pt for p in kp1])
    b = np.asarray([p.pt for p in kp2])
    retval, inliers = cv2.estimateAffine2D(a, b, None)
    assert len(inliers) == len(kp1)
    inlier_ratio = np.count_nonzero(inliers) / len(inliers)
    return inlier_ratio
Example #16
    def compute_transform_matrix(self, im_prev, im_next):
        '''
        Computes the transformation between two images

        Arguments:

        - im_prev: np array of shape self.img_shape and no color channels
        - im_next: np array of shape self.img_shape and no color channels

        Returns:

        - The affine transformation matrix, np array of shape (2,3)
        '''
        prev_pts = cv2.goodFeaturesToTrack(im_prev,
                                           maxCorners=200,
                                           qualityLevel=0.01,
                                           minDistance=30,
                                           blockSize=3,
                                           mask=self.features_mask)
        next_pts, status, err = cv2.calcOpticalFlowPyrLK(
            im_prev, im_next, prev_pts, None)
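        # keep only the points that the optical flow tracked successfully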
        idx = np.where(status == 1)[0]
        prev_pts = prev_pts[idx]
        next_pts = next_pts[idx]
        m = cv2.estimateAffine2D(prev_pts, next_pts)
        return m[0]
Example #17
def warp_image(image,
               points_in_image,
               points_in_baseline,
               transformation_type='affine'):
    if transformation_type == 'homographic':
        h, _ = cv2.findHomography(np.float32(points_in_image),
                                  np.float32(points_in_baseline))
        warpped_image = cv2.warpPerspective(image, h,
                                            (image.shape[1], image.shape[0]))
        return warpped_image, h
    elif transformation_type == 'affine':
        M, _ = cv2.estimateAffine2D(np.float32(points_in_image),
                                    np.float32(points_in_baseline))
        warpped_image = cv2.warpAffine(image, M,
                                       (image.shape[1], image.shape[0]))
        return warpped_image, M
    elif transformation_type == 'rigid':
        M = cv2.estimateRigidTransform(np.float32(points_in_image),
                                       np.float32(points_in_baseline),
                                       fullAffine=False)
        warpped_image = cv2.warpAffine(image, M,
                                       (image.shape[1], image.shape[0]))
        return warpped_image, M
    else:
        raise Exception('Unsupported method')
Example #18
def get_dominant_motion(mdata):
    pt0, pt1, m01 = mdata
    i0, i1 = np.stack([(m.queryIdx, m.trainIdx) for m in m01], axis=1)
    #least_squares(cost_fn,
    #cv2.estimateRigidTransform(pt0[i0], pt1[i1], True)
    M, _ = cv2.estimateAffine2D(pt0[i0], pt1[i1])
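    # split the 2x3 affine into its 2x2 linear part A and translation vector b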
    A, b = M[:, :2], M[:, 2]
    return A, b
Example #19
def KLTmain(im, im0, im0_small, p0):
    # Parameters for KLT
    EPS = cv2.TERM_CRITERIA_EPS
    COUNT = cv2.TERM_CRITERIA_COUNT
    lk_coarse = dict(winSize=(15, 15),
                     maxLevel=4,
                     criteria=(EPS | COUNT, 10, 0.1))
    lk_fine = dict(winSize=(51, 51),
                   maxLevel=0,
                   criteria=(EPS | COUNT, 30, 0.001))

    # 1. Coarse tracking on 1/4 scale full image
    scale = 1 / 4
    im_small = cv2.resize(im, (0, 0),
                          fx=scale,
                          fy=scale,
                          interpolation=cv2.INTER_NEAREST)
    if im0_small is None:
        im0_small = cv2.resize(im0, (0, 0),
                               fx=scale,
                               fy=scale,
                               interpolation=cv2.INTER_NEAREST)
    p, v, _ = cv2.calcOpticalFlowPyrLK(im0_small, im_small, p0 * scale, None,
                                       **lk_coarse)
    p /= scale
    T23, inliers = cv2.estimateAffine2D(
        p0[v], p[v], method=cv2.RANSAC)  # 2x3, better results
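    # keep only the RANSAC inliers among the tracked points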
    v[v] = inliers.ravel().astype(bool)
    # import plots; plots.imshow(im0_small//2+im_small//2, p1=p0[v]*scale,p2=p[v]*scale)

    # 2. Coarse tracking on full resolution roi https://www.mathworks.com/discovery/affine-transformation.html
    translation = p[v] - p0[v]
    T = np.eye(3, 2)
    T[2] = translation.mean(0)  # translation-only transform
    p, v = KLTregional(im0, im, p0, T, lk_coarse, fbt=1, translateFlag=True)

    if v.sum() > 10:  # good fit
        T23, inliers = cv2.estimateAffine2D(
            p0[v], p[v], method=cv2.RANSAC)  # 2x3, better results
    else:
        print('KLT coarse-affine failure, running SURF matches full scale.')
        T23, inliers = estimateAffine2D_SURF(im0, im, p0, scale=1)

    # 3. Fine tracking on affine-transformed regions
    p, v = KLTregional(im0, im, p0, T23.T, lk_fine, fbt=0.3)
    return p[v], v, im_small
Example #20
    def test_optical_flow(self):
        frames, fps = stabilization.extract_frames_from_video(
            self.motion_video_file)
        homographies = []
        # params for ShiTomasi corner detection
        feature_params = dict(maxCorners=100,
                              qualityLevel=0.1,
                              minDistance=4,
                              blockSize=4)

        # Parameters for lucas kanade optical flow
        lk_params = dict(winSize=(5, 5),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS
                                   | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

        for i in range(1, len(frames)):
            prev_frame = frames[i - 1]
            prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
            p0 = cv2.goodFeaturesToTrack(prev_gray,
                                         mask=None,
                                         **feature_params)

            curr_frame = frames[i]
            curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, p0,
                                                   None, **lk_params)

            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]

            circled = np.copy(prev_frame)
            for j in good_old:
                x, y = j.ravel()
                cv2.circle(circled, (int(x), int(y)), 1, (0, 0, 255), -1)
            frame_filename = path.join(self.motion_video_output_dir,
                                       "frame_{}_1.jpg".format(i))
            cv2.imwrite(frame_filename, circled)

            circled = np.copy(curr_frame)
            for j in good_new:
                x, y = j.ravel()
                cv2.circle(circled, (int(x), int(y)), 1, (0, 0, 255), -1)
            frame_filename = path.join(self.motion_video_output_dir,
                                       "frame_{}_2.jpg".format(i))
            cv2.imwrite(frame_filename, circled)

            M, _ = cv2.estimateAffine2D(good_new, good_old, method=cv2.RANSAC)
            H = np.append(M, np.array([0, 0, 1]).reshape((1, 3)), axis=0)
            homographies.append(H)

        homographies = np.array(homographies)
        print(homographies)
        stabilization.plot_homographies(homographies,
                                        self.motion_video_output_dir)
Example #21
def main():
    files_directory = r'C:\Scratch\IPA_Data\Sampled\15_Ambient_Combined\06_red_green_blue'

    image_names = [r'a0_amb.tif', r'b0_amb.tif']

    image_paths = []

    for filename in image_names:
        image_paths.append(os.path.join(files_directory, filename))

    img1 = cv.imread(image_paths[0], 1)
    img2 = cv.imread(image_paths[1], 1)

    surf = cv.xfeatures2d.SURF_create()

    surf.setHessianThreshold(400)

    # find the keypoints and descriptors with SIFT
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    print(len(kp1))
    print(len(kp2))

    img3 = cv.drawKeypoints(img1, kp1, None, (255, 0, 0), 4)
    img4 = cv.drawKeypoints(img2, kp2, None, (255, 0, 0), 4)

    bf = cv.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    pts2 = []
    pts1 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    f_matrix, mask = cv.findFundamentalMat(pts1, pts2, cv.FM_LMEDS)
    homog = cv.findHomography(pts1, pts2)

    aff = cv.estimateAffine2D(pts1, pts2, np.ones([len(pts1)]), cv.LMEDS, 3,
                              2000, 0.99, 10)

    warp_dst = cv.warpAffine(img1, aff[0], (img2.shape[1], img2.shape[0]))

    center = (warp_dst.shape[1] // 2, warp_dst.shape[0] // 2)

    cv.imshow('Source image', img2)
    cv.imshow('Warp', warp_dst)

    cv.waitKey()
Example #22
def fit_affine(X_source, X_target, do_debug=False):
    A, _ = cv2.estimateAffine2D(numpy.array(X_source),
                                numpy.array(X_target),
                                confidence=0.95)
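    # apply the estimated affine to the source points and measure the mean squared reprojection error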
    result = cv2.transform(X_source.reshape((-1, 1, 2)), A).reshape((-1, 2))
    loss = ((result - X_target)**2).mean()
    if do_debug:
        image = debug_projection(X_source, X_target, result)
    return A, result
Example #23
def affine(from_data, to_data, target_data):
    """Compute the 2D affine transformation between the data sets via opencv"""
    # calculate an approximate affine
    affine_matrix, inliers = cv2.estimateAffine2D(from_data, to_data, ransacReprojThreshold=3,
                                                  maxIters=20000, refineIters=0, method=cv2.LMEDS)
    print('Percentage inliers used:' + str(np.sum(inliers)*100/from_data.shape[0]))
    # make the transformed data homogeneous for multiplication with the affine
    transformed_data = np.squeeze(cv2.convertPointsToHomogeneous(target_data))
    # apply the affine matrix
    return np.matmul(transformed_data, affine_matrix.T)
Example #24
def alignImages(im1, im2):

    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    print("number of keypoints " + str(len(keypoints1)) + " " +
          str(len(keypoints2)))

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    print("number of matches " + str(len(matches)))

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    print("number of goodmatches  " + str(numGoodMatches))

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                                None)
    cv2.imwrite("matches.jpg", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    # h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # Use homography
    height, width, channels = im2.shape
    # im1Reg = cv2.warpPerspective(im1, h, (width, height))

    # estimate the affine transform
    affTransf, inliers = cv2.estimateAffine2D(points1, points2)
    im1Reg = cv2.warpAffine(im1, affTransf, (width, height))
    return im1Reg, affTransf
Example #25
    def match_key_points(self,
                         key_points_a,
                         key_points_b,
                         descriptors_a,
                         descriptors_b,
                         ratio,
                         threshold,
                         method="homography"):
        """
        match key points given by SIFT/SURF
        :param method: homography/affine
        :param key_points_a: key points of first image
        :param key_points_b: key points of second image
        :param descriptors_a: descriptors of first image
        :param descriptors_b: descriptors of second image
        :param ratio: Lowe ratio-test threshold
        :param threshold: RANSAC reprojection threshold
        :return: matches and homography/affine matrix
        """

        # Find matches
        raw_matches = self.desc_matcher.knnMatch(descriptors_a, descriptors_b,
                                                 2)
        matches = []

        # Save matches
        for m in raw_matches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        logger_instance.log(LogLevel.DEBUG, "matches: " + str(len(matches)))

        # Estimate geometrical transformation
        if len(matches) > self.matches_required:
            points_a = np.float32([key_points_a[i] for (_, i) in matches])
            points_b = np.float32([key_points_b[i] for (i, _) in matches])

            if method == "homography":
                (H, status) = cv2.findHomography(points_a, points_b,
                                                 cv2.RANSAC, threshold)
            elif method == "affine":
                (H, status) = cv2.estimateAffine2D(
                    points_a,
                    points_b,
                    method=cv2.RANSAC,
                    ransacReprojThreshold=threshold)
            else:
                raise Exception(
                    "Invalid call, unsupported transformation type: " + method)

            return matches, H, status

        return None
Example #26
def affine(img1, img2):
    img1_pts, img2_pts = feature_matching(img1, img2)
    pts = img1_pts - img2_pts
    std = np.std(pts)
    #img1_pts = np.float32(pts1).reshape(-1,1,2)
    #img2_pts = np.float32(pts2).reshape(-1,1,2)
    M, mask = cv2.estimateAffine2D(img1_pts,
                                   img2_pts,
                                   method=cv2.RANSAC,
                                   ransacReprojThreshold=0.4)
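    # keep only the translation component of the estimated affine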
    M = np.array([[1, 0, M[0, 2]], [0, 1, M[1, 2]]])
    return M
Example #27
    def estimate_rigid_transform(self, image1_pts, image2_pts, use_full):
        if int(self.OPENCV_MAJOR) < 4:  # estimateRigidTransform was removed in OpenCV 4
            affine = cv2.estimateRigidTransform(image1_pts, image2_pts, fullAffine=use_full)
        else:
            if use_full:
                # noinspection PyUnresolvedReferences
                # the new implementation also returns a vector indicating which points are inliers
                affine = cv2.estimateAffine2D(image1_pts, image2_pts)
            else:
                # noinspection PyUnresolvedReferences
                affine = cv2.estimateAffinePartial2D(image1_pts, image2_pts)

        return affine
Example #28
    def feature_matching(self, img1, img2, transform='Affine'):
        # Initiate SIFT detector
        sift = cv.SIFT_create()
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # or pass empty dictionary
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches2to1 = flann.knnMatch(des2, des1, k=2)

        matchesMask_ratio = [[0, 0] for i in range(len(matches2to1))]
        match_dict = {}
        for i, (m, n) in enumerate(matches2to1):
            if m.distance < 0.7 * n.distance:
                matchesMask_ratio[i] = [1, 0]
                match_dict[m.trainIdx] = m.queryIdx

        # perform reciprocal matching to ensure better matches
        good = []
        recip_matches = flann.knnMatch(des1, des2, k=2)
        matchesMask_ratio_recip = [[0, 0] for i in range(len(recip_matches))]

        for i, (m, n) in enumerate(recip_matches):
            if m.distance < 0.7 * n.distance:  # ratio
                if m.queryIdx in match_dict and match_dict[
                        m.queryIdx] == m.trainIdx:
                    good.append(m)
                    matchesMask_ratio_recip[i] = [1, 0]

        # draw_params = dict(matchColor = (0,255,0), singlePointColor = (255,0,0), matchesMask = matchesMask_ratio_recip, flags = 0)
        # img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,recip_matches,None,**draw_params)
        # cv.imshow("warped", img3)
        # cv.waitKey()

        pts1, pts2 = ([kp1[m.queryIdx].pt
                       for m in good], [kp2[m.trainIdx].pt for m in good])

        src_pts = np.float32(pts1).reshape(-1, 1, 2)
        dst_pts = np.float32(pts2).reshape(-1, 1, 2)
        if transform == 'Affine':
            M, mask = cv.estimateAffine2D(src_pts,
                                          dst_pts,
                                          method=cv.RANSAC,
                                          ransacReprojThreshold=5.0)
        elif transform == 'Homography':
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)

        return M
Example #29
def main():
    if not (os.path.exists(CROPPED_RECTIFIED_PATH)):
        os.makedirs(CROPPED_RECTIFIED_PATH)
    fnames = os.listdir(CROPPED_LABEL_PATH)
    seg_zero = plt.imread(os.path.join(ZERO_FULL_PATH, ZERO_FULL + ".png"))
    seg_label = np.uint8(seg_zero[:, :, :3] * 255)
    #get keypoints from zero_full images
    dst = []
    for c in colors["keypoints"]:
        dst.append(getColorSqrCenter(seg_label, c))
    dst = np.array(dst)
    torch.cuda.set_device(CUDA_DEVICE)
    model_pred = Keypoints(NUM_CLASSES)
    model_pred = model_pred.cuda()
    model_pred.load_state_dict(torch.load(os.path.join(PATH, "model_100.pkl")))
    model_pred.eval()
    num_of_correction = 0
    for fname in fnames:
        #print(fname)
        image = cv2.imread(os.path.join(CROPPED_TEST_PATH, fname))
        for i in range(0, 9, 3):
            #get keypoints from label images
            cropped_im = image[i:IMG_WIDTH - i, i:IMG_HEIGHT - i]
            cropped_im = cv2.resize(cropped_im, (IMG_WIDTH, IMG_HEIGHT),
                                    interpolation=cv2.INTER_NEAREST)
            im = transform1(np.array(cropped_im))
            im = im.cuda()

            prediction = Prediction(model_pred, NUM_CLASSES, IMG_HEIGHT,
                                    IMG_WIDTH, IMG_SMALL_HEIGHT,
                                    IMG_SMALL_WIDTH)
            result, keypoints = prediction.predict(im)
            keypoints = keypoints.cpu().numpy()
            keypoints = np.array(keypoints)

            # transform (rectify) and save
            T = cv2.estimateAffine2D(keypoints, dst, ransacReprojThreshold=5)
            im_rect = cv2.warpAffine(image, T[0],
                                     (seg_zero.shape[1], seg_zero.shape[0]))
            if (rectified_judge(im_rect, seg_zero.shape[1],
                                seg_zero.shape[0]) == 1):
                image = im_rect
                num_of_correction = num_of_correction + 1
                break

        image = image[:, :, (2, 1, 0)]
        image = Image.fromarray(image.astype(np.uint8))
        image.save(os.path.join(CROPPED_RECTIFIED_PATH, fname))

    print("%d dial %d correction images" % (num_of_dial, num_of_correction))
Example #30
	def update_transformation_matrix(self):
		sr = np.array([
			[175, 417],
			[313, 171],
			[469, 331]
		])
		ds = np.array([
			[-40, -424],
			[30, -354],
			[-20, -290]
		])
		retval, inliers = cv.estimateAffine2D(sr, ds)
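		# persist the estimated 2x3 affine matrix for later reuse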
		with open('storage.pkl', 'wb') as f:
			pickle.dump(retval, f)
Example #31
def getTransform(src, dst, method='affine'):
    pts1, pts2 = feature_matching(src, dst)

    src_pts = np.float32(pts1).reshape(-1, 1, 2)
    dst_pts = np.float32(pts2).reshape(-1, 1, 2)

    if method == 'affine':
        M, mask = cv2.estimateAffine2D(
            src_pts, dst_pts, method=cv2.RANSAC, ransacReprojThreshold=5.0)
        # M = np.append(M, [[0, 0, 1]], axis=0)

    if method == 'homography':
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    matchesMask = mask.ravel().tolist()

    return (M, pts1, pts2, mask)