def get_best_matches(img1, img2, num_matches):
    kp1, des1 = get_sift_data(img1)
    kp2, des2 = get_sift_data(img2)
    kp1, kp2 = np.array(kp1), np.array(kp2)

    # Pairwise squared-Euclidean distances between descriptors:
    # rows index des1, columns index des2.
    dist = scipy.spatial.distance.cdist(des1, des2, 'sqeuclidean')

    # Mutual nearest neighbours: keep pair (i, j) only when j is the
    # closest descriptor to i AND i is the closest descriptor to j.
    min_row = np.argmin(dist, axis=1)  # best des2 index for each des1
    min_col = np.argmin(dist, axis=0)  # best des1 index for each des2

    # match_row_idx holds feature ids from des1; min_row maps each one
    # to its matching feature in des2.
    match_row_idx = [
        i for i in range(len(min_row)) if min_col[min_row[i]] == i
    ]

    # Keep only the num_matches closest pairs.
    match_row_idx = sorted(match_row_idx,
                           key=lambda i: dist[i, min_row[i]])[:num_matches]

    kp_1_loc = cv2.KeyPoint_convert(kp1[match_row_idx])
    kp_2_loc = cv2.KeyPoint_convert(kp2[min_row[match_row_idx]])
    data = np.concatenate((kp_1_loc, kp_2_loc), axis=1)

    return data
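The snippet above relies on a get_sift_data helper that is not shown. A minimal sketch of what it presumably does, using OpenCV's SIFT (the helper name and return order are assumptions taken from the call site):

import cv2

def get_sift_data(img):
    # Hypothetical helper: detect SIFT keypoints and compute descriptors.
    sift = cv2.SIFT_create()
    kp, des = sift.detectAndCompute(img, None)
    return kp, des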
Example #2
    def compute(
        self, image: np.ndarray, keypoints: typing.List[cv2.KeyPoint]
    ) -> typing.Tuple[typing.List[cv2.KeyPoint], typing.List]:

        a, b = np.load(
            f"{os.path.dirname(__file__)}/{BriefDescriptorExtractor.PAIRS_FILENAME}"
        )
        kps = cv2.KeyPoint_convert(keypoints)

        # Keep only keypoints whose whole BRIEF patch lies inside the image.
        # Note: kps are (x, y) while image.shape is (rows, cols), so this
        # bound check is only exact for square images.
        half = BriefDescriptorExtractor.PATCH_SIZE / 2
        in_bounds = np.all(
            np.append(kps >= half,
                      kps < np.array(image.shape) - 1 - half,
                      axis=1),
            axis=1)
        kps = kps[in_bounds]

        descs = np.empty((0, 256), dtype=bool)
        for kp in kps:
            # One BRIEF bit per sampling pair: True where I(kp + a) > I(kp + b).
            avals = image[tuple((kp + a).astype(np.int32).T)]
            bvals = image[tuple((kp + b).astype(np.int32).T)]
            desc = np.greater(avals, bvals)
            descs = np.append(descs, desc.reshape(1, 256), axis=0)

        keypoints = cv2.KeyPoint_convert(kps.reshape(-1, 1, 2))
        #descs = [bitarray(list(desc)) for desc in descs]
        return (keypoints, descs)
def process_frame_flann(frame, poster):
    poster_offset = (frame.shape[1], frame.shape[0])
    red = (0, 0, 255)
    poster_resize = cv2.copyMakeBorder(poster, 0,
                                       frame.shape[0] - poster.shape[0], 0, 0,
                                       cv2.BORDER_CONSTANT)

    poster_kp, poster_des = orb.detectAndCompute(poster, None)
    frame_kp, frame_des = orb.detectAndCompute(frame, None)
    poster_kp_vec = cv2.KeyPoint_convert(poster_kp)
    frame_kp_vec = cv2.KeyPoint_convert(frame_kp)

    results1 = get_neighbours_flann(np.float32(poster_des),
                                    np.float32(frame_des))
    results2 = get_neighbours_flann(np.float32(frame_des),
                                    np.float32(poster_des))

    combod = np.concatenate((frame, poster_resize), axis=1)
    # Cross-check: keep a match only if the poster->frame and
    # frame->poster nearest neighbours agree.
    for poster_idx in range(len(results1)):
        frame_idx = results1[poster_idx][0].trainIdx
        back_ref = results2[frame_idx][0].trainIdx
        if poster_idx == back_ref:
            poster_kp_pos = poster_kp_vec[poster_idx]
            frame_kp_pos = frame_kp_vec[frame_idx]
            poster_img_pos = (int(poster_kp_pos[0] + poster_offset[0]),
                              int(poster_kp_pos[1]))
            frame_img_pos = (int(frame_kp_pos[0]), int(frame_kp_pos[1]))
            cv2.line(combod, poster_img_pos, frame_img_pos, red, 1)

    return combod
Example #4
def perform_sift(image1, keypoints1, image2, keypoints2):
    keypoint_pixels1 = cv.KeyPoint_convert(keypoints1)
    keypoint_pixels2 = cv.KeyPoint_convert(keypoints2)
    keypoint_histogram1 = []
    keypoint_histogram2 = []
    keypoint_descriptors1 = []
    keypoint_descriptors2 = []

    magnitudes1, orientations1 = calculate_image_gradients(image1)
    magnitudes2, orientations2 = calculate_image_gradients(image2)

    keypoint_descriptors1, keypoint_coords1 = create_descriptor(keypoint_descriptors1, keypoint_histogram1, keypoints1, orientations1)
    keypoint_descriptors2, keypoint_coords2 = create_descriptor(keypoint_descriptors2, keypoint_histogram2, keypoints2, orientations2)

    best_matches = []

    for x in range(len(keypoint_descriptors1)):
        ssds = []
        best_match_index = 0
        for y in range(len(keypoint_descriptors2)):
            # Sum of squared differences between the two descriptors.
            ssd = np.square(keypoint_descriptors1[x] - keypoint_descriptors2[y]).sum()
            ssds.append(ssd)
            if min(ssds) == ssd:
                best_match_index = y
        # Lowe's ratio test: compare the best SSD against the second best.
        ssdmin = min(ssds)
        ssds.remove(ssdmin)
        ssdminprime = min(ssds)
        ratio_distance = ssdmin / ssdminprime

        if ratio_distance < 0.8:
            best_matches.append(cv.DMatch(x, best_match_index, 0))

    matched_image = cv.drawMatches(image1, keypoints1, image2, keypoints2, best_matches, image1, flags=2)
    cv.imshow("Matches", matched_image)
    cv.waitKey(0)
def compute_homography(matched_kp1, matched_kp2):
    matched_pts1 = cv2.KeyPoint_convert(matched_kp1)
    matched_pts2 = cv2.KeyPoint_convert(matched_kp2)

    # Estimate the homography between the matches using RANSAC.
    # The keypoints here apparently store (row, col) in .pt, so the
    # columns are swapped to give findHomography (x, y) points.
    H, inliers = cv2.findHomography(matched_pts1[:, [1, 0]],
                                    matched_pts2[:, [1, 0]], cv2.RANSAC)
    inliers = inliers.flatten()
    return H, inliers
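A brief usage sketch for compute_homography: assuming matched_kp1 and matched_kp2 are equal-length lists of cv2.KeyPoint produced by some matcher, the returned inlier mask can filter the matches down to the RANSAC-consistent subset:

H, inliers = compute_homography(matched_kp1, matched_kp2)
good_kp1 = [kp for kp, ok in zip(matched_kp1, inliers) if ok]
good_kp2 = [kp for kp, ok in zip(matched_kp2, inliers) if ok]
print(f"{int(inliers.sum())}/{len(inliers)} matches survive RANSAC")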
Example #6
def get_unique(mp):
    # Deduplicate matches: keep one row per unique source (x1, y1) and
    # target (x2, y2) coordinate pair.
    sort = sort_mp(mp)
    match = np.concatenate((cv2.KeyPoint_convert(sort[..., 0]),
                            cv2.KeyPoint_convert(sort[..., 1])),
                           axis=1)
    match = pd.DataFrame(match).reset_index()
    max01 = set(match.groupby([0, 1]).max()["index"])
    max23 = set(match.groupby([2, 3]).max()["index"])
    unique = match.loc[sorted(max01 & max23)]
    return unique.values[:, 1:]
def triangulate_points(pose_1, pose_2, kp_l, kp_r):

    # Truncate the longer keypoint list so both have the same length.
    if len(kp_l) < len(kp_r):
        kp_r = cv2.KeyPoint_convert(kp_r[:len(kp_l)])
        kp_l = cv2.KeyPoint_convert(kp_l)
    else:
        kp_l = cv2.KeyPoint_convert(kp_l[:len(kp_r)])
        kp_r = cv2.KeyPoint_convert(kp_r)

    # cv2.triangulatePoints expects 2xN point arrays, but
    # KeyPoint_convert returns Nx2, hence the transposes.
    triangulated = cv2.triangulatePoints(pose_1[:3, :], pose_2[:3, :],
                                         kp_l.T, kp_r.T)

    # De-homogenise by the fourth (scale) coordinate.
    return triangulated / triangulated[3]
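A usage sketch for triangulate_points, under assumed names: the 3x4 projection matrices are built as K[R|t] with hypothetical intrinsics and baseline, and since the function pairs keypoints by index, kp_l and kp_r should come from matching or tracking in practice:

import cv2
import numpy as np

K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])  # hypothetical intrinsics

# Left camera at the origin, right camera translated along x.
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = K @ np.hstack([np.eye(3), np.array([[-0.1], [0.0], [0.0]])])

orb = cv2.ORB_create()
kp_l = orb.detect(img_left, None)   # img_left / img_right loaded elsewhere
kp_r = orb.detect(img_right, None)

pts4d = triangulate_points(P1, P2, kp_l, kp_r)  # 4xN, last row all ones
pts3d = pts4d[:3].T                             # Nx3 points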
Example #8
def preprocess(roi):
    detector_params = cv2.SimpleBlobDetector_Params()
    detector_params.filterByArea = True
    detector_params.maxArea = 500
    detector_params.minArea = 50
    detector = cv2.SimpleBlobDetector_create(detector_params)

    threshold = 30
    while True:
        # Binarise and clean up the ROI before blob detection.
        _, roi = cv2.threshold(roi, threshold, 255, cv2.THRESH_BINARY)
        roi = cv2.erode(roi, None, iterations=2)
        roi = cv2.dilate(roi, None, iterations=4)
        roi = cv2.medianBlur(roi, 5)

        keypoints = detector.detect(roi)
        roi = cv2.drawKeypoints(roi, keypoints, roi)

        cv2.namedWindow('roi_processed', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('roi_processed', 600, 600)
        cv2.imshow('roi_processed', roi)
        cv2.waitKey(1)  # let the window refresh

        pts = cv2.KeyPoint_convert(keypoints)

        # Retry on the processed ROI until at least one blob is found.
        if len(pts) == 0:
            continue
        return pts[0]
Example #9
def ICAngles(image, keypoints, half_patch_size, u_max):
    """Calculate angles via Intensity Centroids
    """

    kp_position = cv.KeyPoint_convert(keypoints)
    ptsize = len(kp_position)

    for ptidx in range(ptsize):

        m_01 = 0
        m_10 = 0

        # Central row (v = 0) contributes to m_10 only.
        for u in range(-half_patch_size, half_patch_size + 1):
            m_10 = m_10 + u * image[int(kp_position[ptidx, 1]),
                                    int(kp_position[ptidx, 0]) + u]

        # Symmetric rows above and below the centre.
        for v in range(1, half_patch_size + 1):
            v_sum = 0
            d = u_max[v]
            for u in range(-d, d + 1):
                val_plus = int(image[int(kp_position[ptidx, 1]) + v,
                                     int(kp_position[ptidx, 0]) + u])
                val_minus = int(image[int(kp_position[ptidx, 1]) - v,
                                      int(kp_position[ptidx, 0]) + u])
                v_sum = v_sum + (val_plus - val_minus)
                m_10 = m_10 + u * (val_plus + val_minus)
            m_01 = m_01 + v * v_sum

        # OpenCV stores KeyPoint.angle in degrees.
        keypoints[ptidx].angle = math.degrees(math.atan2(float(m_01), float(m_10)))
Example #10
    def GetFingers(self, frame):
        img = removeBG(frame)
        img = img[0:250, 0:250]

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Build a smoothed binary mask of the hand region.
        kernel = np.ones((5, 5), np.uint8)
        blur_process = cv2.erode(gray, kernel, iterations=1)
        blur_process = cv2.GaussianBlur(blur_process, (blurValue, blurValue),
                                        0)
        _, blur_mask = cv2.threshold(blur_process, 25, 255,
                                     cv2.THRESH_BINARY_INV)

        # Re-mask, blur and threshold again to clean up the mask edges.
        blur = cv2.bitwise_and(gray, gray, mask=blur_mask)
        blur_mask = cv2.GaussianBlur(blur, (blurValue, blurValue), 0)
        _, blur_mask = cv2.threshold(blur_mask, 27, 255,
                                     cv2.THRESH_BINARY_INV)

        blur = cv2.bitwise_and(gray, gray, mask=blur_mask)

        detector_params = cv2.SimpleBlobDetector_Params()
        detector_params.filterByArea = True
        detector_params.maxArea = 1000
        detector_params.minArea = 30
        detector = cv2.SimpleBlobDetector_create(detector_params)

        keypoints = detector.detect(blur_mask)
        blur_mask = cv2.drawKeypoints(blur_mask, keypoints, blur_mask)
        pts = cv2.KeyPoint_convert(keypoints)

        return pts, gray
Example #11
def find_keypoints(Image, Mask, Tile_H, Tile_W, nFeatures):
    featureEngine = cv2.FastFeatureDetector_create()
    H, W = Image.shape
    kp = []
    # Detect FAST features tile by tile so keypoints are spread evenly
    # across the image, keeping at most nFeatures per tile.
    for y in range(0, H, Tile_H):
        for x in range(0, W, Tile_W):
            Patch_Img = Image[y:y + Tile_H, x:x + Tile_W]
            Patch_Mask = Mask[y:y + Tile_H, x:x + Tile_W]
            keypoints = featureEngine.detect(Patch_Img, mask=Patch_Mask)
            # Shift patch-local coordinates back to full-image coordinates.
            for pt in keypoints:
                pt.pt = (pt.pt[0] + x, pt.pt[1] + y)

            if len(keypoints) > nFeatures:
                keypoints = sorted(keypoints, key=lambda k: -k.response)
                kp.extend(keypoints[:nFeatures])
            else:
                kp.extend(keypoints)

    trackPts = cv2.KeyPoint_convert(kp)
    trackPts = np.expand_dims(trackPts, axis=1)

    global debug
    if debug == 1:
        print("# Points Tracked : " + str(len(trackPts)))
    return trackPts
    def whereToSpray(self, final_mask, final_image):    
        # Setup SimpleBlobDetector parameters.
        params = cv2.SimpleBlobDetector_Params()
        # Filter by colour.
        params.filterByColor = True
        params.blobColor = 255
        # Filter by Area.
        params.filterByArea = True
        params.minArea = 200
        # Filter by Circularity
        params.filterByCircularity = False
        # Filter by Convexity
        params.filterByConvexity = False
        # Filter by Inertia
        params.filterByInertia = False
        # Set up the detector with new parameters
        detector = cv2.SimpleBlobDetector_create(params)
 
        # Detect blobs and convert their centres to (x, y) points.
        keypoints = detector.detect(final_mask)
        points = cv2.KeyPoint_convert(keypoints)

        # Round to integer pixel coordinates so the points can be published.
        publishable_points = []
        for point in points:
            x = int(point[0])
            y = int(point[1])
            publishable_points.append([x, y])
            final_image = cv2.circle(final_image, (x, y), 10, (200, 0, 200),
                                     thickness=-1)
        cv2.imshow("Final Output. Red - weed belief mask, purple - points to spray.",
                   final_image)
        self.publishPointCloud(publishable_points)
Example #13
    def compute_mma(self, kp1, kp2, good, th=3):
        """Mean matching accuracy: the fraction of 'good' matches whose
        two endpoints lie within th pixels of each other."""
        kp1 = cv2.KeyPoint_convert(kp1)
        kp2 = cv2.KeyPoint_convert(kp2)

        src_pts = np.float32([kp1[m.queryIdx] for m in good]).reshape(-1, 2)
        dst_pts = np.float32([kp2[m.trainIdx] for m in good]).reshape(-1, 2)

        N1 = src_pts.shape[0]
        N2 = dst_pts.shape[0]
        matching_score = 0
        if N2 != 0 and N1 != 0:
            norm = np.linalg.norm(src_pts - dst_pts, ord=None, axis=1)
            is_match = norm <= th
            match_n = np.sum(is_match)
            matching_score = match_n / len(norm)
        return matching_score
Example #14
    def compute_repeatability(self, kp1, kp2, th=3):
        """Repeatability: the fraction of keypoints in either set that
        have a neighbour within th pixels in the other set."""
        kp1 = cv2.KeyPoint_convert(kp1)
        kp2 = cv2.KeyPoint_convert(kp2)
        repeatability = 0
        N1 = len(kp1)
        N2 = len(kp2)
        if N2 != 0 and N1 != 0:
            kp1 = np.expand_dims(kp1, 1)
            kp2 = np.expand_dims(kp2, 0)

            norm = np.linalg.norm(kp1 - kp2, ord=None, axis=-1)
            cnt1 = np.sum(np.min(norm, axis=1) <= th)
            cnt2 = np.sum(np.min(norm, axis=0) <= th)

            repeatability = (cnt1 + cnt2) / (N1 + N2)
        return repeatability
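A sketch of how the two metrics above might be driven, assuming an evaluator instance named ev, a roughly aligned image pair, and ORB features with cross-checked brute-force matching (all of which are illustrative, not part of the original class):

import cv2

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)  # img1 / img2 loaded elsewhere
kp2, des2 = orb.detectAndCompute(img2, None)

matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
good = matcher.match(des1, des2)

rep = ev.compute_repeatability(kp1, kp2, th=3)  # detector stability
mma = ev.compute_mma(kp1, kp2, good, th=3)      # match correctness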
    def local_extract(self, image):

        img_zip = None
        score = None
        # pose_ld selects the local-feature backend; 0/1 are OpenCV-style
        # detectors that return cv2.KeyPoint lists, converted here to points.
        if (self.pose_ld == 0) or (self.pose_ld == 1):
            image = self.grayscele_fn(image)
            kps, desc = self.local_descriptor(image)
            kps = cv2.KeyPoint_convert(kps)

        elif (self.pose_ld==2) or (self.pose_ld==3):
            feat = self.local_descriptor.extract(image)
            kps = feat["keypoints"][:,:2]
            desc = feat['descriptors']
        
        elif self.pose_ld==4:
            rescale_size = [1024,960]
            im, inp, sc = ex.read_image(image, "cuda", rescale_size, 0, False)
            feat = self.local_descriptor({'image': inp})
            kps = feat['keypoints']
            desc = feat['descriptors']
            score = feat['scores']
            img_zip = {'img_tensor': inp, 'scale_factor': sc}

        return kps, desc, score, img_zip
    def __find_four_key_point(self):
        if (self.key_points is None):
            self.__find_key_points(min_circularity=0.5, min_convexity=0.3)

        if (len(self.key_points) == 0):
            return []

        hulls = cv2.convexHull(cv2.KeyPoint_convert(self.key_points))
        # Sentinel values chosen so any hull point replaces them.
        points = [
            [self.working_image.shape[0], self.working_image.shape[1]],
            [0, self.working_image.shape[1]],
            [self.working_image.shape[0], 0],
            [0, 0]
        ]
        # Extreme points of the hull: the min/max of (x + y) and (x - y)
        # pick out the four corners.
        for hull in hulls:
            if (hull[0][0] + hull[0][1] < points[0][0] + points[0][1]):
                points[0] = hull[0]
            if (hull[0][0] - hull[0][1] > points[1][0] - points[1][1]):
                points[1] = hull[0]
            if (hull[0][0] - hull[0][1] < points[2][0] - points[2][1]):
                points[2] = hull[0]
            if (hull[0][0] + hull[0][1] > points[3][0] + points[3][1]):
                points[3] = hull[0]

        # Workaround for the LJK format, which does not provide the
        # bottom-right dot: synthesise it from the other two corners.
        points[3] = [points[1][0], points[2][1]]
        return points
    def filter_corner(self, kp):
        pts = cv.KeyPoint_convert(kp)
        if len(pts) > 0:
            a = np.array(pts)
            # Median point of all detections.
            median = np.array([np.median(a[:, 0]),
                               np.median(a[:, 1])]).astype(int)

            # Drop keypoints that lie too far from the median.
            remove_idxs = [
                i for i, p in enumerate(a)
                if not self.valid_keypoint(median, p)
            ]
            if len(remove_idxs) < len(a):
                a = np.delete(a, remove_idxs, axis=0)

            # The corner guess is the median of the surviving keypoints.
            return np.array([np.median(a[:, 0]),
                             np.median(a[:, 1])]).astype(int)
        return None
Example #18
def corners(img, keypoints):
    zoom = 5
    ret = []
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    win_size = (3, 3)
    zero_zone = (-1, -1)
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.1)

    points = cv.KeyPoint_convert(keypoints)  # (x, y) coordinates
    # cornerSubPix refines the points in place, so refine a copy and
    # keep the originals for comparison.
    corners = points.copy()
    cv.cornerSubPix(gray, corners, win_size, zero_zone, criteria)
    indexes = random.sample(range(len(points)), 3)

    for index in indexes:
        x, y = points[index].astype(int)
        rx, ry = corners[index]
        im = cv.resize(img, None, fx=zoom, fy=zoom)
        im = cv.circle(im, (int(zoom * x), int(zoom * y)), 2, (255, 0, 0))    # original
        im = cv.circle(im, (int(zoom * rx), int(zoom * ry)), 2, (0, 255, 0))  # refined

        # Crop a zoomed window around the corner.
        t = max(zoom * (y - 5), 0)
        b = zoom * (y + 5)
        l = max(zoom * (x - 5), 0)
        r = zoom * (x + 5)
        window = im[t:b, l:r]
        ret.append(window)

    return ret
Example #19
def compute(image):
    sift = cv2.xfeatures2d.SIFT_create()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    keypoints = sift.detect(gray, None)
    # sift.compute returns a (keypoints, descriptors) tuple.
    keypoints, descriptors = sift.compute(gray, keypoints)
    location = cv2.KeyPoint_convert(keypoints)

    return keypoints, descriptors, location
Example #20
def get_matched_points(kp1, kp2, matches):
    """
    Args:
        - kp1: list of KeyPoints
        - kp2: list of KeyPoints
        - matches: list of DMatch
    Returns:
        - out_kp1: 2d ndarray (coordinates of matched kp1)
        - out_kp2: 2d ndarray (coordinates of matched kp2)
    """
    query_Idx = [match.queryIdx for match in matches]
    train_Idx = [match.trainIdx for match in matches]
    coords1 = cv2.KeyPoint_convert(kp1)
    coords2 = cv2.KeyPoint_convert(kp2)
    out_kp1 = coords1[query_Idx, :]
    out_kp2 = coords2[train_Idx, :]
    return out_kp1, out_kp2
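For context, a sketch of where these arguments typically come from and how the aligned outputs feed cv2.findHomography (the detector and matcher choices here are illustrative):

import cv2
import numpy as np

sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)  # img1 / img2 loaded elsewhere
kp2, des2 = sift.detectAndCompute(img2, None)

matches = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True).match(des1, des2)

out_kp1, out_kp2 = get_matched_points(kp1, kp2, matches)
H, mask = cv2.findHomography(out_kp1, out_kp2, cv2.RANSAC, 5.0)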
Example #21
def draw_matches(img1, img2, sel_matches, k1, k2):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Stack the two grayscale images side by side in a 3-channel view.
    view = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
    view[:h1, :w1, 0] = img1
    view[:h2, w1:, 0] = img2
    view[:, :, 1] = view[:, :, 0]
    view[:, :, 2] = view[:, :, 0]

    position = None

    if (sel_matches is not None) and (k2 is not None):
        # don't use in final production
        for m in sel_matches:
            # draw the keypoint matches in random colours
            color = tuple(int(np.random.randint(0, 256)) for _ in range(3))
            cv2.line(
                view, (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1])),
                (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1])),
                color)

        kp2 = [k2[m.trainIdx] for m in sel_matches]
        kp1 = [k1[m.queryIdx] for m in sel_matches]

        p1 = cv2.KeyPoint_convert(kp1)
        p2 = cv2.KeyPoint_convert(kp2)

        # A homography needs at least 4 point correspondences.
        if len(sel_matches) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        else:
            H, status = None, None
        if H is not None:
            # Project the corners of img1 into img2 and accept the match
            # only if the projected area is plausible.
            corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
            position = cv2.perspectiveTransform(corners.reshape(1, -1, 2),
                                                H).reshape(-1, 2)
            area = computeArea(position)
            print("Area : " + str(area))
            if area > (w2 * h2 / 150) and area < (w2 * h2 / 1.4):
                corners = np.int32(position + (w1, 0))
                cv2.polylines(view, [corners], True, (255, 255, 255))
            else:
                position = None

    cv2.imshow("view", view)
    return position
    def getKeypoints(self, img):
        """Use a feature detector to get keypoints in an image

        :param img: image to detect keypoints in
        :return: detected keypoints as an array of (x, y) points
        """
        keypoints = self.detector.detect(img, None)
        points2f = cv2.KeyPoint_convert(keypoints)
        return points2f
Example #23
def __main__():
    videoFile = cv2.VideoCapture(fileName)

    # Read until the first frame is successfully grabbed.
    ret, frameRef = videoFile.read()
    while not ret:
        ret, frameRef = videoFile.read()
    frameRef_gray = cv2.cvtColor(frameRef, cv2.COLOR_BGR2GRAY)
    # frameRef = filterFrame(frameRef)
    frameRef_gray = cv2.equalizeHist(frameRef_gray)

    regularPoints, markedPoints_ref, counterArr, mask, numRows, numColumns = initialValuesLK(frameRef, pointStep)
    while videoFile.isOpened():
        ret, frameCurr = videoFile.read()
        if not ret:
            break
        frameCurr_gray = cv2.cvtColor(frameCurr, cv2.COLOR_BGR2GRAY)
        # frameCurr = filterFrame(frameCurr)
        frameCurr_gray = cv2.equalizeHist(frameCurr_gray)

        interestingPointsTemp_curr, interestingPointsTemp_ref, counterArrTemp = \
            calculateOpticalFlowRegular(frameCurr_gray, frameRef_gray,
                                        regularPoints, lkparams, numRows, numColumns)

        markedPoints_curr, markedPoints_ref, counterArr = \
            calculateOpticalFlowMarked(frameCurr_gray, frameRef_gray, markedPoints_ref,
                                       counterArr, lkparams, numRows, numColumns)

        # Detect blobs in the equalised frame and keep their sizes before
        # converting the keypoints to plain (x, y) points.
        blobFrame = cv2.equalizeHist(frameCurr_gray)
        keypoints = detector.detect(blobFrame)
        sizes = [x.size for x in keypoints]
        keypoints = cv2.KeyPoint_convert(keypoints)

        # Draw only the tracks whose endpoint lies near a detected blob.
        for i in range(len(markedPoints_curr)):
            if findNeibouringBlob(markedPoints_curr[i][0], keypoints, sizes):
                new = markedPoints_curr[i]
                old = markedPoints_ref[i]
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), [0, 255, 0], 1)
                frameCurr = cv2.circle(frameCurr, (int(a), int(b)), 10, [255, 0, 0], -1)

        frameRef_gray = frameCurr_gray
        markedPoints_ref = np.concatenate((markedPoints_curr, interestingPointsTemp_curr), axis=0)
        counterArr = counterArr + counterArrTemp

        frameCurr = cv2.add(frameCurr, mask)
        cv2.imshow('frame', frameCurr)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    videoFile.release()
    cv2.destroyAllWindows()
    print("Successfully Completed!")
Example #24
def dst2kps(dst, max_kps):
    """Convert a corner-response map to keypoints: threshold the responses,
    raising the threshold until at most max_kps points remain."""
    threshold = 0.1
    while True:
        # np.argwhere yields (row, col) indices; swap to (x, y) points.
        kps_idx = [(int(c), int(r))
                   for r, c in np.argwhere(dst > dst.max() * threshold)]
        if max_kps < 0 or len(kps_idx) <= max_kps:
            break
        threshold += 0.02
    return cv2.KeyPoint_convert(np.float32(kps_idx))
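A usage sketch for dst2kps, assuming dst is a Harris corner response map (the file path and parameters are illustrative):

import cv2
import numpy as np

gray = cv2.imread("image.png", cv2.IMREAD_GRAYSCALE)
dst = cv2.cornerHarris(np.float32(gray), blockSize=2, ksize=3, k=0.04)
dst = cv2.dilate(dst, None)  # fatten the response maxima before thresholding

kps = dst2kps(dst, max_kps=500)  # keep at most ~500 strongest corners
out = cv2.drawKeypoints(gray, kps, None, color=(0, 255, 0))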
def SHITOMASI_brief():
    img1 = cv2.imread("../imgReferencia/img00.jpg")
    gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    # goodFeaturesToTrack returns float32 corners of shape (N, 1, 2),
    # which KeyPoint_convert accepts directly.
    corners1 = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
    kp1 = cv2.KeyPoint_convert(corners1)

    brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp1, des1 = brief.compute(gray, kp1)
    img2 = cv2.imread("../imgTeste/img1.jpg", 0)

    corners2 = cv2.goodFeaturesToTrack(img2, 100, 0.01, 10)
    kp2 = cv2.KeyPoint_convert(corners2)
    kp2, des2 = brief.compute(img2, kp2)

    matches = bf.match(des1, des2)
def SHITOMASI_brisk():
    img1 = cv2.imread("../imgReferencia/img00.jpg", 0)

    corners1 = cv2.goodFeaturesToTrack(img1, 100, 0.01, 10)
    kp1 = cv2.KeyPoint_convert(corners1)

    brisk = cv2.BRISK_create()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp1, des1 = brisk.compute(img1, kp1)

    img2 = cv2.imread("../imgTeste/img1.jpg", 0)

    corners2 = cv2.goodFeaturesToTrack(img2, 100, 0.01, 10)
    kp2 = cv2.KeyPoint_convert(corners2)
    kp2, des2 = brisk.compute(img2, kp2)

    matches = bf.match(des1, des2)
Example #27
def get_keypoints(frame, mask, detector, detector_params):
    if detector == 'good':
        # goodFeaturesToTrack already returns point coordinates (Nx1x2).
        kps = cv.goodFeaturesToTrack(frame, mask=mask, **detector_params)
        return kps
    elif detector == 'sift':
        detec = cv.xfeatures2d.SIFT_create(**detector_params)
    elif detector == 'surf':
        detec = cv.xfeatures2d.SURF_create(**detector_params)
    # SIFT/SURF return KeyPoint objects; convert them to an Nx2 array.
    kps = detec.detect(frame, mask=mask)
    return cv.KeyPoint_convert(kps)
Example #28
    def computeQuery(self, queryfeatures, trainfeatures, matches_threshold=52, querry_ROI=None, train_ROI=None):
        if trainfeatures is None:
            print("[INFO] missing trainfeatures. Please "
                  "supply trainfeatures to the computeQuery function")

        elif queryfeatures is None:
            print("[INFO] missing an image query. Please "
                  "supply an image to query")
        else:
            # print("detecting matching descriptors")
            (kp1, des1) = queryfeatures
            (kp2, des2) = trainfeatures

            # Compute the homography between the matched points. If the match
            # is good, there should be a consistent homography between the
            # query and train images; it can also be used to display the result.
            M = self.matchKeypoints(kp1, kp2, des1, des2, ratio=0.95, reprojThresh=5.0)
            matchesH, H, status = None, None, None
            totalSuccess = 0
            if M is not None:
                (matchesH, H, status) = M
                # Count the RANSAC inliers.
                for success in status:
                    if success == 1:
                        totalSuccess += 1


                if querry_ROI is not None and train_ROI is not None:
                    print(totalSuccess)  # a count of all the matches found with ransac
                    print("the query image resolution is :" + str(querry_ROI.shape[0])
                          + " x " + str(querry_ROI.shape[1]))
                    print("the train image resolution is :" + str(train_ROI.shape[0])
                          + " x " + str(train_ROI.shape[1]))
                    self.drawMatches(querry_ROI, train_ROI, cv2.KeyPoint_convert(kp1), cv2.KeyPoint_convert(kp2),
                                     matchesH, status)
                    # LEFT IS TRAIN IMAGE AND RIGHT IS QUERY IMAGE

            # No inliers at all: there aren't enough matched keypoints
            # to consider the images a match.
            if totalSuccess == 0:
                # cv2.imshow("ImageA", querry_ROI)
                # cv2.imshow("ImageB", train_ROI)
                # cv2.waitKey(0)
                # cv2.destroyWindow("ImageA")
                # cv2.destroyWindow("ImageB")
                return False, totalSuccess
            elif totalSuccess < matches_threshold:
                # print("\n[INFO] Failure to match image. RANSAC has found very few matches that compute a homography ")
                # self.drawMatches(querry_ROI, train_ROI, cv2.KeyPoint_convert(kp1), cv2.KeyPoint_convert(kp2),
                #                   matchesH, status)
                return False,totalSuccess

            else:
                # print("\n[INFO] Success, a match was made. RANSAC found a homography that fits "
                #       + str(totalSuccess) + " feature points.")
                return True,totalSuccess
def HARRIS_freak(img):
    img1 = cv2.imread("../imgReferencia/img00.jpg", 0)
    freak = cv2.xfeatures2d.FREAK_create()

    imagem1 = np.float32(img1)
    dst1 = cv2.cornerHarris(imagem1, 2, 3, 0.04)
    dst1 = cv2.dilate(dst1, None)
    ret1, dst1 = cv2.threshold(dst1, 0.01 * dst1.max(), 255, 0)
    dst1 = np.uint8(dst1)
    ret1, labels1, stats1, centroids1 = cv2.connectedComponentsWithStats(dst1)
    criteria1 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                 0.001)
    corners1 = cv2.cornerSubPix(imagem1, np.float32(centroids1), (5, 5),
                                (-1, -1), criteria1)
    # cornerSubPix already returns float32 points, which
    # KeyPoint_convert accepts directly.
    kp1 = cv2.KeyPoint_convert(corners1)
    kp1, des1 = freak.compute(img1, kp1)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    img2 = cv2.imread("../imgTeste/img" + str(img) + ".jpg", 0)
    imagem2 = np.float32(img2)
    dst2 = cv2.cornerHarris(imagem2, 2, 3, 0.04)
    dst2 = cv2.dilate(dst2, None)
    ret2, dst2 = cv2.threshold(dst2, 0.01 * dst2.max(), 255, 0)
    dst2 = np.uint8(dst2)
    ret2, labels2, stats2, centroids2 = cv2.connectedComponentsWithStats(dst2)
    criteria2 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                 0.001)
    corners2 = cv2.cornerSubPix(imagem2, np.float32(centroids2), (5, 5),
                                (-1, -1), criteria2)
    kp2 = cv2.KeyPoint_convert(corners2)
    kp2, des2 = freak.compute(img2, kp2)

    matches = bf.match(des1, des2)
Example #30
def disparity_orb(left, right):
    orb_detector = cv2.ORB_create()
    BF_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    kp1, des1 = orb_detector.detectAndCompute(left, None)
    kp2, des2 = orb_detector.detectAndCompute(right, None)

    matches = BF_matcher.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    kp1 = cv2.KeyPoint_convert(kp1)
    kp2 = cv2.KeyPoint_convert(kp2)

    # Collect the matched pixel coordinates in each image
    # (queryIdx indexes the left keypoints, trainIdx the right ones).
    matching_points_left = []
    matching_points_right = []
    for match in matches:
        matching_points_left.append(
            (kp1[match.queryIdx][0], kp1[match.queryIdx][1]))
        matching_points_right.append(
            (kp2[match.trainIdx][0], kp2[match.trainIdx][1]))

    # Write the absolute horizontal disparity of each match into sparse
    # maps indexed by the matched pixel in the left and right image.
    output_image_l = np.zeros(left.shape, np.uint8)
    output_image_r = np.zeros(left.shape, np.uint8)
    for temp in range(len(matching_points_right)):
        disparity = np.abs(matching_points_right[temp][0] -
                           matching_points_left[temp][0])
        output_image_l[int(matching_points_left[temp][1])][int(
            matching_points_left[temp][0])] = disparity
        output_image_r[int(matching_points_right[temp][1])][int(
            matching_points_right[temp][0])] = disparity
    output_image_l = cv2.normalize(output_image_l,
                                   None,
                                   alpha=0,
                                   beta=255,
                                   norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_8UC1)
    output_image_r = cv2.normalize(output_image_r,
                                   None,
                                   alpha=0,
                                   beta=255,
                                   norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_8UC1)

    return output_image_l + output_image_r
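Finally, a minimal driver for disparity_orb, assuming a rectified grayscale stereo pair on disk (the paths are hypothetical):

import cv2

left = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)
right = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)

disparity = disparity_orb(left, right)
cv2.imshow("sparse ORB disparity", disparity)
cv2.waitKey(0)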