Example #1
def findFiducial():

    # load fiducial with greyscale flag
    tag = cv2.imread('Frame.png', 0)
    # tag = cv2.imread('Tags/Tag36h9.png', 0)
    real_width = 51.5  # mm (measure these!!!!)
    h, w = tag.shape  # h and w of fiducial

    # load in one of our "distorted" images (loaded in colour; the greyscale conversion below is left commented out)
    img = cv2.imread('CalibrationImages/Fiducial/Fiducial1.jpg')
    #img = cv2.imread('photo.jpg') # trainImage
    #img, timestamp = freenect.sync_get_video()     #(possibly take an image)
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Load previously saved data about the camera - generated with the chessboard photos
    dist = np.load('CalibrationImages/Caliboutput/dist.npy')
    Cam_Mat = np.load('CalibrationImages/Caliboutput/mtx1.npy')
    '''
    # undistort the image!
    ph, pw = img.shape[:2]
    newcameramtx, roi=cv2.getOptimalNewCameraMatrix(Cam_Mat, dist, (pw, ph), 1, (pw, ph))
    img = cv2.undistort(img, Cam_Mat, dist, None, newcameramtx)
    '''

    # Initiate ORB detector (OpenCV 2.4 API; OpenCV 3+ uses cv2.ORB_create())
    orb = cv2.ORB()

    # find the keypoints and descriptors with ORB:
    # fiducial
    tag_kp = orb.detect(tag, None)
    tag_kp, tag_des = orb.compute(tag, tag_kp)
    # photo
    img_kp = orb.detect(img, None)
    img_kp, img_des = orb.compute(img, img_kp)

    # find the fiducial
    print("finding fiducial")
    M = find(tag_des, tag_kp, img_des, img_kp)
    draw_outline(M, h, w, img)

    # write out full size image
    cv2.imwrite('found.jpg', img)

    # show the image
    cv2.imshow('found', img)
    cv2.waitKey()
    cv2.destroyAllWindows()
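
The find and draw_outline helpers are defined elsewhere in that repository. A minimal sketch of what they might look like, assuming the usual Hamming-matcher-plus-RANSAC-homography pattern (the signatures match the calls above; everything else is an assumption, reusing the example's cv2/np imports):

def find(tag_des, tag_kp, img_des, img_kp, min_matches=10):
    # Hypothetical helper: Hamming distance suits ORB's binary descriptors
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(tag_des, img_des), key=lambda m: m.distance)
    if len(matches) < min_matches:
        return None
    src = np.float32([tag_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([img_kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    # RANSAC rejects outlier correspondences while estimating the homography
    M, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    return M

def draw_outline(M, h, w, img):
    # Hypothetical helper: project the fiducial's corners into the photo
    if M is None:
        return
    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
    projected = cv2.perspectiveTransform(corners, M)
    cv2.polylines(img, [np.int32(projected)], True, (0, 255, 0), 3)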
Example #2
def _sift(raw_image):
    '''
    Args:
        raw_image (Image)

    Returns:
        (list): the (x, y) coordinates of all detected keypoints
    '''
    # despite the function name, this uses ORB rather than SIFT
    orb = cv2.ORB()
    numpy_image = raw_image.getNumpyCv2()

    # APPLY FEATURES HERE
    all_keypoints, des1 = orb.detectAndCompute(numpy_image,
                                               None)  # Finds ORB keypoints
    keypoints = [keypoint.pt for keypoint in all_keypoints]

    return keypoints
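
A hypothetical usage sketch, assuming raw_image is a SimpleCV-style Image (the getNumpyCv2() call above implies that wrapper; the file name is an assumption):

from SimpleCV import Image  # assumption: the Image type expected by _sift

pts = _sift(Image('photo.jpg'))  # assumed path
print(len(pts), "keypoints found")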
Example #3
def processing_part3(flann, gray_roi, gray_img):
    orb = cv2.ORB(nfeatures=100, nlevels=4, scaleFactor=1.3)
    key_points, query_descriptors = orb.detectAndCompute(gray_roi, None)
    # match against the descriptors added to the trained FLANN index
    matches = flann.knnMatch(np.uint8(query_descriptors), k=6)
    # integer division (//) keeps the vote-grid shape integral on Python 3 too
    vote_array = np.zeros((gray_img.shape[0] // 10, gray_img.shape[1] // 10),
                          dtype=np.uint8)

    for match in matches:
        for index in match:
            # vote vector: the training keypoint's offset-to-centre, scaled by
            # the ratio of keypoint sizes and rotated by the angle difference.
            # (cv2.KeyPoint has no .distance attribute, and DMatch.imgIdx does
            # not index key_points, so the query keypoint's .angle and
            # .queryIdx are used here; note cv2.KeyPoint.angle is in degrees,
            # so calc_center must store its angles in matching units)
            vectorx = (
                key_points[index.queryIdx].size *
                key_points_math_array[index.trainIdx].distance[0] *
                scipy.cos(key_points_math_array[index.trainIdx].distance[2] +
                          key_points[index.queryIdx].angle -
                          key_points_math_array[index.trainIdx].angle)
            ) / key_points_math_array[index.trainIdx].size

            vectory = (
                key_points[index.queryIdx].size *
                key_points_math_array[index.trainIdx].distance[0] *
                scipy.sin(key_points_math_array[index.trainIdx].distance[2] +
                          key_points[index.queryIdx].angle -
                          key_points_math_array[index.trainIdx].angle)
            ) / key_points_math_array[index.trainIdx].size

            kpx = int(
                scipy.divide(key_points[index.queryIdx].pt[0] + vectorx, 10))
            kpy = int(
                scipy.divide(key_points[index.queryIdx].pt[1] + vectory, 10))

            if (kpx > 0 and kpy > 0) and (kpx < vote_array.shape[1]
                                          and kpy < vote_array.shape[0]):
                vote_array[kpy, kpx] += 1

    vote_array = cv2.resize(vote_array,
                            None,
                            fx=10,
                            fy=10,
                            interpolation=cv2.INTER_NEAREST)
    max_index = np.unravel_index(vote_array.argmax(), vote_array.shape)
    # unravel_index returns (row, col); cv2.circle expects an (x, y) point
    position = (max_index[1], max_index[0])
    cv2.circle(gray_img,
               position,
               gray_img.shape[1] // 33, (0, 0, 255),
               thickness=2)
    return gray_img
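
A hypothetical driver tying Example #14's training step (below) to this voting step; the file names are assumptions:

flann = training_part1()              # build the trained FLANN index (Example #14)
roi = cv2.imread('query_roi.jpg', 0)  # assumed path, grayscale
scene = cv2.imread('scene.jpg', 0)    # assumed path, grayscale
cv2.imwrite('voted.jpg', processing_part3(flann, roi, scene))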
Example #4
 def get_useful_picture(self, img):
     orb = cv2.ORB()
     # sift = cv2.SIFT()
     # kp1, des1 = sift.detectAndCompute(img, None)
     result_list = []
     bf = cv2.BFMatcher(cv2.NORM_L2)
     kpa, desa = orb.detectAndCompute(img, None)
     self.storDir_num = self.get_future_num_color(img)
     StorDir = "static/" + "Picture_new/" + self.storDir_num
     if not os.path.exists(StorDir):
         return result_list
     for filename in os.listdir(StorDir):
         try:
             stor = StorDir + "/" + str(filename)
             img1 = cv2.imread(stor)
             kpi, desi = orb.detectAndCompute(img1, None)
             if desi is None:  # "== None" is unreliable for numpy arrays
                 continue
             else:
                 matches = bf.knnMatch(desa, desi, k=2)
                 sub_matchpointnum = 0
                 for m, n in matches:
                     if m.distance < 0.82 * n.distance:
                         sub_matchpointnum += 1
                 if sub_matchpointnum > 20:
                     # kp2, des2 = sift.detectAndCompute(img1, None)
                     # FLANN_INDEX_KDTREE = 0
                     # index_params = dict(algorithm = FLANN_INDEX_KDTREE , tree = 5)
                     # seacher_params = dict (checks = 50)
                     # flann = cv2.FlannBasedMatcher(index_params , seacher_params)
                     # matches1 = flann.knnMatch(des1 , des2 , k =2 )
                     # sub_matchpointnum1 = 0
                     # for m , n in matches1 :
                     # 	if m.distance < 0.80*n.distance:
                     # 		sub_matchpointnum1 += 1
                     # if sub_matchpointnum1 > 20 :
                     uty, uid = self.analyse(stor)
                     result = []
                     stor_new = '/' + stor
                     result.append(stor_new)
                     result.append(uty)
                     result.append(uid)
                     result_list.append(result)
         except Exception as e:
             print("Failed in read picture:", e)
     return result_list
Example #5
def ORB(input_image, stored_image):
    gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)

    orb_detector = cv2.ORB(1600, 1.3)  # nfeatures=1600, scaleFactor=1.3

    (keypoints_1, descriptor_1) = orb_detector.detectAndCompute(gray, None)

    (keypoints_2,
     descriptor_2) = orb_detector.detectAndCompute(stored_image, None)

    brute_force = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    matches_found = brute_force.match(descriptor_1, descriptor_2)

    matches_found = sorted(matches_found, key=lambda val: val.distance)

    return len(matches_found)
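
Because the function only returns a match count, a caller can rank stored images by similarity. A hypothetical sketch (paths and candidate list are assumptions; the stored images are loaded as grayscale since the function only converts input_image):

input_image = cv2.imread('query.jpg')          # assumed path
candidates = ['stored_a.png', 'stored_b.png']  # assumed paths
scores = [ORB(input_image, cv2.imread(p, 0)) for p in candidates]
print("best match:", candidates[scores.index(max(scores))])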
Example #6
def detect(img1, img2):
    global DEVX
    global DEVY
    #   SIFT generally produces better results, but it is not FOSS, so choose the
    #   feature detector that suits the needs of your project.  ORB does OK
    use_sift = False

    if use_sift:
        detector = cv2.SURF()  # note: despite the flag name, this branch builds SURF
    else:
        detector = cv2.ORB(1000)

    # keypoints as kp, descriptors as desc
    kp1, des1 = detector.detectAndCompute(img1, None)
    kp2, des2 = detector.detectAndCompute(img2, None)

    if use_sift:
        if len(kp2) < 5000:
            print "bad pic"
            return
        bf = cv2.BFMatcher()
        # This returns the top two matches for each feature point (list of list)
        pairMatches = bf.knnMatch(des1, des2, k=2)
        rawMatches = []
        for m, n in pairMatches:
            if m.distance < 0.6 * n.distance:
                rawMatches.append(m)
    else:
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        rawMatches = bf.match(des1, des2)

    sortMatches = sorted(rawMatches, key=lambda x: x.distance)
    matches = sortMatches[0:300]

    U1 = []
    U2 = []
    if len(matches) >= 3:
        for mat in matches:

            U1.append(kp1[mat.queryIdx].pt)
            U2.append(kp2[mat.trainIdx].pt)
        return np.asarray(U1), np.asarray(U2)
    else:
        print(len(matches))
        print("match failed, pass")
        return
Example #7
    def checkForBattery(self):
        image = cv2.cvtColor(self.raw_image, cv2.COLOR_BGR2GRAY)
        #laplacian = cv2.Laplacian(image,cv2.CV_32F)
        #cv2.imshow("Laplacian", laplacian)

        for model in self.batteryTemplates:
            #model = cv2.Laplacian(model,cv2.CV_32F)

            w, h = model.shape[::-1]
            ########################## Trying ORB ###############################
            #res = cv2.matchTemplate(image, model, cv2.TM_CCOEFF_NORMED)
            #(_, maxValue, minLoc, maxLoc) = cv2.minMaxLoc(res)
            #mask = np.zeros(image.shape, dtype="uint8")

            #cv2.imshow("Lap", model)
            #loc = np.where( res >= threshold)
            match = False
            orb = cv2.ORB()
            kp = orb.detect(model, None)
            kp, des = orb.compute(model, kp)
            kp1 = orb.detect(image, None)
            kp1, des1 = orb.compute(image, kp1)
            bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)  # NORM_HAMMING is the usual norm for binary ORB descriptors
            clusters = np.array([des])
            bf.add(clusters)

            matches = bf.match(des, des1)

            #thres = (sum(dist)/len(dist))*0.5

            image2 = cv2.drawKeypoints(image, kp1, color=(0, 255, 0), flags=0)
            cv2.imshow("Battery Detection", image2)
            threshold = 1100  # max distance
            if len(matches) > 100:  # minimum number of matches
                # Sort them in the order of their distance.
                matches = sorted(matches, key=lambda x: x.distance)

                if matches[10].distance < threshold:  # filter bad matches
                    print("Found battery")
                    print(kp1[matches[0].trainIdx].pt)  # pixel pos (trainIdx indexes kp1, the camera image)
                    return True

            #for pt in zip(*loc[::-1]):
            #	print("I've found the battery!!")
            #	match=True
        return match
Example #8
    def __init__(self, camera):
        picamera.array.PiMotionAnalysis.__init__(self, camera)

        self.obj_pose_pub = rospy.Publisher('/pidrone/desired/pose',
                                            Pose,
                                            queue_size=1)

        self.detector = cv2.ORB(nfeatures=150, scoreType=cv2.ORB_FAST_SCORE)

        self.curr_obj_coordinates = None
        self.error = None
        self.track_object = False

        self.prev_img = None

        self.alpha = 0.70
        self.z = 0.16
Example #9
def clusterFaces():
    #desList will store descriptor for each frame
    desList = []

    #labeledFrames will help to keep a track of frames for which descriptors were found successfully
    labeledFrames = []

    print "\n\n.........Running k-means clustering on the faces......"
    #Detecting keypoints and descriptors. I use ORB since it is free to use
    orb = cv2.ORB()
    for (counter, img) in enumerate(imageFaces):
        kp = orb.detect(img, None)
        kp, des = orb.compute(img, kp)
        if des is not None:
            desList.append(np.float32(des[0]))  # only the first descriptor of each face is kept as its feature
            labeledFrames.append(counter)

    #z will store the descriptors as a numpy array. z will have the features for k-means clustering for 6 labels
    z = np.asarray(desList)
    k_means = cluster.KMeans(6)
    k_means.fit(z)

    print "\nThe following is the result of the analysis: \n"
    #frameLabelDic will be a dictionary to map a frame to an actor
    frameLabelDic = {}
    #labelCount will give the total number of frames mapped to one actor. 6 mappings in total
    labelCount = {}

    for label in range(6):
        labelCount.update({label: 0})

    for x in range(len(labeledFrames)):
        frameLabelDic.update({labeledFrames[x]: k_means.labels_[x]})
        labelCount[k_means.labels_[x]] += 1

    #pprint(frameLabelDic)
    pprint(labelCount)

    #Now storing faces in directories
    print "\n.........Storing faces \nIn directories: faceData/0 ... faceData/5\n"

    makeDirectoryStructure()
    for faceFrame in frameLabelDic.keys():
        cv2.imwrite(
            faceDataDir + "/%s/%s.jpg" % (frameLabelDic[faceFrame], faceFrame),
            imageFaces[faceFrame])
Example #10
    def __init__(self):
        self.demo_on = False
        self.node_name = "walle_recognizer"

        rospy.init_node(self.node_name)

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.node_name
        cv.NamedWindow("RGB Image", cv.CV_WINDOW_NORMAL)
        cv.MoveWindow("RGB Image", 25, 75)

        # And one for the depth image
        cv.NamedWindow("Depth Image", cv.CV_WINDOW_NORMAL)
        cv.MoveWindow("Depth Image", 25, 350)
        self.known_objects = []
        self.sift = cv2.ORB()  # ORB detector, despite the attribute name

        self.current_depth = None
        # self.current_color = None
        self.current_stage = None

        # Create the cv_bridge object
        self.bridge = CvBridge()

        self.pub = rospy.Publisher("/walle/recognizer/publish", String)
        self.rate = rospy.Rate(10)  # 10hz

        self.last_recognized = None

        # Subscribe to the camera image and depth topics and set
        # the appropriate callbacks

        self.image_sub = rospy.Subscriber("/camera/rgb/image_color", Image,
                                          self.image_callback)
        self.depth_sub = rospy.Subscriber("/camera/depth/image_raw", Image,
                                          self.depth_callback)

        rospy.Subscriber("/walle/recognizer/listen",
                         String,
                         self.receive,
                         queue_size=1)

        rospy.loginfo("Waiting for image topics...")
Example #11
def auto_label_image(image_id, klass):
    results = []
    img = get_image(image_id)
    # Initiate ORB detector
    orb = cv2.ORB()

    # find the keypoints with ORB
    kp = orb.detect(img, None)

    # compute the descriptors with ORB
    kp, des = orb.compute(img, kp)

    for index, keypoint in enumerate(kp):
        descriptor = des[index]
        vector = kp_des2vector(klass, image_id, keypoint, descriptor)
        results.append(vector)
    return results
Example #12
def process_image(imageName, maskName, resultName):

    img = cv2.imread(imageName, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    if maskName is not None:
        mask = cv2.imread(maskName, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        img[np.where((mask > 128))] = 0

    if not os.path.isfile(resultName + ".key_surf"):

        # SURF
        surf = cv2.SURF()
        k_surf, des_surf = surf.detectAndCompute(img, None)

#        img_surf = cv2.drawKeypoints(img, k_surf)
#        cv2.imwrite(resultName + "_keypoints_SURF.jpeg", img_surf)

        points_surf = pickle_keypoints(k_surf, des_surf)
        saveKeypointsDatabase(points_surf, resultName + ".key_surf")


    if not os.path.isfile(resultName + ".key_sift"):
    
        # SIFT
        sift = cv2.SIFT()
        k_sift, des_sift = sift.detectAndCompute(img, None)

#        img_sift = cv2.drawKeypoints(img, k_sift)
#        cv2.imwrite(resultName + "_keypoints_SIFT.jpeg", img_sift)

        points_sift = pickle_keypoints(k_sift, des_sift)
        saveKeypointsDatabase(points_sift, resultName + ".key_sift")


    if not os.path.isfile(resultName + ".key_orb"):

        # ORB
        orb = cv2.ORB()
        k_orb, des_orb = orb.detectAndCompute(img, None)

#        img_orb = cv2.drawKeypoints(img, k_orb)
#        cv2.imwrite(resultName + "_keypoints_ORB.jpeg", img_orb)

        points_orb = pickle_keypoints(k_orb, des_orb)
        saveKeypointsDatabase(points_orb, resultName + ".key_orb")
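
The pickle_keypoints and saveKeypointsDatabase helpers are not shown in this listing. A minimal sketch, under the common assumption that cv2.KeyPoint objects are not picklable and must be flattened to plain tuples first:

import pickle

def pickle_keypoints(keypoints, descriptors):
    # Hypothetical helper: flatten each keypoint's fields next to its descriptor row
    return [(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id,
             descriptors[i]) for i, kp in enumerate(keypoints)]

def saveKeypointsDatabase(points, path):
    # Hypothetical helper: dump the flattened keypoints to disk
    with open(path, 'wb') as f:
        pickle.dump(points, f)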
Example #13
def esix():
    MIN_MATCH_COUNT = 10

    img1 = cv2.imread('/Users/larken/class/computer-vision/a5/box.png',
                      0)  # queryImage
    img2 = cv2.imread(
        '/Users/larken/class/computer-vision/a5/box_in_scene.png',
        0)  # trainImage
    orb = cv2.ORB()

    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)
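
    # --- hypothetical continuation, not in the original snippet ---
    # The tutorial this example mirrors sorts the matches and, once at least
    # MIN_MATCH_COUNT survive, localizes the query image with a RANSAC
    # homography. A sketch under those assumptions:
    matches = sorted(matches, key=lambda m: m.distance)
    if len(matches) >= MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in matches]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img1.shape
        corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
        outline = cv2.perspectiveTransform(corners, M)
        cv2.polylines(img2, [np.int32(outline)], True, 255, 3)
    else:
        print("Not enough matches: %d/%d" % (len(matches), MIN_MATCH_COUNT))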
Example #14
def training_part1():
    os.chdir("./training")
    orb = cv2.ORB(nfeatures=100, nlevels=4, scaleFactor=1.3)
    # FLANN_INDEX_KDTREE = 1 - OK, FLANN_INDEX_LSH    = 6 - error.
    flann = cv2.FlannBasedMatcher(dict(algorithm=6), searchParams=dict())

    for img in glob.glob("*.jpg"):
        gray = cv2.imread(img, 0)
        key_points, descriptors = orb.detectAndCompute(gray, None)
        flann.add([np.uint8(descriptors)])
        flann.train()

        for kp in key_points:
            distance_center = calc_center(kp, gray)
            key_point = KeyPoint(kp.angle, distance_center, kp.size, kp.pt)
            key_points_math_array.append(key_point)

    return flann
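
KeyPoint and calc_center are project-specific helpers that this listing omits, and key_points_math_array is a module-level accumulator shared with Example #3. Judging from the distance[0] / distance[2] lookups in Example #3, a plausible sketch (the tuple layout is an assumption):

import math

def calc_center(kp, gray):
    # Hypothetical helper: vector from the keypoint to the image centre, stored
    # as (magnitude, offset, angle) so that Example #3's distance[0] and
    # distance[2] lookups work. Unit handling (degrees vs. radians) would need
    # to be reconciled with cv2.KeyPoint.angle in a real implementation.
    cx, cy = gray.shape[1] / 2.0, gray.shape[0] / 2.0
    dx, dy = cx - kp.pt[0], cy - kp.pt[1]
    return (math.hypot(dx, dy), (dx, dy), math.atan2(dy, dx))

class KeyPoint(object):
    # Lightweight record mirroring the fields Example #3 reads
    def __init__(self, angle, distance, size, pt):
        self.angle = angle
        self.distance = distance
        self.size = size
        self.pt = pt

key_points_math_array = []  # shared between training_part1 and processing_part3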
Example #15
def findMatchesBetweenImages(image_1, image_2):
    # return the top 10 matches between two input images, sorted by distance
    # ORB object to find keypoints/descriptors to be used for matches
    orb = cv2.ORB()
    image_1_kp, image_1_desc = orb.detectAndCompute(image_1, None)
    image_2_kp, image_2_desc = orb.detectAndCompute(image_2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(image_1_desc, image_2_desc)
    matches = sorted(matches, key=lambda x: x.distance)
    matches = matches[:10]

    return image_1_kp, image_2_kp, matches
Example #16
 def featureMatch(self, current, previous):
     orb = cv2.ORB_create()  # OpenCV 3+ constructor (cv2.ORB() in OpenCV 2.4)
     cv2.ocl.setUseOpenCL(False)
     kp1, des1 = orb.detectAndCompute(current, None)
     kp2, des2 = orb.detectAndCompute(previous, None)
     bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
     matches = bf.match(des1, des2)
     matches = sorted(matches, key=lambda x: x.distance)
     res = cv2.drawMatches(current,
                           kp1,
                           previous,
                           kp2,
                           matches[:],
                           None,
                           flags=2)
     res = cv2.resize(res, (320, 240))
     return res
Example #17
    def test_orb(cls):

        img = cv.imread("../lib/images/face.png", 1)
        img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

        # cv2.ORB() works in OpenCV 2.4; OpenCV 3.1.0 renamed the constructor
        orb = cv.ORB()  # cv.ORB_create() in opencv 3.1.0
        kp = orb.detect(img_gray, None)  # detect on the grayscale image we also compute on
        kp, des = orb.compute(img_gray, kp)

        cv.drawKeypoints(img,
                         kp,
                         img,
                         color=(0, 255, 0),
                         flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        cv.imshow("img", img)
        cv.waitKey(0)
Example #18
def main():
    try:
        img = cv2.imread('img18.png', cv2.IMREAD_COLOR)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        orb = cv2.ORB()  # ORB, despite the original "sift" variable name
        kps, descriptors = orb.detectAndCompute(gray, None)

        cv2.drawKeypoints(img, kps, img, (51, 163, 236),
                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        cv2.namedWindow('sift_keypoints', cv2.WINDOW_KEEPRATIO)
        cv2.imshow('sift_keypoints', img)
        cv2.waitKey()

    except cv2.error as e:
        print("Error found:", e)
Example #19
def corner(path):
    if path == '0':
        path = 0
    cap = cv2.VideoCapture(path)

    while cap.isOpened():
        _, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)

        # Harris corner detector scale variant
        #dst = cv2.cornerHarris(gray,3,3,0.03)
        #dst = cv2.dilate(dst,None)
        #frame[dst>0.01*dst.max()]=[0,0,255]

        # Shi-Tomasi corners (goodFeaturesToTrack), also scale variant
        #corners = cv2.goodFeaturesToTrack(gray,25,0.01,10);
        #for i in corners:
        #	x,y = i.ravel()
        #	cv2.circle(frame,(x,y),3,255,-1)

        #Non maximal suppression with fast detector
        #fast = cv2.FastFeatureDetector()
        #kp = fast.detect(frame,None)
        #frame = cv2.drawKeypoints(frame, kp, color=(255,0,0))

        # CenSurE (STAR) detector with BRIEF descriptors
        #star = cv2.FeatureDetector_create("STAR")
        #brief = cv2.DescriptorExtractor_create("BRIEF")
        #kp = star.detect(frame,None)
        #kp, des = brief.compute(frame, kp)
        #frame = cv2.drawKeypoints(frame, kp, color=(255,0,0))

        # ORB: FAST detector plus BRIEF-style descriptors
        # (creating the detector once, before the loop, would be cheaper)
        orb = cv2.ORB()
        kp = orb.detect(frame, None)
        kp, des = orb.compute(frame, kp)
        frame = cv2.drawKeypoints(frame, kp, color=(255, 0, 0))

        cv2.imshow('morph', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #20
    def __init__(self, video, shot=None, height=200, min_match=20, lookahead=5,
                 verbose=False):
        """
        Parameters
        ----------
        shot : iterable, optional
            Segment iterator.

        """
        super(Thread, self).__init__()

        self.video = video
        self.height = height

        # estimate new size from video size and target height
        w, h = self.video._size
        self._resize = (int(self.height), int(w * self.height / h))

        self.lookahead = lookahead
        if shot is None:
            shot = Shot(video)
        self.shot = shot

        self.verbose = verbose

        # ORB (non-patented SIFT alternative) extraction
        if OPENCV == 2:
            self._orb = cv2.ORB()
        elif OPENCV == 3:
            self._orb = cv2.ORB_create()

        # # brute-force ORB matching
        # self._bfmatcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

        # fast approximate nearest neighbour search
        FLANN_INDEX_LSH = 6
        index_params = dict(algorithm=FLANN_INDEX_LSH,
                            table_number=6,
                            key_size=12,
                            multi_probe_level=1)
        search_params = dict(checks=50)
        self._flann = cv2.FlannBasedMatcher(index_params, search_params)

        self.min_match = min_match
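
The extractor and matcher configured above would typically be combined per frame pair with a ratio test. A minimal sketch of such a companion method (not part of the original class; the name and threshold are assumptions):

    def _frames_match(self, frame1, frame2):
        # Hypothetical helper: True when enough ORB features match across frames
        kp1, des1 = self._orb.detectAndCompute(frame1, None)
        kp2, des2 = self._orb.detectAndCompute(frame2, None)
        pairs = self._flann.knnMatch(des1, des2, k=2)
        good = []
        for pair in pairs:
            # the LSH index can return fewer than two neighbours per query
            if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
                good.append(pair[0])
        return len(good) >= self.min_match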
Example #21
def findMatchesBetweenImages(image_1, image_2):
    """ Return the top 10 list of matches between two input images.

  This function detects and computes SIFT (or ORB) from the input images, and
  returns the best matches using the normalized Hamming Distance.

  Args:
    image_1 (numpy.ndarray): The first image (grayscale).
    image_2 (numpy.ndarray): The second image. (grayscale).

  Returns:
    image_1_kp (list): The image_1 keypoints, the elements are of type
                       cv2.KeyPoint.
    image_2_kp (list): The image_2 keypoints, the elements are of type 
                       cv2.KeyPoint.
    matches (list): A list of matches, length 10. Each item in the list is of
                    type cv2.DMatch.

  """
    # matches - type: list of cv2.DMatch
    matches = None
    # image_1_kp - type: list of cv2.KeyPoint items.
    image_1_kp = None
    # image_1_desc - type: numpy.ndarray of numpy.uint8 values.
    image_1_desc = None
    # image_2_kp - type: list of cv2.KeyPoint items.
    image_2_kp = None
    # image_2_desc - type: numpy.ndarray of numpy.uint8 values.
    image_2_desc = None

    orb = cv2.ORB()
    image_1_kp = orb.detect(image_1, None)
    image_1_kp, image_1_desc = orb.compute(image_1, image_1_kp)

    image_2_kp = orb.detect(image_2, None)
    image_2_kp, image_2_desc = orb.compute(image_2, image_2_kp)

    bfMatch = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bfMatch.match(image_1_desc, image_2_desc)
    matches = sorted(matches, key=lambda x: x.distance)
    matches = matches[:10]
    image = drawMatches(image_1, image_1_kp, image_2, image_2_kp, matches)

    return image_1_kp, image_2_kp, matches
Example #22
    def calculate(self, resource):
        """ Append descriptors to ORB h5 table """
        (image_url, mask_url, gobject_url) = resource
        if image_url == '':
            raise FeatureExtractionError(resource, 400,
                                         'Image resource is required')
        if mask_url != '':
            raise FeatureExtractionError(resource, 400,
                                         'Mask resource is not accepted')

        #image_url = BQServer().prepare_url(image_url, remap='display')
        im = image2numpy(image_url, remap='display')
        im = np.uint8(im)

        if gobject_url == '':
            fs = cv2.ORB().detect(im)

        if gobject_url:
            (x, y, size) = gobject2keypoint(gobject_url)
            fs = [cv2.KeyPoint(x, y, size)]  # keypoints

        descriptor_extractor = cv2.DescriptorExtractor_create("ORB")
        (kpts, descriptors) = descriptor_extractor.compute(im, fs)

        if descriptors is None:  # no feature was returned
            raise FeatureExtractionError(resource, 500,
                                         'No feature was calculated')

        x = []
        y = []
        response = []
        size = []
        angle = []
        octave = []

        for k in kpts[:500]:
            x.append(k.pt[0])
            y.append(k.pt[1])
            response.append(k.response)
            size.append(k.size)
            angle.append(k.angle)
            octave.append(k.octave)

        return (descriptors, x, y, response, size, angle, octave)
Example #23
 def test_372(self):
     # 37.2 Brute-force matching of ORB descriptors.
     # In this example we have a query image and a target image, and we use
     # feature matching to locate the query image inside the target image.
     img1 = cv2.imread('box.png', 0)  # queryImage
     img2 = cv2.imread('box_in_scene.png', 0)  # trainImage
     # Initiate ORB detector
     orb = cv2.ORB()
     # find the keypoints and descriptors with ORB
     kp1, des1 = orb.detectAndCompute(img1, None)
     kp2, des2 = orb.detectAndCompute(img2, None)
     # create BFMatcher object
     bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
     # Match descriptors.
     matches = bf.match(des1, des2)
     # Sort them in the order of their distance.
     matches = sorted(matches, key=lambda x: x.distance)
     print("")
Example #24
def findMatchesBetweenImages(image_1, image_2, num_matches):
    """
    Return the top list of matches between two input images.

    NOTE: You will not be graded for this function. This function is almost
          identical to the function in Assignment 7 (we just parametrized the
          number of matches). We expect you to use the function you wrote in
          A7 here.

    Args:
    ----------
        image_1 : numpy.ndarray
            The first image (can be a grayscale or color image)

        image_2 : numpy.ndarray
            The second image (can be a grayscale or color image)

        num_matches : int
            The number of desired matches. If there are not enough, return
            as many matches as you can.

    Returns:
    ----------
        image_1_kp : list[cv2.KeyPoint]
            A list of keypoint descriptors in the first image

        image_2_kp : list[cv2.KeyPoint]
            A list of keypoint descriptors in the second image

        matches : list[cv2.DMatch]
            A list of matches between the keypoint descriptor lists of
            length no greater than num_matches
    """
    orb = cv2.ORB()
    kp1, des1 = orb.detectAndCompute(image_1, None)
    kp2, des2 = orb.detectAndCompute(image_2, None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)

    matches = sorted(matches, key=lambda x: x.distance)

    return kp1, kp2, matches[:num_matches]
Example #25
def orbnav(inDir):
    result = None
    orb = cv2.ORB()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    img1, kp1, des1 = None, None, None
    for name in os.listdir(inDir):
        print(name)
        img2 = cv2.imread(os.path.join(inDir, name))
        kp2, des2 = orb.detectAndCompute(img2, None)
        print(len(kp2))
        if img1 is not None:
            matches = bf.match(des1, des2)
            matches = sorted(matches, key=lambda x: x.distance)
            gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
            img3 = drawMatches(gray1, kp1, gray2, kp2, matches[:10])
            cv2.imwrite("tmp.jpg", img3)
        img1, kp1, des1 = img2, kp2, des2
Example #26
def orb_keypoint_matching(img1, img2):
    # initialize orb detector
    orb = cv2.ORB()
    # detect keypoints and compute descriptors for each image
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # create bfmatcher object with defined parameters
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # gen matches with bf matcher
    matches = bf.match(des1, des2)
    # the matches are sorted by their distance
    matches = sorted(matches, key=lambda x: x.distance)
    # draw list of first matches
    img3 = drawMatches(img1, kp1, img2, kp2, matches)
    # return adjacent images with matching lines
    return img3
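
The drawMatches helper called by Examples #21, #25 and #26 is not included in this listing; OpenCV 2.4 shipped no cv2.drawMatches (it arrived in OpenCV 3), so repositories carried their own. A minimal sketch, assuming BGR or grayscale inputs:

def drawMatches(img1, kp1, img2, kp2, matches):
    # Hypothetical helper: place the images side by side and join matched points
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    out = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
    out[:h1, :w1] = img1 if img1.ndim == 3 else cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    out[:h2, w1:w1 + w2] = img2 if img2.ndim == 3 else cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for m in matches:
        p1 = tuple(int(c) for c in kp1[m.queryIdx].pt)
        p2 = tuple(int(c) for c in kp2[m.trainIdx].pt)
        cv2.line(out, p1, (p2[0] + w1, p2[1]), (0, 255, 0), 1)
    return out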
Example #27
    def __check_change_orb__(self, image, last_image):
        """
        Computes ORB fetures on last saved slide and given image.
        Tries to match features of these images and calculates ratio
        of matched features with all found features in last saved slide

        Args:
            image(numpy.ndarray): Image from which to get features
            last_image(numpy.ndarray): Image which we want to compare

        Returns:
            float: Similararity between last saved image and given image
        """
        orb = cv.ORB()
        kp1, ds1 = orb.detectAndCompute(self.last_image, None)
        kp2, ds2 = orb.detectAndCompute(image, None)
        bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
        matches = bf.match(ds1, ds2)
        return float(len(matches)) / len(kp1)
Example #28
def image_local_features(image):
    # read the image:
    #img = cv2.imread(image)
    # resize the image:
    if image is not None:

        # line that works around an OpenCV bug under Python 3
        #cv2.ocl.setUseOpenCL(False)

        # Create the ORB object with up to 200k keypoints (a parameter we can
        # tune so the program is not overloaded)
        orb = cv2.ORB(200000)

        # Detect the keypoints:
        kp_o = orb.detect(image, None)

        # Compute the descriptors from the detected keypoints.
        kp, des = orb.compute(image, kp_o)

        return des
Example #29
    def __init__(self, camera, bridge):
        picamera.array.PiMotionAnalysis.__init__(self, camera)
        self.bridge = bridge
        self.br = tf.TransformBroadcaster()

        self.posepub = rospy.Publisher('/pidrone/picamera/pose',
                                       PoseStamped,
                                       queue_size=1)
        self.first_image_pub = rospy.Publisher("/pidrone/picamera/first_image",
                                               Image,
                                               queue_size=1,
                                               latch=True)

        self.detector = cv2.ORB(nfeatures=NUM_FEATURES,
                                scoreType=cv2.ORB_FAST_SCORE)

        map_grid_kp, map_grid_des = create_map('map.jpg')
        self.estimator = LocalizationParticleFilter(map_grid_kp, map_grid_des)

        # [x, y, z, yaw]
        self.pos = [0, 0, 0, 0]
        self.posemsg = PoseStamped()

        self.angle_x = 0.0
        self.angle_y = 0.0
        # localization does not estimate z
        self.z = 0.0

        self.first_locate = True
        self.locate_position = False

        self.prev_img = None
        self.prev_kp = None
        self.prev_des = None
        self.prev_time = None
        self.prev_rostime = None

        self.map_counter = 0
        self.max_map_counter = 0

        # constant
        self.alpha_yaw = 0.1  # perceived yaw smoothing alpha
        self.hybrid_alpha = 0.3  # blend position with first frame and int
Example #30
    def __init__(self,
                 topic_name,
                 is_depth=False,
                 skip_frame=1,
                 directory_name='./',
                 bag_file=''):
        rospy.logwarn("INIT IMAGE ORB SERIALIZER")
        super(ImageORBSerializer, self).__init__(topic_name, skip_frame,
                                                 directory_name, bag_file)

        self.file_ext = ".png"
        self.orb_filename_base = self.dir_name + bag_file.split("/")[-1][:-4] + "/" + \
                                 topic_name[1:].replace("/", "_") + "_orb/{:08d}"
        self.orb_file_ext = ""

        self.is_depth = is_depth
        rospy.Subscriber(topic_name, Image, self.callback)

        self.orb_descriptor = cv2.ORB()