Example 1
from cv2 import cvtColor, COLOR_BGR2GRAY, FlannBasedMatcher, xfeatures2d
from numpy import float32, zeros, vstack, ones


def get_points(c_img1, c_img2):

    # convert to gray
    img1 = cvtColor(c_img1, COLOR_BGR2GRAY)
    img2 = cvtColor(c_img2, COLOR_BGR2GRAY)
    surf = xfeatures2d.SURF_create()  # initiate SURF detector (opencv-contrib)
    # find the key points and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 1  # in FLANN's enum, 0 would select a linear scan
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)
    search_params = dict(checks=50)

    flann = FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    pts1 = float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    pts2 = float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # get the color of every key point
    color1 = zeros((len(pts1), 1, 3))
    color2 = zeros((len(pts1), 1, 3))
    color = zeros((len(pts1), 1, 3), dtype=int)

    for i in range(len(pts1)):
        color1[i] = c_img1[int(pts1[i][0][1]), int(pts1[i][0][0])]
        color2[i] = c_img2[int(pts2[i][0][1]), int(pts2[i][0][0])]
        color[i] = (color1[i] + color2[i]) / 2  # average of the two colors

    # convert the 2D features into homogeneous coordinates (3 x N arrays)
    pt1 = pts1.reshape((pts1.shape[0], 2)).T
    pt1 = vstack((pt1, ones(pt1.shape[1])))

    pt2 = pts2.reshape((pts2.shape[0], 2)).T
    pt2 = vstack((pt2, ones(pt2.shape[1])))

    return pt1, pt2, color
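
A minimal usage sketch for the function above (the file names are hypothetical; SURF needs an opencv-contrib build):

from cv2 import imread

# hypothetical input images; any two overlapping color views work
left = imread('view_left.png')
right = imread('view_right.png')

pt1, pt2, color = get_points(left, right)
print(pt1.shape)    # (3, N): homogeneous coordinates of matches in image 1
print(pt2.shape)    # (3, N): corresponding points in image 2
print(color.shape)  # (N, 1, 3): averaged BGR color per matched point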
Example 2
import logging

import numpy as np

logger = logging.getLogger(__name__)


def fit_cv2(data, algorithm):
    # NOTE: the `algorithm` argument is unused; the KD-tree index is hard-coded.
    logger.info('Fitting cv2 FLANN...')
    from cv2 import FlannBasedMatcher
    FLANN_INDEX_KDTREE = 1  # in FLANN's enum, 0 would select a linear scan
    index_params = {
        'algorithm': FLANN_INDEX_KDTREE,
        'trees': 5,
        # optional autotuning parameters, left disabled:
        # 'target_precision': 0.9,
        # 'build_weight': 0.01,
        # 'memory_weight': 0,
        # 'sample_fraction': 0.1,
    }
    search_params = {'checks': 5}
    flann = FlannBasedMatcher(index_params, search_params)
    flann.add([np.float32(data)])  # add() expects a list of descriptor arrays
    flann.train()                  # build the index from the added descriptors
    return flann
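
A minimal usage sketch for fit_cv2, assuming synthetic descriptors (real code would pass SIFT/SURF feature descriptors):

import numpy as np

train_desc = np.random.rand(500, 128).astype(np.float32)  # stand-in for SIFT
query_desc = np.random.rand(20, 128).astype(np.float32)

flann = fit_cv2(train_desc, algorithm=1)
matches = flann.knnMatch(query_desc, k=2)  # search the pre-built index
print(len(matches))  # one pair of nearest neighbours per query descriptor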
Example 3
from cv2 import FlannBasedMatcher
from numpy import array


def match_descriptors(descriptors1,
                      descriptors2,
                      matcher='flann',
                      max_ratio=0.8):
    """Match two sets of descriptors and filter them with Lowe's ratio test.

    Args:
        descriptors1: Query descriptors (float32 array, one row per feature).
        descriptors2: Train descriptors (float32 array, one row per feature).
        matcher: Matching backend; only 'flann' is supported.
        max_ratio: Maximum ratio of best to second-best match distance.

    Returns:
        Array of [query_index, train_index] pairs for the good matches.

    [1] https://stackoverflow.com/questions/30716610/how-to-get-pixel-coordinates-from-feature-matching-in-opencv-python
    """

    if matcher == 'flann':
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1  # in FLANN's enum, 0 would select a linear scan

        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # or pass an empty dictionary

        flann = FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(descriptors1, descriptors2, k=2)
    else:
        raise ValueError("unsupported matcher: {}".format(matcher))

    # Keep only the good matches, using the ratio test from Lowe's paper.
    good_matches = []
    for m, n in matches:
        if m.distance < max_ratio * n.distance:
            # keep the indexes of the matched keypoints
            good_matches.append([m.queryIdx, m.trainIdx])
    good_matches = array(good_matches)

    return good_matches
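
A short sketch of calling match_descriptors with SIFT descriptors (the image paths are hypothetical):

import cv2

img1 = cv2.imread('scene.jpg', cv2.IMREAD_GRAYSCALE)   # hypothetical paths
img2 = cv2.imread('object.jpg', cv2.IMREAD_GRAYSCALE)

sift = cv2.SIFT_create()  # in older builds: cv2.xfeatures2d.SIFT_create()
_, des1 = sift.detectAndCompute(img1, None)
_, des2 = sift.detectAndCompute(img2, None)

pairs = match_descriptors(des1, des2, matcher='flann', max_ratio=0.8)
print(pairs.shape)  # (N, 2): query/train keypoint indexes of good matches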
Example 4
    def algorithm_SURF(self,
                       photo,
                       screen,
                       screen_colored,
                       hessianThreshold=3500,
                       descMatcher=1):
        # Assumes module-level imports:
        #   import time, logging
        #   import numpy as np
        #   from cv2 import (FlannBasedMatcher, findHomography, RANSAC,
        #                    perspectiveTransform, imwrite)
        #   from cv2.xfeatures2d import SURF_create  # opencv-contrib build

        t1 = time.perf_counter()

        # Init algorithm
        surf = SURF_create(hessianThreshold)
        surf.setUpright(True)  # upright SURF: skip orientation computation

        t2 = time.perf_counter()

        self.writeLog('Created SURF object - {}ms'.format(
            self.formatTimeDiff(t1, t2)))

        # Detect and compute
        kp_photo, des_photo = surf.detectAndCompute(photo, None)
        kp_screen, des_screen = surf.detectAndCompute(screen, None)

        t3 = time.perf_counter()
        self.writeLog('Detected keypoints - {}ms'.format(
            self.formatTimeDiff(t2, t3)))

        # Descriptor Matcher
        try:
            index_params = dict(algorithm=descMatcher, trees=5)
            search_params = dict(checks=50)
            flann = FlannBasedMatcher(index_params, search_params)
        except:
            return False

        t4 = time.perf_counter()
        self.writeLog('Initialized Flann Matcher - {}ms'.format(
            self.formatTimeDiff(t3, t4)))

        # Compute the knn matches
        try:
            matches = flann.knnMatch(des_photo, des_screen, k=2)
        except Exception:
            return False

        logging.info('knn {}'.format(len(matches)))
        t5 = time.perf_counter()
        self.writeLog('Computed knn matches - {}ms'.format(
            self.formatTimeDiff(t4, t5)))

        if not matches:
            return False

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)

        logging.info('good {}'.format(len(good)))
        t6 = time.perf_counter()
        self.writeLog('Filtered good matches - {}ms'.format(
            self.formatTimeDiff(t5, t6)))

        if not good or len(good) < 10:
            return False

        photo_pts = np.float32([kp_photo[m.queryIdx].pt
                                for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        screen_pts = np.float32([kp_screen[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args

        M, _ = findHomography(photo_pts, screen_pts, RANSAC, 5.0)

        t7 = time.perf_counter()
        self.writeLog('Found Homography - {}ms'.format(
            self.formatTimeDiff(t6, t7)))

        if M is None or not M.any() or len(M) == 0:
            return False

        h, w = photo.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        dst = perspectiveTransform(pts, M)

        t8 = time.perf_counter()
        self.writeLog('Perspective Transform - {}ms'.format(
            self.formatTimeDiff(t7, t8)))

        # Bounding box of the projected corners, clamped to the image origin
        minX = max(int(dst[:, 0, 0].min()), 0)
        maxX = int(dst[:, 0, 0].max())
        minY = max(int(dst[:, 0, 1].min()), 0)
        maxY = int(dst[:, 0, 1].max())

        logging.info('minY {}'.format(minY))
        logging.info('minX {}'.format(minX))
        logging.info('maxY {}'.format(maxY))
        logging.info('maxX {}'.format(maxX))

        if maxX - minX <= 0:
            return False
        if maxY - minY <= 0:
            return False

        imwrite(self.match_dir + '/result.png', screen_colored[minY:maxY,
                                                               minX:maxX])

        t9 = time.perf_counter()
        self.writeLog('Wrote Image - {}ms'.format(self.formatTimeDiff(t8, t9)))

        return True
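
The FLANN index parameters differ by descriptor type: SIFT/SURF produce float descriptors and use the KD-tree index, while ORB/BRISK produce binary descriptors and need the LSH index. A short reference sketch (the values follow FLANN's enum, which the examples here rely on):

# Float descriptors (SIFT, SURF): KD-tree index
FLANN_INDEX_KDTREE = 1  # note: 0 is FLANN_INDEX_LINEAR (brute-force scan)
kdtree_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)

# Binary descriptors (ORB, BRISK): locality-sensitive hashing
FLANN_INDEX_LSH = 6
lsh_params = dict(algorithm=FLANN_INDEX_LSH,
                  table_number=6, key_size=12, multi_probe_level=1)

search_params = dict(checks=50)  # more checks = better accuracy, slower search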
Example 5
    def getmarker(self, image: object):
        # Assumes module-level imports:
        #   import os
        #   from timeit import default_timer as timer
        #   from pickle import load
        #   from numpy import argmax
        #   from cv2 import FlannBasedMatcher, BRISK_create
        # get_points() and get_parameters() are project helpers (not shown).

        def findmatch(image_file):
            print('\nprocessing', str(image_file))
            start = timer()

            desc_1 = get_points(image_file, "", size, brisk)

            if desc_1 is not None:
                titles = []
                similarity = []
                titles_append = titles.append
                similarity_append = similarity.append

                for title, desc_2, len_desc_2 in root:
                    good_points = 0
                    matches = flann.knnMatch(desc_1, desc_2, k=2)

                    # Lowe's ratio test; LSH may return fewer than 2 neighbours
                    for m_n in matches:
                        if len(m_n) != 2:
                            continue
                        elif m_n[0].distance < self.threshold * m_n[1].distance:
                            good_points += 1

                    percentage_similarity = good_points / len_desc_2 * 100

                    if percentage_similarity > 2:
                        titles_append(title)
                        similarity_append(percentage_similarity)

                if similarity:
                    idx = argmax(similarity)
                    print("Info: " + str(titles[idx]))

                    end = timer()
                    print('find_match_time: ', (end - start))

                    if not self.isfolder:
                        return str(titles[idx])
                else:
                    end = timer()
                    print("{0} no similarity".format(str(self.image)))
                    print('find_match_time: ', (end - start))
            else:
                print("\n{0} has no points".format(str(image)))

            if not self.isfolder:
                return None

        if not os.path.exists(self.descriptions):
            print("\nDid not find file {}".format(self.descriptions))
            return None

        with open(self.descriptions, 'rb') as handle:
            root = load(handle)

        flann = FlannBasedMatcher(self.index_params, {})

        if self.iscover:
            thresh, octaves, size, ext_of_files = get_parameters("covers")
        else:
            thresh, octaves, size, ext_of_files = get_parameters(self.current_book)

        if not thresh:
            return None

        brisk = BRISK_create(thresh, octaves)  # norm = cv.NORM_HAMMING (70,2) 30days

        if self.isfolder:
            files = os.listdir(self.image)
            for f in files:
                if f.endswith(ext_of_files):
                    findmatch(os.path.join(self.image, f))
        else:
            cur_book = findmatch(image)
            return cur_book

        return None
Example 6
    return error
    
if __name__ == "__main__":
    img1 = imread('rect_left.jpeg') 
    img2 = imread('rect_right.jpeg')

    # find the keypoints and descriptors with SIFT
    sift = xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)

    # FLANN parameters for points match
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)
    flann = FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)
    good = []
    pts1 = []
    pts2 = []
    dis_ratio = []
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.3*n.distance:
            good.append(m)
            dis_ratio.append(m.distance/n.distance)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    min_idx = np.argmin(dis_ratio) 
    
    # calculate fundamental matrix and check error
    fundMat = rectify(pts1, pts2)
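
rectify is a user-defined helper not included in this snippet; a common equivalent computes the fundamental matrix directly with OpenCV, e.g. (a sketch, not the author's implementation):

from cv2 import findFundamentalMat, FM_RANSAC

pts1_arr = np.float32(pts1)
pts2_arr = np.float32(pts2)
F, mask = findFundamentalMat(pts1_arr, pts2_arr, FM_RANSAC, 3.0, 0.99)
print(F)  # 3x3 fundamental matrix; mask marks the RANSAC inliers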
Example 7
import cv2
import numpy as np
from cv2 import FlannBasedMatcher, KeyPoint


class Matcher(object):
    def __init__(self):
        # Initiate the ORB detector
        self.orb = cv2.ORB_create()
        # To filter the matches from BFMatcher ourselves, set crossCheck=False
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
        self.Flann_index_lsh = 6  # FLANN_INDEX_LSH, for binary descriptors
        self.index_params = dict(algorithm=self.Flann_index_lsh, table_number=6, key_size=12, multi_probe_level=1)
        self.search_params = dict(checks=50)
        self.flann_matcher = FlannBasedMatcher(self.index_params, self.search_params)
        self.kp1 = KeyPoint()
        self.kp2 = KeyPoint()
        self.desc1 = None
        self.desc2 = None
        self.matches = None
        self.ratio = 0.65
        self.matches1 = None
        self.matches2 = None
        self.good_matches = None
        self.good_kp1 = []
        self.good_kp2 = []
        self.n_matches = 0
        # The following variables are used to work with numpy functions
        self.global_matches = []  # Numpy array
        self.global_kpts1 = []
        self.global_kpts2 = []
        # Create lists where we store the keypoints in their original format for
        # future uses. Also, store the descriptors
        self.curr_kp = []  # List of keypoints
        self.prev_kp = []
        self.curr_dsc = []  # List of descriptors
        self.prev_dsc = []
        self.is_minkp = None
        self.is_minmatches = None
        self.lk_params = dict(
            winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
        )

    def match(self, img_new, img_prev):
        # Compute the matches for the two images using the Brute Force matcher
        # @param img_new: The current image
        # @param img_prev: The reference image
        # Initialize control variables
        self.is_minkp = None
        self.is_minmatches = None
        self.good_kp1 = []
        self.good_kp2 = []
        self.good_matches = None
        self.kp1, self.desc1 = self.orb.detectAndCompute(img_prev, None)
        self.kp2, self.desc2 = self.orb.detectAndCompute(img_new, None)

        # Check the descriptors

        # Are there any keypoints?
        if not self.kp1 or not self.kp2:
            self.is_minkp = False
            print("The list of keypoints is empty")
        else:
            print("The list of keypoints is NOT empty")
            if np.size(self.desc1) == 0 or np.size(self.desc2) == 0:
                print("NO DESCRIPTORS")
                self.is_minkp = False
            else:
                self.is_minkp = True

        if self.is_minkp:
            print("Matching...")
            print(self.is_minkp)
            self.matches1 = self.bf.knnMatch(self.desc1, self.desc2, 2)
            self.matches2 = self.bf.knnMatch(self.desc2, self.desc1, 2)

            # Check now whether there are any matches
            if self.matches1 is None or self.matches2 is None:
                print("There are keypoints, but none of them match")
            else:
                self.is_minmatches = True
                self.good_matches = self.filter_matches(self.matches1, self.matches2)

    def filter_distance(self, matches):
        # Filter the matches based on a distance threshold
        # @param matches: List of matches (matcher objects)
        # Remove matches for which the nearest-neighbour (NN) ratio is
        # greater than the threshold
        dist = []
        sel_matches = []
        thres_dist = 0
        temp_matches = []

        for i in range(len(matches)):
            # Keep only those match objects with two candidate matches:
            if len(matches[i]) == 2:
                for j in range(0, 2):
                    dist.append(matches[i][j].distance)
                    temp_matches.append(matches[i])

        # Now, calculate the threshold:
        if dist:
            thres_dist = (sum(dist) / len(dist)) * self.ratio
        else:
            return

        # Keep only reasonable matches based on the threshold distance:
        for i in range(0, len(temp_matches)):
            if (temp_matches[i][0].distance / temp_matches[i][1].distance) < thres_dist:

                sel_matches.append(temp_matches[i])
        print "matches after distance", len(sel_matches)

        return sel_matches

    def filter_asymmetric(self, matches1, matches2):
        # Filter the matches with the symmetry test.
        # @param matches1: First list of matches
        # @param matches2: Second list of matches
        # @return sel_matches: filtered matches
        # Keep only symmetric matches
        sel_matches = []
        # For every match in the forward direction, we remove those that aren't
        # found in the other direction
        for match1 in matches1:
            for match2 in matches2:
                # If matches are symmetrical:
                if (match1[0].queryIdx) == (match2[0].trainIdx) and (match2[0].queryIdx) == (match1[0].trainIdx):
                    # We keep only symmetric matches and store the keypoints
                    # of this matches
                    sel_matches.append(match1)
                    self.good_kp2.append(self.kp2[match1[0].trainIdx].pt)
                    self.good_kp1.append(self.kp1[match1[0].queryIdx].pt)
                    # Store also the keypoints in original form
                    self.curr_kp.append(self.kp1[match1[0].queryIdx])
                    self.prev_kp.append(self.kp2[match1[0].trainIdx])
                    self.curr_dsc.append(self.desc1[match1[0].queryIdx])
                    self.prev_dsc.append(self.desc2[match1[0].trainIdx])
                    break

        # Each match was appended twice upstream (filter_distance appends the
        # pair once per candidate), so keep every other entry

        self.good_kp1 = self.good_kp1[::2]
        self.good_kp2 = self.good_kp2[::2]
        sel_matches = sel_matches[::2]
        print("matches after symmetric filter", len(sel_matches))

        return sel_matches

    def filter_matches(self, matches1, matches2):
        # This function filter two list of matches based on the distance
        # threshold and the symmetric test
        # @param matches1: First list of matches
        # @param matches2: Second list of matches
        # @return good_matches: List of filtered matches

        matches1 = self.filter_distance(matches1)
        if matches1:
            print("Matches1 after distance filter: {}".format(len(matches1)))
        matches2 = self.filter_distance(matches2)
        if matches2:
            print("Matches2 after distance filter: {}".format(len(matches2)))
        print("Ended distance filtering")
        if not matches1 or not matches2:
            print("No matches1 or no matches2")
            self.is_minmatches = False
        else:
            self.is_minmatches = True
        print(self.is_minmatches)
        if self.is_minmatches:
            print("Filtering asymmetric matches...")
            good_matches = self.filter_asymmetric(matches1, matches2)
            return good_matches
        else:
            return None

    def match_flann(self, img_new, img_prev):
        # Compute matches for the two images based on Flann.
        # @param img_new: Current frame
        # @param img_prev: Reference frame
        # First, keypoints and descriptors for both images
        self.kp1, self.desc1 = self.orb.detectAndCompute(img_new, None)
        self.kp2, self.desc2 = self.orb.detectAndCompute(img_prev, None)
        # Next, match:
        print "kp1", len(self.kp1)
        print "kp2", len(self.kp2)
        if self.kp1 and self.kp2:
            matches1 = self.flann_matcher.knnMatch(self.desc1, self.desc2, k=2)
            print "matches1", len(matches1)

            matches2 = self.flann_matcher.knnMatch(self.desc2, self.desc1, k=2)
            print "matches2", len(matches2)

            self.good_matches = self.filter_matches(matches1, matches2)
        else:
            print "No matches found"

    def draw_matches(self, img, matches):
        # Draw matches in the last image
        # @param img: image
        # @param matches: a matcher object (opencv)
        # @param kp1: keypoints of the old frame
        # @param kp2: keypoints of the new frame
        # @return img: image with lines between correlated points
        for i in range(len(matches)):
            idtrain = matches[i][0].trainIdx
            idquery = matches[i][0].queryIdx

            point_train = self.kp2[idtrain].pt
            point_query = self.kp1[idquery].pt

            point_train = self.transform_float_int_tuple(point_train)
            point_query = self.transform_float_int_tuple(point_query)

            cv2.line(img, ((point_train[0]), (point_train[1])), ((point_query[0]), (point_query[1])), (255, 0, 0))

        return img

    def draw_matches_np(self, img, kpts_c, kpts_p):
        # Draw the matches in the image img, taking a numpy array as input
        # @param img: image
        # @param kpts_c: keypoints in the current image (numpy ndarray)
        # @param kpts_p: keypoints in the previous image (numpy ndarray)
        # @return img: image with lines between correlated points
        for i in range(len(kpts_c)):
            # cv2.line expects integer pixel coordinates
            cv2.line(img, (int(kpts_c[i][0]), int(kpts_c[i][1])),
                     (int(kpts_p[i][0]), int(kpts_p[i][1])), (255, 0, 0))

        return img

    def draw_outliers_np(self, img, kpts_c, kpts_p):
        # Draw the outliers in the image img, taking a numpy array as input
        # @param img: image
        # @param kpts_c: keypoints in the current image (numpy ndarray)
        # @param kpts_p: keypoints in the previous image (numpy ndarray)
        # @return img: image with red lines between correlated points
        for i in range(len(kpts_c)):
            # cv2.line expects integer pixel coordinates
            cv2.line(img, (int(kpts_c[i][0]), int(kpts_c[i][1])),
                     (int(kpts_p[i][0]), int(kpts_p[i][1])), (0, 0, 255))

        return img

    def transform_float_int_tuple(self, input_tuple):
        output_tuple = [0, 0]
        if input_tuple is not None:
            for i in range(0, len(input_tuple)):
                output_tuple[i] = int(input_tuple[i])
        else:
            return input_tuple

        return output_tuple

    def append_matches(self):
        # Store all matches in one list
        for i in range(len(self.good_matches)):
            self.global_matches.append(self.good_matches[i])

    def append_keypoints1(self):
        # Store keypoints from the current image in one list
        for i in range(len(self.good_kp1)):
            self.global_kpts1.append(self.good_kp1[i])

    def append_keypoints2(self):
        # Store keypoints from the current image in one list
        for i in range(len(self.good_kp2)):
            self.global_kpts2.append(self.good_kp2[i])

    def append_global(self):
        self.append_keypoints1()
        self.append_keypoints2()
        self.append_matches()

    def sum_coord(self, x_ini, y_ini):
        # To get the true image coordinates of the keypoints, rather than
        # coordinates relative to the ROIs, add the ROI origin to every
        # keypoint

        for i in range(len(self.good_kp1)):
            self.good_kp1[i] = list(self.good_kp1[i])
            self.good_kp2[i] = list(self.good_kp2[i])
            self.good_kp1[i][0] += x_ini
            self.good_kp1[i][1] += y_ini
            self.good_kp2[i][0] += x_ini
            self.good_kp2[i][1] += y_ini
            self.good_kp1[i] = np.array(self.good_kp1[i]).reshape(2)
            self.good_kp2[i] = np.array(self.good_kp2[i]).reshape(2)

    def lktracker(self, img_prev, img_curr, prev_points):
        # Tracks the prev_points in the current image.
        # @param img_prev: image, the previous image
        # @param img_curr: the current image
        # @param prev_points: vector of points in the previous image
        list_tracks = []
        curr_points, st, err = cv2.calcOpticalFlowPyrLK(img_prev, img_curr, prev_points, None, **self.lk_params)
        print("LK current points", len(curr_points))

        prev_points2, st, err = cv2.calcOpticalFlowPyrLK(img_curr, img_prev, curr_points, None, **self.lk_params)
        print("LK prev points", len(prev_points))
        d = abs(prev_points - prev_points2).reshape(-1, 2).max(-1)
        print("d", d)
        good = d < 2
        # print "good", good
        for (x, y), good_flag in zip(curr_points.reshape(-1, 2), good):
            if not good_flag:
                continue
            list_tracks.append((x, y))

        return good, prev_points2, list_tracks
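
A hedged usage sketch for the Matcher class above (the frame paths are hypothetical):

import cv2

# hypothetical frames; any two consecutive grayscale video frames work
prev_frame = cv2.imread('frame_000.png', cv2.IMREAD_GRAYSCALE)
curr_frame = cv2.imread('frame_001.png', cv2.IMREAD_GRAYSCALE)

matcher = Matcher()
matcher.match(curr_frame, prev_frame)  # brute force + distance/symmetry filters
if matcher.good_matches:
    out = matcher.draw_matches(cv2.cvtColor(curr_frame, cv2.COLOR_GRAY2BGR),
                               matcher.good_matches)
    cv2.imwrite('matches.png', out)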