Exemplo n.º 1
0
def lktrack(img1,
            img2,
            ptsI,
            nPtsI,
            winsize_ncc=10,
            win_size_lk=4,
            method=cv.CV_TM_CCOEFF_NORMED):
    """
    **SUMMARY**
    (Dev Zone)
    Pyramidal Lucas-Kanade tracker with forward-backward verification.

    **PARAMETERS**

    img1 - Previous image, i.e. the one containing the known bounding box (numpy array)
    img2 - Current image
    ptsI - Flat list of points to track from the first image:
           ptsI[0] = x1, ptsI[1] = y1, ptsI[2] = x2, ...
    nPtsI - Number of (x, y) points encoded in ptsI
    winsize_ncc - Size of the template window used for normalized cross correlation (int)
    win_size_lk - Size of the LK search window at each pyramid level (int)
    method - Comparison method for normalized cross correlation
             (see http://opencv.itseez.com/modules/imgproc/doc/object_detection.html?highlight=matchtemplate#cv2.matchTemplate)

    **RETURNS**

    fb - Forward-backward confidence values (euclidean distance between the
         original points and the forward-tracked points; -1 for failed tracks).
    ncc - normCrossCorrelation values (-1 for failed tracks).
    status - Per-point track status. 1 = PosTrack, 0 = NegTrack.
    ptsJ - Flat list of tracked point coordinates in the second image
           (-1 entries where tracking failed).
    """
    ptsJ = [-1] * len(ptsI)

    # Seed all three point arrays with the same starting coordinates:
    # template (frame 1), target (forward result) and fb (backward result).
    seeds = [(ptsI[2 * k], ptsI[2 * k + 1]) for k in range(nPtsI)]
    template_pt = np.asarray(seeds, dtype="float32")
    target_pt = np.asarray(seeds, dtype="float32")
    fb_pt = np.asarray(seeds, dtype="float32")

    # Both LK passes share the same parameters.
    lk_args = dict(
        winSize=(win_size_lk, win_size_lk),
        flags=cv.OPTFLOW_USE_INITIAL_FLOW,
        criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))

    # Forward pass: img1 -> img2.
    target_pt, status, track_error = cv2.calcOpticalFlowPyrLK(
        img1, img2, template_pt, target_pt, **lk_args)

    # Backward pass: img2 -> img1, seeded from the forward result.
    fb_pt, status_bt, track_error_bt = cv2.calcOpticalFlowPyrLK(
        img2, img1, target_pt, fb_pt, **lk_args)

    # A point is a positive track only if both passes succeeded.
    status = status & status_bt
    ncc = normCrossCorrelation(img1, img2, template_pt, target_pt, status,
                               winsize_ncc, method)
    fb = euclideanDistance(template_pt, target_pt)

    # Failed tracks keep the sentinel value -1.
    newfb = -1 * np.ones(len(fb))
    newncc = -1 * np.ones(len(ncc))
    for hit in np.argwhere(status):
        j = hit[0]
        ptsJ[2 * j] = target_pt[j][0]
        ptsJ[2 * j + 1] = target_pt[j][1]
        newfb[j] = fb[j]
        newncc[j] = ncc[j]

    return newfb, newncc, status, ptsJ
Exemplo n.º 2
0
    def __init__(self, img, new_pts, detector, descriptor, templateImg, skp, sd, tkp, td):
        """
        **SUMMARY**

        Initializes all the required parameters and attributes of the class.

        **PARAMETERS**

        * *img* - SimpleCV.Image
        * *new_pts* - List of all the tracking points found in the image. - list of cv2.KeyPoint
        * *detector* - SURF detector - cv2.FeatureDetector
        * *descriptor* - SURF descriptor - cv2.DescriptorExtractor
        * *templateImg* - Template Image (First image) - SimpleCV.Image
        * *skp* - image keypoints - list of cv2.KeyPoint
        * *sd* - image descriptor - numpy.ndarray
        * *tkp* - Template Image keypoints - list of cv2.KeyPoint
        * *td* - Template image descriptor - numpy.ndarray

        **RETURNS**

        SimpleCV.Tracking.TrackClass.SURFTrack object

        **EXAMPLE**
        >>> track = SURFTracker(image, pts, detector, descriptor, temp, skp, sd, tkp, td)
        """
        # NOTE: the original code wrote ``self = Track.__init__(self, img, bb)``.
        # A conventional __init__ returns None, so that rebound the local
        # ``self`` to None and every subsequent ``self.attr = ...`` in the
        # fallback branches raised AttributeError. Track.__init__ is now
        # called for its side effects only.
        if td is None:
            # No template descriptors at all: degenerate 1x1 box, no state kept.
            Track.__init__(self, img, (1, 1, 1, 1))
            return

        if len(new_pts) < 1 or sd is None:
            # No tracked points (or no descriptors in the current frame):
            # keep the SURF state so the caller can retry, but mark the
            # tracked points as unknown. Both original branches set exactly
            # the same attributes, so they are merged here.
            Track.__init__(self, img, (1, 1, 1, 1))
            self.pts = None
            self.templateImg = templateImg
            self.skp = skp
            self.sd = sd
            self.tkp = tkp
            self.td = td
            self.detector = detector
            self.descriptor = descriptor
            return

        np_pts = np.asarray([kp.pt for kp in new_pts])
        # K=1 k-means over the tracked points; the returned centre is unused
        # downstream (the bounding box comes from the min/max extents below).
        t, pts, center = cv2.kmeans(np.asarray(np_pts, dtype=np.float32), K=1, bestLabels=None,
                            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10), attempts=1,
                            flags=cv2.KMEANS_RANDOM_CENTERS)
        max_x = int(max(np_pts[:, 0]))
        min_x = int(min(np_pts[:, 0]))
        max_y = int(max(np_pts[:, 1]))
        min_y = int(min(np_pts[:, 1]))

        # Pad the keypoint extent slightly to form the new bounding box.
        bb = (min_x - 5, min_y - 5, max_x - min_x + 5, max_y - min_y + 5)

        Track.__init__(self, img, bb)
        self.templateImg = templateImg
        self.skp = skp
        self.sd = sd
        self.tkp = tkp
        self.td = td
        self.pts = np_pts
        self.detector = detector
        self.descriptor = descriptor
Exemplo n.º 3
0
def lktrack(img1, img2, ptsI, nPtsI, winsize_ncc=10, win_size_lk=4, method=cv2.cv.CV_TM_CCOEFF_NORMED):
    """
    **SUMMARY**
    (Dev Zone)
    Pyramidal Lucas-Kanade tracker with forward-backward verification.

    **PARAMETERS**

    img1 - Previous image, i.e. the one containing the known bounding box (numpy array)
    img2 - Current image
    ptsI - Flat list of points to track from the first image:
           ptsI[0] = x1, ptsI[1] = y1, ptsI[2] = x2, ...
    nPtsI - Number of (x, y) points encoded in ptsI
    winsize_ncc - Size of the template window used for normalized cross correlation (int)
    win_size_lk - Size of the LK search window at each pyramid level (int)
    method - Comparison method for normalized cross correlation
             (see http://opencv.itseez.com/modules/imgproc/doc/object_detection.html?highlight=matchtemplate#cv2.matchTemplate)

    **RETURNS**

    fb - Forward-backward confidence values (euclidean distance between the
         original points and the forward-tracked points; -1 for failed tracks).
    ncc - normCrossCorrelation values (-1 for failed tracks).
    status - Per-point track status. 1 = PosTrack, 0 = NegTrack.
    ptsJ - Flat list of tracked point coordinates in the second image
           (-1 entries where tracking failed).
    """
    ptsJ = [-1] * len(ptsI)

    # The template (frame 1), target (forward) and fb (backward) arrays all
    # start from the same coordinates.
    coords = [(ptsI[2 * k], ptsI[2 * k + 1]) for k in range(nPtsI)]
    template_pt = np.asarray(coords, dtype="float32")
    target_pt = np.asarray(coords, dtype="float32")
    fb_pt = np.asarray(coords, dtype="float32")

    # Shared parameters for both LK passes.
    lk_args = dict(winSize=(win_size_lk, win_size_lk),
                   flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
                   criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # Forward pass: img1 -> img2.
    target_pt, status, track_error = cv2.calcOpticalFlowPyrLK(
        img1, img2, template_pt, target_pt, **lk_args)

    # Backward pass: img2 -> img1, seeded from the forward result.
    fb_pt, status_bt, track_error_bt = cv2.calcOpticalFlowPyrLK(
        img2, img1, target_pt, fb_pt, **lk_args)

    # A point survives only if both passes succeeded.
    status = status & status_bt
    ncc = normCrossCorrelation(img1, img2, template_pt, target_pt, status, winsize_ncc, method)
    fb = euclideanDistance(template_pt, target_pt)

    # Sentinel -1 for every point that failed to track.
    newfb = -1 * np.ones(len(fb))
    newncc = -1 * np.ones(len(ncc))
    for hit in np.argwhere(status):
        j = hit[0]
        ptsJ[2 * j] = target_pt[j][0]
        ptsJ[2 * j + 1] = target_pt[j][1]
        newfb[j] = fb[j]
        newncc[j] = ncc[j]

    return newfb, newncc, status, ptsJ
Exemplo n.º 4
0
def surfTracker(img, bb, ts, **kwargs):
    """
    **DESCRIPTION**
    
    (Dev Zone)

    Tracking the object surrounded by the bounding box in the given
    image using SURF keypoints.

    Warning: Use this if you know what you are doing. Better have a 
    look at Image.track()

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.

    Optional PARAMETERS:

    eps_val     - eps for DBSCAN
                  The maximum distance between two samples for them 
                  to be considered as in the same neighborhood. 
                
    min_samples - min number of samples in DBSCAN
                  The number of samples in a neighborhood for a point 
                  to be considered as a core point. 
                  
    distance    - thresholding KNN distance of each feature
                  if KNN distance > distance, point is discarded.

    **RETURNS**

    SimpleCV.Features.Tracking.SURFTracker

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts = surfTracker(img, bb, ts, eps_val=0.7, distance=150)
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts = surfTracker(img, bb, ts, eps_val=0.7, distance=150)
        ... ts[-1].drawBB()
        ... img.show()

    This is too much confusing. Better use
    Image.track() method.

    READ MORE:

    SURF based Tracker:
    Matches keypoints from the template image and the current frame.
    flann based matcher is used to match the keypoints.
    Density based clustering is used classify points as in-region (of bounding box)
    and out-region points. Using in-region points, new bounding box is predicted using
    k-means.
    """
    # BUG FIX: the docstring and the usage example above pass the KNN
    # threshold as ``distance=...``, but the original kwargs loop only
    # recognised the key ``dist`` and silently ignored ``distance``.
    # Accept both spellings (``distance`` wins if both are given).
    eps_val = kwargs.get('eps_val', 0.69)
    min_samples = kwargs.get('min_samples', 5)
    distance = kwargs.get('distance', kwargs.get('dist', 100))

    from scipy.spatial import distance as Dis
    from sklearn.cluster import DBSCAN

    if len(ts) == 0:
        # First frame: extract SURF keypoints/descriptors from the
        # bounding-box crop and keep them as the template.
        bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
        templateImg = img
        detector = cv2.FeatureDetector_create("SURF")
        descriptor = cv2.DescriptorExtractor_create("SURF")

        templateImg_cv2 = templateImg.getNumpyCv2()[bb[1]:bb[1] + bb[3],
                                                    bb[0]:bb[0] + bb[2]]
        tkp = detector.detect(templateImg_cv2)
        tkp, td = descriptor.compute(templateImg_cv2, tkp)

    else:
        # Subsequent frames: reuse the template state from the last track.
        templateImg = ts[-1].templateImg
        tkp = ts[-1].tkp
        td = ts[-1].td
        detector = ts[-1].detector
        descriptor = ts[-1].descriptor

    newimg = img.getNumpyCv2()

    # Keypoints and descriptors of the current frame.
    skp = detector.detect(newimg)
    skp, sd = descriptor.compute(newimg, skp)

    if td is None:
        print("Descriptors are Empty")
        return None

    if sd is None:
        # No descriptors in the current frame: return a track that only
        # carries the template state forward.
        track = SURFTracker(img, skp, detector, descriptor, templateImg, skp,
                            sd, tkp, td)
        return track

    # FLANN (kd-tree) matcher: nearest current-frame descriptor for each
    # template descriptor.
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(sd, flann_params)
    idx, dist = flann.knnSearch(td, 1, params={})
    del flann

    # Filter points using the distance criterion.
    dist = (dist[:, 0] / 2500.0).reshape(-1, ).tolist()
    idx = idx.reshape(-1).tolist()
    indices = sorted(range(len(dist)), key=lambda i: dist[i])

    dist = [dist[i] for i in indices]
    idx = [idx[i] for i in indices]
    skp_final = []
    skp_final_labelled = []
    data_cluster = []

    for i, dis in zip(idx, dist):
        if dis < distance:
            skp_final.append(skp[i])
            data_cluster.append((skp[i].pt[0], skp[i].pt[1]))

    # Use density-based clustering (DBSCAN) on a normalized similarity
    # matrix to further filter out keypoints: only points assigned to
    # cluster 0 are kept.
    n_data = np.asarray(data_cluster)
    D = Dis.squareform(Dis.pdist(n_data))
    S = 1 - (D / np.max(D))

    db = DBSCAN(eps=eps_val, min_samples=min_samples).fit(S)
    labels = db.labels_
    for i, label in enumerate(labels):
        if label == 0:
            skp_final_labelled.append(skp_final[i])

    track = SURFTrack(img, skp_final_labelled, detector, descriptor,
                      templateImg, skp, sd, tkp, td)

    return track
Exemplo n.º 5
0
def surfTracker(img, bb, ts, **kwargs):
    """
    **DESCRIPTION**
    
    (Dev Zone)

    Tracking the object surrounded by the bounding box in the given
    image using SURF keypoints.

    Warning: Use this if you know what you are doing. Better have a 
    look at Image.track()

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.

    Optional PARAMETERS:

    eps_val     - eps for DBSCAN
                  The maximum distance between two samples for them 
                  to be considered as in the same neighborhood. 
                
    min_samples - min number of samples in DBSCAN
                  The number of samples in a neighborhood for a point 
                  to be considered as a core point. 
                  
    distance    - thresholding KNN distance of each feature
                  if KNN distance > distance, point is discarded.

    **RETURNS**

    SimpleCV.Features.Tracking.SURFTracker

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts = surfTracker(img, bb, ts, eps_val=0.7, distance=150)
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts = surfTracker(img, bb, ts, eps_val=0.7, distance=150)
        ... ts[-1].drawBB()
        ... img.show()

    This is too much confusing. Better use
    Image.track() method.

    READ MORE:

    SURF based Tracker:
    Matches keypoints from the template image and the current frame.
    flann based matcher is used to match the keypoints.
    Density based clustering is used classify points as in-region (of bounding box)
    and out-region points. Using in-region points, new bounding box is predicted using
    k-means.
    """
    # BUG FIX: the docstring and the usage example above pass the KNN
    # threshold as ``distance=...``, but the original kwargs loop only
    # recognised the key ``dist`` and silently ignored ``distance``.
    # Accept both spellings (``distance`` wins if both are given).
    eps_val = kwargs.get('eps_val', 0.69)
    min_samples = kwargs.get('min_samples', 5)
    distance = kwargs.get('distance', kwargs.get('dist', 100))

    from scipy.spatial import distance as Dis
    from sklearn.cluster import DBSCAN

    if len(ts) == 0:
        # First frame: extract SURF keypoints/descriptors from the
        # bounding-box crop and keep them as the template.
        bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
        templateImg = img
        detector = cv2.FeatureDetector_create("SURF")
        descriptor = cv2.DescriptorExtractor_create("SURF")

        templateImg_cv2 = templateImg.getNumpyCv2()[bb[1]:bb[1]+bb[3], bb[0]:bb[0]+bb[2]]
        tkp = detector.detect(templateImg_cv2)
        tkp, td = descriptor.compute(templateImg_cv2, tkp)

    else:
        # Subsequent frames: reuse the template state from the last track.
        templateImg = ts[-1].templateImg
        tkp = ts[-1].tkp
        td = ts[-1].td
        detector = ts[-1].detector
        descriptor = ts[-1].descriptor

    newimg = img.getNumpyCv2()

    # Keypoints and descriptors of the current frame.
    skp = detector.detect(newimg)
    skp, sd = descriptor.compute(newimg, skp)

    if td is None:
        # ``print(...)`` instead of the Python-2-only print statement so the
        # function parses under both Python 2 and 3.
        print("Descriptors are Empty")
        return None

    if sd is None:
        # No descriptors in the current frame: return a track that only
        # carries the template state forward.
        track = SURFTracker(img, skp, detector, descriptor, templateImg, skp, sd, tkp, td)
        return track

    # FLANN (kd-tree) matcher: nearest current-frame descriptor for each
    # template descriptor.
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(sd, flann_params)
    idx, dist = flann.knnSearch(td, 1, params={})
    del flann

    # Filter points using the distance criterion.
    dist = (dist[:,0]/2500.0).reshape(-1,).tolist()
    idx = idx.reshape(-1).tolist()
    indices = sorted(range(len(dist)), key=lambda i: dist[i])

    dist = [dist[i] for i in indices]
    idx = [idx[i] for i in indices]
    skp_final = []
    skp_final_labelled = []
    data_cluster = []

    # ``zip`` replaces ``itertools.izip`` (removed in Python 3); inside a
    # for-loop the list/iterator difference is immaterial.
    for i, dis in zip(idx, dist):
        if dis < distance:
            skp_final.append(skp[i])
            data_cluster.append((skp[i].pt[0], skp[i].pt[1]))

    # Use density-based clustering (DBSCAN) on a normalized similarity
    # matrix to further filter out keypoints: only points assigned to
    # cluster 0 are kept.
    n_data = np.asarray(data_cluster)
    D = Dis.squareform(Dis.pdist(n_data))
    S = 1 - (D/np.max(D))

    db = DBSCAN(eps=eps_val, min_samples=min_samples).fit(S)
    labels = db.labels_
    for i, label in enumerate(labels):
        if label == 0:
            skp_final_labelled.append(skp_final[i])

    track = SURFTrack(img, skp_final_labelled, detector, descriptor, templateImg, skp, sd, tkp, td)

    return track
Exemplo n.º 6
0
    def __init__(self, img, new_pts, detector, descriptor, templateImg, skp,
                 sd, tkp, td):
        """
        **SUMMARY**

        Initializes all the required parameters and attributes of the class.

        **PARAMETERS**

        * *img* - SimpleCV.Image
        * *new_pts* - List of all the tracking points found in the image. - list of cv2.KeyPoint
        * *detector* - SURF detector - cv2.FeatureDetector
        * *descriptor* - SURF descriptor - cv2.DescriptorExtractor
        * *templateImg* - Template Image (First image) - SimpleCV.Image
        * *skp* - image keypoints - list of cv2.KeyPoint
        * *sd* - image descriptor - numpy.ndarray
        * *tkp* - Template Image keypoints - list of cv2.KeyPoint
        * *td* - Template image descriptor - numpy.ndarray

        **RETURNS**

        SimpleCV.Tracking.TrackClass.SURFTrack object

        **EXAMPLE**
        >>> track = SURFTracker(image, pts, detector, descriptor, temp, skp, sd, tkp, td)
        """
        # BUG FIX: ``self = Track.__init__(self, img, bb)`` rebound the local
        # ``self`` to the None returned by a conventional __init__, so the
        # attribute assignments in the fallback branches raised
        # AttributeError. Track.__init__ is now invoked for its side effects
        # only, and the two identical fallback branches are merged.
        if td is None:
            # No template descriptors at all: degenerate 1x1 box, no state kept.
            Track.__init__(self, img, (1, 1, 1, 1))
            return

        if len(new_pts) < 1 or sd is None:
            # No tracked points (or no current-frame descriptors): keep the
            # SURF state for retries, but mark the tracked points as unknown.
            Track.__init__(self, img, (1, 1, 1, 1))
            self.pts = None
            self.templateImg = templateImg
            self.skp = skp
            self.sd = sd
            self.tkp = tkp
            self.td = td
            self.detector = detector
            self.descriptor = descriptor
            return

        np_pts = np.asarray([kp.pt for kp in new_pts])
        # K=1 k-means over the tracked points; the returned centre is unused
        # downstream (the bounding box comes from the min/max extents below).
        t, pts, center = cv2.kmeans(
            np.asarray(np_pts, dtype=np.float32),
            K=1,
            bestLabels=None,
            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1,
                      10),
            attempts=1,
            flags=cv2.KMEANS_RANDOM_CENTERS)
        max_x = int(max(np_pts[:, 0]))
        min_x = int(min(np_pts[:, 0]))
        max_y = int(max(np_pts[:, 1]))
        min_y = int(min(np_pts[:, 1]))

        # Pad the keypoint extent slightly to form the new bounding box.
        bb = (min_x - 5, min_y - 5, max_x - min_x + 5, max_y - min_y + 5)

        Track.__init__(self, img, bb)
        self.templateImg = templateImg
        self.skp = skp
        self.sd = sd
        self.tkp = tkp
        self.td = td
        self.pts = np_pts
        self.detector = detector
        self.descriptor = descriptor