Example #1
# Imports assumed by this excerpt (module paths may vary across SimpleCV versions):
#   import itertools
#   import cv2
#   import numpy as np
#   from SimpleCV.Tracking import LKTrack
def lkTracker(img, bb, ts, oldimg, **kwargs):
    """
    **DESCRIPTION**

    (Dev Zone)

    Tracks the object surrounded by the bounding box in the given
    image using the Lucas-Kanade optical flow method.

    Warning: Use this only if you know what you are doing. It is usually
    better to use Image.track().

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.
    * *oldimg* - Image - Previous Image.

    Optional PARAMETERS:
    (docs from http://docs.opencv.org/)

    maxCorners    - Maximum number of corners for goodFeaturesToTrack to return.
                    If more corners than maxCorners are found, only the
                    strongest of them are returned.

    qualityLevel  - Parameter characterizing the minimal accepted quality of image corners. 
                    The parameter value is multiplied by the best corner quality measure, 
                    which is the minimal eigenvalue or the Harris function response. 
                    The corners with the quality measure less than the product are rejected.
                    For example, if the best corner has the quality measure = 1500
                    and qualityLevel = 0.01, then all the corners with a quality
                    measure less than 15 are rejected.

    minDistance   - Minimum possible Euclidean distance between the returned corners.

    blockSize     - Size of an average block for computing a derivative covariation matrix over each pixel neighborhood.

    winSize       - Size of the search window at each pyramid level.

    maxLevel      - 0-based maximal pyramid level number; if set to 0, pyramids
                    are not used (single level); if set to 1, two levels are
                    used, and so on.

    **RETURNS**

    SimpleCV.Features.Tracking.LKTracker

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts.append(lkTracker(img, bb, ts, img, maxCorners=4000, qualityLevel=0.5, winSize=(12,12)))
    >>> prevImg = img
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts.append(lkTracker(img, bb, ts, prevImg, maxCorners=4000, qualityLevel=0.5, winSize=(12,12)))
        ... prevImg = img
        ... ts[-1].drawBB()
        ... img.show()

    This is quite confusing; it is better to use the
    Image.track() method.

    READ MORE:

    LK (Lucas-Kanade) Tracker:
    It is based on LK optical flow. It calculates the optical flow from frame1
    to frame2 and also from frame2 to frame1 and, using the back-track error,
    filters out false positives.
    """
    maxCorners = 4000
    qualityLevel = 0.08
    minDistance = 2
    blockSize = 3
    winSize = (10, 10)
    maxLevel = 10
    for key in kwargs:
        if key == 'maxCorners':
            maxCorners = kwargs[key]
        elif key == 'qualityLevel':
            qualityLevel = kwargs[key]
        elif key == 'minDistance':
            minDistance = kwargs[key]
        elif key == 'blockSize':
            blockSize = kwargs[key]
        elif key == 'winSize':
            winSize = kwargs[key]
        elif key == 'maxLevel':
            maxLevel = kwargs[key]

    bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
    img1 = img.crop(bb[0], bb[1], bb[2], bb[3])
    g = img1.getGrayNumpyCv2()
    pt = cv2.goodFeaturesToTrack(g,
                                 maxCorners=maxCorners,
                                 qualityLevel=qualityLevel,
                                 minDistance=minDistance,
                                 blockSize=blockSize)
    if pt is None:
        print("no points")
        track = LKTrack(img, bb, pt)
        return track

    for i in xrange(len(pt)):
        pt[i][0][0] = pt[i][0][0] + bb[0]
        pt[i][0][1] = pt[i][0][1] + bb[1]

    p0 = np.float32(pt).reshape(-1, 1, 2)
    oldg = oldimg.getGrayNumpyCv2()
    newg = img.getGrayNumpyCv2()
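    # Forward pass (old -> new) and backward pass (new -> old); points whose
    # backward track does not return near their start are rejected below.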
    p1, st, err = cv2.calcOpticalFlowPyrLK(
        oldg,
        newg,
        p0,
        None,
        winSize=winSize,
        maxLevel=maxLevel,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    p0r, st, err = cv2.calcOpticalFlowPyrLK(
        newg,
        oldg,
        p1,
        None,
        winSize=winSize,
        maxLevel=maxLevel,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

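    # Points whose forward-then-backward round trip misses the original
    # location by a pixel or more are treated as false positives and dropped.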
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    good = d < 1
    new_pts = []
    for pts, val in itertools.izip(p1, good):
        if val:
            new_pts.append([pts[0][0], pts[0][1]])
    if ts[-1:]:
        old_pts = ts[-1].pts
        if old_pts is None:
            old_pts = new_pts
    else:
        old_pts = new_pts
    dx = []
    dy = []
    for old_p, new_p in itertools.izip(old_pts, new_pts):
        dx.append(new_p[0] - old_p[0])
        dy.append(new_p[1] - old_p[1])

    if not dx or not dy:
        track = LKTrack(img, bb, new_pts)
        return track

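    # Shift the bounding box by the mean point displacement, damped by a
    # fixed factor of 3 (presumably to smooth out jitter).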
    cen_dx = round(sum(dx) / len(dx)) / 3
    cen_dy = round(sum(dy) / len(dy)) / 3

    bb1 = [bb[0] + cen_dx, bb[1] + cen_dy, bb[2], bb[3]]
    if bb1[0] <= 0:
        bb1[0] = 1
    if bb1[0] + bb1[2] >= img.width:
        bb1[0] = img.width - bb1[2] - 1
    if bb1[1] + bb1[3] >= img.height:
        bb1[1] = img.height - bb1[3] - 1
    if bb1[1] <= 0:
        bb1[1] = 1

    track = LKTrack(img, bb1, new_pts)
    return track
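
The forward-backward ("back track") filtering this tracker relies on can be isolated into a small standalone helper. The sketch below uses only OpenCV and NumPy; the name forward_backward_lk and its defaults are illustrative, not SimpleCV API. It assumes prev_gray and next_gray are single-channel uint8 frames and p0 is a float32 array of shape (N, 1, 2), as returned by cv2.goodFeaturesToTrack.

import cv2
import numpy as np

def forward_backward_lk(prev_gray, next_gray, p0, winSize=(10, 10), maxLevel=3):
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
    # Forward pass: track points from the previous frame into the new frame.
    p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None,
                                           winSize=winSize, maxLevel=maxLevel,
                                           criteria=criteria)
    # Backward pass: track the forward results back into the previous frame.
    p0r, st, err = cv2.calcOpticalFlowPyrLK(next_gray, prev_gray, p1, None,
                                            winSize=winSize, maxLevel=maxLevel,
                                            criteria=criteria)
    # Keep only points whose round trip returns within 1 pixel of the start,
    # the same tolerance the tracker above uses.
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    good = d < 1
    return p1[good], good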
Example #2
# Imports assumed by this excerpt (module paths may vary across SimpleCV versions):
#   import itertools
#   import cv2
#   import numpy as np
#   from SimpleCV.Tracking import LKTrack
def lkTracker(img, bb, ts, oldimg, **kwargs):
    """
    **DESCRIPTION**
    
    (Dev Zone)

    Tracks the object surrounded by the bounding box in the given
    image using the Lucas-Kanade optical flow method.

    Warning: Use this only if you know what you are doing. It is usually
    better to use Image.track().

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.
    * *oldimg* - Image - Previous Image.

    Optional PARAMETERS:
    (docs from http://docs.opencv.org/)

    maxCorners    - Maximum number of corners for goodFeaturesToTrack to return.
                    If more corners than maxCorners are found, only the
                    strongest of them are returned.
                
    qualityLevel  - Parameter characterizing the minimal accepted quality of image corners. 
                    The parameter value is multiplied by the best corner quality measure, 
                    which is the minimal eigenvalue or the Harris function response. 
                    The corners with the quality measure less than the product are rejected.
                    For example, if the best corner has the quality measure = 1500
                    and qualityLevel = 0.01, then all the corners with a quality
                    measure less than 15 are rejected.
                  
    minDistance   - Minimum possible Euclidean distance between the returned corners.

    blockSize     - Size of an average block for computing a derivative covariation matrix over each pixel neighborhood.

    winSize       - Size of the search window at each pyramid level.

    maxLevel      - 0-based maximal pyramid level number; if set to 0, pyramids
                    are not used (single level); if set to 1, two levels are
                    used, and so on.

    **RETURNS**

    SimpleCV.Features.Tracking.LKTracker

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts.append(lkTracker(img, bb, ts, img, maxCorners=4000, qualityLevel=0.5, winSize=(12,12)))
    >>> prevImg = img
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts.append(lkTracker(img, bb, ts, prevImg, maxCorners=4000, qualityLevel=0.5, winSize=(12,12)))
        ... prevImg = img
        ... ts[-1].drawBB()
        ... img.show()

    This is quite confusing; it is better to use the
    Image.track() method.

    READ MORE:

    LK (Lucas-Kanade) Tracker:
    It is based on LK optical flow. It calculates the optical flow from frame1
    to frame2 and also from frame2 to frame1 and, using the back-track error,
    filters out false positives.
    """
    maxCorners = 4000
    qualityLevel = 0.08
    minDistance = 2
    blockSize = 3
    winSize = (10, 10)
    maxLevel = 10
    for key in kwargs:
        if key == 'maxCorners':
            maxCorners = kwargs[key]
        elif key == 'qualityLevel':
            qualityLevel = kwargs[key]
        elif key == 'minDistance':
            minDistance = kwargs[key]
        elif key == 'blockSize':
            blockSize = kwargs[key]
        elif key == 'winSize':
            winSize = kwargs[key]
        elif key == 'maxLevel':
            maxLevel = kwargs[key]

    bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
    img1 = img.crop(bb[0],bb[1],bb[2],bb[3])
    g = img1.getGrayNumpyCv2()
    pt = cv2.goodFeaturesToTrack(g, maxCorners = maxCorners, qualityLevel = qualityLevel,
                                minDistance = minDistance, blockSize = blockSize)
    if pt is None:
        print "no points"
        track = LKTrack(img, bb, pt)
        return track

    for i in xrange(len(pt)):
        pt[i][0][0] = pt[i][0][0]+bb[0]
        pt[i][0][1] = pt[i][0][1]+bb[1]

    p0 = np.float32(pt).reshape(-1, 1, 2)
    oldg = oldimg.getGrayNumpyCv2()
    newg = img.getGrayNumpyCv2()
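    # Forward pass (old -> new) and backward pass (new -> old); points whose
    # backward track does not return near their start are rejected below.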
    p1, st, err = cv2.calcOpticalFlowPyrLK(oldg, newg, p0, None, winSize  = winSize,
                                           maxLevel = maxLevel,
                                           criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    p0r, st, err = cv2.calcOpticalFlowPyrLK(newg, oldg, p1, None, winSize  = winSize,
                                            maxLevel = maxLevel,
                                            criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

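    # Points whose forward-then-backward round trip misses the original
    # location by a pixel or more are treated as false positives and dropped.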
    d = abs(p0-p0r).reshape(-1, 2).max(-1)
    good = d < 1
    new_pts=[]
    for pts, val in itertools.izip(p1, good):
        if val:
            new_pts.append([pts[0][0], pts[0][1]])
    if ts[-1:]:
        old_pts = ts[-1].pts
        if old_pts is None:
            old_pts = new_pts
    else:
        old_pts = new_pts
    dx=[]
    dy=[]
    for old_p, new_p in itertools.izip(old_pts, new_pts):
        dx.append(new_p[0]-old_p[0])
        dy.append(new_p[1]-old_p[1])

    if not dx or not dy:
        track = LKTrack(img, bb, new_pts)
        return track

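    # Shift the bounding box by the mean point displacement, damped by a
    # fixed factor of 3 (presumably to smooth out jitter).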
    cen_dx = round(sum(dx)/len(dx))/3
    cen_dy = round(sum(dy)/len(dy))/3

    bb1 = [bb[0]+cen_dx, bb[1]+cen_dy, bb[2], bb[3]]
    if bb1[0] <= 0:
        bb1[0] = 1
    if bb1[0]+bb1[2] >= img.width:
        bb1[0] = img.width - bb1[2] - 1
    if bb1[1]+bb1[3] >= img.height:
        bb1[1] = img.height - bb1[3] - 1
    if bb1[1] <= 0:
        bb1[1] = 1

    track = LKTrack(img, bb1, new_pts)    
    return track
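
The crop-and-offset step at the top of this tracker (corner detection inside the bounding box, then shifting the corners back into full-image coordinates) can be sketched with plain OpenCV as follows. corners_in_roi and its gray argument are illustrative names, not SimpleCV API; gray is assumed to be a single-channel uint8 frame.

import cv2

def corners_in_roi(gray, bb, maxCorners=4000, qualityLevel=0.08,
                   minDistance=2, blockSize=3):
    # Detect corners inside an (x, y, w, h) region of a grayscale frame.
    x, y, w, h = [int(v) for v in bb]
    roi = gray[y:y + h, x:x + w]
    pts = cv2.goodFeaturesToTrack(roi, maxCorners=maxCorners,
                                  qualityLevel=qualityLevel,
                                  minDistance=minDistance,
                                  blockSize=blockSize)
    if pts is None:  # no corner passed the quality threshold
        return None
    # Shift corner coordinates back into full-image coordinates.
    pts[:, 0, 0] += x
    pts[:, 0, 1] += y
    return pts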
Example #3
# Imports assumed by this excerpt (module paths may vary across SimpleCV versions):
#   import itertools
#   import cv2
#   import numpy as np
#   from SimpleCV.Tracking import SURFTrack
def surfTracker(img, bb, ts, **kwargs):
    """
    **DESCRIPTION**
    
    (Dev Zone)

    Tracks the object surrounded by the bounding box in the given
    image using SURF keypoints.

    Warning: Use this only if you know what you are doing. It is usually
    better to use Image.track().

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.

    Optional PARAMETERS:

    eps_val     - eps for DBSCAN
                  The maximum distance between two samples for them 
                  to be considered as in the same neighborhood. 
                
    min_samples - min number of samples in DBSCAN
                  The number of samples in a neighborhood for a point 
                  to be considered as a core point. 
                  
    distance    - Threshold on the KNN distance of each feature;
                  if KNN distance > distance, the point is discarded.

    **RETURNS**

    SimpleCV.Features.Tracking.SURFTracker

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts.append(surfTracker(img, bb, ts, eps_val=0.7, distance=150))
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts.append(surfTracker(img, bb, ts, eps_val=0.7, distance=150))
        ... ts[-1].drawBB()
        ... img.show()

    This is quite confusing; it is better to use the
    Image.track() method.

    READ MORE:

    SURF based Tracker:
    Matches keypoints from the template image and the current frame.
    A FLANN-based matcher is used to match the keypoints.
    Density-based clustering is used to classify points as in-region (inside
    the bounding box) and out-region points. Using the in-region points, a new
    bounding box is predicted using k-means.
    """
    eps_val = 0.69
    min_samples = 5
    distance = 100

    for key in kwargs:
        if key == 'eps_val':
            eps_val = kwargs[key]
        elif key == 'min_samples':
            min_samples = kwargs[key]
        elif key == 'distance':
            distance = kwargs[key]

    from scipy.spatial import distance as Dis
    from sklearn.cluster import DBSCAN

    if len(ts) == 0:
        # Get template keypoints
        bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
        templateImg = img
        detector = cv2.FeatureDetector_create("SURF")
        descriptor = cv2.DescriptorExtractor_create("SURF")

        templateImg_cv2 = templateImg.getNumpyCv2()[bb[1]:bb[1]+bb[3], bb[0]:bb[0]+bb[2]]
        tkp = detector.detect(templateImg_cv2)
        tkp, td = descriptor.compute(templateImg_cv2, tkp)

    else:
        templateImg = ts[-1].templateImg
        tkp = ts[-1].tkp
        td = ts[-1].td
        detector = ts[-1].detector
        descriptor = ts[-1].descriptor

    newimg = img.getNumpyCv2()

    # Get image keypoints
    skp = detector.detect(newimg)
    skp, sd = descriptor.compute(newimg, skp)

    if td is None:
        print "Descriptors are Empty"
        return None

    if sd is None:
        track = SURFTrack(img, skp, detector, descriptor, templateImg, skp, sd, tkp, td)
        return track

    # flann based matcher
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(sd, flann_params)
    idx, dist = flann.knnSearch(td, 1, params={})
    del flann

    # filter points using distance criteria
    dist = (dist[:,0]/2500.0).reshape(-1,).tolist()
    idx = idx.reshape(-1).tolist()
    indices = sorted(range(len(dist)), key=lambda i: dist[i])

    dist = [dist[i] for i in indices]
    idx = [idx[i] for i in indices]
    skp_final = []
    skp_final_labelled=[]
    data_cluster=[]
    
    for i, dis in itertools.izip(idx, dist):
        if dis < distance:
            skp_final.append(skp[i])
            data_cluster.append((skp[i].pt[0], skp[i].pt[1]))

    # Use density-based clustering to further filter out keypoints
    n_data = np.asarray(data_cluster)
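    # Pairwise distances between the surviving keypoints, rescaled into a
    # similarity matrix in [0, 1]; DBSCAN is then fit on its rows.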
    D = Dis.squareform(Dis.pdist(n_data))
    S = 1 - (D/np.max(D))
    
    db = DBSCAN(eps=eps_val, min_samples=min_samples).fit(S)
    core_samples = db.core_sample_indices_
    labels = db.labels_
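    # Keep only the keypoints assigned to the first cluster (label 0);
    # label -1 would mark noise points.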
    for label, i in zip(labels, range(len(labels))):
        if label==0:
            skp_final_labelled.append(skp_final[i])

    track = SURFTrack(img, skp_final_labelled, detector, descriptor, templateImg, skp, sd, tkp, td)

    return track
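
The clustering step above fits DBSCAN on a similarity matrix derived from pairwise keypoint distances. A simpler, common variant clusters the (x, y) coordinates directly, with eps expressed in pixels, and keeps the most populated cluster rather than assuming it carries label 0. The sketch below shows that variant; densest_cluster and its defaults are illustrative, not SimpleCV API.

import numpy as np
from sklearn.cluster import DBSCAN

def densest_cluster(points, eps=20.0, min_samples=5):
    # Cluster raw (x, y) coordinates; DBSCAN labels noise points -1.
    pts = np.asarray(points, dtype=np.float64)
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit(pts).labels_
    valid = labels[labels != -1]
    if valid.size == 0:
        return []
    # Keep only the members of the most populated cluster.
    best = np.bincount(valid).argmax()
    return [p for p, l in zip(points, labels) if l == best]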
Example #4
# Imports assumed by this excerpt (module paths may vary across SimpleCV versions):
#   import itertools
#   import cv2
#   import numpy as np
#   from SimpleCV.Tracking import SURFTrack
def surfTracker(img, bb, ts, **kwargs):
    """
    **DESCRIPTION**

    (Dev Zone)

    Tracks the object surrounded by the bounding box in the given
    image using SURF keypoints.

    Warning: Use this only if you know what you are doing. It is usually
    better to use Image.track().

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.

    Optional PARAMETERS:

    eps_val     - eps for DBSCAN
                  The maximum distance between two samples for them 
                  to be considered as in the same neighborhood. 

    min_samples - min number of samples in DBSCAN
                  The number of samples in a neighborhood for a point 
                  to be considered as a core point. 

    distance    - Threshold on the KNN distance of each feature;
                  if KNN distance > distance, the point is discarded.

    **RETURNS**

    SimpleCV.Features.Tracking.SURFTracker

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts.append(surfTracker(img, bb, ts, eps_val=0.7, distance=150))
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts.append(surfTracker(img, bb, ts, eps_val=0.7, distance=150))
        ... ts[-1].drawBB()
        ... img.show()

    This is quite confusing; it is better to use the
    Image.track() method.

    READ MORE:

    SURF based Tracker:
    Matches keypoints from the template image and the current frame.
    A FLANN-based matcher is used to match the keypoints.
    Density-based clustering is used to classify points as in-region (inside
    the bounding box) and out-region points. Using the in-region points, a new
    bounding box is predicted using k-means.
    """
    eps_val = 0.69
    min_samples = 5
    distance = 100

    for key in kwargs:
        if key == 'eps_val':
            eps_val = kwargs[key]
        elif key == 'min_samples':
            min_samples = kwargs[key]
        elif key == 'distance':
            distance = kwargs[key]

    from scipy.spatial import distance as Dis
    from sklearn.cluster import DBSCAN

    if len(ts) == 0:
        # Get template keypoints
        bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
        templateImg = img
        detector = cv2.FeatureDetector_create("SURF")
        descriptor = cv2.DescriptorExtractor_create("SURF")

        templateImg_cv2 = templateImg.getNumpyCv2()[bb[1]:bb[1] + bb[3],
                                                    bb[0]:bb[0] + bb[2]]
        tkp = detector.detect(templateImg_cv2)
        tkp, td = descriptor.compute(templateImg_cv2, tkp)

    else:
        templateImg = ts[-1].templateImg
        tkp = ts[-1].tkp
        td = ts[-1].td
        detector = ts[-1].detector
        descriptor = ts[-1].descriptor

    newimg = img.getNumpyCv2()

    # Get image keypoints
    skp = detector.detect(newimg)
    skp, sd = descriptor.compute(newimg, skp)

    if td is None:
        print("Descriptors are Empty")
        return None

    if sd is None:
        track = SURFTrack(img, skp, detector, descriptor, templateImg, skp,
                          sd, tkp, td)
        return track

    # flann based matcher
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(sd, flann_params)
    idx, dist = flann.knnSearch(td, 1, params={})
    del flann

    # filter points using distance criteria
    dist = (dist[:, 0] / 2500.0).reshape(-1, ).tolist()
    idx = idx.reshape(-1).tolist()
    indices = sorted(range(len(dist)), key=lambda i: dist[i])

    dist = [dist[i] for i in indices]
    idx = [idx[i] for i in indices]
    skp_final = []
    skp_final_labelled = []
    data_cluster = []

    for i, dis in itertools.izip(idx, dist):
        if dis < distance:
            skp_final.append(skp[i])
            data_cluster.append((skp[i].pt[0], skp[i].pt[1]))

    # Use density-based clustering to further filter out keypoints
    n_data = np.asarray(data_cluster)
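    # Pairwise distances between the surviving keypoints, rescaled into a
    # similarity matrix in [0, 1]; DBSCAN is then fit on its rows.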
    D = Dis.squareform(Dis.pdist(n_data))
    S = 1 - (D / np.max(D))

    db = DBSCAN(eps=eps_val, min_samples=min_samples).fit(S)
    core_samples = db.core_sample_indices_
    labels = db.labels_
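    # Keep only the keypoints assigned to the first cluster (label 0);
    # label -1 would mark noise points.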
    for label, i in zip(labels, range(len(labels))):
        if label == 0:
            skp_final_labelled.append(skp_final[i])

    track = SURFTrack(img, skp_final_labelled, detector, descriptor,
                      templateImg, skp, sd, tkp, td)

    return track
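
The cv2.flann_Index / knnSearch API used above comes from OpenCV 2.x. A rough modern equivalent of the match-and-threshold step uses cv2.FlannBasedMatcher, sketched below. Note that DMatch.distance is an L2 distance while knnSearch typically reports squared distances, so the 2500.0 normalization carried over from the tracker is illustrative and would need re-tuning.

import cv2

def match_below_distance(td, sd, distance=100):
    # td, sd: float32 descriptor arrays for the template and the scene.
    flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=4), {})
    # One nearest scene descriptor per template descriptor.
    matches = flann.knnMatch(td, sd, k=1)
    keep = []
    for m in matches:
        # The 2500.0 scale mirrors the tracker above; re-tune for this API.
        if m and m[0].distance / 2500.0 < distance:
            keep.append(m[0].trainIdx)  # index into the scene keypoints
    return keep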