Example #1
def score_OF(roi, frame1, frame0, lk_params, feature_params):
    img0 = cv2.cvtColor(frame0[roi.ymin:roi.ymax, roi.xmin:roi.xmax], cv2.COLOR_BGR2GRAY)
    img1 = cv2.cvtColor(frame1[roi.ymin:roi.ymax, roi.xmin:roi.xmax], cv2.COLOR_BGR2GRAY)

    p0 = cv2.goodFeaturesToTrack(img0, mask=None, **feature_params)
    p1 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
    if p0 is None or p1 is None:
        return -1.0, 0.0, 0.0, 0.0, 0.0

    # Track p0 forward (img0 -> img1) and p1 backward (img1 -> img0).
    # nextPts must be None here: p0 and p1 come from independent detections,
    # so they cannot serve as initial estimates for each other.
    p1_0, st1_0, err1_0 = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0_1, st0_1, err0_1 = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    
    nb_c_p0=0.0
    nb_p0=0.0
    nb_c_p1=0.0
    nb_p1=0.0
    move_x = 0.0
    move_y = 0.0

    for pts0, pts1, s in zip(p0, p1_0, st1_0):
        nb_p0+=1
        if s[0] == 1:
            nb_c_p0+=1
            move_x+=(pts1[0][0]-pts0[0][0])
            move_y+=(pts1[0][1]-pts0[0][1])
    if nb_c_p0>0:
        move_x=int(round(move_x/nb_c_p0,0))
        move_y=int(round(move_y/nb_c_p0,0))
    
    for pts0, pts1, s in zip(p1, p0_1, st0_1):
        nb_p1+=1
        if s[0] == 1:
            nb_c_p1+=1

    return (nb_c_p0/nb_p0+nb_c_p1/nb_p1)/2, nb_p0, nb_p1, move_x, move_y
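A hedged usage sketch for score_OF above; the Roi container, the parameter dictionaries, and the video path are illustrative assumptions, not part of the original snippet.

import collections
import cv2

# Hypothetical ROI container and parameter dictionaries for the sketch.
Roi = collections.namedtuple('Roi', 'xmin xmax ymin ymax')
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)

cap = cv2.VideoCapture('video.mp4')  # assumed input file
ok0, frame0 = cap.read()
ok1, frame1 = cap.read()
if ok0 and ok1:
    h, w = frame0.shape[:2]
    roi = Roi(xmin=0, xmax=w, ymin=0, ymax=h)
    score, n0, n1, dx, dy = score_OF(roi, frame1, frame0, lk_params, feature_params)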
Example #2
def checked_flow(gray_img0, gray_img1, p0, max_err=1.0, win_size=15, max_level=3):
    lk_params = dict(
        winSize=(win_size, win_size),
        maxLevel=max_level,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.1),
    )
    h, w = gray_img0.shape[:2]
    p0 = p0 * w
    p_idx = np.arange(len(p0))

    # Drop points that fall outside the image bounds.
    img_size = np.int32([(0, 0), (w, h)])
    good = aabb.contains_point(img_size, p0)
    p0, p_idx = p0[good], p_idx[good]

    # Drop points that land on masked-out pixels in either image.
    x, y = np.int32(p0.T)
    mask0, mask1 = get_img_mask(gray_img0), get_img_mask(gray_img1)
    good = mask0[y, x] & mask1[y, x]
    p0, p_idx = p0[good], p_idx[good]

    # Forward-backward check: track p0 into img1, track the result back, and
    # keep only points that return to within max_err of where they started.
    p1, _, _ = cv2.calcOpticalFlowPyrLK(gray_img0, gray_img1, p0, None, **lk_params)
    p0r, _, _ = cv2.calcOpticalFlowPyrLK(gray_img1, gray_img0, p1, None, **lk_params)
    err = common.norm(p0 - p0r).ravel()
    good = err < max_err
    p1, p_idx, err = p1.reshape(-1, 2)[good], p_idx[good], err[good]
    return p1, p_idx, err
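The helpers aabb.contains_point, get_img_mask, and common.norm come from the surrounding project and are not shown; a plain-numpy reading of the forward-backward error they feed, assuming common.norm computes a per-point Euclidean length, is:

import numpy as np

def fb_error(p0, p0r):
    # Per-point Euclidean distance between original and back-tracked points.
    d = p0.reshape(-1, 2) - p0r.reshape(-1, 2)
    return np.sqrt((d ** 2).sum(axis=1))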
Example #3
 def UpdateTracks(self, tracks):
     """
     Updates all the point lists using new and old image data.
     :param tracks: List of lists of points, in increasing order of recentness
     """
     new_tracks = []
     if len(tracks) > 0:
         #convert old pointlist to matrix, taking the last element in each point tracked
         p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
         #new pointlist from old points
         p1, st, err = cv2.calcOpticalFlowPyrLK(self.img0, self.img1, p0, None, **prm.lk_params)
         #old pointlist from new points
         p0r, st, err = cv2.calcOpticalFlowPyrLK(self.img1, self.img0, p1, None, **prm.lk_params)
         #
         d = abs(p0-p0r).reshape(-1, 2).max(-1)
         good = d < 1
         for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
             if not good_flag:
                 continue
             tr.append((x, y))
             # limit the track length; history beyond TRACK_LEN is not needed
             if len(tr) > prm.TRACK_LEN:
                 del tr[0]
             new_tracks.append(tr)
             if prm.DEBUG:
                 cv2.circle(self.vis, (x, y), 2, (0, 255, 0), -1)
         if prm.DEBUG:
             cv2.polylines(self.vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
             draw_str(self.vis, (20, 20), 'track count: %d' % len(tracks))
     return new_tracks
Example #4
def optical_flow(fgrayprev, fgray, tracking_features):
    winsize = 10
    if tracking_features is not None:
        lk_params = dict(winSize=(winsize, winsize),
                         maxLevel=5,
                         criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        forwardflow, status, track_error = cv2.calcOpticalFlowPyrLK(
            fgrayprev, fgray, tracking_features, None, **lk_params)
        backflow, backstatus, backtrack_error = cv2.calcOpticalFlowPyrLK(
            fgray, fgrayprev, forwardflow, None, **lk_params)
        d = abs(tracking_features-backflow).reshape(-1, 2).max(-1)
        good = d < 1
        forwardflow = forwardflow.reshape((-1, 2))
        finalflow = []
        finalfeat = []
        for feat,flow,qualitygood in zip(tracking_features, forwardflow, good):
            if qualitygood:
                finalflow.append(flow)
                finalfeat.append(feat)
        return finalflow,finalfeat
    else:
        return None,None
Example #5
    def lk_flow(self, p0, begin_frame, end_frame):
        """Run LK flow from begin_frame to end_frame along points in p0; also,
        selects the points that are considered to be more trustworthy
        for this flow. Returns such points both in the beginning end
        ending reference.

        """
        # FIXME: take this from settings
        lk_params = {
            "winSize": (15, 15),
            "maxLevel": 2,
            "criteria": (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
        }

        # Flow the points forward, then flow them backward, so we
        # can check whether they match
        p1, st, err = cv2.calcOpticalFlowPyrLK(begin_frame, end_frame, p0, None, **lk_params)
        p0r, st, err = cv2.calcOpticalFlowPyrLK(end_frame, begin_frame, p1, None, **lk_params)

        # Retain points only if the backward flow brings them back to their
        # original place, within a tolerance (which is, FIXME, a magic
        # constant)
        p0_good, p1_good = [], []
        for k in range(len(p0)):
            if numpy.linalg.norm(p0[k] - p0r[k]) < 1.0:
                p0_good.append(p0[k])
                p1_good.append(p1[k])

        return p0_good, p1_good
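The per-point selection loop above can be vectorized with numpy; a sketch, assuming p0, p1, and p0r keep the (N, 1, 2) float32 layout that calcOpticalFlowPyrLK returns (note it yields arrays rather than Python lists):

err = numpy.linalg.norm((p0 - p0r).reshape(-1, 2), axis=1)
good = err < 1.0
p0_good, p1_good = p0[good], p1[good]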
Example #6
    def test_umat_optical_flow(self):
        img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE)
        img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE)
        # Note, that if you want to see performance boost by OCL implementation - you need enough data
        # For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way:
        # img = np.hstack([np.vstack([img] * 6)] * 6)

        feature_params = dict(maxCorners=239,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params)
        p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params)
        self.assertEqual(p0_umat.get().shape, p0.shape)

        p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
        p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
        self.assertTrue(np.allclose(p0_umat.get(), p0))

        _p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None)

        _p1_mask_err_umat0 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)))
        _p1_mask_err_umat1 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None)))
        _p1_mask_err_umat2 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None)))

        for _p1_mask_err_umat in [_p1_mask_err_umat0, _p1_mask_err_umat1, _p1_mask_err_umat2]:
            for data, data_umat in zip(_p1_mask_err, _p1_mask_err_umat):
                self.assertEqual(data.shape, data_umat.shape)
                self.assertEqual(data.dtype, data_umat.dtype)
        for _p1_mask_err_umat in [_p1_mask_err_umat1, _p1_mask_err_umat2]:
            for data_umat0, data_umat in zip(_p1_mask_err_umat0[:2], _p1_mask_err_umat[:2]):
                self.assertTrue(np.allclose(data_umat0, data_umat))
Example #7
    def track(self, new_frame, old_position):
        self.bbPoints(self.bounding_box)
        # if self.points is None or self.timer_for_calculate_points <= 0:
        #     self.calculate_points(old_position)
        if self.points is not None:
            new_points, self.status, self.err = cv2.calcOpticalFlowPyrLK(old_position.buffer[0], new_frame, self.points,
                                                                         None, **self.lk_params)
            pointsFB, statusFB, self.FB_error = cv2.calcOpticalFlowPyrLK(new_frame, old_position.buffer[0], new_points,
                                                                         None,
                                                                         **self.lk_params)
            # Recompute the forward-backward error: distance between each
            # original point and its back-tracked position.
            for i in range(len(self.points)):
                self.FB_error[i] = norm(substractPoint(pointsFB[i][0], self.points[i][0]))

            self.normCrossCorrelation(old_position.buffer[0], new_frame, self.points, new_points)
            self.points, new_points, tracked = self.filterPoints(self.points, new_points)

            if not tracked:
                return None

            # new bounding box and points
            newbox = getNewBB(self.points, new_points, self.bounding_box)
            # if newbox[0] < 0 or newbox[1] < 0 or newbox[0] + newbox[2] > new_frame.shape[0] or newbox[1] + newbox[
            #     3] > new_frame.shape[1]:
            #     return None
            self.points = new_points
            self.bounding_box = newbox
            self.timer_for_calculate_points -= 1
            return self.bounding_box
        else:
            return None
Example #8
def OpticalFlow(img0, img1, lk_params, feature_params):
    # convert image to gray
    img0_tmp = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
    img1_tmp = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    p0 = cv2.goodFeaturesToTrack(img0_tmp, mask=None, **feature_params)
    p1 = cv2.goodFeaturesToTrack(img1_tmp, mask=None, **feature_params)
    if p0 is None or p1 is None:
        return -1.0, [], []

    # Track p0 forward (img0 -> img1) and p1 backward (img1 -> img0); the
    # early return above guarantees both point sets exist.
    p0_1, st0_1, err0_1 = cv2.calcOpticalFlowPyrLK(img0_tmp, img1_tmp, p0, None, **lk_params)
    st0_1_tmp = list(np.array(st0_1).T[0])
    p1_0, st1_0, err1_0 = cv2.calcOpticalFlowPyrLK(img1_tmp, img0_tmp, p1, None, **lk_params)
    st1_0_tmp = list(np.array(st1_0).T[0])

    l_move_x = []
    l_move_y = []
    # Pair each feature in img0 with its forward-tracked position p0_1 in img1
    # (the original zipped p0 with p1_0, a differently-sized point set from
    # the other image); st0_1 marks which tracks succeeded.
    for pts0, pts1, s in zip(p0, p0_1, st0_1):
        if s[0] == 1:
            l_move_x.append(pts1[0][0] - pts0[0][0])
            l_move_y.append(pts1[0][1] - pts0[0][1])

    return np.mean(st0_1_tmp + st1_0_tmp), l_move_x, l_move_y
Example #9
def UpdateTracks(tracks, img0, img1, track_len):
    """
    Updates all the point lists using new and old image data.
    :param tracks: List of lists of points, in increasing order of recentness
    :param img0: old image
    :param img1: new image
    :return: Updated list of points
    """
    if len(tracks) > 0:
        #convert old pointlist to matrix, taking the last element in each point tracked
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        #new pointlist from old points
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
        #old pointlist from new points
        p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
        #
        d = abs(p0-p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []
        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            # limit the track length; history beyond track_len is not needed
            if len(tr) > track_len:
                del tr[0]
            new_tracks.append(tr)
        return new_tracks
    return []
Example #10
    def __motion_estimation_shi_tomasi(self, ref_frame, new_frame, min_matches=20):
        # detect corners
        grey_frame = cv.cvtColor(ref_frame, cv.COLOR_BGR2GRAY)
        corners = cv.goodFeaturesToTrack(grey_frame, self.n_max_corners, .01, 50)    # Better with Fast ?
        corners_next, status, _ = cv.calcOpticalFlowPyrLK(ref_frame, new_frame, corners)    # Track points

        corners_next_back, status_back, _ = cv.calcOpticalFlowPyrLK(new_frame, ref_frame, corners_next)     # Track back

        # - sort out to keep reliable points :
        corners, corners_next = self.__sort_corners(corners, corners_next, status, corners_next_back, status_back, 1.0)

        if len(corners) < 5:
            return None, False

        # Compute the transformation from the tracked pattern
        # -- estimate the rigid transform
        transform, mask = cv.findHomography(corners, corners_next, cv.RANSAC, 5.0)

        # -- see if this transform explains most of the displacements (thresholded..)
        if len(mask[mask > 0]) > min_matches:
            print("Enough matches for motion compensation")
            return transform, True

        else:
            print("Not finding enough matches - {}".format(len(mask[mask > 0])))
            return None, False
Example #11
    def __compensate_shi_tomasi(self, new_frame):
        """
        Measure and compensate for inter-frame motion:
        - get points on both frames
        -- we use Shi & Tomasi here, to be adapted ?
        @rtype : opencv frame
        """
        self.corners = cv2.goodFeaturesToTrack(self.frame_prev, self.n_max_corners, .01, 50)

        # - track points
        [self.corners_next, status, err] = cv2.calcOpticalFlowPyrLK(self.frame_prev, new_frame, self.corners)

        # - track back (more reliable)
        [corners_next_back, status_back, err_back] = cv2.calcOpticalFlowPyrLK(new_frame,
                                                                              self.frame_prev, self.corners_next)

        # - sort out to keep reliable points :
        [self.corners, self.corners_next] = self.__sort_corners(self.corners,
                                                                self.corners_next, status,
                                                                corners_next_back, status_back)

        # - compute the transformation from the tracked pattern
        # -- estimate the rigid transform
        transform, mask = cv2.findHomography(self.corners_next, self.corners, cv2.RANSAC, 5.0)

        # -- see if this transform explains most of the displacements (thresholded..)
        if len(mask[mask > 0]) > 20: # TODO: More robust test here
            print("Enough matches for motion compensation")
            # warpPerspective expects dsize as (width, height)
            acc_frame_aligned = cv2.warpPerspective(self.frame_acc, transform, self.frame_acc.shape[1::-1])
            self.frame_acc = acc_frame_aligned
            return True

        else:
            print("Not finding enough matches - {}".format(len(mask[mask > 0])))
            return False
Example #12
    def opticalFlow(self,img1,img2,img3):

        #set variables
        lk_params = dict(winSize = (10,10),
                        maxLevel = 5,
                        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,10,0.03))

        features_param = dict( maxCorners = 3000,
                                qualityLevel = 0.5,
                                minDistance = 3,
                                blockSize = 3)

        # feature extraction of points to track
        pt = cv2.goodFeaturesToTrack(img1, **features_param)
        p0 = np.float32(pt).reshape(-1, 1, 2)

        # calculate average movement with a per-point forward-backward check
        dist = list()
        for loop in p0:
            p1, st, err = cv2.calcOpticalFlowPyrLK(img1, img2, loop,
                                                   None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img2, img1, p1,
                                                    None, **lk_params)

            # keep the point only if the backward flow returns within 1 px
            if abs(loop - p0r).reshape(-1, 2).max(-1)[0] < 1:
                dst = distance.euclidean(loop, p0r)
                dist.append(dst)

        if not dist:
            return 0.0
        return round(max(dist) * 10, 2)
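Calling calcOpticalFlowPyrLK once per point, as the loop above does, is slow; the function accepts the whole point set in a single call. A hedged batched sketch of the same forward-backward check, reusing the names from the method:

p1, st, err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None, **lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(img2, img1, p1, None, **lk_params)
d = abs(p0 - p0r).reshape(-1, 2).max(-1)
good = d < 1
# same distance measure as the loop above, only for the surviving points
dist = [distance.euclidean(a, b)
        for a, b, ok in zip(p0.reshape(-1, 2), p0r.reshape(-1, 2), good) if ok]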
Example #13
    def track_keypoints(self, prev_image, cur_image, tracks):
        #print "track_keypoints"	
        ts = time()
        img0, img1 = prev_image._data, cur_image._data
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
        p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
        d = abs(p0-p0r).reshape(-1, 2).max(-1)
        good = d < 1
        #good = st
        #print "tk: ", str(time() - ts)
        new_tracks = []
        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            if len(tr) > self.track_len:
                del tr[0]
            new_tracks.append(tr)
            #cv2.circle(self.vis, (x, y), 2, (0, 255, 0), -1)
        #print "initial tp: ", len(self.tracks), " current tp: ", len(new_tracks)
        #print len(new_tracks), len(tracks)
        tracks[:] = new_tracks[:]
        if len(tracks) == 0:
            logging.warning("lost ALL tp!")
            self.bot.stop()
Example #14
def create_point_tracks(name, bounding_box, total, start_frame = 0, max_track_length = 20, draw = False):
  if start_frame > total - 1:
    return [], []
    
  # Initialize with first frame and second frame.
  image = cv2.imread(name + (("/image%.05d.jpg") % start_frame))
  
  # Select features within bounding box if given
  sub_image = image[bounding_box[0]:bounding_box[2], bounding_box[1]:bounding_box[3]]
  features = get_features(sub_image)

  if features is None:
    return [], []

  # Transform features to big image if bounding box
  for i in range(len(features)):
    features[i][0][0] = features[i][0][0]+bounding_box[0]
    features[i][0][1] = features[i][0][1]+bounding_box[1]

  active_tracks = []
  dead_tracks = []
  for row in range(len(features)):
    active_tracks.append(Track(features[row], start_frame))

  for frame_nr in range(start_frame + 1, min(start_frame + max_track_length, total)):
    if len(features) == 0:
      # Break if no features anymore
      break
  
    #print "Features: %d Length: %d" % (len(features), track_length)
    if draw:
      draw_features(image, features)
    next_image = cv2.imread(name + (("/image%.05d.jpg") % frame_nr))

    ### Feature Selection method: Forward-Backward Tracking (Optical flow)
    # Forward in time
    forward_features, st, err = cv2.calcOpticalFlowPyrLK(image, next_image, features, None, **lk_params)
    # Backward in time
    backward_features, st, err = cv2.calcOpticalFlowPyrLK(next_image, image, forward_features, None, **lk_params)
    # Remove wrong matches
    distance = abs(features - backward_features).reshape(-1, 2).max(-1)
    matches = distance < 1
    matched_features = []
    new_active_tracks = []
    
    # Throw away all bad matches
    for i in range(len(matches)):
      if matches[i]:
        matched_features.append(forward_features[i])
        active_tracks[i].add_point(forward_features[i], frame_nr)
        new_active_tracks.append(active_tracks[i])
      else:
        dead_tracks.append(active_tracks[i])
        
    active_tracks = new_active_tracks
    features = np.array(matched_features)
    image = next_image.copy()

  return active_tracks, dead_tracks
Example #15
    def run(self):
        while True:
            ret, frame = cap.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            cars = car_cascade.detectMultiScale(frame_gray, scaleFactor=1.1, minSize=(90, 90), maxSize=(800, 800))
            print(len(cars))

            for (x,y,w,h) in cars:
                cv2.rectangle(vis,(x,y),(x+w,y+h),(0,0,255),2)

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                #print p0
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                # cars = car_cascade.detectMultiScale(frame_gray, scaleFactor=1.1, minSize=(90, 90), maxSize=(1000, 1000))
                # for (x,y,w,h) in cars:
                #     cv2.rectangle(vis,(x,y),(x+w,y+h),(0,0,255),2)
                #     car_centroid = [(x+(x/2)), (y+(y/2))]
                #     # self.tracks = np.append(self.tracks, car_centroid, axis=0)
                #     # p0 = np.float32(self.tracks).reshape(-1, 1, 2)
                #print "p: \n", car_centroid
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #16
    def detectTrackCars(self):
        """Detect object classes in an image using pre-computed object proposals."""

        cap = cv2.VideoCapture("/media/senseable-beast/beast-brain-1/Data/TrafficIntersectionVideos/slavePi2_RW1600_RH1200_TT900_FR15_06_10_2016_18_11_00_604698.h264")

        track_len = 10
        tracks = [] #stores center values of detected cars that pass the threshold
        #rectangleTracks = [] #coordinates of all the cars that are detected (x,y,w,h)
        prev_gray = None  # guards the first loop iteration below
        
        while (cap.isOpened()):
            ret, im = cap.read()
            frame_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im_copy = im.copy()
            c.detectCars(im, im_copy, frame_gray, net, tracks)

            print "before if statement"
            print "len(tracks): ", len(tracks)
            print "prev_gray: ", prev_gray
            if len(tracks) > 0:
                img0, img1 = prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
                print "p0: ", p0
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    #               print "p0r", p0r
                d = abs(p0-p0r).reshape(-1, 2).max(-1) #included for robustness - calculates diff btwn calc point and backtracks it
                good = d < 1 #returns a boolean value. This value is the threshold
                print "good or not: ", good
                new_tracks = []
                #Figure out how the points are added to the array (how are they determined. Where are all these points coming from?)
                for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    print "tr: ", tr
    #                     if len(tr) > track_len: #remove point if it exceeds the track length
    #                         del tr[0]
    #                     new_tracks.append(tr)
    #                     cv2.circle(im_copy, (x, y), 2, (0, 255, 0), -1)
                print "end of if statement"
            elif len(tracks) <= 0:
                print "didn't run if statement"
    #                 #Filter out the points within the boxed areas. Keep them in the array to figure out the trajectory of the cars

    #                 tracks = new_tracks
    #                 cv2.polylines(im_copy, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
    #                 #draw_str(im_copy, (20, 20), 't rack count: %d' % len(tracks))
    #                 #draw_str(im_copy, (40, 40), 'total car count: %d' % (totalNumCars))
    #                 #draw_str(im_copy, (20, 60), 'car count: %d' % (totalNumCarsInFrame))
            print "before prev_gray"
            prev_gray = frame_gray

            # if cv2.waitKey(0) & 0xFF == ord('q'):
            #     break

            print "take prev_gray frame--------------------------------------------------"
Example #17
    def runTracker(self):
        foregroundPointsNum = 0

        while True:
            frame = self.render.getNextFrame()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append([(x, y), self.frame_idx])
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                self.tracks = new_tracks

            if self.frame_idx % self.detect_interval == 0:
                goodTracksCount = 0
                for tr in self.tracks:
                    oldRect = self.render.getRectInTime(self.render.timeStep * tr[0][1])
                    newRect = self.render.getRectInTime(self.render.timeStep * tr[-1][1])
                    if isPointInRect(tr[0][0], oldRect) and isPointInRect(tr[-1][0], newRect):
                        goodTracksCount += 1

                if self.frame_idx == self.detect_interval:
                    foregroundPointsNum = goodTracksCount

                fgIndex = float(foregroundPointsNum) / (foregroundPointsNum + 1)
                fgRate = float(goodTracksCount) / (len(self.tracks) + 1)

                if self.frame_idx > 0:
                    self.assertGreater(fgIndex, 0.9)
                    self.assertGreater(fgRate, 0.2)

                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([[(x, y), self.frame_idx]])

            self.frame_idx += 1
            self.prev_gray = frame_gray

            if self.frame_idx > 300:
                break
Example #18
def stabilize(img_list):
    cv_images = []

    for i in range(len(img_list)):
        cv_images.append(pil_to_opencv(img_list[i]))

    prev = cv_images[0]
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    last_T = None
    transform = []

    for i in range(1, len(cv_images)):
        current = cv_images[i]
        current_gray = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)

        prev_corner = cv2.goodFeaturesToTrack(prev_gray, 200, 0.01, 30)
        current_corner, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, current_gray, prev_corner, None)

        # keep only the point pairs that were tracked successfully
        prev_corner2 = prev_corner[status.ravel() == 1]
        current_corner2 = current_corner[status.ravel() == 1]

        # estimateRigidTransform returns None when no transform is found;
        # fall back to the last valid transform in that case
        T = cv2.estimateRigidTransform(prev_corner2, current_corner2, False)
        if T is None:
            T = last_T
        last_T = T

        dx = T[0, 2]
        dy = T[1, 2]
        da = numpy.arctan2(T[1, 0], T[0, 0])
        transform.append(Transform(dx, dy, da))

        prev = current
        prev_gray = current_gray

    # accumulate transformations to get image trajectory
    x = 0
    y = 0
    a = 0

    trajectory_list = []

    for k in range(len(transform)):
        x += transform[k].dx
        y += transform[k].dy
        a += transform[k].da
        trajectory_list.append(Trajectory(x, y, a))

    return img_list
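cv2.estimateRigidTransform was removed in OpenCV 4; a sketch of the modern replacement for the partial (translation, rotation, uniform scale) estimate used above:

# Drop-in sketch for cv2.estimateRigidTransform(prev_corner2, current_corner2, False).
# Also returns a 2x3 matrix (or None on failure), so the fallback logic above still applies.
T, inliers = cv2.estimateAffinePartial2D(prev_corner2, current_corner2)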
Example #19
    def run(self):
        self.points_array=[]
        frame_id = -1
        while True:
            frame_id += 1
            present_tracks = []
            ret, frame = self.cam.read()
            if frame is None:
                print("Video Ended")
                break
            if frame_id%self.drop_rate:
                continue

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    present_tracks.append(tr[-2:])
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                # present_tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)
            self.points_array.append(present_tracks)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #20
 def Calc_Comp_Angular_Point(self, back_threshold=2.0):
     # Use the previous frame's corners and the current frame's image to get the corner positions in the current frame
     self.p1, st, err = cv2.calcOpticalFlowPyrLK(self.prev_img, self.next_img, self.p0, None, **self.lk_params)
     # Use the corners tracked in the current frame and the previous frame's image to find the corner positions in the previous frame
     p0r, st, err = cv2.calcOpticalFlowPyrLK(self.next_img, self.prev_img, self.p1, None, **self.lk_params)
     # Get the displacement between the back-tracked corners and the actual corners in the previous frame
     d = abs(self.p0-p0r).reshape(-1, 2).max(-1)
     # Values of d below the threshold are kept; points above it are treated as bad tracking points
     self.status = d < back_threshold
     return self.p1, self.status
Example #21
    def track_points(self):
        """Track the detected features."""

        if self.features != []:
            # use the newly loaded image and create grayscale
            self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)

            # reshape to fit input format
            # tmp = np.float32(self.features).reshape(-1, 1, 2)
            tmp = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)

            # calculate optical flow using forwards-backwards algorithm
            features, status, track_error = cv2.calcOpticalFlowPyrLK(self.prev_gray,
                                                                     self.gray, tmp,
                                                                     None, **lk_params)

            features_r, status1, track_error = cv2.calcOpticalFlowPyrLK(self.gray,
                                                                        self.prev_gray,
                                                                        features, None,
                                                                        **lk_params)

            d = abs(tmp - features_r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []

            for tr, (x, y), good_flag in zip(self.tracks, features.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)

                cv2.circle(self.image, (x, y), 2, (0, 255, 0), -1)

            self.tracks = new_tracks
            cv2.polylines(self.image, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))

        # replenish lost points every self.interval steps
        if self.current_frame % self.interval == 0:
            mask = np.zeros_like(self.gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(self.gray, mask=mask, **feature_params)

            # Refine the features using cornerSubPix.
            # Takes time to compute, and makes the video choppy, so only enable if you need it.
            # (guarded: cornerSubPix raises if goodFeaturesToTrack returned None)
            if p is not None:
                cv2.cornerSubPix(self.gray, p, **subpix_params)
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])

        self.prev_gray = self.gray
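subpix_params is a module-level dict that is not shown in this snippet; a plausible definition matching cv2.cornerSubPix's signature (winSize, zeroZone, criteria) would be:

# Assumed definition; tune the window size and criteria for your footage.
subpix_params = dict(winSize=(10, 10),
                     zeroZone=(-1, -1),
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03))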
Example #22
    def run(self):
        while True:
            ret, frame = self.cam.read()  # get frame

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray  # old and new image
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)  # last point of each track, shaped (N, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)

                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)  # forward-backward error for every point

                good = d < 1  # per-point boolean flags

                new_tracks = []

                for tr, (x, y), good_flag in zip(
                    self.tracks, p1.reshape(-1, 2), good
                ):  # old points, new points and their flags
                    if not good_flag:  # skip points with a large back-tracking error
                        continue
                    tr.append((x, y))  # extend the track with the new position
                    if len(tr) > self.track_len:  # drop the oldest point
                        del tr[0]
                    new_tracks.append(tr)  # keep this surviving track
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)  # draw it
                self.tracks = new_tracks

                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))  # draw tracks
                draw_str(vis, (20, 20), "track count: %d" % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 0

                mask[100:200, 100:200] = 255

                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
                # print p, '!!!!!!!!!!!'
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow("lk_track", vis)

            ch = cv2.waitKey(1)
            if ch == 27:
                break
Example #23
    def updateError(self, frame):
        self.frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
      
        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, self.frame_gray
            p0 = np.float32([tr[-1][:2] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []

            self.xerror = 0.0
            self.yerror = 0.0
            self.n = 0.0

            current_time = time.time()
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y, current_time))
                if len(tr) > 500:
                    del tr[0]
                new_tracks.append(tr)

                if(len(tr)>=2):
                    t = np.float32([v[2] for v in tr])
                    x = np.float32([v[0] for v in tr])
                    y = np.float32([v[1] for v in tr])


                    self.xerror = self.xerror + (x[-1] - x[0])
                    self.yerror = self.yerror + (y[-1] - y[0])
                    self.n = self.n + 1.0

            if self.n>0:
                self.xerror = self.xerror / float(self.n)
                self.yerror = self.yerror / float(self.n)

            self.tracks = new_tracks

        if self.xerror == 0 and self.yerror == 0:
            current_time = time.time()
            mask = np.zeros_like(self.frame_gray)
            mask[:] = 255
            p = cv2.goodFeaturesToTrack(self.frame_gray, mask = mask, **self.feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y, current_time)])


        self.prev_gray = self.frame_gray
Example #24
    def track_feature_point(self, grey, prev_grey):
        # We are tracking points between the previous frame and the current frame
        img0, img1 = prev_grey, grey

        # Reshape the current feature point into a Numpy array required by calcOpticalFlowPyrLK()
        p0 = self.feature_point[:2].reshape(-1, 1 ,2)

        # Calculate the optical flow from the previous frame to the current frame
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)

        # Do the reverse calculation: from the current frame to the previous frame
        try:
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)

            # Compute the distance between corresponding points in the two flows
            d = abs(p0-p0r).reshape(-1, 2).max(-1)

            # If the distance between pairs of points is < 1 pixel, set
            # a value in the "good" array to True, otherwise False
            good = d < 1

            # Initialize a list to hold new feature_points


            # Cycle through all current and new feature_points and only keep
            # those that satisfy the "good" condition above
            for (x, y), good_flag in zip(p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                new_feature_point = np.array((x, y, self.feature_point[2]))

            # Draw the center of the circle
            center = (int(new_feature_point[0]), int(new_feature_point[1]))
            cv2.circle(self.marker_image, center, self.feature_size, (0, 0, 255), -1, 8, 0)

            # Draw the outer circle
            cv2.circle(self.marker_image, center, int(new_feature_point[2]), (0, 255, 0), self.feature_size, 8, 0)

            # Draw error line from the frame center to the feature point
            frame_center = (self.frame_width // 2, self.frame_height // 2)
            cv2.line(self.marker_image, frame_center, center, (0, 0, 255), 10, 8, 0)

            # Display error distance on screen
            strInfo = str('Error: ' + str(self.frame_width // 2 - new_feature_point[0]) + " ," + str(self.frame_height // 2 - new_feature_point[1]))
            cv2.putText(self.marker_image, strInfo, frame_center, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            
            # Set the global feature_point list to the new list
            self.feature_point = new_feature_point
            feature_point_to_track = np.array((new_feature_point[0], new_feature_point[1], new_feature_point[2]))

            # Provide self.tracked_point to publish_poi to be published on the /poi topic
            self.tracked_point = feature_point_to_track

        except:
            self.tracked_point = None

        return self.tracked_point
Example #25
    def run(self):
        while True:
            ret, frame = self.cam.read()
            if not ret:
                return
            global frame_gray
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            global height, width, depth
            height, width = frame_gray.shape
            #print(frame_gray.shape)
            global vis
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                print("#"*30+"\n")    
                print(tr) 
                print("#"*30+"\n")
                self.tracks = new_tracks
                print(kmean(new_tracks))
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #26
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0-p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold

    if False:
        print("Alex: p1 = %s" % str(p1))
        print("Alex: status = %s" % str(status))

    return p1, status
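A hedged usage sketch for checkedTrace; lk_params is a module-level dict in the original, so a typical definition is assumed here, and img0/img1 stand for two consecutive grayscale frames:

import cv2

lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

p0 = cv2.goodFeaturesToTrack(img0, maxCorners=100, qualityLevel=0.3, minDistance=7)
p1, ok = checkedTrace(img0, img1, p0)
p0_good, p1_good = p0[ok], p1[ok]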
Example #27
    def work(self, image):

        currentrect = self.currentrect

        # create grayscale version with the cv2 API; the legacy cv.CreateImage /
        # cv.CvtColor calls returned IplImages, which cv2.calcOpticalFlowPyrLK
        # below cannot consume
        frame_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #cv2.equalizeHist(frame_gray, frame_gray)

        vis = image.copy()
        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, frame_gray
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

        if self.frame_idx % self.detect_interval == 0:
            # goodFeaturesToTrack needs an 8-bit mask of the same shape as the image
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])


        self.frame_idx += 1
        self.prev_gray = frame_gray
        #cv2.imshow('lk_track', vis)
        return vis
Example #28
 def track_keypoints(self, grey, prev_grey):
     # We are tracking points between the previous frame and the
     # current frame
     img0, img1 = prev_grey, grey
     
     # Reshape the current keypoints into a numpy array required
     # by calcOpticalFlowPyrLK()
     p0 = np.float32([p for p in self.keypoints]).reshape(-1, 1, 2)
     
     # Calculate the optical flow from the previous frame to the current frame
     p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)
     
     # Do the reverse calculation: from the current frame to the previous frame
     try:
         p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)
         
         # Compute the distance between corresponding points in the two flows
         d = abs(p0-p0r).reshape(-1, 2).max(-1)
         
         # If the distance between pairs of points is < 1 pixel, set
         # a value in the "good" array to True, otherwise False
         good = d < 1
     
         # Initialize a list to hold new keypoints
         new_keypoints = list()
         
         # Cycle through all current and new keypoints and only keep
         # those that satisfy the "good" condition above
         for (x, y), good_flag in zip(p1.reshape(-1, 2), good):
             if not good_flag:
                 continue
             new_keypoints.append((x, y))
             
             # Draw the keypoint on the image
             cv2.circle(self.marker_image, (int(x), int(y)), self.feature_size, (0, 255, 0, 0), -1, 8, 0)
         
         # Set the global keypoint list to the new list    
         self.keypoints = new_keypoints
         
         # If we have enough points, find the best fit ellipse around them
         # (cv2 equivalents of the legacy cv.CreateMat / cv.FitEllipse2 calls)
         if len(self.keypoints) > 6:
             self.keypoints_matrix = np.float32(self.keypoints).reshape(-1, 1, 2)
             track_box = cv2.fitEllipse(self.keypoints_matrix)
         else:
             # Otherwise, find the best fitting rectangle
             self.keypoints_matrix = np.float32(self.keypoints).reshape(-1, 1, 2)
             track_box = cv2.boundingRect(self.keypoints_matrix)
     except:
         track_box = None
                     
     return track_box
Example #29
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
	# Calculate flow points (p1) from img0 to img1, using p0 as the baseline
	p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)

	# Calculate flow points (p0r) from img1 to img0, using p1 as the baseline
	p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)

	# Calculate the error in back projection for each point
	d = abs(p0-p0r).reshape(-1, 2).max(-1)
	status = d < back_threshold # If error < threshold, status is True, else False. This builds an array of statuses.
	return p1, status # Return the new positions of the features, and the status array
Example #30
    def _optical_flow(self, current_frame, previous_frame):

        total = 0  # renamed from `sum` to avoid shadowing the built-in
        expected_res = 0
        self.tracks_count = 0
        for i, track in enumerate(self.tracks):
            if track:
                p0 = np.reshape([tr[-1] for tr in track], (-1, 1, 2))
                p1 = cv2.calcOpticalFlowPyrLK(previous_frame, current_frame,
                                              p0, None, **self.lk_params)[0]
                p0r = cv2.calcOpticalFlowPyrLK(current_frame, previous_frame,
                                               p1, None, **self.lk_params)[0]
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = deque(maxlen=self.app_params['tracks_number'])
                sum_r = 0

                for tr, (x, y), good_flag in zip(track, p1.reshape(-1, 2),
                                                 good):
                    if not good_flag:
                        continue

                    tr.append((x, y))
                    new_tracks.append(tr)

                    # pixel shift
                    radius = tr[-1][-1] - tr[-2][-1]

                    if self.training and not i:
                        self.network.add_sample(y=tr[-1][-1],
                                                dy=radius,
                                                result=radius)
                    elif self.training:
                        self.network.add_sample(y=tr[-1][-1], dy=radius,
                                                result=expected_res)

                    if self.training is None and self.network:
                        # If neural network is ready use it
                        sum_r += self.network.result(y=tr[-1][-1], dy=radius)
                    else:
                        sum_r += radius

                if self.training and not i:
                    # Result used to teaching neural network
                    expected_res = sum_r / len(track)

                self.tracks[i] = new_tracks
                self.tracks_count += len(new_tracks)
                if new_tracks:
                    total += (self.app_params['speed_multi'] * sum_r /
                              len(new_tracks))

        return total / len(self.tracks)
Example #31
    def _generator(self):
        # params for ShiTomasi corner detection
        if self._shi_tomashi_params is None:
            feature_params = dict(maxCorners=100,
                                  qualityLevel=0.03,
                                  minDistance=7,
                                  blockSize=7)
        else:
            feature_params = self._shi_tomashi_params

        # Parameters for lucas kanade optical flow
        if self._lucas_kanade_params is None:
            lk_params = dict(winSize=(15, 15),
                             maxLevel=2,
                             criteria=(cv2.TERM_CRITERIA_EPS
                                       | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        else:
            lk_params = self._lucas_kanade_params

        prev_img = None
        p0 = None
        elapsed_frames = 0
        while True:
            image = self._get_inputs('cam')
            assert isinstance(image, np.ndarray)
            assert len(image.shape) == 2 or (len(image.shape) == 3
                                             and image.shape[2] == 3)

            # Get image from camera and convert it to grayscale if needed
            if len(image.shape) == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Prepare first image if needed
            if prev_img is None:
                prev_img = image
                yield None
                begin = time.time()
                p0 = cv2.goodFeaturesToTrack(image,
                                             mask=None,
                                             **feature_params)
                log.debug('Found {} features to track in {} sec'.format(
                    len(p0),
                    time.time() - begin))
                elapsed_frames = 0
                continue

            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_img, image, p0, None,
                                                   **lk_params)

            # Select and yield good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            yield (good_old, good_new)

            # Now update the previous frame and previous points
            prev_img = image
            p0 = good_new.reshape(-1, 1, 2)

            # Re-initialize tracking points if needed
            elapsed_frames += 1
            if elapsed_frames > self._renew_after:
                begin = time.time()
                p0 = cv2.goodFeaturesToTrack(image,
                                             mask=None,
                                             **feature_params)
                log.debug('Found {} features to track in {} sec'.format(
                    len(p0),
                    time.time() - begin))
                elapsed_frames = 0
Example #32
# --- INITIALIZATION ---
# jump the capture to the first frame of interest (frame 300)
vid.set(cv2.CAP_PROP_POS_FRAMES, 300)
# loop through all frames in video file
mask = np.zeros_like(oldFrame)
firstFrame = 300
for frameIdx in range(firstFrame, 800):
    # acquire new frame --> grayscale --> blur
    success, newFrameC = vid.read()
    newFrame = visfunctions.GrayBlur(newFrameC, 5, 5, 3)
    # drawing mask
    if not (np.mod(frameIdx, 30)) and (frameIdx != firstFrame) and drawKLT:
        mask = np.zeros_like(oldFrame)

    # propagate KLT tracker, identify good tracks
    p1, st, err = cv2.calcOpticalFlowPyrLK(oldFrame, newFrame, p0, None,
                                           **lk_params)
    stIdx = np.zeros([kp0.size, 1], dtype=np.int16)

    # compare kp descriptors every xx frames to eliminate bad tracks
    if not (np.mod(frameIdx, 5)) and (frameIdx != firstFrame):
        # detect kp, compute des within ROI in new hull image
        newHull = newFrame[y:y + h, x:x + w]
        kp1, des1 = orb.detectAndCompute(newHull, None)
        kp1 = np.array(kp1)
        # adjust point coordinates from hull frame to image frame
        for ii in range(0, kp1.size):
            kp1[ii].pt = (kp1[ii].pt[0] + x, kp1[ii].pt[1] + y)
        # identify strongest matches between library des and current des
        matches = bf.match(des0, des1)
        matches = sorted(matches, key=lambda x: x.distance)
        # build orb matching indices and update successful track index
Example #33
0
def of_tracker(v, file_name):
    # Open output file
    output_name = sys.argv[3] + file_name
    output = open(output_name,"w")

    frameCounter = 0
    # read first frame
    ret ,frame = v.read()
    if ret == False:
        return

    # detect face in first frame
    c,r,w,h = detect_one_face(frame)

    pt = (0, c + w // 2, r + h // 2)
    # Write track point for first frame
    output.write("%d,%d,%d\n" % pt)  # Write as 0,pt_x,pt_y
    frameCounter = frameCounter + 1

    # set the initial tracking window
    track_window = (c,r,w,h)

    feature_params = dict(maxCorners=100,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)
    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

    while(1):
        ret ,frame = v.read() # read another frame
        if ret == False:
            break
        c, r, w, h = detect_one_face(frame)
        if c != 0 and r != 0 and w != 0 and h != 0:
            # face detected: re-seed the tracker from the detection
            pt = (frameCounter, c + w // 2, r + h // 2)
            old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        else:
            # no detection: fall back to optical flow
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
                                                   None, **lk_params)
            # average only the successfully tracked points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            x = float(np.mean(good_new[:, 0]))
            y = float(np.mean(good_new[:, 1]))
            pt = (frameCounter, x, y)
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

        # write the result to the output file
        output.write("%d,%d,%d\n" % pt) # Write as frame_index,pt_x,pt_y
        frameCounter = frameCounter + 1

    output.close()
Example #34
0
def featureMatch(video_path, interval=1):
    print("Open video", video_path)
    cap = cv2.VideoCapture(video_path)
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
    # fgbg = cv2.createBackgroundSubtractorMOG2()

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(4, 4),
                     maxLevel=5,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               20, 0.03))

    feature_params = dict(maxCorners=100, qualityLevel=0.3,
                          minDistance=7, blockSize=7)

    # FLANN parameters
    # FLANN_INDEX_KDTREE = 0
    # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    # search_params = dict(checks=50)   # or pass empty dictionary
    # flann = cv2.FlannBasedMatcher(index_params, search_params)

    ret, img1 = cap.read()
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    kp1, des1 = sift.detectAndCompute(img1, None)
    # p = cv2.goodFeaturesToTrack(gray1, mask=None, **feature_params)
    fgmask = fgbg.apply(img1)
    # height, width, channels = img1.shape

    if_play = True

    while True:
        if if_play:
            ret, img2 = cap.read()
            if not ret:
                break

            res = img2.copy()
            # compute optical flow on the grayscale frames
            gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

            # background subtraction
            fgmask = fgbg.apply(img2)
            fgmask, boxes = getBoundingBoxes(fgmask)

            # find the keypoints and descriptors with SIFT
            kp2, des2 = sift.detectAndCompute(img2, None)

            matches = bf.knnMatch(des1, des2, k=2)
            # matches = flann.knnMatch(des1, des2, k=2)
            # Apply ratio test
            good = []
            p0 = []
            p0_match = []
            for m, n in matches:
                # queryIdx indexes kp1, trainIdx indexes kp2
                pt1 = kp1[m.queryIdx].pt
                pt2 = kp2[n.trainIdx].pt
                # keep only matches that fall inside a foreground box
                if_check = False
                for b in boxes:
                    # if fgmask[pt1[1], pt1[0]] == 0 or fgmask[pt2[1], pt2[0]] == 0:
                    if ifContains(b, pt1) or ifContains(b, pt2):
                        if_check = True
                        break

                if if_check and m.distance < 0.75 * n.distance:
                    good.append([m])
                    p0.append(pt1)
                    p0_match.append(pt2)
                    print(pt1, pt2)

            if len(p0) > 0:
                p0 = np.array(p0).astype('float32')
                p0_match = np.array(p0_match).astype('float32')
                row, col = p0.shape
                p0 = p0.reshape(row, 1, col)
                p0_match = p0_match.reshape(row, 1, col)

                p1, st, err = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None,
                                                       **lk_params)
                # Select good points
                good_new = p1[st == 1]
                good_old = p0[st == 1]
                good_match = p0_match[st == 1]

            # cv2.drawMatchesKnn expects list of lists as matches.
            img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
            for (x, y, w, h) in boxes:
                cv2.rectangle(res, (x, y), (x + w, y + h), (255, 0, 0))

            if len(p0) > 0:
                color = np.random.randint(0, 255, (len(good_new), 3))
                for i, (new, old, match) in enumerate(zip(good_new, good_old,
                                                          good_match)):
                    (x1, y1) = old.ravel()
                    (x2, y2) = new.ravel()
                    (x11, y11) = match.ravel()
                    res = cv2.line(res, (int(x1), int(y1)), (int(x2), int(y2)),
                                   color[i].tolist(), 1)
                    res = cv2.line(res, (int(x1), int(y1)),
                                   (int(x11), int(y11)), (0, 255, 0))
                    res = cv2.circle(res, (int(x1), int(y1)), 2,
                                     color[i].tolist(), 1)
                    res = cv2.circle(res, (int(x11), int(y11)), 2,
                                     color[i].tolist(), 1)

            # plt.imshow(img3), plt.show()
            # break

        cv2.imshow('frame', img3)
        cv2.imshow('fgbg', fgmask)
        cv2.imshow('res', res)
        # take keyboard input
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        if k == ord(' '):
            if_play = not if_play

        # carry the current frame's data over to the next iteration
        img1 = img2
        gray1 = gray2
        kp1 = kp2
        des1 = des2

    cap.release()
    cv2.destroyAllWindows()
Example #35
0
    def predict(self, frame, tracks):
        """
        Predicts tracklet positions in the next frame and estimates camera motion.
        Parameters
        ----------
        frame : ndarray
            The next frame.
        tracks : List[Track]
            List of tracks to predict.
            Feature points of each track are updated in place.
        Returns
        -------
        Dict[int, ndarray], ndarray
            Returns a dictionary with track IDs as keys and predicted bounding
            boxes of [x1, y1, x2, y2] as values, and a 3x3 homography matrix.
        """
        # preprocess frame
        cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY, dst=self.frame_gray)
        cv2.resize(self.frame_gray,
                   self.frame_small.shape[::-1],
                   dst=self.frame_small)

        # order tracks from closest to farthest
        tracks.sort(reverse=True)

        # detect target feature points
        all_prev_pts = []
        self.fg_mask[:] = 255
        for track in tracks:
            inside_tlbr = intersection(track.tlbr, self.frame_rect)
            target_mask = crop(self.fg_mask, inside_tlbr)
            target_area = mask_area(target_mask)
            keypoints = self._rect_filter(track.keypoints, inside_tlbr,
                                          self.fg_mask)
            # only detect new keypoints when too few are propagated
            if len(keypoints) < self.feature_density * target_area:
                img = crop(self.prev_frame_gray, inside_tlbr)
                feature_dist = self._estimate_feature_dist(
                    target_area, self.feat_dist_factor)
                keypoints = cv2.goodFeaturesToTrack(img,
                                                    mask=target_mask,
                                                    minDistance=feature_dist,
                                                    **self.target_feat_params)
                if keypoints is None:
                    keypoints = np.empty((0, 2), np.float32)
                else:
                    keypoints = self._ellipse_filter(keypoints, track.tlbr,
                                                     inside_tlbr[:2])
            # batch keypoints
            all_prev_pts.append(keypoints)
            # zero out target in foreground mask
            target_mask[:] = 0
        target_ends = list(
            itertools.accumulate(
                len(pts) for pts in all_prev_pts)) if all_prev_pts else [0]
        target_begins = itertools.chain([0], target_ends[:-1])

        # detect background feature points
        cv2.resize(self.prev_frame_gray,
                   self.prev_frame_bg.shape[::-1],
                   dst=self.prev_frame_bg)
        cv2.resize(self.fg_mask,
                   self.bg_mask_small.shape[::-1],
                   dst=self.bg_mask_small,
                   interpolation=cv2.INTER_NEAREST)
        keypoints = self.bg_feat_detector.detect(self.prev_frame_bg,
                                                 mask=self.bg_mask_small)
        if len(keypoints) == 0:
            self.bg_keypoints = np.empty((0, 2), np.float32)
            self.prev_frame_gray, self.frame_gray = self.frame_gray, self.prev_frame_gray
            self.prev_frame_small, self.frame_small = self.frame_small, self.prev_frame_small
            LOGGER.warning('Camera motion estimation failed')
            return {}, None
        keypoints = np.float32([kp.pt for kp in keypoints])
        keypoints = self._unscale_pts(keypoints, self.bg_feat_scale_factor)
        bg_begin = target_ends[-1]
        all_prev_pts.append(keypoints)

        # match features using optical flow
        all_prev_pts = np.concatenate(all_prev_pts)
        scaled_prev_pts = self._scale_pts(all_prev_pts,
                                          self.opt_flow_scale_factor)
        all_cur_pts, status, err = cv2.calcOpticalFlowPyrLK(
            self.prev_frame_small, self.frame_small, scaled_prev_pts, None,
            **self.opt_flow_params)
        status = self._get_status(status, err, self.max_error)
        all_cur_pts = self._unscale_pts(all_cur_pts,
                                        self.opt_flow_scale_factor, status)

        # save preprocessed frame buffers for next prediction
        self.prev_frame_gray, self.frame_gray = self.frame_gray, self.prev_frame_gray
        self.prev_frame_small, self.frame_small = self.frame_small, self.prev_frame_small

        # estimate camera motion
        homography = None
        prev_bg_pts, matched_bg_pts = self._get_good_match(
            all_prev_pts, all_cur_pts, status, bg_begin, -1)
        if len(matched_bg_pts) < 4:
            self.bg_keypoints = np.empty((0, 2), np.float32)
            LOGGER.warning('Camera motion estimation failed')
            return {}, None
        homography, inlier_mask = cv2.findHomography(
            prev_bg_pts,
            matched_bg_pts,
            method=cv2.RANSAC,
            maxIters=self.ransac_max_iter,
            confidence=self.ransac_conf)
        self.prev_bg_keypoints, self.bg_keypoints = self._get_inliers(
            prev_bg_pts, matched_bg_pts, inlier_mask)
        if homography is None or len(self.bg_keypoints) < self.inlier_thresh:
            self.bg_keypoints = np.empty((0, 2), np.float32)
            LOGGER.warning('Camera motion estimation failed')
            return {}, None

        # estimate target bounding boxes
        next_bboxes = {}
        self.fg_mask[:] = 255
        for begin, end, track in zip(target_begins, target_ends, tracks):
            prev_pts, matched_pts = self._get_good_match(
                all_prev_pts, all_cur_pts, status, begin, end)
            prev_pts, matched_pts = self._fg_filter(prev_pts, matched_pts,
                                                    self.fg_mask, self.size)
            if len(matched_pts) < 3:
                track.keypoints = np.empty((0, 2), np.float32)
                continue
            # model motion as partial affine
            affine_mat, inlier_mask = cv2.estimateAffinePartial2D(
                prev_pts,
                matched_pts,
                method=cv2.RANSAC,
                maxIters=self.ransac_max_iter,
                confidence=self.ransac_conf)
            if affine_mat is None:
                track.keypoints = np.empty((0, 2), np.float32)
                continue
            est_tlbr = self._estimate_bbox(track.tlbr, affine_mat)
            track.prev_keypoints, track.keypoints = self._get_inliers(
                prev_pts, matched_pts, inlier_mask)
            if (intersection(est_tlbr, self.frame_rect) is None
                    or len(track.keypoints) < self.inlier_thresh):
                track.keypoints = np.empty((0, 2), np.float32)
                continue
            next_bboxes[track.trk_id] = est_tlbr
            track.inlier_ratio = len(track.keypoints) / len(matched_pts)
            # zero out predicted target in foreground mask
            target_mask = crop(self.fg_mask, est_tlbr)
            target_mask[:] = 0
        return next_bboxes, homography
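
# A hedged helper sketch for consuming predict()'s homography: a 3x3 matrix
# maps previous-frame points into the current frame via
# cv2.perspectiveTransform. The helper name is illustrative, not part of the
# class above.
import cv2
import numpy as np

def warp_points(points, homography):
    """Map (n, 2) previous-frame points through a 3x3 homography."""
    pts = np.asarray(points, np.float32).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(pts, homography).reshape(-1, 2)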
Example #36
0
def optical_flow_tracker(v, file_name):
    # Open output file
    output_name = sys.argv[3] + file_name
    output = open(output_name, "w")

    frameCounter = 0
    # read first frame
    ret, frame = v.read()
    if ret == False:
        return

    # detect face in first frame
    c, r, w, h = detect_one_face(frame)

    pt = (0, c + w / 2, r + h / 2)
    # Write track point for first frame
    output.write("%d,%d,%d\n" % pt)  # Write as 0,pt_x,pt_y
    frameCounter = frameCounter + 1

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_mask = np.zeros((old_gray.shape[:2]), np.uint8)
    # Take a small patch around the middle of the face, else the tracked
    # point will deviate from the actual face center.
    x1 = r - 20 + h // 2
    x2 = r + 20 + h // 2
    y1 = c - 15 + w // 2
    y2 = c + 15 + w // 2
    face_mask[x1:x2, y1:y2] = old_gray[x1:x2, y1:y2]

    p0 = cv2.goodFeaturesToTrack(old_gray, mask=face_mask, **feature_params)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(frame)
    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    while (1):
        ret, frame = v.read()  # read another frame
        if ret == False:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        c1, r1, w1, h1 = detect_one_face(frame)
        if c1 != 0 and h1 != 0:
            realPos = [c1 + w1 / 2, r1 + h1 / 2]
        else:
            realPos = np.sum(good_new.T, axis=1) / len(
                good_new)  # Use optical flow in case face can't be detected.
        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = map(int, new.ravel())
            c, d = map(int, old.ravel())
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 1)
            frame = cv2.circle(frame, (a, b), 4, color[i].tolist(), -1)

        img = cv2.add(frame, mask)

        cv2.imshow('frame', img)
        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)

        pt = (frameCounter, realPos[0], realPos[1])
        output.write("%d,%d,%d\n" % pt)  # Write as frame_index,pt_x,pt_y
        frameCounter = frameCounter + 1

    output.close()
Example #37
0
def track_features(
        prvs_image,
        next_image,
        points,
        winsize=(50, 50),
        nr_levels=3,
        criteria=(3, 10, 0),
        flags=0,
        min_eig_thr=1e-4,
        verbose=False,
):
    """
    Interface to the OpenCV `Lucas-Kanade`_ features tracking algorithm
    (cv.calcOpticalFlowPyrLK).

    .. _`Lucas-Kanade`:\
       https://docs.opencv.org/3.4/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323

    .. _calcOpticalFlowPyrLK:\
       https://docs.opencv.org/3.4/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323


    .. _MaskedArray:\
        https://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray

    Parameters
    ----------

    prvs_image : array_like or MaskedArray_
        Array of shape (m, n) containing the first image.
        Invalid values (Nans or infs) are filled using the min value.

    next_image : array_like or MaskedArray_
        Array of shape (m, n) containing the successive image.
        Invalid values (Nans or infs) are filled using the min value.

    points : array_like
        Array of shape (p, 2) indicating the pixel coordinates of the
        tracking points (corners).

    winsize : tuple of int, optional
        The **winSize** parameter in calcOpticalFlowPyrLK_.
        It represents the size of the search window that it is used at each
        pyramid level.

    nr_levels : int, optional
        The **maxLevel** parameter in calcOpticalFlowPyrLK_.
        It represents the 0-based maximal pyramid level number.

    criteria : tuple of int, optional
        The **TermCriteria** parameter in calcOpticalFlowPyrLK_ ,
        which specifies the termination criteria of the iterative search
        algorithm.

    flags : int, optional
        Operation flags, see documentation calcOpticalFlowPyrLK_.

    min_eig_thr : float, optional
        The **minEigThreshold** parameter in calcOpticalFlowPyrLK_.

    verbose : bool, optional
        Print the number of vectors that have been found.

    Returns
    -------

    xy : array_like
        Array of shape (d, 2) with the x- and y-coordinates of *d* <= *p*
        detected sparse motion vectors.

    uv : array_like
        Array of shape (d, 2) with the u- and v-components of *d* <= *p*
        detected sparse motion vectors.

    Notes
    -----

    The tracking points can be obtained with the
    :py:func:`pysteps.utils.images.ShiTomasi_detection` routine.

    See also
    --------

    pysteps.motion.lucaskanade.dense_lucaskanade

    References
    ----------

    Bouguet,  J.-Y.:  Pyramidal  implementation  of  the  affine  Lucas Kanade
    feature tracker description of the algorithm, Intel Corp., 5, 4,
    https://doi.org/10.1109/HPDC.2004.1323531, 2001

    Lucas, B. D. and Kanade, T.: An iterative image registration technique with
    an application to stereo vision, in: Proceedings of the 1981 DARPA Imaging
    Understanding Workshop, pp. 121–130, 1981.
    """

    if not CV2_IMPORTED:
        raise MissingOptionalDependency(
            "opencv package is required for the calcOpticalFlowPyrLK() "
            "routine but it is not installed")

    prvs_img = np.copy(prvs_image)
    next_img = np.copy(next_image)
    p0 = np.copy(points)

    if not isinstance(prvs_img, MaskedArray):
        prvs_img = np.ma.masked_invalid(prvs_img)
    np.ma.set_fill_value(prvs_img, prvs_img.min())

    if not isinstance(next_img, MaskedArray):
        next_img = np.ma.masked_invalid(next_img)
    np.ma.set_fill_value(next_img, next_img.min())

    # scale between 0 and 255
    prvs_img = ((prvs_img.filled() - prvs_img.min()) /
                (prvs_img.max() - prvs_img.min()) * 255)

    next_img = ((next_img.filled() - next_img.min()) /
                (next_img.max() - next_img.min()) * 255)

    # convert to 8-bit
    prvs_img = prvs_img.astype("uint8")
    next_img = next_img.astype("uint8")

    # Lucas-Kanade
    # TODO: use the error returned by the OpenCV routine
    params = dict(
        winSize=winsize,
        maxLevel=nr_levels,
        criteria=criteria,
        flags=flags,
        minEigThreshold=min_eig_thr,
    )
    p1, st, __ = cv2.calcOpticalFlowPyrLK(prvs_img, next_img, p0, None,
                                          **params)

    # keep only features that have been found
    st = st.squeeze() == 1
    if np.any(st):
        p1 = p1[st, :]
        p0 = p0[st, :]

        # extract vectors
        xy = p0
        uv = p1 - p0

    else:
        xy = uv = np.empty(shape=(0, 2))

    if verbose:
        print("--- %i sparse vectors found ---" % xy.shape[0])

    return xy, uv
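
# A minimal call sketch for track_features() above, on two synthetic (m, n)
# fields; the Shi-Tomasi corners come straight from cv2.goodFeaturesToTrack
# rather than the pysteps detection routine mentioned in the notes.
import cv2
import numpy as np

field0 = np.random.rand(256, 256).astype(np.float32)
field1 = np.roll(field0, shift=(2, 3), axis=(0, 1))  # synthetic 2-3 px shift

img8 = np.uint8((field0 - field0.min()) /
                (field0.max() - field0.min()) * 255)
corners = cv2.goodFeaturesToTrack(img8, maxCorners=200,
                                  qualityLevel=0.1, minDistance=5)
xy, uv = track_features(field0, field1, corners.reshape(-1, 2),
                        winsize=(20, 20), verbose=True)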
Example #38
0
def _build_impl(frame_sequence: pims.FramesSequence,
                builder: _CornerStorageBuilder) -> None:
    image_0 = frame_sequence[0]
    lks = dict(winSize=WIN_SIZE,
               maxLevel=MAX_LEVEL,
               criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                         0.03))

    features = dict(maxCorners=MAX_CORNERS,
                    qualityLevel=QUALITY_LEVEL,
                    minDistance=MIN_DISTANCE,
                    blockSize=BLOCK_SIZE)
    corners = cv2.goodFeaturesToTrack(image=image_0, mask=None, **features)
    n = len(corners)
    ids = np.array(range(n))
    next_corner = n
    prev_corner = n
    frame_corners = FrameCorners(ids=ids,
                                 points=corners,
                                 sizes=np.full(n, MIN_DISTANCE))

    builder.set_corners_at_frame(0, frame_corners)
    for frame, image_1 in enumerate(frame_sequence[1:], 1):
        prev_img = np.uint8(image_0 * 255. / image_0.max())
        next_img = np.uint8(image_1 * 255. / image_1.max())
        # forward-backward check: track corners forward, then backward, and
        # keep only those that return to within 1 px of where they started
        point_0, _, _ = cv2.calcOpticalFlowPyrLK(prev_img, next_img, corners,
                                                 None, **lks)
        point_1, _, _ = cv2.calcOpticalFlowPyrLK(next_img, prev_img, point_0,
                                                 None, **lks)
        delta = np.abs(corners - point_1).reshape(-1, 2).max(-1)
        pred = delta < 1
        ids = ids[pred]
        corners = point_0[pred]
        n = len(corners)

        if n < MAX_CORNERS:
            mask = np.full(image_1.shape, 255, dtype=np.uint8)
            for coord in corners:
                cv2.circle(mask, (int(coord[0][0]), int(coord[0][1])),
                           MIN_DISTANCE, 0, -1)

            candidates = cv2.goodFeaturesToTrack(image_1,
                                                 mask=mask,
                                                 **features)
            if candidates is not None:
                new_corners = []
                for coord in candidates:
                    if n + len(new_corners) < MAX_CORNERS:
                        new_corners.append(coord)
                        next_corner += 1
                corners = np.concatenate([corners, new_corners])
                n = len(corners)
                ids = np.concatenate(
                    [ids, np.array(range(prev_corner, next_corner))])
                prev_corner = next_corner

        # record corners for this frame and advance, whether or not new
        # corners were added
        frame_corners = FrameCorners(
            ids=ids,
            points=corners,
            sizes=np.full(n, MIN_DISTANCE),
        )
        builder.set_corners_at_frame(frame, frame_corners)
        image_0 = image_1
Example #39
0
import sys
import cv2

src1 = cv2.imread('.\\ch10\\frame1.jpg')
src2 = cv2.imread('.\\ch10\\frame2.jpg')

if src1 is None or src2 is None:
    print('Image load failed!')
    sys.exit()

gray1 = cv2.cvtColor(src1, cv2.COLOR_BGR2GRAY)

pt1 = cv2.goodFeaturesToTrack(gray1, 50, 0.01, 10)
pt2, status, err = cv2.calcOpticalFlowPyrLK(src1, src2, pt1, None)

dst = cv2.addWeighted(src1, 0.5, src2, 0.5, 0)

for i in range(pt2.shape[0]):
    if status[i, 0] == 0:
        continue

    pt1_i = tuple(map(int, pt1[i, 0]))
    pt2_i = tuple(map(int, pt2[i, 0]))
    cv2.circle(dst, pt1_i, 4, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.circle(dst, pt2_i, 4, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.arrowedLine(dst, pt1_i, pt2_i, (0, 255, 0), 2)

cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
Example #40
0
def find_opponent(img, img_prev, display_list):
    def draw_flow(img, flow, step=16):
        h, w = img.shape[:2]
        y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1)
        fx, fy = flow[y, x].T
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        cv2.polylines(vis, lines, 0, (0, 255, 0))
        for (x1, y1), (x2, y2) in lines:
            cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
        return vis

    def draw_rects(img, rects, color):
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

    #start_time = current_milli_time()

    ## General preparations
    if 'opponent' in display_list:
        img_opponent = img_prev.copy()
    zc.check_and_display('rotated',
                         img,
                         display_list,
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    zc.check_and_display('rotated_prev',
                         img_prev,
                         display_list,
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bw_prev = cv2.cvtColor(img_prev, cv2.COLOR_BGR2GRAY)

    # valid part of img_prev
    mask_img_prev_valid = zc.get_mask(img_prev, rtn_type="mask")
    bool_img_prev_valid = zc.shrink(mask_img_prev_valid, 15,
                                    iterations=3).astype(bool)
    bool_img_prev_invalid = np.bitwise_not(bool_img_prev_valid)
    mask_screen_prev = zc.shrink(find_screen(img_prev, []), 5)
    mask_white_prev = zc.color_inrange(img_prev, 'HSV', S_U=50, V_L=130)
    mask_white_prev = mask_screen_prev
    bool_white_prev = zc.shrink(mask_white_prev,
                                13,
                                iterations=3,
                                method='circular').astype(bool)
    # valid part of img
    mask_img_valid = zc.get_mask(img, rtn_type="mask")
    bool_img_valid = zc.shrink(mask_img_valid, 15, iterations=3).astype(bool)
    bool_img_invalid = np.bitwise_not(bool_img_valid)
    mask_screen = zc.shrink(find_screen(img, []), 5)
    mask_white = zc.color_inrange(img, 'HSV', S_U=50, V_L=130)
    mask_white = mask_screen
    bool_white = zc.shrink(mask_white, 13, iterations=3,
                           method='circular').astype(bool)

    # prior score according to height
    row_score, col_score = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    #row_score = img.shape[0] * 1.2 - row_score.astype(np.float32)
    row_score = row_score.astype(np.float32) + 30

    #print "time0: %f" % (current_milli_time() - start_time)
    ## method 1: optical flow - dense
    opt_flow = np.zeros((bw.shape[0], bw.shape[1], 2), dtype=np.float32)
    opt_flow[::2, ::2, :] = cv2.calcOpticalFlowFarneback(bw_prev[::2, ::2],
                                                         bw[::2, ::2],
                                                         pyr_scale=0.5,
                                                         levels=1,
                                                         winsize=15,
                                                         iterations=3,
                                                         poly_n=7,
                                                         poly_sigma=1.5,
                                                         flags=0)
    if 'denseflow' in display_list:
        zc.display_image('denseflow',
                         draw_flow(bw, opt_flow, step=16),
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    # clean optical flow
    mag_flow = np.sqrt(np.sum(np.square(opt_flow), axis=2))
    bool_flow_valid = mag_flow > 2
    bool_flow_valid = np.bitwise_and(bool_flow_valid, bool_img_prev_valid)
    bool_flow_valid = np.bitwise_and(bool_flow_valid,
                                     np.bitwise_not(bool_white_prev))
    bool_flow_invalid = np.bitwise_not(bool_flow_valid)
    # subtract the average flow from every flow vector
    x_ave = np.mean(opt_flow[bool_flow_valid, 0])
    y_ave = np.mean(opt_flow[bool_flow_valid, 1])
    opt_flow[:, :, 0] -= x_ave
    opt_flow[:, :, 1] -= y_ave
    opt_flow[bool_flow_invalid, :] = 0
    if 'denseflow_cleaned' in display_list:
        zc.display_image('denseflow_cleaned',
                         draw_flow(bw, opt_flow, step=16),
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)

    # give the flow a "score"
    score_flow = np.sqrt(np.sum(np.square(opt_flow), axis=2))
    score_flow = score_flow * row_score
    score_horizonal = np.sum(score_flow, axis=0)
    low_pass_h = np.ones(120)
    low_pass_h /= low_pass_h.sum()
    score_horizonal_filtered_dense = np.convolve(score_horizonal,
                                                 low_pass_h,
                                                 mode='same')
    if 'dense_hist' in display_list:
        plot_bar(score_horizonal_filtered_dense, name='dense_hist')
        print(np.argmax(score_horizonal_filtered_dense))
    if np.max(score_horizonal_filtered_dense) < 20000:
        # TODO: this is also a possible indication that the rally is not on
        rtn_msg = {
            'status': 'fail',
            'message': 'Motion too small, probably no one in the scene'
        }
        return (rtn_msg, None)
    if 'opponent' in display_list:
        cv2.circle(img_opponent,
                   (np.argmax(score_horizonal_filtered_dense), 220), 20,
                   (0, 255, 0), -1)
    #print "time1: %f" % (current_milli_time() - start_time)

    ## method 2: optical flow - LK
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.03,
                          minDistance=5,
                          blockSize=3)
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    p0 = cv2.goodFeaturesToTrack(bw_prev,
                                 mask=mask_img_prev_valid,
                                 useHarrisDetector=False,
                                 **feature_params)
    if p0 is None:
        # TODO: this is also a possible indication that the rally is not on
        rtn_msg = {
            'status':
            'fail',
            'message':
            'No good featuresToTrack at all, probably no one in the scene'
        }
        return (rtn_msg, None)
    p1, st, err = cv2.calcOpticalFlowPyrLK(bw_prev, bw, p0, None, **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    if 'LKflow' in display_list:
        img_LK = img_prev.copy()
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = map(int, new.ravel())
            c, d = map(int, old.ravel())
            cv2.line(img_LK, (a, b), (c, d), (0, 255, 0), 2)
            cv2.circle(img_LK, (c, d), 5, (0, 255, 0), -1)
        zc.display_image('LKflow',
                         img_LK,
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    bool_flow_valid = np.bitwise_and(bool_img_valid,
                                     np.bitwise_not(bool_white))
    bool_flow_invalid = np.bitwise_not(bool_flow_valid)
    bool_flow_valid_prev = np.bitwise_and(bool_img_prev_valid,
                                          np.bitwise_not(bool_white_prev))
    bool_flow_invalid_prev = np.bitwise_not(bool_flow_valid_prev)
    is_reallygood = np.zeros((good_new.shape[0]), dtype=bool)
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = map(int, new.ravel())
        c, d = map(int, old.ravel())
        # skip points in invalid regions or outside the image
        if (bool_flow_invalid_prev[d, c] or max(a, b) > config.O_IMG_HEIGHT
                or min(a, b) < 0 or bool_flow_invalid[b, a]):
            continue
        is_reallygood[i] = True
    reallygood_new = good_new[is_reallygood]
    reallygood_old = good_old[is_reallygood]
    motion = reallygood_new - reallygood_old
    motion_real = motion - np.mean(motion, axis=0)
    if 'LKflow_cleaned' in display_list:
        img_LK_cleaned = img_prev.copy()
        img_LK_cleaned[bool_flow_invalid_prev, :] = [0, 0, 255]
        for i, (new, old) in enumerate(zip(reallygood_new, reallygood_old)):
            c, d = map(int, old.ravel())
            cv2.line(img_LK_cleaned, (c, d),
                     (int(c + motion_real[i, 0]), int(d + motion_real[i, 1])),
                     (0, 255, 0), 2)
            cv2.circle(img_LK_cleaned, (c, d), 5, (0, 255, 0), -1)
        zc.display_image('LKflow_cleaned',
                         img_LK_cleaned,
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    score_flow = np.zeros(bw.shape, dtype=np.float32)
    score_flow[reallygood_old[:, 1].astype(int),
               reallygood_old[:, 0].astype(int)] = np.sqrt(
                   np.sum(np.square(motion_real), axis=1))
    score_flow = score_flow * row_score
    score_horizonal = np.sum(score_flow, axis=0)
    low_pass_h = np.ones(120)
    low_pass_h /= low_pass_h.sum()
    score_horizonal_filtered_LK = np.convolve(score_horizonal,
                                              low_pass_h,
                                              mode='same')
    if 'LK_hist' in display_list:
        plot_bar(score_horizonal_filtered_LK, name='LK_hist')
        print(np.argmax(score_horizonal_filtered_LK))
    # if motion too small, probably no one is there...
    if np.max(score_horizonal_filtered_LK) < 900:
        # TODO: this is also a possible indication that the rally is not on
        rtn_msg = {
            'status': 'fail',
            'message': 'Motion too small, probably no one in the scene'
        }
        return (rtn_msg, None)
    if 'opponent' in display_list:
        cv2.circle(img_opponent, (np.argmax(score_horizonal_filtered_LK), 220),
                   20, (0, 0, 255), -1)
    #print "time2: %f" % (current_milli_time() - start_time)

    ## method 3: remove white wall
    mask_screen = zc.shrink(find_screen(img_prev, []), 5)
    mask_white = zc.color_inrange(img_prev, 'HSV', S_U=50, V_L=130)
    mask_white = mask_screen
    zc.check_and_display('mask_white_wall',
                         mask_white,
                         display_list,
                         is_resize=False,
                         wait_time=config.DISPLAY_WAIT_TIME)
    score = row_score
    score[bool_img_invalid] = 0
    score[bool_white] = 0
    score_horizonal = np.sum(score, axis=0)
    low_pass_h = np.ones(120)
    low_pass_h /= low_pass_h.sum()
    score_horizonal_filtered_wall = np.convolve(score_horizonal,
                                                low_pass_h,
                                                mode='same')
    if 'wall_hist' in display_list:
        plot_bar(score_horizonal_filtered_wall, name='wall_hist')
        print(np.argmax(score_horizonal_filtered_wall))
    if 'opponent' in display_list:
        cv2.circle(img_opponent,
                   (np.argmax(score_horizonal_filtered_wall), 220), 20,
                   (255, 0, 0), -1)
    #print "time3: %f" % (current_milli_time() - start_time)

    ## combining results of three methods
    #score_horizonal_filtered = score_horizonal_filtered_dense * score_horizonal_filtered_LK * score_horizonal_filtered_wall
    score_horizonal_filtered = score_horizonal_filtered_wall / 20 + score_horizonal_filtered_dense / 10 + score_horizonal_filtered_LK * 2
    opponent_x = np.argmax(score_horizonal_filtered)
    if 'opponent' in display_list:
        cv2.circle(img_opponent, (opponent_x, 220), 20, (200, 200, 200), -1)
        zc.check_and_display('opponent',
                             img_opponent,
                             display_list,
                             is_resize=False,
                             wait_time=config.DISPLAY_WAIT_TIME)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, opponent_x)
Example #41
0
def opt_flow_pair(img1, img2):
    # calculate first img's gftt
    p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None, **lk_params)
    if (p1 is None):
        return 0

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    # color = np.random.randint(0,255,(100,3))
    # mask = np.zeros_like(img1)
    # # draw the tracks
    # for i,(new,old) in enumerate(zip(good_new,good_old)):
    #     a,b = new.ravel()
    #     c,d = old.ravel()
    #     mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
    #     frame = cv2.circle(img2,(a,b),5,color[i].tolist(),-1)
    # img = cv2.add(frame,mask)

    # cv2.imshow('frame', img)  # `img` is only defined by the drawing block above

    # sift
    sift = cv2.SIFT_create()
    # build KeyPoints at the tracked locations (positional args: x, y, size)
    kp1 = [cv2.KeyPoint(float(f[0]), float(f[1]), 20) for f in good_old]
    kp2 = [cv2.KeyPoint(float(f[0]), float(f[1]), 20) for f in good_new]
    # kp1 = sift.detect(img1, None)
    # kp2 = sift.detect(img2, None)
    kp1, des1 = sift.compute(img1, kp1)
    kp2, des2 = sift.compute(img2, kp2)
    des1 = np.float32(des1)
    des2 = np.float32(des2)

    # bf = cv2.BFMatcher()
    # matches = bf.knnMatch(des1,des2, k=2)

    # FLANN parameters: Fast Library for Approximate Nearest Neighbor
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # K=2 => get 2 Nearest Neighbors which is then filtered out after applying a ratio test
    # This filters out around 90% of false matches
    #(Learning OpenCV 3 Computer Vision with Python By Joe Minichino, Joseph Howse)
    matches = flann.knnMatch(des1, des2,
                             k=2)  # Interest points from image2 to image1

    good = []
    for m in matches:
        if (m[0].distance < 0.5 * m[1].distance):
            good.append(m)
    matches = np.asarray(good)

    if (len(matches[:, 0]) >= 4):
        src = np.float32([kp1[m.queryIdx].pt
                          for m in matches[:, 0]]).reshape(-1, 1, 2)
        dst = np.float32([kp2[m.trainIdx].pt
                          for m in matches[:, 0]]).reshape(-1, 1, 2)
        H, masked = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)

        dst = cv2.warpPerspective(
            img1, H,
            ((img1.shape[1] + img2.shape[1]), img2.shape[0]))  #wraped image
        dst[0:img2.shape[0], 0:img2.shape[1]] = img2  #stitched image
        cv2.imwrite('output.jpg', dst)
        plt.imshow(dst)
        plt.show()
    else:
        raise AssertionError('Can’t find enough keypoints.')

    # get mean movement vector
    delta = good_new - good_old
    delta_mean = np.mean(delta, axis=0)
    delta_std = np.std(delta, axis=0)
    direction = np.arctan2(delta_mean[1], delta_mean[0])
    amplitude = np.hypot(*delta_mean)
    print(amplitude, direction)
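
# opt_flow_pair() above depends on module-level feature_params and lk_params
# (and the usual cv2/numpy/matplotlib imports); a hedged sketch of those
# globals and a call on two grayscale frames, with placeholder file names:
import cv2

feature_params = dict(maxCorners=100, qualityLevel=0.3,
                      minDistance=7, blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))

img1 = cv2.imread('frame_a.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('frame_b.jpg', cv2.IMREAD_GRAYSCALE)
opt_flow_pair(img1, img2)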
Example #42
0
def backprop(cache):
    w = 640
    h = 360
    framen = len(cache) - 1  # number of frame pairs (current vs. next frame, hence -1)

    bbox0 = (0, 0, w, h)  # initial ROI (the whole frame)
    bboxb = [(bbox0[0] / w * size[0], bbox0[1] * size[1] / h,
              bbox0[2] * size[0] / w, bbox0[3] * size[1] / h)
             ]  # stores each frame's ROI for later evaluation
    skip = False
    for i in range(framen):
        if not skip:  # unless skipping, refresh the previous frame
            # current frame im0
            name0 = cache[i]
            img0 = cv2.cvtColor(name0, cv2.COLOR_BGR2GRAY)
            im0 = cv2.resize(img0, (w, h))  # downscale to speed up processing
        # next frame im
        name = cache[i + 1]
        img = cv2.cvtColor(name, cv2.COLOR_BGR2GRAY)
        im = cv2.resize(img, (w, h))

        if i == 0:  # first frame of the video: initialize tracking points pt0
            pt0 = cv2.goodFeaturesToTrack(im0, 500, 0.01, int(w / 50))
        elif not skip:  # afterwards, iterate on the previous result
            pt0 = pt

        # track the points into the next frame
        pt, st, err = cv2.calcOpticalFlowPyrLK(im0, im, pt0, None)

        pt1 = []  # tracking points that survive filtering

        x = []
        y = []

        for j in range(len(pt)):
            if st[j][0] == 1 and ifsame(pt0[j][0], pt[j][0],
                                        (0, 0, w, h)):  # drop mismatched points
                pt1 += [[[pt[j][0][0], pt[j][0][1]]]]  # keep for the next iteration

                # x and y coordinates of each point
                x += [pt[j][0][0]]
                y += [pt[j][0][1]]

        if len(pt1) < 50:  # fewer than 50 points left: skip this frame
            skip = True
        else:
            skip = False

        if not skip:
            # derive a soft-margin bbox from the x/y coordinates
            lefttest, righttest = getedge(x)
            uptest, downtest = getedge(y)

            pt = np.array(pt1, dtype=np.float32)  # list -> np.array

            bbox1 = (lefttest, uptest, righttest, downtest)  # next frame's ROI
            bbox0 = fixbbox(bbox1)  # fix the ROI when its aspect ratio is out of range, then update

            bboxb += [(bbox0[0] / w * size[0], bbox0[1] * size[1] / h,
                       bbox0[2] * size[0] / w, bbox0[3] * size[1] / h)
                      ]  # append to the ROI history list

        else:
            bboxb += [0]  # on tracking failure, use 0 as a placeholder
    # backfill the first five entries with the first stable ROI
    bboxb[0] = bboxb[5]
    bboxb[1] = bboxb[5]
    bboxb[2] = bboxb[5]
    bboxb[3] = bboxb[5]
    bboxb[4] = bboxb[5]
    return bboxb
Example #43
0
    def __generate_transformations(self):
        """
        An internal method that generates previous-to-current transformations [dx, dy, da].
        """
        frame_gray = cv2.cvtColor(
            self.__frame_queue[-1],
            cv2.COLOR_BGR2GRAY)  # retrieve current frame and convert to gray
        frame_gray = self.__clahe.apply(frame_gray)  # optimize it

        # calculate optical flow using Lucas-Kanade differential method
        curr_kps, status, error = cv2.calcOpticalFlowPyrLK(
            self.__previous_gray, frame_gray, self.__previous_keypoints, None)

        # select only valid key-points
        valid_curr_kps = curr_kps[status == 1]  # current
        valid_previous_keypoints = self.__previous_keypoints[status ==
                                                             1]  # previous

        # calculate the optimal affine transformation between previous and current key-points
        if check_CV_version() == 3:
            # backward compatibility with OpenCV3
            transformation = cv2.estimateRigidTransform(
                valid_previous_keypoints, valid_curr_kps, False)
        else:
            transformation = cv2.estimateAffinePartial2D(
                valid_previous_keypoints, valid_curr_kps)[0]

        # check if transformation is not None
        if transformation is not None:
            # previous-to-current translation in x direction
            dx = transformation[0, 2]
            # previous-to-current translation in y direction
            dy = transformation[1, 2]
            # previous-to-current rotation angle
            da = np.arctan2(transformation[1, 0], transformation[0, 0])
        else:
            # otherwise zero it
            dx = dy = da = 0

        # save this transformation
        self.__transforms.append([dx, dy, da])

        # calculate path from cumulative transformations sum
        self.frame_transform = np.array(self.__transforms, dtype="float32")
        self.__path = np.cumsum(self.frame_transform, axis=0)
        # create smoothed path from a copy of path
        self.__smoothed_path = np.copy(self.__path)

        # re-calculate and save GFTT key-points for current gray frame
        self.__previous_keypoints = cv2.goodFeaturesToTrack(
            frame_gray,
            maxCorners=200,
            qualityLevel=0.05,
            minDistance=30.0,
            blockSize=3,
            mask=None,
            useHarrisDetector=False,
            k=0.04,
        )
        # save this gray frame for further processing
        self.__previous_gray = frame_gray[:]
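
# The method above leaves __smoothed_path as a plain copy of the cumulative
# path; a typical next step is to low-pass it. A box-filter sketch follows
# (the smoothing radius is an assumption, not taken from the class above):
import numpy as np

def smooth_path(path, radius=15):
    """Box-filter each of the dx, dy, da columns of an (n, 3) path."""
    kernel = np.ones(2 * radius + 1) / (2 * radius + 1)
    padded = np.pad(path, ((radius, radius), (0, 0)), mode="edge")
    return np.column_stack([np.convolve(padded[:, i], kernel, mode="valid")
                            for i in range(path.shape[1])])

# stabilizing transforms would then be: transforms + (smoothed_path - path)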
Example #44
0
while True:
    #time.sleep(0.02)
    frame_count = frame_count + 1
    decisionFrameCount = decisionFrameCount + 1

    present = time.time()
    fps = present - past
    fps = 1 / fps
    #print "FPS:",fps
    past = present

    #time.sleep(0.2)
    s, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)
    old_gray = frame_gray.copy()

    nDetectedpoints = int(st.sum())
    # print(nDetectedpoints == 4)

    if nDetectedpoints < 8:
        print("--------->", "reinit p0")
        mask = np.copy(mask_blank)
        init_points()
        continue

    p1_new = p1[st == 1]
    p0_old = p0[st == 1]

    # print('p0' + str(p1))
Example #45
0
    def start(self):

        prev_image = None
        feature_detector = cv2.FastFeatureDetector_create(
            threshold=25, nonmaxSuppression=True)

        lk_params = dict(winSize=(21, 21),
                         criteria=(cv2.TERM_CRITERIA_EPS
                                   | cv2.TERM_CRITERIA_COUNT, 30, 0.03))

        camera_matrix = np.array([[718.8560, 0.0, 607.1928],
                                  [0.0, 718.8560, 185.2157], [0.0, 0.0, 1.0]])

        current_pos = np.zeros((3, 1))
        current_rot = np.eye(3)

        br = CvBridge()

        index = 0

        path = r'/home/user/catkin_ws/src/publisher/src/point_demo/sequence_00'

        image_format_left = '{:06d}.png'

        while not rospy.is_shutdown():

            loc = os.path.join(path, image_format_left.format(index))
            print(loc)

            image = cv2.imread(loc)

            # reuse the feature detector created before the loop
            kps = feature_detector.detect(image)

            kp = np.array([x.pt for x in kps], dtype=np.float32)

            if prev_image is None:
                prev_image = image
                prev_keypoint = kp
                continue

            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_image, image,
                                                   prev_keypoint, None,
                                                   **lk_params)

            E, mask = cv2.findEssentialMat(p1, prev_keypoint, camera_matrix,
                                           cv2.RANSAC, 0.999, 1.0, None)

            points, R, t, mask = cv2.recoverPose(E, p1, prev_keypoint,
                                                 camera_matrix)

            # monocular VO has no absolute scale; assume unit scale per frame
            scale = 1.0

            current_pos += current_rot.dot(t) * scale
            current_rot = R.dot(current_rot)

            x, y, z = current_pos.ravel()

            p = Point()

            p.x = float(x)
            p.y = float(z)
            p.z = 0.0

            sy = math.sqrt(current_rot[0, 0] * current_rot[0, 0] +
                           current_rot[1, 0] * current_rot[1, 0])
            roll = math.atan2(current_rot[2, 1], current_rot[2, 2])

            pitch = math.atan2(-current_rot[2, 0], sy)

            yaw = math.atan2(current_rot[1, 0], current_rot[0, 0])

            roll = math.degrees(roll)
            pitch = math.degrees(pitch)
            yaw = math.degrees(yaw)

            image = cv2.drawKeypoints(image, kps, None)

            self.img_pub.publish(br.cv2_to_imgmsg(image, "bgr8"))

            self.points.points.append(p)

            self.point_pub.publish(self.points)

            index = index + 1
            prev_image = image
            prev_keypoint = kp
            self.loop_rate.sleep()
Example #46
0
 ret, frame = cap.read()
 if not ret:
     break
 img_draw = frame.copy()
 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 # first frame: set up tracking
 if prevImg is None:
     prevImg = gray
     # image matching the frame size for drawing the track lines
     lines = np.zeros_like(frame)
     # detect corners to start tracking ---①
     prevPt = cv2.goodFeaturesToTrack(prevImg, 200, 0.01, 10)
 else:
     nextImg = gray
     # find the corner points in the next frame with optical flow ---②
     nextPt, status, err = cv2.calcOpticalFlowPyrLK(prevImg, nextImg,
                                                    prevPt, None,
                                                    criteria=termcriteria)
     # select corners with correspondences (corners that moved) ---③
     prevMv = prevPt[status == 1]
     nextMv = nextPt[status == 1]
     for i, (p, n) in enumerate(zip(prevMv, nextMv)):
         px, py = map(int, p.ravel())
         nx, ny = map(int, n.ravel())
         # draw a line from the previous corner to the new one ---④
         cv2.line(lines, (px, py), (nx, ny), color[i].tolist(), 2)
         # draw a dot at the new corner
         cv2.circle(img_draw, (nx, ny), 2, color[i].tolist(), -1)
     # blend the accumulated track lines into the output image ---⑤
     img_draw = cv2.add(img_draw, lines)
     # carry the frame and corner points over to the next iteration
     prevImg = nextImg
     prevPt = nextMv.reshape(-1, 1, 2)
Example #47
0
    # read frame of video
    _, img = cap.read()

    # try/except to see if video is over, and convert frame to gray
    try:
        old_gray = img_gray.copy()
    except:
        old_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    try:
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    except:
        break

    # try to track using optical flow (returns points, status, error)
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, img_gray, p0, None,
                                           **lk_params)
    if st[0][0] == 0:
        wait_time = 0  # pause playback when the point is lost

    # convert the coordinates to two integers
    xy = int(p1[0][0][0]), int(p1[0][0][1])
    # update p0 to p1
    p0 = p1
    # make a circle around the object being tracked
    cv2.circle(img, xy, 5, (244, 4, 4), 1)
    cv2.circle(img, xy, 15, (244, 4, 4), 1)

    # show the frame and wait
    cv2.imshow('img', img)
    k = cv2.waitKey(wait_time)

    # if the keypress is "t"
Example #48
0
def camera_thread_better():
    global key, ra, dec, killFlag, error_in_deg_h, error_in_deg_v
    global gray
    global mouseX, mouseY
    global update_tracker
    global start_tracking, track_x, track_y, bias_h, bias_v, width, height
    global p_bias_h, p_bias_v, track_center, p_e_h, p_e_v

    start_tracking = False
    is_tracking = False
    track_x, track_y = (0, 0)
    tracking_corners = None

    lk_params = dict(winSize=(15, 15),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    minimun_pts_track = 10

    cam = cv2.VideoCapture(0)

    cv2.namedWindow('viewer', cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("viewer", draw_circle)

    old_gray = None

    while True:
        ret, frame = cam.read()

        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            height, width = gray.shape



            if is_tracking:
                new_points, status, error = cv2.calcOpticalFlowPyrLK(old_gray, gray, tracking_corners, None,
                                                                     **lk_params)
                tracking_corners = new_points

                if tracking_corners.shape[0] < minimun_pts_track:
                    # too few points survived: re-seed a grid of points around
                    # the last clicked location (an ndarray has no append(),
                    # so collect into a list first)
                    seeds = []
                    for i in range(-5, 5):
                        for j in range(-5, 5):
                            seeds.append([track_x + i, track_y + j])
                    tracking_corners = np.array(seeds).reshape(
                        len(seeds), 1, 2).astype('float32')

                track_center = tracking_corners[:, 0, :].mean(axis=0).astype('int32')
                #print(track_center)
                # x, y = new_points.ravel()
                cv2.circle(frame, (track_center[0], track_center[1]), 5, (0, 255, 0), -1)

                #x = track_center[0]
                #y = track_center[1]


                pix_to_deg_v = height / fov_v
                pix_to_deg_h = width / fov_h

                error_x = (width / 2 + p_bias_h - p_e_h) - track_center[0]
                error_y = (height / 2 + p_bias_v - p_e_v) - track_center[1]

                error_in_deg_v = error_y / pix_to_deg_v
                error_in_deg_h = error_x / pix_to_deg_h

                #print(error_x,error_y)

            if start_tracking:
                tracking_corners = []
                for i in range(-5, 5):
                    for j in range(-5, 5):
                        tracking_corners.append([track_x + i, track_y + j])
                tracking_corners = np.array(tracking_corners).reshape(len(tracking_corners), 1, 2).astype('float32')
                # print(tracking_corners)
                start_tracking = False
                is_tracking = True

            cv2.line(frame, (width // 2, height // 2 - 10),
                     (width // 2, height // 2 + 10), (0, 255, 0), 3)
            cv2.line(frame, (width // 2 - 10, height // 2),
                     (width // 2 + 10, height // 2), (0, 255, 0), 3)

            old_gray = gray.copy()

            cv2.imshow("viewer", frame)
            cv2.waitKey(1)
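
# Quick standalone check of the pixel-to-degree conversion used above, with
# an assumed 62.2-degree horizontal FOV on a 1280 px wide frame (both values
# are illustrative; the real fov_h/width come from the camera):
width, fov_h = 1280, 62.2       # px, degrees
pix_to_deg_h = width / fov_h    # ~20.6 px per degree
error_x = 103                   # target sits 103 px off center
print(error_x / pix_to_deg_h)   # ~5.0 degrees of pointing error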
Example #49
0
import cv2
import numpy as np
from numpy.linalg import inv
from matplotlib import pyplot as plt

frame1 = cv2.imread('1.jpg', 0)
p0 = cv2.goodFeaturesToTrack(frame1, 100, 0.01, 1, useHarrisDetector=True)

frame4 = cv2.imread('4.jpg', 0)
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                           0.03))
p1, st, err = cv2.calcOpticalFlowPyrLK(frame1, frame4, p0, None, **lk_params)

goodp1 = np.array(p1, dtype='int')
goodp0 = np.array(p0, dtype='int')

#Plotting
rows, cols = frame1.shape
corrImage = np.zeros((rows, (2 * cols)), dtype=np.uint8)
corrImage[0:rows, 0:cols] = frame1[:, :]
corrImage[0:rows, cols:(2 * cols)] = frame4[:, :]
pairs = zip(goodp0, goodp1)
u, v = [], []
x, y = [], []
for (old, new) in pairs:
    a, b = old.ravel()
    c, d = new.ravel()
    cv2.line(corrImage, (a, b), (c + cols, d), [0, 255, 0], 1)
    #Taking a patch at the center of the image for better homography as only 8 points are being considered
Example #50
0
    def run(self):
        color = np.random.randint(0, 255, (100, 3))
        out = cv.VideoWriter('result.avi',
                             cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                             (self.frameWidth, self.frameHeight))
        while True:
            ret, frame = self.cam.read()
            if ret:
                # frameNew = imutils.resize(frame, width=500)
                vis = frame.copy()
                layer = np.zeros_like(frame, dtype="uint8")
                frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                
                if len(self.tracks) > 0:
                    img0, img1 = self.prev_gray, frame_gray
                    p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                    p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0,
                                                            None, **lk_params)
                    p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1,
                                                             None, **lk_params)
                    # forward-backward check: keep points that track back to
                    # within 1 px of where they started
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 1
                    new_tracks = []
                    for tr, (x, y), good_flag in zip(self.tracks,
                                                     p1.reshape(-1, 2), good):
                        if not good_flag:
                            continue
                        tr.append((x, y))
                        if len(tr) > self.track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv.circle(layer, (int(x), int(y)), 2, (255, 255, 255), -1)
                        cv.circle(layer, (int(x), int(y)), 1, (0, 255, 0), -1)
                    self.tracks = new_tracks
                    # draw all track polylines on the overlay layer
                    cv.polylines(layer, [np.int32(tr) for tr in self.tracks],
                                 False, (0, 255, 0))
                if self.frame_idx % self.detect_interval == 0:
                    mask = np.zeros_like(frame_gray)
                    mask[:] = 255
                    for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                        cv.circle(mask, (x, y), 5, 0, -1)
                    p = cv.goodFeaturesToTrack(frame_gray, mask=mask,
                                               **feature_params)
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            self.tracks.append([(x, y)])
                
                self.frame_idx += 1
                self.prev_gray = frame_gray
                #out.write(layer)
                cv.imshow('Animation', layer)
                cv.imshow('Original', frame)

            ch = cv.waitKey(1)
            if ch == ord('q'):
                break
                
        self.cam.release()
        out.release()  # finalize the video file
        cv.destroyAllWindows()
Example #51
0
def of_tracker(v, file_name):
    # Open output file
    output_name = sys.argv[3] + file_name
    output = open(output_name, "w")

    frameCounter = 0

    # read first frame
    ret, old_frame = v.read()

    if not ret:
        return

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    # detect face in first frame
    c, r, w, h = detect_one_face(old_frame)
    rect = cv2.rectangle(old_frame, (c, r), (c + w, r + h), (255, 0, 0), 2)
    #cv2.imshow('im',old_frame)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    old_track_window = old_gray[r + 5:r + h - 5, c + 5:c + w - 5]
    p0 = cv2.goodFeaturesToTrack(old_track_window, mask=None, **feature_params)

    for i in range(0, len(p0)):
        p0[i][0][0] = p0[i][0][0] + c
        p0[i][0][1] = p0[i][0][1] + r

    rect = cv2.rectangle(old_frame, (c, r), (c + w, r + h), (255, 0, 0), 2)
    #cv2.imshow('im',old_track_window)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    frameCounter = 0
    tracking_pt = np.mean(p0, axis=0)

    # Write track point for first frame
    output.write("%d,%d,%d\n" %
                 (0, c + w / 2, r + h / 2))  # Write as 0,pt_x,pt_y
    frameCounter = frameCounter + 1

    # set the initial tracking window
    track_window = (c, r, w, h)

    while True:
        ret, frame = v.read()  # read another frame
        if not ret:
            break

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #track_window = frame_gray[r:r+h, c:c+w]

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            # cast to int: cv2 drawing functions require integer coordinates
            mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
            frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)

        cv2.imshow('opticalFlow', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)

        tracking_pt = np.mean(p0, axis=0)

        # write the result to the output file
        output.write("%d,%d,%d\n" %
                     (frameCounter, tracking_pt[0][0],
                      tracking_pt[0][1]))  # Write as 0,pt_x,pt_y
        frameCounter = frameCounter + 1

    output.close()
Example #52
0
        a = cv2.waitKey(5)
        if a == 27:  # Esc: clean up
            cv2.destroyAllWindows()
            cap.release()
        elif a == 97:  # 'a': start tracking
            break

    #----Actual Tracking-----
    while True:
        # Now that we have oldFrame we can read new_frame, and with the old
        # corners we can compute the new corners and update accordingly.

        #read new frame and cvt to gray
        ret, frame = cap.read()
        frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #finding the new tracked points
        new_corners, st, err = cv2.calcOpticalFlowPyrLK(
            oldFrameGray, frameGray, old_corners, None, **lk_params)

        #---pruning far away points:
        #first finding centroid
        r_add, c_add = 0, 0
        for corner in new_corners:
            r_add = r_add + corner[0][1]
            c_add = c_add + corner[0][0]
        centroid_row = int(1.0 * r_add / len(new_corners))
        centroid_col = int(1.0 * c_add / len(new_corners))
        #draw centroid
        cv2.circle(frame, (int(centroid_col), int(centroid_row)), 5,
                   (255, 0, 0))
        #add only those corners to new_corners_updated which are at a distance of 30 or less
        new_corners_updated = new_corners.copy()
        tobedel = []
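The snippet is cut off before the pruning it describes. A sketch of what the distance check might look like, based on the 30-pixel threshold stated in the comment (everything else is an assumption):

# drop corners farther than 30 px from the centroid (sketch, not original code)
for idx, corner in enumerate(new_corners_updated):
    col, row = corner[0][0], corner[0][1]
    dist = ((row - centroid_row) ** 2 + (col - centroid_col) ** 2) ** 0.5
    if dist > 30:
        tobedel.append(idx)
new_corners_updated = np.delete(new_corners_updated, tobedel, axis=0)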
Example #53
0
stp = 0

## optical movement

old_pts = np.array([[x, y]], dtype=np.float32).reshape(-1, 1, 2)

mask = np.zeros_like(inp_img)

while True:
    _, new_inp_img = cap.read()
    new_inp_img = cv2.flip(new_inp_img, 1)
    new_gray = cv2.cvtColor(new_inp_img, cv2.COLOR_BGR2GRAY)
    new_pts, status, err = cv2.calcOpticalFlowPyrLK(gray_inp_img,
                                                    new_gray,
                                                    old_pts,
                                                    None,
                                                    maxLevel=1,
                                                    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                                                              15, 0.08))

    for i, j in zip(old_pts, new_pts):
        
        x, y = j.ravel()
        a, b = i.ravel()

        if cv2.waitKey(2) & 0xff == ord('g'):
            stp = 1

        elif cv2.waitKey(0) & 0xff == ord('s'):
            stp = 0
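The loop above ends before anything is drawn. In this style of point-drawing demo, the mask created earlier would accumulate strokes between the old and new point while stp is set; a hedged sketch of how the loop might continue (assumed, not from the original):

        # sketch of the missing loop body: draw while the pen is 'down'
        if stp == 1:
            mask = cv2.line(mask, (int(a), int(b)), (int(x), int(y)),
                            (0, 255, 0), 4)

    # overlay the accumulated strokes and roll the frames forward
    cv2.imshow("draw", cv2.add(new_inp_img, mask))
    gray_inp_img = new_gray.copy()
    old_pts = new_pts.reshape(-1, 1, 2)
    if cv2.waitKey(1) & 0xff == 27:  # Esc quits
        break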
Example #54
0

cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame", select_point)

point_selected = False
point = ()
old_points = np.array([[]])
while True:
    _, frame = cap.read()
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if point_selected:
        cv2.circle(frame, point, 5, (0, 0, 255), 2)

        new_points, status, error = cv2.calcOpticalFlowPyrLK(
            old_gray, gray_frame, old_points, None, **lk_params)
        old_gray = gray_frame.copy()
        old_points = new_points

        x, y = new_points.ravel()
        cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 0), -1)

    cv2.imshow("Frame", frame)

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
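Neither select_point nor the initialization of old_gray appears in the snippet. A plausible reconstruction of the missing pieces, based on how the loop uses them (hypothetical, not from the original):

# hypothetical callback: records the clicked point and seeds the tracker
def select_point(event, x, y, flags, params):
    global point, point_selected, old_points
    if event == cv2.EVENT_LBUTTONDOWN:
        point = (x, y)
        point_selected = True
        old_points = np.array([[x, y]], dtype=np.float32)

# old_gray also needs an initial value before the loop, e.g.:
# _, first_frame = cap.read()
# old_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)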
Example #55
0
def getFrame(img):
    """ This function returns a frame and applies haarcascades and 
    optical flow for further analysis
    """
    global p0
    global old_gray
    global hand_pos
    global count
    global mask_features

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    frame_shape = gray.shape
    mask = np.zeros_like(img)

    # Haarcascade Detection:
    try:
        palms = palm_cascade.detectMultiScale(gray,
                                              scaleFactor=1.05,
                                              minNeighbors=15,
                                              minSize=(20, 20))
        fists = fist_cascade.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=13,
                                              minSize=(40, 40))
    except cv2.error:
        print(
            "Could not find 'haarcascade_fist.xml' and/or 'haarcascade_palm.xml' file in: "
            + file_location + '/')
        palms, fists = (), ()  # avoid a NameError below when the cascades are missing

    if np.any(fists):
        x, y, w, h = fists[0]
        hand_shape = w * 1.8

    if np.any(palms):
        multi_hand_dist = []
        for x, y, w, h in palms:  # For every hand detected:
            multi_hand_dist.append(
                np.hypot(hand_pos[0] - (x + (w / 2)),
                         hand_pos[1] - (y + (h / 2))))
            cv2.rectangle(mask, (x, y), (x + w, y + h), (0, 40, 250), 1)
            cv2.circle(mask, (int(x + (w / 2)), int(y + (h / 2))),
                       int(w / 1.7),
                       color=(0, 0, 0),
                       thickness=-1)
        x, y, w, h = palms[np.argmin(
            multi_hand_dist
        )]  # Use the detected hand which is closest to the previously detected hand
        hand_shape = w
        count = 0

    if np.any(palms) or np.any(fists):
        hand_pos = np.array([
            int(x + (w / 2)), int(y + (h / 2))
        ])  # Defines the new hand position as the center of the detected hand
        cv2.rectangle(mask, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.circle(mask,
                   tuple(hand_pos),
                   int(w / 1.8),
                   color=(0, 0, 0),
                   thickness=-1)
        mask_features = np.zeros(
            frame_shape,
            dtype=np.uint8)  # Masking out the hand for goodFeaturesToTrack
        cv2.circle(mask_features,
                   tuple((hand_pos[0], hand_pos[1] - 10)),
                   int(hand_shape / 1.8),
                   color=1,
                   thickness=-1)
        p0 = cv2.goodFeaturesToTrack(old_gray,
                                     mask=mask_features,
                                     **feature_params)

    # Optical Flow:
    elif p0 is not None:  # If no hand/fist was detected, use optical flow to give an estimate for the location of the hand
        count += 1
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, gray, p0, None,
                                               **lk_params)
        if np.sum(st) > 0:  # If it found at least 1 good new point
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            p0 = good_new.reshape(-1, 1, 2)
            flows = []
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                # cast to int: cv2 drawing functions require integer coordinates
                cv2.circle(mask, (int(a), int(b)), 2, [0, 255, 0], -1)
                flows.append([a - c, b - d])
            flow = np.average(
                flows, 0)  # Average all movements of every tracked point
            hand_pos = np.add(hand_pos, flow)  # Update the hand position
            hand_pos = np.array([int(hand_pos[0]), int(hand_pos[1])])

    cv2.circle(mask, tuple(hand_pos), 4, (0, 255, 0), -1)
    old_gray = gray.copy()

    return img, mask, mask_features, hand_pos, palms, fists, count
Example #56
0
def frontprop(cache):
    fNUMS = len(cache)
    framen = fNUMS - 1
    pts = []  # per-frame list of tracked point indices and coordinates
    bboxf = [0] * fNUMS
    skip = False
    for i in range(framen):
        if not skip:
            name0 = cache[framen - i]
            img0 = cv2.cvtColor(name0, cv2.COLOR_BGR2GRAY)
        name = cache[framen - i - 1]
        img = cv2.cvtColor(name, cv2.COLOR_BGR2GRAY)

        if i == 0:
            pt0 = cv2.goodFeaturesToTrack(img0, 5000, 0.01,
                                          size[0] / 200)  # generate the initial points to track on the first frame
            res0 = [1] * len(pt0)  # status of each tracked point (all alive initially)
            ptt = []  # ptt holds the indices and coordinates of points still alive in the previous frame
            for j in range(len(pt0)):
                ptt += [[j, pt0[j]]]

        pts += [ptt]

        # extract pt0 from ptt
        pt00 = []
        pt0index = []
        if ptt != -1:
            for j in range(len(ptt)):
                if res0[ptt[j][0]] == 1:
                    pt0index += [ptt[j][0]]  # point index
                    pt00 += [ptt[j][1]]  # point coordinates
            pt0 = np.array(pt00)

        if len(pt0) < 5:
            skip = True
        else:
            skip = False

        if not skip:
            # pt0 holds the coordinates of the surviving points passed in;
            # compute their tracked positions pt on the next frame
            pt0 = np.float32(pt0).reshape(-1, 1, 2)
            res = cv2.calcOpticalFlowPyrLK(img0, img, pt0, None)
            pt = res[0]

            ptt = []  # filtered list of tracked points (point index and coordinates)
            # classify each surviving point: inside the ROI -> 1, outside the ROI -> 0.5, other noise points -> 0
            for j in range(len(pt)):
                if res[1][j][0] == 1 and ifsame(
                        pt0[j][0], pt[j][0],
                    (0, 0, size[0], size[1])):  # is the tracked point a genuine match on the next frame?
                    ptt += [[pt0index[j],
                             [[pt[j][0][0],
                               pt[j][0][1]]]]]  # carry the surviving point's index and coordinates into the next iteration

                    if ifedge(pt[j][0], size[0], size[1]):
                        res0[pt0index[j]] = 0.5
                else:
                    res0[pt0index[j]] = 0
        else:
            ptt = -1

    # finally, generate the bounding boxes
    for i in range(len(pts)):
        pt_todraw = pts[i]
        if pt_todraw != -1:
            x = []
            y = []
            for pt in pt_todraw:
                if res0[pt[0]] == 1:
                    x += [pt[1][0][0]]
                    y += [pt[1][0][1]]
            if len(x) > 2:
                lefttest, righttest = getedge2(x)
                uptest, downtest = getedge2(y)
                bbox1 = (lefttest, uptest, righttest, downtest)  # ROI for the next frame
                bbox0 = fixbbox(bbox1)  # correct the ROI when its aspect ratio goes out of range, and update it
                bboxf[framen - i] = bbox0
    return bboxf
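frontprop depends on helpers (ifsame, ifedge, getedge2, fixbbox) and a global size that are not shown, so their real definitions are unknown. Purely as an illustration of their likely shape, ifedge and getedge2 might look roughly like this (hypothetical sketches):

# hypothetical: True if point p = (x, y) lies within `margin` px of the border
def ifedge(p, w, h, margin=10):
    x, y = p
    return x < margin or y < margin or x > w - margin or y > h - margin

# hypothetical: robust min/max of a coordinate list, trimming a fraction of outliers
def getedge2(vals, trim=0.1):
    vals = sorted(vals)
    k = int(len(vals) * trim)
    return vals[k], vals[-k - 1]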
Example #57
0
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
    p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0-p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status
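checkedTrace is the usual forward-backward consistency check: points are tracked forward, re-tracked backward, and kept only if they land within back_threshold pixels of where they started. A short usage sketch (it assumes two consecutive grayscale frames prev_gray and curr_gray, plus the module-level lk_params the function expects):

lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

p0 = cv2.goodFeaturesToTrack(prev_gray, maxCorners=200,
                             qualityLevel=0.01, minDistance=7)
p1, ok = checkedTrace(prev_gray, curr_gray, p0)
p1_good = p1[ok]  # keep only points whose backward track returned within 1 px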
Example #58
0
def align_image(hyp_img, rgb_img):

    # preprocess rgb to hsv
    rgb_hsv = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)
    # Convert images to grayscale (note: the HSV result is fed to a BGR2GRAY
    # conversion here, i.e. its three channels are simply mixed down)
    rgb_gray = cv2.cvtColor(rgb_hsv, cv2.COLOR_BGR2GRAY)
    hyp_gray = cv2.cvtColor(hyp_img, cv2.COLOR_BGR2GRAY)
    '''
    Tunable parameters:
    Modify the values in the cv2.adaptiveThreshold functions to obtain better h_matrix
    '''
    rgb_thresh = cv2.adaptiveThreshold(rgb_gray,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
            cv2.THRESH_BINARY,15,5)

    hyp_thresh = cv2.adaptiveThreshold(hyp_gray,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
            cv2.THRESH_BINARY,11,2)
    # cv2.imshow("rgbthresh", rgb_thresh)
    # cv2.waitKey(0)
    # cv2.imshow("hypthresh", hyp_thresh)
    # cv2.waitKey(0)
    '''
    Tunable parameters:
    Modify the values in the cv2.goodFeaturesToTrack function to obtain better h_matrix
    '''
    # find the coordinates of good features to track  in prep_rgb_img
    hyp_features = cv2.goodFeaturesToTrack(hyp_thresh, 10000, .1, 5)

    # find corresponding features in current photo
    # nextPts must be None (or a valid initial estimate); passing an empty
    # array with OPTFLOW_USE_INITIAL_FLOW set would be rejected by OpenCV
    rgb_features, pyr_stati, _ = cv2.calcOpticalFlowPyrLK(hyp_thresh,
                                                          rgb_thresh,
                                                          hyp_features,
                                                          None)

    # only add features for which a match was found to the pruned arrays
    good_rgb_features = []
    good_hyp_features = []
    for index, status in enumerate(pyr_stati):
        if status == 1:
            good_rgb_features.append(rgb_features[index])
            good_hyp_features.append(hyp_features[index])

    # convert lists to numpy arrays so they can be passed to opencv function
    rgb_final_features = np.asarray(good_rgb_features)
    hyp_final_features = np.asarray(good_hyp_features)

    # find perspective transformation using the arrays of corresponding points
    h_transformation = cv2.findHomography(rgb_final_features,
                                          hyp_final_features,
                                          method=cv2.RANSAC,
                                          ransacReprojThreshold=1)[0]

    # transform the images and overlay them to see if they align properly
    height, width = rgb_img.shape[:2]
    warped_rgb = cv2.warpPerspective(rgb_img, h_transformation,
                                     (width, height))

    align_img = cv2.addWeighted(warped_rgb, .3, hyp_img, .7, 1)

    unalign_img = cv2.addWeighted(rgb_img, .3, hyp_img, .7, 1)

    return align_img, unalign_img, warped_rgb, h_transformation
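A short usage sketch for align_image (the file names are placeholders; both images must show the same scene for the feature matching to succeed):

hyp = cv2.imread('hyperspectral.png')   # placeholder path
rgb = cv2.imread('rgb.png')             # placeholder path
aligned, unaligned, warped, H = align_image(hyp, rgb)
cv2.imshow('aligned overlay', aligned)
cv2.waitKey(0)
cv2.destroyAllWindows()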
Example #59
0
def gen_opt_flow_img(img_pth):
    cap = IterImage(img_pth)

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    # Take first frame and find corners in it
    old_frame = next(cap)

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    while True:
        try:
            frame = next(cap)
        except StopIteration:
            break

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            # cast to int: cv2 drawing functions require integer coordinates
            mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
            frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)

        cv2.imshow('frame', img)

        # cv2.waitKey(0)

        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #60
0
def run(**kwargs):
    """
        Main loop for background removal.
    """
    time_lst = [0]

    # setup an image for the background
    bg_pic_path = kwargs['background']
    bg_pic = cv2.imread(bg_pic_path)
    bg_pic = cv2.resize(bg_pic, dst_size)

    # setup the video writer if needed
    writer = None
    if kwargs["output_video"]:
        codec = codec_from_ext(kwargs["output_video"])
        writer = cv2.VideoWriter(kwargs["output_video"],
                                 codec,
                                 fps,
                                 frameSize=(width, height))

    # create the output frame folder if needed
    if kwargs["frame_folder"]:
        if kwargs["refresh"]: recursive_clean(kwargs["frame_folder"])
        make_folder(kwargs["frame_folder"])

    # initialize background
    hsv_bg = np.zeros(dst_shape_multi, dtype='uint16')
    black_bg = np.zeros(dst_shape_multi[:-1], dtype='uint8')

    # initialize vector of points for opticalFlow
    start_points = np.array([], dtype=np.float32)
    mask_saved = False
    prev_gray = None

    # start looping through frames
    frame_count = 0
    if cap.isOpened():
        while cap.isOpened():
            # retrieve the current frame and exit if needed
            ret, frame = cap.read()
            if not ret:
                break

            # otherwise, perform basic operations on the current frame
            frame = cv2.resize(frame, dst_size)
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray_frame = cv2.GaussianBlur(gray_frame,
                                          ksize=gauss_kernel,
                                          sigmaX=2,
                                          sigmaY=2)
            hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hsv_frame_blurred = cv2.GaussianBlur(hsv_frame,
                                                 gauss_kernel,
                                                 sigmaX=2,
                                                 sigmaY=2)

            # build a model for the background during the first frames
            if frame_count < bg_frame_limit:
                hsv_bg = hsv_bg.copy() + hsv_frame_blurred
                if frame_count == bg_frame_limit - 1:
                    hsv_bg = np.uint8(hsv_bg.copy() / bg_frame_limit)

            # when the bg has been modeled, segment the fg
            else:
                time_in = time.perf_counter()

                # check if we should behave 'normally' or use opt-flow
                if mask_saved:

                    # if it is the first frame of the opt-flow method, initialize
                    # the points to be tracked with the previous mask
                    if len(start_points) == 0:
                        indices = np.where(fg_mask_closed == 255)
                        start_points = np.array(
                            [[round(indices[1][i]),
                              round(indices[0][i])]
                             for i in range(len(indices[0]))],
                            dtype=np.float32)
                        points = start_points
                        status = np.array([[1]] * len(points))

                    # otherwise, run the Lucas Kanade algorithm
                    else:
                        prev_points = np.array(points, dtype=np.float32)
                        points, status, error = cv2.calcOpticalFlowPyrLK(
                            prev_gray,
                            gray_frame,
                            prev_points,
                            None,
                            winSize=(15, 15),
                            maxLevel=2,
                            criteria=(cv2.TERM_CRITERIA_EPS
                                      | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

                    # retain only points successfully tracked
                    point_set = np.array(points, dtype=np.int32)
                    point_set_ = point_set.copy()[status[:, 0] == 1]

                    # discard out of bounds indices
                    rows = point_set_[:, 1]
                    max_row_shift = max(rows) - n_rows + 1 if max(
                        rows) >= n_rows else 1
                    cols = point_set_[:, 0]
                    max_col_shift = max(cols) - n_cols + 1 if max(
                        cols) >= n_cols else 1
                    rows = rows - max_row_shift
                    cols = cols - max_col_shift

                    # build a convex hull around the tracked points and use it as a mask
                    fg_mask_closed = black_bg.copy()
                    fg_mask_closed[rows, cols] = 255
                    fg_mask_closed = cv2.medianBlur(fg_mask_closed.copy(),
                                                    ksize=median_ksize)
                    point_set_median = np.where(fg_mask_closed == 255)
                    point_set_median = np.array([[
                        round(point_set_median[1][i]),
                        round(point_set_median[0][i])
                    ] for i in range(len(point_set_median[0]))],
                                                dtype=np.float32)
                    if len(point_set_median) > 0:
                        hull = np.array(cv2.convexHull(point_set_median),
                                        dtype=np.int32)
                        fg_mask_closed = cv2.fillConvexPoly(
                            black_bg.copy(), hull, 255)

                else:
                    # perform frame differencing
                    diff = cv2.absdiff(hsv_frame_blurred, hsv_bg)
                    h_diff, s_diff, v_diff = cv2.split(diff)

                    # automatic global thresholding with Otsu's technique
                    r1, h_diff_thresh = cv2.threshold(
                        h_diff, 1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                    r2, s_diff_thresh = cv2.threshold(
                        s_diff, 1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                    r3, v_diff_thresh = cv2.threshold(
                        v_diff, 1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

                    # take into account contribution of saturation and value (aka 'brightness')
                    # clean the saturation mask beforehand, it usually is more unstable
                    s_diff_thresh_median = cv2.medianBlur(s_diff_thresh,
                                                          ksize=median_ksize)
                    fg_mask = s_diff_thresh_median + v_diff_thresh
                    fg_mask_closed = cv2.morphologyEx(fg_mask,
                                                      cv2.MORPH_CLOSE,
                                                      kernel=kernel,
                                                      iterations=10)
                    fg_mask_closed = cv2.dilate(fg_mask_closed.copy(), kernel)

                # compute the actual foreground and background
                foreground = cv2.bitwise_and(frame, frame, mask=fg_mask_closed)
                background = bg_pic - cv2.bitwise_and(
                    bg_pic, bg_pic, mask=fg_mask_closed)

                # ... and add them to generate the output image
                out = cv2.add(foreground, background)

                # display the output and the masks
                cv2.imshow("Output", out)

                # quit if needed
                key = cv2.waitKey(ms)
                if key == ord('q'):
                    break

                # if user presses 's' save and track the current mask with optical flow assumptions
                elif key == ord('s'):
                    mask_saved = True

                # if user presses 'r', reset the background model
                elif key == ord('r'):
                    mask_saved = False
                    start_points = np.array([], dtype=np.float32)
                    frame_count = -1
                    hsv_bg = np.zeros(dst_shape_multi, dtype='uint16')

                # write the video on the fs if the user requested it
                if writer:
                    writer.write(cv2.resize(out, dsize=(width, height)))

                # save frames on the fs if the user requested it
                if kwargs["frame_folder"] and frame_count % kwargs[
                        "throttle"] == 0:
                    cv2.imwrite(
                        os.path.join(
                            kwargs["frame_folder"],
                            "{}.jpg".format(frame_count - bg_frame_limit + 1)),
                        out)

                # keep track of time
                time_out = time.perf_counter()
                time_diff = time_out - time_in
                time_lst.append(time_diff)

            prev_gray = gray_frame.copy()
            frame_count += 1

    print("Average Time x Frame: ",
          round(np.sum(np.array(time_lst)) / len(time_lst), 2))
    cv2.destroyAllWindows()
    cap.release()
    if writer:
        writer.release()