Example #1
    def run(self):

        p2_track_window = (100,250,90,90) #x, y, w, h
        p1_track_window = (400,250,90,90)

        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        p1_mask = cv2.inRange(hsv, self.p1_hsv_min, self.p1_hsv_max)
        term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
        while(self.running):
            ret, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # for player 1
            p1_mask = cv2.inRange(hsv, self.p1_hsv_min, self.p1_hsv_max)
            ret, p1_track_window = cv2.meanShift(p1_mask, p1_track_window, term_crit)
            x,y,w,h = p1_track_window
            self.p1_position = y
            cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
            # for player 2
            p2_mask = cv2.inRange(hsv, self.p2_hsv_min, self.p2_hsv_max)
            ret, p2_track_window = cv2.meanShift(p2_mask, p2_track_window, term_crit)
            x,y,w,h = p2_track_window
            self.p2_position = y
            cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)

            #cv2.imshow('img2',frame)
            self.frame = frame
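This run() loop assumes the object was constructed with per-player HSV bounds and state fields. A minimal constructor sketch follows; the attribute names mirror the ones used above, but the color ranges are placeholder assumptions, not the author's calibration:

import cv2
import numpy as np

class TwoPlayerTracker:  # hypothetical owner of the run() method above
    def __init__(self):
        self.running = True
        self.frame = None
        self.p1_position = 0
        self.p2_position = 0
        # assumed HSV ranges for each player's colored marker
        self.p1_hsv_min = np.array((100, 100, 100), np.uint8)
        self.p1_hsv_max = np.array((130, 255, 255), np.uint8)
        self.p2_hsv_min = np.array((40, 100, 100), np.uint8)
        self.p2_hsv_max = np.array((80, 255, 255), np.uint8)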
Example #2
def meanShift(hsv, f1, f2, rois, mask):
	if f1 is not None:
		term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
		#import pdb; pdb.set_trace()
		ff1 = cv2.normalize(f1, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
		ff2 = cv2.normalize(f2, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
		dst1 = cv2.calcBackProject([hsv], [0,2], ff1, [70, 180, 10, 255], 1)
		dst2 = cv2.calcBackProject([hsv], [0,2], ff2, [70, 180, 10, 255], 1)
		bMask = cv2.inRange(hsv, np.array((70., 0., 10.)), np.array((255.,255.,50.)))
		wMask = cv2.inRange(hsv, np.array((70., 0., 200.)), np.array((255.,255.,255.)))
		dst1 = cv2.bitwise_and(dst1, mask)
		dst2 = cv2.bitwise_and(dst2, mask)
		dst1 = cv2.bitwise_and(dst1, wMask)
		dst2 = cv2.bitwise_and(dst2, bMask)
		dstVis1 = cv2.applyColorMap(dst1, cv2.COLORMAP_JET)
		dstVis2 = cv2.applyColorMap(dst2, cv2.COLORMAP_JET)

		nRois = []
		for (x0,y0), (x1,y1), flag in rois:
			if flag:
				x,y,w,h = cv2.meanShift(dst1, (x0,y0,x1-x0,y1-y0), term_crit)[1]
				nRois.append( [(x, y), (x+w, y+h), flag] )
				dst1[y:y+h, x:x+w] = 0
			else:
				x,y,w,h = cv2.meanShift(dst2, (x0,y0,x1-x0,y1-y0), term_crit)[1]
				nRois.append( [(x, y), (x+w, y+h), flag] )
				dst2[y:y+h, x:x+w] = 0
		
		#cv2.imshow('dst1', dstVis1)
		#cv2.imshow('dst2', dstVis2)
		return nRois
	return []
Example #3
  def update(self, frame):
    # print "updating %d " % self.id
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    back_project = cv2.calcBackProject([hsv],[0], self.roi_hist,[0,180],1)
    
    if args.get("algorithm") == "c":
      ret, self.track_window = cv2.CamShift(back_project, self.track_window, self.term_crit)
      pts = cv2.boxPoints(ret)
      pts = np.int0(pts)
      self.center = center(pts)
      cv2.polylines(frame,[pts],True, 255,1)
      
    if not args.get("algorithm") or args.get("algorithm") == "m":
      ret, self.track_window = cv2.meanShift(back_project, self.track_window, self.term_crit)
      x,y,w,h = self.track_window
      self.center = center([[x,y],[x+w, y],[x,y+h],[x+w, y+h]])  
      cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 255, 0), 2)

    self.kalman.correct(self.center)
    prediction = self.kalman.predict()
    cv2.circle(frame, (int(prediction[0]), int(prediction[1])), 4, (255, 0, 0), -1)
    # fake shadow
    cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (11, (self.id + 1) * 25 + 1),
        font, 0.6,
        (0, 0, 0),
        1,
        cv2.LINE_AA)
    # actual info
    cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (10, (self.id + 1) * 25),
        font, 0.6,
        (0, 255, 0),
        1,
        cv2.LINE_AA)
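The method above relies on a center() helper, a module-level font, and a self.kalman filter defined elsewhere (args is presumably a parsed command-line dict). A plausible minimal version of those pieces, using a constant-velocity Kalman filter with 4 state dimensions and 2 measurement dimensions:

import cv2
import numpy as np

font = cv2.FONT_HERSHEY_SIMPLEX

def center(points):
    """Mean (x, y) of the corner points, as float32 so kalman.correct() accepts it."""
    x = sum(p[0] for p in points) / len(points)
    y = sum(p[1] for p in points) / len(points)
    return np.array([np.float32(x), np.float32(y)], np.float32)

kalman = cv2.KalmanFilter(4, 2)  # state: x, y, dx, dy; measurement: x, y
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03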
Example #4
def run_main():
    cap = cv2.VideoCapture('test.mp4')
    ret, frame = cap.read()
    c,r,w,h = 200,250,70,70
    track_window = (c,r,w,h)

    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        x,y,w,h = track_window
        # print track_window
        cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
        cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
        cv2.imshow('Tracking', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'): break

    cap.release()
    cv2.destroyAllWindows()
Example #5
    def track(self, img, center, face_rect, det_face_hsv):
        """
        Uses mean shifting to track the users face. Only useful once
        a face has already been detected. The Process
        1) Convert the image to HSV, since we track with Hue
        2) Pull out the Hue values in the detected region, and develop a histogram
        3) Create a Back Projection of the initial image with values equal to the
            probability of that hue appearing in the facial region
        4) Use mean shifting to determine where the new face is

        :param img: BGR image from webcam
        :param center: tuple giving the normalized center of the face
        :param face_rect: non-normalized dimensions of face rectangle (x, y, cols, rows)
        :param det_face_hsv: hsv of the most recently detected face
        :return: (new_position, rect)
        """
        # convert the original image to hsv, and pull out the face
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hue_face_hist = cv2.calcHist([det_face_hsv],[0], None, [32], [0, 180])
        cv2.normalize(hue_face_hist, hue_face_hist, 0, 255, cv2.NORM_MINMAX)
        # calculate the back projection probabilities
        back_proj = cv2.calcBackProject([img_hsv], [0], hue_face_hist, [0, 180], 1)
        #track face using meanshift
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        track_box, rect = cv2.meanShift(back_proj, face_rect, term_crit)
        #return values
        height, width, depth = img.shape  # rows, cols, depth
        new_position = ((rect[0] + rect[2]/2)/float(width), (rect[1] + rect[3]/2)/float(height))
        cv2.rectangle(img, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), 255)

        return (new_position, rect)
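track() is only useful once a face has been detected, so something upstream must supply face_rect and det_face_hsv. A hedged sketch of that detection step with a Haar cascade; the cascade file and helper name are assumptions, and cv2.data requires a reasonably recent OpenCV:

import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def detect_face(img):
    """Hypothetical helper producing the inputs track() expects."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return None, None
    x, y, w, h = faces[0]
    det_face_hsv = cv2.cvtColor(img[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
    return (x, y, w, h), det_face_hsv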
    def _append_boxes_from_meanshift(self, frame, box_all):
        """Adds to the list all bounding boxes found with mean-shift tracking

            Mean-shift tracking is used to track objects from frame to frame.
            This information is combined with a saliency map to discard
            false-positives and focus only on relevant objects that move.

            :param frame: current RGB image frame
            :box_all: append bounding boxes from tracking to this list
            :returns: new list of all collected bounding boxes
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        for i in xrange(len(self.object_roi)):
            roi_hist = copy.deepcopy(self.object_roi[i])
            box_old = copy.deepcopy(self.object_box[i])

            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            ret, box_new = cv2.meanShift(dst, tuple(box_old), self.term_crit)
            self.object_box[i] = copy.deepcopy(box_new)

            # discard boxes that don't move
            (xo, yo, wo, ho) = box_old
            (xn, yn, wn, hn) = box_new

            co = [xo + wo/2, yo + ho/2]
            cn = [xn + wn/2, yn + hn/2]
            if (co[0]-cn[0])**2 + (co[1]-cn[1])**2 >= self.min_shift2:
                box_all.append(box_new)

        return box_all
	def track_object(self,frames,masks):
		hsv_frames = [];
		for frame in frames:
			hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
			hsv_frames.extend([hsv_frame]);
		activeWindowFeats = [self.__computeFrameFeats__(hsv_frame,mask) for (hsv_frame,mask) in zip(hsv_frames,masks)]
		activeWindowFeats = [np.vstack(activeWindowFeat) for activeWindowFeat in activeWindowFeats if len(activeWindowFeat) > 0];
		if len(activeWindowFeats) > 0:
			activeWindowFeats = np.vstack(activeWindowFeats);
			self.__build_model__(activeWindowFeats[:,:self.n_bins],activeWindowFeats[:,self.n_bins:]);
		term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 2, 1 );
		frame_track_windows = []
		for hsv_frame in hsv_frames:
			track_windows=[];
			for idx in range(self.numMixtures):
				if self.hist_gmm is not None:
					back_proj = cv2.calcBackProject([hsv_frame],[0],self.hist_gmm.means_[idx,:],[0,self.n_bins],1);
				else:
					back_proj = None;
					
				if self.shape_kmeans is not None:
					window = np.array(self.shape_kmeans.cluster_centers_[idx,:],dtype =int)
				else:
					shape = hsv_frame.shape[:2]
					window = np.array([shape[1]/4,shape[0]/4,shape[1]/2,shape[0]/2])
					
				window = tuple(window.clip(0))
				if back_proj is not None and window[2] > 10 and window[3] > 10:
					ret,window = cv2.meanShift(back_proj, window, term_crit)
				window = (window[0],window[1],window[0]+window[2],window[1]+window[3]);
				track_windows.extend([(window,idx)]);
			frame_track_windows.extend([track_windows])
		return frame_track_windows;
    def run(self, cur_frame, next_frame):
        # Setup the termination criteria, either 10 iterations or move by at least 1 pt
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        new_list_of_objects = []

        for obj_tuple in self.list_of_objects:
            hsv_roi = None
            if len(obj_tuple) == 4:
                obj, hsv_roi, n_in_frame, n_not_moving = obj_tuple
            if (hsv_roi is not None) and (obj[2] > 0 or obj[3] > 0):
                mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
                roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
                cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

                # track in next frame
                # backprojection
                hsv = cv2.cvtColor(next_frame, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

                # apply meanshift to get the new location
                ret, obj_new = cv2.meanShift(dst, obj, term_crit)
                n_in_frame += 1
                if PostProcessing.distance_two_squares(obj, obj_new) < 1:
                    n_not_moving += 1
                else:
                    n_not_moving = 0

                x, y, w, h = obj_new
                if n_not_moving < 20:
                    new_list_of_objects.append((obj_new, hsv_roi, n_in_frame, n_not_moving))

                # draw
                cv2.rectangle(next_frame, (x, y), (x + w, y + h), 255, 2)
        self.list_of_objects = new_list_of_objects
    def find_center(self,im):
        '''actually do the tracking!'''
        im_hsv = cv2.cvtColor(im,cv2.COLOR_BGR2HSV)
        track_im = cv2.calcBackProject([im_hsv],[0],self.query_hist,[0,255],1)

        track_im_visualize = track_im.copy()
        # convert to (x,y,w,h)
        track_roi = (self.last_detection[0],self.last_detection[1],self.last_detection[2]-self.last_detection[0],self.last_detection[3]-self.last_detection[1])
        # Setup the termination criteria, either 10 iterations or move by at least 1 pt
        # this is done to plot intermediate results of mean shift
        for max_iter in range(1,10):
            term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, 1 )
            (ret, intermediate_roi) = cv2.meanShift(track_im,track_roi,term_crit)
            cv2.rectangle(track_im_visualize,(intermediate_roi[0],intermediate_roi[1]),(intermediate_roi[0]+intermediate_roi[2],intermediate_roi[1]+intermediate_roi[3]),max_iter/10.0,2)

        self.last_detection = [intermediate_roi[0],intermediate_roi[1],intermediate_roi[0]+intermediate_roi[2],intermediate_roi[1]+intermediate_roi[3]]
        
        # update_hist = True
        # if update_hist:
        #     self.query_img = im
        #     self.query_roi = intermediate_roi
        #     self.get_query_histogram()


        #get the center of the box
        posX = (self.last_detection[0]+self.last_detection[2])/2
        posY = (self.last_detection[1]+self.last_detection[3])/2

        cv2.circle(im,(posX,posY),2,(255,0,0),10)
        cv2.imshow('image',im)
        cv2.rectangle(self.query_img,(self.query_roi[0],self.query_roi[1]),(self.query_roi[0]+self.query_roi[2],self.query_roi[1]+self.query_roi[3]),1.0,2)
        cv2.imshow('query_img',self.query_img)
        cv2.waitKey(20)

        return Target(x = posX, y = posY, x_img_size = self.query_img.shape[0],y_img_size = self.query_img.shape[1])
    def process(self, src, **kwargs):         
        sw = SW('meanShift')
        
        # convert to HLS for the histogram
        hls = cv2.cvtColor(src, cv2.COLOR_BGR2HLS)
        # get the histogram back projection
        dst = cv2.calcBackProject([hls], self.hist_channel, self.roi_hist, [0,180], 1)
        # apply MeanShift
        # grow the window toward 1/4 of the frame area; compute the aspect scale k
        k = ((src.shape[0]*src.shape[1]/2.0) / (self.track_window[2]*self.track_window[3]))**0.5
        assert k>1
        scale_list = [1+i*(k-1)/4 for i in range(5)]
        window = self.track_window
        rst_img = src.copy()
        for scale in scale_list[::-1]:
            window = self.resize_window(self.track_window, scale)
            _, window = cv2.meanShift(dst, window, self.term_crit)
            x,y,w,h = window
            cv2.rectangle(rst_img, (x,y), (x+w,y+h), OBJECT_MATCH_COLOR, LINE_WIDTH)
            scale = scale - (k-1)/4.0
        # update the tracking window
        self.track_window = window
        x,y,w,h = window
        # compute the center point
        center = (int(x+w/2), int(y+h/2))
        # draw the tracking result
#         rst_img = src.copy()
#         cv2.rectangle(rst_img, (x,y), (x+w,y+h), OBJECT_MATCH_COLOR, LINE_WIDTH)
        # draw the intermediate back-projection image
        prj_img = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
        # draw a red box
        # cv2.rectangle(prj_img, (x,y), (x+w,y+h), OBJECT_MATCH_COLOR, LINE_WIDTH)
        
        sw.stop()
        return rst_img, [util.Point(center)], prj_img
Example #11
def meanShiftTracking(input, roi_hist, term_crit, track_window):
    input_hsv = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([input_hsv],[0],roi_hist,[0,180],1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x,y,w,h = track_window
    cv2.rectangle(input, (x,y), (x+w,y+h),(0,0,200),2)
    output = input
    return output, track_window
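A usage sketch for meanShiftTracking(): build roi_hist once from a window in the first frame, then thread track_window through the per-frame calls. The initial window coordinates here are assumptions:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
c, r, w, h = 300, 200, 100, 100          # assumed initial window
track_window = (c, r, w, h)

hsv_roi = cv2.cvtColor(frame[r:r+h, c:c+w], cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    out, track_window = meanShiftTracking(frame, roi_hist, term_crit, track_window)
    cv2.imshow('output', out)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()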
    def track(self,img,mask=None):
        self.backprojection = cv2.calcBackProject([img],self.chans,self.hist,self.ranges,1/self.hist.max())

        if mask is not None: 
            self.backprojection *= mask

        niter, self.bbox = cv2.meanShift(self.backprojection,self.bbox,self.term_criteria)

        return self.bbox
	def detecting(self,im):
		#print 'detecting'

		#Pauls Code - went through it and changed it to fit ours. will probably need further alterations
		img_bw = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
		training_keypoints = self.detector.detect(img_bw)
		#print training_keypoints
		#print "new_descriptors"
		#print self.new_descriptors

		desc, training_descriptors = self.extractor.compute(img_bw,training_keypoints)
		#finds the k best matches for each descriptor from a query set. (http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html)
		matches = self.matcher.knnMatch(self.new_descriptors, training_descriptors, k=2)
		#print matches
		#print dir(matches[0][0])
		#print matches[0][0].imgIdx
		good_matches = []
		for m,n in matches: 
			#makes sure distance to closest match is sufficiently better than to 2nd closest
			if (m.distance < self.ratio_threshold*n.distance and
				training_keypoints[m.trainIdx].response > self.corner_threshold):
				#print 'finding matches'
				good_matches.append((m.queryIdx, m.trainIdx))

		#print 'good matches type: %s' %type(good_matches)

		#print 'good matches: %s' %good_matches

		self.matching_new_pts = np.zeros((len(good_matches),2))
		self.matching_training_pts = np.zeros((len(good_matches),2))

		track_im = np.zeros(img_bw.shape)
		for idx in range(len(good_matches)):
			match = good_matches[idx]
			self.matching_new_pts[idx,:] = self.new_keypoints[match[0]].pt
			self.matching_training_pts[idx,:] = training_keypoints[match[1]].pt
			track_im[int(training_keypoints[match[1]].pt[1]), int(training_keypoints[match[1]].pt[0])] = 1.0

		#print 'matching_keypoint type: %s' %type(self.matching_new_pts)
		#print 'matching_keypoints: %s' %self.matching_new_pts

		
		track_im_visualize = track_im.copy()

		#converting to (x,y,width,height)
		track_region = (self.last_detection[0],self.last_detection[1],self.last_detection[2]-self.last_detection[0],self.last_detection[3]-self.last_detection[1])

		#setup criteria for termination, either 10 iterations or move at least 1 pt
		#done to plot intermediate results of mean shift
		for max_iter in range(1,10): 
			term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, 1 )
			(ret, intermediate_region) = cv2.meanShift(track_im,track_region,term_crit)
			cv2.rectangle(track_im_visualize,(intermediate_region[0],intermediate_region[1]),(intermediate_region[0]+intermediate_region[2],intermediate_region[1]+intermediate_region[3]),max_iter/10.0,2)
		
		self.last_detection = [intermediate_region[0],intermediate_region[1],intermediate_region[0]+intermediate_region[2],intermediate_region[1]+intermediate_region[3]]

		cv2.imshow("track_win", track_im_visualize)
Example #14
def track_ball_4(video):
    """ Track the ball's center in 'video' using Gaussian Blur
        Hough Circle Detection and Meanshift tracking.

    Arguments and Outputs are the same as track_ball_1.
    """

    grabbed, img = video.read()
    if not grabbed:
        return

    # Blur the initial image to reduce noise from sharp background edges
    gimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gimg = cv2.GaussianBlur(gimg, (21, 21), 0)

    circles = cv2.HoughCircles(gimg, cv2.cv.CV_HOUGH_GRADIENT, 2, 100,
                               param1=24, param2=4, minRadius=0, maxRadius=0)

    # Initial bounds and associated track_window
    x, y, radius = circles[0][0][0], circles[0][0][1], circles[0][0][2]
    track_window = (int(x-radius), int(y-radius), int(2*radius), int(2*radius))
    bounds = [(x-radius, y-radius, x+radius, y+radius)]

    # Initialize the binary image of our tracked circle for Meanshift
    # (rows are indexed by y, columns by x)
    roi = img[int(y-radius):int(y+radius), int(x-radius):int(x+radius)]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))

    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Meanshift termination criteria:
    # Max 10 iterations, or moved at least 1 pixel
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        grabbed, img = video.read()
        if not grabbed:
            break

        # Calculates the meanshift for the current frame.
        # Meanshift detects the tracking_window shift of maximum foreground
        # coverage, where the foregrounds are indicated by our roi histogram &
        # the current frame's hsv image.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        bounds.append((x, y, x+w, y+h))

    return bounds
Example #15
    def track(self, hsv, box, hist):
        back_proj = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)

        # apply meanshift to get the new location
        ret, box = cv2.meanShift(back_proj, box, TERM_CRIT)

        # report the center of the current front box as the tracked position
        self.update_pos(self.front_box[0] + self.front_box[2]/2, 
                        self.front_box[1] + self.front_box[3]/2)

        return box
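TERM_CRIT is a module-level constant defined elsewhere in this snippet's file; given the term_crit tuples used throughout these examples, it is presumably:

import cv2

# assumed definition, matching the other examples on this page
TERM_CRIT = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)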
Example #16
def track_roi(VIDEO_FILE):

  video = cv2.VideoCapture(VIDEO_FILE)  # hy: changed from cv2.VideoCapture()
  # cv2.waitKey(10)

  video.set(1, 2)  # hy: changed from 1,2000 which was for wheelchair test video,
  # hy: propID=1 means 0-based index of the frame to be decoded/captured next

  if not video.isOpened():
    print "cannot find or open video file"
    exit(-1)

  # Read the first frame of the video
  ret, frame = video.read()

  # Set the ROI (Region of Interest). Actually, this is a
  # rectangle of the building that we're tracking
  c,r,w,h = 900,650,400,400
  track_window = (c,r,w,h)

  # Create mask and normalized histogram
  roi = frame[r:r+h, c:c+w]
  hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

  mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))

  roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])

  cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

  term_cond = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) #hy: TERM_CRITERIA_EPS - terminate iteration condition

  while True:
    ret, frame = video.read()
    if ret:
      hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
      dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
      ret, track_window = cv2.meanShift(dst, track_window, term_cond)

      x,y,w,h = track_window

      #hy: draw rectangle as tracked window area
      cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
      cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX,
          1, (255,255,255), 2, cv2.CV_AA)

      cv2.imshow('Tracking', frame)
      if cv2.waitKey(1) & 0xFF == ord('q'):
          break
    else:
      print 'no frame received'
      break

  return [track_window]
    def update_warp(self, new_frame):
        self.current_frame = self.__scale_image(new_frame)

        if self.tracking_on:
            hsv = cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv],[0],self.roi_hist,[0,180],1)

            (x1,y1,x2,y2) = self.template
            track_window = (x1,y1,x2-x1,y2-y1)
            ret, track_window = cv2.meanShift(dst, track_window, self.term_crit)
            x,y,w,h = track_window
            self.template = (x,y,x+w,y+h)
def run_main():
    cap = cv2.VideoCapture('./Media/vid/ryan_no_rope.MP4')

    # video writer
    fourcc = cv.CV_FOURCC('m', 'p', '4', 'v') # note the lower case
    width = int(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    video = cv2.VideoWriter('video_ryan_tracking.mp4',fourcc,30,(width,height))

    # Skip forward 150 frames
    for i in range(150):
        ret, frame = cap.read()

    # Grab the next frame as starting point
    ret, frame = cap.read()
    cv2.imshow('Tracking', frame)

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    c,r,w,h = 925,280,200,400
    track_window = (c,r,w,h)
    cv2.rectangle(frame, (c,r), (c+w,r+h), 255, 2)
    
    cv2.imwrite('test.png', frame)

    # Create mask and normalized histogram
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x,y,w,h = track_window
        cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
        
        cv2.imshow('Tracking', frame)
        video.write(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    video.release()
    cv2.destroyAllWindows()
Example #19
    def process(self, heatmap):
        """
        The main processing method. Given the probability map, returns a bounding region

        :param heatmap: the map (e.g. result of histogram backprojection etc.). It's supposed to be a numpy array of float.
        :type heatmap: numpy.ndarray
        :return: bounding_box
        :rtype: PVM_tools.bounding_region.BoundingRegion
        """
        self._last_prob = heatmap
        harea = np.prod(heatmap.shape)

        if self._last_bb is None or self._last_bb.empty:
            (bb, area, mean_prob) = self._find_new_box(heatmap)
            confidence = mean_prob
            if bb[2] > 0 and bb[3] > 0 and np.prod(bb[2:]) < 2 * harea / 3:
                self._last_bb = BoundingRegion(image_shape=(heatmap.shape[0],
                                                            heatmap.shape[1],
                                                            3),
                                               box=np.array(bb, dtype=np.int),
                                               confidence=confidence)
            else:
                # Empty bounding box
                self._last_bb = BoundingRegion()
        else:
            # Step 1 go with the last bbox
            _, bb0 = cv2.meanShift(heatmap,
                                   tuple(self._last_bb.get_box_pixels()),
                                   self.term_crit)
            area0 = np.prod(bb0[2:])
            mean_prob0 = np.sum(heatmap[bb0[1]:bb0[1] + bb0[3], bb0[0]:bb0[0] +
                                        bb0[2]]) / (1e-12 + area0)
            _, bb = cv2.CamShift(heatmap,
                                 tuple(self._last_bb.get_box_pixels()),
                                 self.term_crit)
            area = np.prod(bb[2:])
            mean_prob = np.sum(heatmap[bb[1]:bb[1] + bb[3],
                                       bb[0]:bb[0] + bb[2]]) / (1e-12 + area)
            if mean_prob > self.threshold_retrieve and area > self.min_recovered_box_area and area < 2 * harea / 3:
                self._last_bb = BoundingRegion(image_shape=(heatmap.shape[0],
                                                            heatmap.shape[1],
                                                            3),
                                               box=np.array(bb, dtype=np.int),
                                               confidence=mean_prob)
            elif mean_prob0 > self.threshold_retrieve and area0 > self.min_recovered_box_area:
                # Go with the mean shift box, changing size apparently does no good.
                return self._last_bb
            else:
                # Empty bounding box
                self._last_bb = BoundingRegion()

        return self._last_bb
Example #20
 def detect(self, frame):
     hsv_frame = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
     # find the same object with Back Projection based on histogram
     dst = cv.calcBackProject([hsv_frame], [0], self.roi_hist, [0, 180], 1)
     # apply meanshift to get the new location
     _, self.loc = cv.meanShift(dst, self.loc, self.term)
     self.last_rois.append(self.loc)
     # set the maximum length of the list to 10 elements
     if len(self.last_rois) > 10:
         del self.last_rois[0]
     # recognize gesture and replace it with the assigned number
     gesture = self.classify_with_coords(self.last_rois, frame)
     return gesture
def find_meanshift(image):
  """finds which third of image finger exists in
  this isn't used, because it didn't actually work, but we
  attempted it """
  rows, cols = np.shape(image)
  # setup initial location of window
  r,h,c,w = rows/2,10,cols/2,10  # simply hardcoded the values
  track_window = (c,r,w,h)

  term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
  ret, track= cv2.meanShift(image, track_window, term_crit)
  x,y,w,h = track
  return (x,y)
def meanShifter(roi_hist, bgFrame, frame, track_window):
	#_, bgFrame = bgFrameCap.read()
	#_, frame   = frameCap.read()
	term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
	hsv = cv2.cvtColor(bgFrame, cv2.COLOR_BGR2HSV)
	dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
	_, track_window = cv2.meanShift(dst, track_window, term_crit)

	# Draw it on image
	x,y,w,h = track_window
	cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
	cv2.rectangle(bgFrame, (x,y), (x+w,y+h), 255,2)

	return frame, track_window
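A usage sketch for the corrected meanShifter() above, threading the window between calls. bgFrameCap and frameCap echo the commented-out capture lines in the function; the sources and the initial window are assumptions:

import cv2
import numpy as np

bgFrameCap = cv2.VideoCapture(0)   # assumed capture sources
frameCap = cv2.VideoCapture(1)

# roi_hist built as in the other examples on this page
ret, first = bgFrameCap.read()
c, r, w, h = 300, 200, 100, 100    # assumed initial window
hsv_roi = cv2.cvtColor(first[r:r+h, c:c+w], cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

track_window = (c, r, w, h)
while True:
    ok_bg, bgFrame = bgFrameCap.read()
    ok_fg, frame = frameCap.read()
    if not (ok_bg and ok_fg):
        break
    frame, track_window = meanShifter(roi_hist, bgFrame, frame, track_window)
    cv2.imshow('tracked', frame)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break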
    def find_center(self, im):
        '''actually do the tracking!'''
        im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        track_im = cv2.calcBackProject([im_hsv], [0], self.query_hist,
                                       [0, 255], 1)
        track_im_visualize = track_im.copy()

        # convert to (x,y,w,h)
        track_roi = (self.last_detection[0], self.last_detection[1],
                     self.last_detection[2] - self.last_detection[0],
                     self.last_detection[3] - self.last_detection[1])

        # TODO: change this to use contours/connected component instead of mean shift?
        for max_iter in range(1, 10):
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                         max_iter, 1)
            (ret, intermediate_roi) = cv2.meanShift(track_im, track_roi,
                                                    term_crit)
            cv2.rectangle(track_im_visualize,
                          (intermediate_roi[0], intermediate_roi[1]),
                          (intermediate_roi[0] + intermediate_roi[2],
                           intermediate_roi[1] + intermediate_roi[3]),
                          max_iter / 10.0, 2)  # visualize things!

        self.last_detection = [
            intermediate_roi[0], intermediate_roi[1],
            intermediate_roi[0] + intermediate_roi[2],
            intermediate_roi[1] + intermediate_roi[3]
        ]

        # find the center of the ROI (so of the object)
        posX = (self.last_detection[0] + self.last_detection[2]) / 2
        posY = (self.last_detection[1] + self.last_detection[3]) / 2

        # once upon a time we used these for updating the ROI
        # similar_pose_x = abs(self.last_center[0]-posX)<50
        # similar_pose_y = abs(self.last_center[1]-posY)<50

        self.last_center = [posX, posY]

        # find the average value in detection heatmap to get detection probability
        x_min, y_min, x_max, y_max = self.last_detection
        self.prob = (255 - np.mean(track_im[x_min:x_max, y_min:y_max])) / 255.0

        # update the ROI, if the probability is really high for a couple frames
        self.previous_probs = [self.prob] + self.previous_probs[:-1]
        cum_prob = reduce(lambda x, y: x * y, self.previous_probs)
        if cum_prob > .8 and self.prob > .98:
            print "\n\n\n\n\n\n\n\n\n\n\n\n\n\n I UPDATED!!!!!!!!!!!\n\n\n\n\n\n\n\n\n"
            self.set_query(im, self.last_detection)
            self.previous_probs = [0] * 10
    def objectTrack(self, hist_frame, shift = "cam"):
        
        # Initialize the track window
        track_window = (self.x, self.y, self.width, self.height)
        
        # Setup the termination criteria, either 10 iterations or move by at least 1 pt
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        
        # Initialize camera
        video = cv2.VideoCapture(0)
        
        while True:

            # Read video
            ret, frame = video.read()

            if ret == True:
                hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv],[0],hist_frame,[0,180],1)

                # apply whichever mean/cam shift
                if shift == "cam":
                    ret, track_window = cv2.CamShift(dst, track_window, term_crit)
                    
                    # Draw it on image
                    pts = cv2.boxPoints(ret)
                    pts = np.int0(pts)
                    img2 = cv2.polylines(frame,[pts],True, 255,2)
                    cv2.imshow('img2',img2)

                elif shift == "mean":
                    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
                   
                    # Draw static rectangle on image
                    img2 = cv2.rectangle(frame, (self.x, self.y), (self.x + self.width, self.y + self.height), 255,2)
                    cv2.imshow('img2',img2)
                    
                else:
                    print("Use a valid method. This function only permits 'mean' for mean shift and 'cam' for cam shift.")
                    break


                # Use the q button to quit the operation
                if cv2.waitKey(60) & 0xff == ord('q'):
                    break

            else:
                break

        cv2.destroyAllWindows()
        video.release()
Example #25
def meanShift():
    global frame,frame2,inputmode,trackWindow,roi_hist

    try:
        # load the saved video
        cap = cv2.VideoCapture('./images/walking.avi')
        cap.set(3,1280)
        cap.set(4,720)

    except Exception as e:
        print(e)
        return

    ret, frame = cap.read()

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame',onMouse,param=(frame,frame2))

    # third argument to meanShift: run 10 iterations, or stop once the window moves by less than 1 pt
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,1)

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if trackWindow is not None:
            hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
            ret, trackWindow = cv2.meanShift(dst,trackWindow,termination)

            x,y,w,h = trackWindow
            # mark the tracked object with a green rectangle
            cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)

        cv2.imshow('frame',frame)

        k = cv2.waitKey(60) & 0xFF
        if k == 27:
            break
        # press 'i' to pause the video and select the ROI
        if k == ord('i'):
            print("Meanshift를 위한 지역을 선택하고 키를 입력해라")
            inputmode = True
            frame2 = frame.copy()

            while inputmode:
                cv2.imshow('frame',frame)
                cv2.waitKey(0)

    cap.release()
    cv2.destroyAllWindows()
Example #26
def run_main():
    cap = cv2.VideoCapture('crowdnofade.mp4')

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    # c,r,w,h = 1480,800,40,100 is pretty good
    # 1480,790,40,110 also decent
    # 1470,790,60,110 bigger box - jumpier
    c,r,w,h = 1480,800,40,100
    track_window = (c,r,w,h)

    # Create mask and normalized histogram
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    
    fourcc = cv2.cv.CV_FOURCC(*'XVID')
    video  = cv2.VideoWriter('video.avi', fourcc, 20, (1920, 1080))
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x,y,w,h = track_window
        cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
        cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX,
            1, (255,255,255), 2, cv2.CV_AA)
        
        small = cv2.resize(frame, (0,0), fx=0.5, fy=0.5) 
        cv2.imshow('Tracking', small)
        
        #cv2.imshow('Tracking', frame)
        
        #video.write(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video.release()
    cap.release()
    cv2.destroyAllWindows()
Example #27
def tracker(yt, BB_size, mixcout, threshold_y):
    global tracker_detect
    _, track_window_break_1 = cv2.meanShift(remov, (0, yt, 1280, BB_size),
                                            term_criteria)

    xt, yt, wt, ht = track_window_break_1
    tracker_F = r_frame

    # print(xt,yt,wt,ht)
    #reset value
    # set the tracker at the mean point of the wave
    if yt + ht + base_cut >= roi_base - 10:
        remove_c = int((25 / 180) * 5) + 1

        BB_size = BB_size - remove_c

        yt = yt + remove_c
        # time synchronisation
        if BB_size <= remove_c + 3:

            yt = startyt
            xt, yt, wt, ht = 0, startyt, 1280, bb_org
            track_window_break = (0, 100, 1280, bb_org)
            ht = ht_fix
            threshold_y = int((81 / 180) * 100)
            mixcout = mixcout + 1
            print('tracker_wave_count: ', mixcout)
            tracker_detect = True
            #print('mixcout',mixcout,time.time())
            BB_size = bb_org
    # uni-directional movement
    if threshold_y > yt:
        yt = threshold_y
    threshold_y = yt
    #added
    # print(yt,yt+ht)                  #yt+ht
    try:
        cv2.rectangle(tracker_F, (0, base_cut + yt),
                      (1280, base_cut + yt + ht), (0, 255, 0), 2)
    except:
        cv2.rectangle(tracker_F, (0, base_cut), (1280, base_cut + yt),
                      (0, 255, 0), 2)

    cv2.imshow('tracker_F:', tracker_F)

    #remov_tracker=remov[yt:roi_base,xt:xt+wt]

    return yt, BB_size, mixcout, threshold_y
Example #28
def meanshift_capture():
    cap = cv2.VideoCapture(0)
    # grab the first frame of the video
    ret, frame = cap.read()
    print(frame.shape)
    # set the initial size of the tracking window
    # r,h,c,w = 120,100,253,100
    r, h, c, w = 180, 80, 140, 90
    track_window = (c, r, w, h)

    cv2.rectangle(frame, (c, r), (c + w, r + h), 255, 2)
    cv2.imshow("frame", frame)
    cv2.waitKey(0)
    # set the region of interest
    roi = frame[r:r + h, c:c + w]
    # cv2.imshow("roi",roi)
    # cv2.waitKey(0)
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 0., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        ret, frame = cap.read()

        if ret is True:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

            # call meanShift to get the new location
            ret, track_window = cv2.meanShift(dst, track_window, term_crit)

            # draw its position
            x, y, w, h = track_window

            cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
            cv2.imshow("frame", frame)

            k = cv2.waitKey(60) & 0xff
            if k == 27:
                break
            # else:
            #    cv2.imwrite(chr(k)+".jpg",frame)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
def run_main():

    videoName = "crosswalk"
    cap = cv2.VideoCapture(f'./resources/inputs/videos/{videoName}.avi')

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking

    if videoName == "crosswalk":
        c, r, w, h = 1080, 500, 30, 90
    
    elif videoName == "fourway":
        c, r, w, h = 1530, 150, 380, 895

    track_window = (c, r, w, h)
    
    # Create mask and normalized histogram
    roi = frame[r:(r + h), c:(c + w)]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Setup the termination criteria, either 80 iterations or move by at least 1 pt
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    while True:
        ret, frame = cap.read()

        if not ret:
            break

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)

        cv2.putText(frame, 'Tracked', (x - 25, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        
        cv2.imshow('Tracking', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #30
 def update(self, frame, is_hsv=False, suggested_roi=None):
     try:
         if not is_hsv:
             hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
         else:
             hsv = frame
         mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))        
         prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
         prob &= mask
         term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
         retval, self.track_window = cv2.meanShift(prob, self.track_window, term_crit)
     except cv2.error as e:
         sys.stdout.write('cv2 error in tracking')
         self.track_window=(0,0,0,0)
Example #31
def main(filename):
    cap = cv2.VideoCapture(filename)

    # Take first frame of the video
    ret, frame = cap.read()

    # Setup initial location of window
    r, h, c, w = 250, 90, 400, 125  # Hardcoded. Row, Height, Column, Width
    track_window = (c, r, w, h)

    # Set up the ROI (Region of Interest) for tracking
    roi = frame[r:r + h, c:c + w]
    # Hue-Saturation-Value ROI
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Discard low light values
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    # Histogram using HSV ROI and Mask
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    # Normalize values of histogram
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Set up termination criteria: either 10 iterations or if ROI moves only by 1 pt
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while (1):
        ret, frame = cap.read()

        if ret == True:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

            # Apply meanshift to get new location
            ret, track_window = cv2.meanShift(dst, track_window, term_crit)

            # Draw it on the image
            x, y, w, h = track_window
            img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
            cv2.imshow('img2', img2)

            k = cv2.waitKey(60) & 0xff
            if k == ord('q'):
                break
            else:
                cv2.imwrite(chr(k) + ".jpg", img2)
        else:
            break

    cv2.destroyAllWindows()
    cap.release()
 def hand_track(self, img_hsv):
     if not self.start_track:
         return
     dst = cv2.calcBackProject([img_hsv], [0, 1], self.hist,
                               [0, 180, 0, 256], 1)
     track_window = (self.hand_x, self.hand_y, self.hand_width,
                     self.hand_height)
     ret, track_window = cv2.meanShift(dst, track_window, self.term_crit)
     x, y, w, h = track_window
     #self.move = self.distance(self.hand_x_pre, self.hand_y_pre, x, y)
     self.hand_x_pre = self.hand_x
     self.hand_y_pre = self.hand_y
     self.hand_x = x
     self.hand_y = y
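hand_track() backprojects a two-channel hue/saturation histogram (channels [0, 1]). A sketch of how self.hist might be built from a detected hand window; the helper name and coordinates are assumptions:

import cv2
import numpy as np

def make_hand_hist(frame, x, y, w, h):
    """Hypothetical helper building the 2-D H-S histogram hand_track() uses."""
    hsv_roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    hist = cv2.calcHist([hsv_roi], [0, 1], mask, [180, 256], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    return hist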
Example #33
    def track(self, frame):
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        marks = []
        for region in self._tracked_regions:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], region['histogram'], [0,180], 1)

            _ret, coords = cv2.meanShift(dst, region['coords'], term_crit)
            region['coords'] = coords
            (x, y, w, h) = coords
            cv2.imshow(self._window_name, frame[y:y+h+5, x:x+w+5])
            marks.append([(x, y), (x+w, y+h)])

        return marks
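Each entry of self._tracked_regions pairs a hue histogram with a window. A sketch of registering one region in that shape; the dict keys mirror the ones read above, everything else is an assumption:

import cv2
import numpy as np

def make_region(frame, coords):
    """Build one tracked-region dict of the form track() expects."""
    x, y, w, h = coords
    hsv_roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv_roi], [0], None, [180], [0, 180])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    return {'histogram': hist, 'coords': coords}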
Example #34
def mean_Shift():
    global frame, frame2, inputmode, trackWindow, roi_hist

    cap = cv2.VideoCapture(0)

    ret, frame = cap.read()
    width = cap.get(3)
    height = cap.get(4)

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', onMouse, param=(frame, frame2))

    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        ret, frame = cap.read()

        if not ret: break

        #		width = cap.get(3)
        #		height = cap.get(4)

        if trackWindow is not None:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            # trackWindow now holds the tracked object's new location
            ret, trackWindow = cv2.meanShift(dst, trackWindow, termination)

            x, y, w, h = trackWindow
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            cv2.line(frame, (int(width / 2), int(height)), (int(
                (2 * x + w) / 2), int((2 * y + h) / 2)), (0, 255, 0), 2)

        cv2.imshow('frame', frame)

        key = cv2.waitKey(60) & 0xFF

        if key == 27: break

        if key == ord('i'):
            print('Select an area for MeanShift')
            inputmode = True
            frame2 = frame.copy()

            while inputmode:
                cv2.imshow('frame', frame)
                cv2.waitKey(0)
    cap.release()
    cv2.destroyAllWindows()
Example #35
    def detection_substance(self):
        """
        Object detection
        :return:
        """
        cap = cv2.VideoCapture(0)

        # take first frame of the video
        ret, frame = cap.read()

        # setup initial location of window
        r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
        track_window = (c, r, w, h)

        # set up the ROI for tracking
        roi = frame[r:r + h, c:c + w]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

        # Setup the termination criteria, either 10 iterations or move by at least 1 pt
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

        while (1):
            ret, frame = cap.read()
            if ret is True:
                hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

                # apply meanshift to get the new location
                ret, track_window = cv2.meanShift(dst, track_window, term_crit)

                # Draw it on image
                x, y, w, h = track_window
                img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
                cv2.imshow('img2', img2)

                k = cv2.waitKey(60) & 0xff
                if k == 27:
                    break
                else:
                    cv2.imwrite(chr(k) + ".jpg", img2)

            else:
                break

        cv2.destroyAllWindows()
        cap.release()
Example #36
 def update(self, frame):
     hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
     dst = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
     term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
     # apply meanshift to get the new location
     ret, self.window = cv2.meanShift(dst, self.window, term_crit)
     if ret == 10:
         self.center = None
         print 'Lost track of face'
     else:
         x, y, w, h = self.window
         self.center = (x + w / 2, y + h / 2, 3 * h)
         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
         cv2.circle(frame, self.center[:2], 1, (255, 0, 0), 2)
    def extract(self, frame):
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)

        # Apply meanshift to get the new location.
        (ret, self.track_window) = cv2.meanShift(dst, self.track_window, self.term_crit)

        # Draw it on image.
        (c, r, w, h) = self.track_window
        (x1, y1) = (c, r)
        (x2, y2) = (c+w, r+h)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
        return frame
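extract() reads self.roi_hist, self.track_window, and self.term_crit that some constructor must have seeded. A hedged constructor sketch consistent with the hue back-projection above; the class name and parameters are assumptions:

import cv2
import numpy as np

class RegionExtractor:  # hypothetical owner of the extract() method above
    def __init__(self, first_frame, track_window):
        self.track_window = track_window
        self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        c, r, w, h = track_window
        hsv_roi = cv2.cvtColor(first_frame[r:r+h, c:c+w], cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        self.roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(self.roi_hist, self.roi_hist, 0, 255, cv2.NORM_MINMAX)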
Example #38
def main(argv=None):
    if argv is None:
        argv = sys.argv

    ava.utl.setup_logging()
    logger = logging.getLogger(__name__).getChild('main')

    logger.debug('starting main.')

    img_path = '../images/baboon1.jpg'
    img_2_path = '../images/baboon3.jpg'

    # read image
    src_img_gray = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    src_img_bgr = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_COLOR)
    src_img_2_bgr = cv2.imread(img_2_path, cv2.CV_LOAD_IMAGE_COLOR)

    src_img_hsv = cv2.cvtColor(src_img_bgr, cv2.COLOR_BGR2HSV)
    src_img_2_hsv = cv2.cvtColor(src_img_2_bgr, cv2.COLOR_BGR2HSV)
    src_img_cp = src_img_bgr
    src_img_2_cp = src_img_2_bgr
    ava.cv.utl.show_image_wait_2(src_img_bgr) # ---------

    # tracking window
    x, y, w, h = (110, 260, 35, 40)
    track_window = (x, y, w, h)
    # draw rectangle
    # (x1, y1), (x2, y2), color
    cv2.rectangle(src_img_bgr, (110, 260), (110 + 35, 260 + 40), (255,200,100))
    ava.cv.utl.show_image_wait_2(src_img_bgr) # --------

    # get the hist_sample
    hist_sample = get_hue_histogram(src_img_hsv[260:(260 + 40), 110:(110 + 35)], 65)

    # find in the image_2
    src_img_2_masked = find_content(src_img_2_hsv, hist_sample)
    # termination criteria
    term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

    ret, track_window = cv2.meanShift(src_img_2_masked, track_window, term_crit)
    x, y, w, h = track_window
    print('found window: ', track_window)
    cv2.rectangle(src_img_2_cp, (x, y), (x + w, y + h), (255, 255, 0))

    # result = np.vstack([src_img_2_cp, src_img_2_masked_bgr])
    ava.cv.utl.show_image_wait_2(src_img_2_cp, 0) # --------------------

    cv2.destroyAllWindows()

    exit()
Example #39
def calc_meanshift_all_fruits(fruits_info, img_hsv):
    """
    Calculates the meanshift for all fruits on screen.
    Calculates the difference between the histograms of the fruits between the frames and makes sure it passes
    HISTS_THRESH. If it does it updates the track window for fruit. Otherwise we had lost the fruit and add it to
    FRUIT_TO_EXTRACT.
    :param fruits_info: list of fruits info
    :param img_hsv: The frame in hsv form
    """
    global FRUIT_TO_EXTRACT
    for fruit in fruits_info:
        x, y, w, h = fruit.track_window
        if len(fruit.centers) > 1:
            # Checks if fruit is falling or not to know in what half of the frame to look for it.
            if not fruit.is_falling:
                img_bproject = cv2.calcBackProject([img_hsv[:y + h, :]],
                                                   [0, 1], fruit.hist,
                                                   [0, 180, 0, 255], 1)
            else:
                img_bproject = cv2.calcBackProject([img_hsv[y:, :]], [0, 1],
                                                   fruit.hist,
                                                   [0, 180, 0, 255], 1)
        else:
            img_bproject = cv2.calcBackProject([img_hsv], [0, 1], fruit.hist,
                                               [0, 180, 0, 255], 1)
        # Calculation of the new track window by meanshift algorithm.
        ret, track_window = cv2.meanShift(img_bproject, fruit.track_window,
                                          term_crit)  # credit for eisner
        # Calculates the new histogram of the fruit.
        new_hist = calculate_hist_window(track_window, img_hsv)
        # Calculated correlation between new histogram to previous one.
        correlation = cv2.compareHist(new_hist, fruit.hist,
                                      HISTS_COMPARE_METHOD)
        # If the correlation is high enough we update the track window.
        if (
                abs(correlation) > HISTS_THRESH
        ) and fruit.counter < MAX_NUM_OF_FRAMES_ON_SCREEN:  # threshold for histogram
            # resemblance
            fruit.track_window = track_window
            fruit.hist = new_hist
            fruit.correlation = correlation
            # fruit.centers[fruit.counter] = fruit.centers[fruit.counter][:-1] + (correlation,)
            fruit.counter += 1
        else:  # Otherwise the fruit is gone and we remove it from fruits_info and add it to FRUIT_TO_EXTRACT
            fruits_info.remove(fruit)
            if not fruit.is_falling and len(
                    fruit.centers) > MINIMUM_NUM_OF_CENTERS_TO_EXTRACT:
                FRUIT_TO_EXTRACT.append(fruit)
    update_trajectories(FRUIT_TO_EXTRACT)
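calculate_hist_window() is defined elsewhere in this project; a plausible version, computing a two-channel H-S histogram over the track window so it is comparable with fruit.hist via cv2.compareHist (this is an assumption, not the project's actual helper):

import cv2

def calculate_hist_window(track_window, img_hsv):
    """Assumed helper: H-S histogram of the current track window."""
    x, y, w, h = track_window
    roi = img_hsv[y:y+h, x:x+w]
    hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    return hist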
def camShift():
    global frame2, frame, inputmode, trackWindow, roi_hist

    try:
        cap = cv2.VideoCapture(0)
    except Exception as e:
        print(e)
        return

    ret, frame = cap.read()

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', onMouse, param=(frame, frame2))

    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        # grab a frame
        ret, frame = cap.read()

        if not ret:
            break

        # display the tracked object.
        if trackWindow is not None:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            ret, trackWindow = cv2.meanShift(dst, trackWindow, termination)

            x, y, w, h = trackWindow
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        cv2.imshow('frame', frame)

        k = cv2.waitKey(60) & 0xFF
        if k == 27: break

        # when 'i' is pressed, enable input mode and pause the frame.
        if k == ord('i'):
            print('Select Area for Camshift and Enter a Key')
            inputmode = True
            frame2 = frame.copy()

            while inputmode:
                cv2.imshow('frame', frame)
                cv2.waitKey(0)

    cap.release()
    cv2.destroyAllWindows()
Example #41
    def extract(self, frame):
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)

        # Apply meanshift to get the new location.
        (ret, self.track_window) = cv2.meanShift(dst, self.track_window,
                                                 self.term_crit)

        # Draw it on image.
        (c, r, w, h) = self.track_window
        (x1, y1) = (c, r)
        (x2, y2) = (c + w, r + h)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
        return frame
Example #42
def meanshift(video_path,r,h,c,w,mask,roi_hist):

    cap = cv2.VideoCapture(video_path)

    # take first frame of the video
    ret,frame = cap.read()

    # setup initial location of window
    # r,h,c,w - region of image
    #           simply hardcoded the values
    r,h,c,w = 200,20,300,20  
    track_window = (c,r,w,h)

    # set up the ROI for tracking
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
    cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)

    # Setup the termination criteria, either 10 iteration or move by at least 1 pt
    term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

    while(1):
        ret ,frame = cap.read()

        if ret == True:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)

            # apply meanshift to get the new location
            ret, track_window = cv2.meanShift(dst, track_window, term_crit)

            # Draw it on image
            x,y,w,h = track_window
            img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
            cv2.imshow('img2',img2)

            k = cv2.waitKey(60) & 0xff
            if k == 27:
                break
            elif k != 255:
                # a real keypress (not the waitKey timeout) saves a snapshot
                cv2.imwrite(chr(k)+".jpg",img2)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
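Every example builds the same style of termination-criteria tuple; the two flags are OR-ed together, and iteration stops as soon as either bound is hit. A quick check of what the tuple actually holds:

import cv2

# Stop after 10 iterations OR once the window center moves by less than 1 px.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
print(term_crit)  # (3, 10, 1) -- EPS|COUNT == 2|1 == 3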
Exemple #43
0
def meanShift(CAM_ID):
    global frame, frame2, inputmode, trackWindow, roi_hist, out

    try:
        cap = cv2.VideoCapture(CAM_ID)
        cap.set(3, 480)  # cv2.CAP_PROP_FRAME_WIDTH
        cap.set(4, 320)  # cv2.CAP_PROP_FRAME_HEIGHT
    except:
        print('Can\'t open the CAM(%d)' % (CAM_ID))
        return

    ret, frame = cap.read()

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', onMouse, param=(frame, frame2))

    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if trackWindow is not None:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            ret, trackWindow = cv2.meanShift(dst, trackWindow, termination)

            x, y, w, h = trackWindow
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        cv2.imshow('frame', frame)

        k = cv2.waitKey(60) & 0xFF
        if k == 27:
            break

        if k == ord('i'):
            print("box & press enter key")
            inputmode = True
            frame2 = frame.copy()

            while inputmode:
                cv2.imshow('frame', frame)
                cv2.waitKey(0)

    cap.release()
    cv2.destroyAllWindows()
    def update(self, frame):
        print("Updating: {:d}".format(self.id))
        hsv = cv2.cvtColor(frame, code=cv2.COLOR_BGR2HSV)
        back_project = cv2.calcBackProject(images=[hsv],
                                           channels=[0],
                                           hist=self.roi_hist,
                                           ranges=[0, 180],
                                           scale=1)

        if args.get("algorithm") == "c":
            ret, self.track_window = cv2.CamShift(probImage=back_project,
                                                  window=self.track_window,
                                                  criteria=self.term_crit)
            pts = cv2.boxPoints(ret)
            pts = np.int0(pts)
            self.center = center(pts)
            cv2.polylines(frame,
                          pts=[pts],
                          isClosed=True,
                          color=(0, 255, 0),
                          thickness=1)

        if not args.get("algorithm") or args.get("algorithm") == "m":
            ret, self.track_window = cv2.meanShift(probImage=back_project,
                                                   window=self.track_window,
                                                   criteria=self.term_crit)
            x, y, w, h = self.track_window
            self.center = center([[x, y], [x + w, y], [x, y + h],
                                  [x + w, y + h]])
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        self.kalman.correct(self.center)
        prediction = self.kalman.predict()
        cv2.circle(frame,
                   center=(int(prediction[0]), int(prediction[1])),
                   radius=4,
                   color=(255, 0, 0),
                   thickness=-1)

        # fake shadow
        cv2.putText(frame,
                    "ID: {:d} -> {:s}".format(self.id, self.center),
                    org=(11, (self.id + 1) * 25 + 1),
                    fontFace=font,
                    fontScale=0.6,
                    color=(0, 0, 0),
                    thickness=1,
                    lineType=cv2.LINE_AA)
def mean_shift(hair_type, n):
    """
    :param hair_type: hair type whose images are loaded from args.input_dir
    :param n: number of images to segment
    :return: list of segmented images
    """
    root = args.input_dir
    imgs = []
    r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
    track_window = (c, r, w, h)
    # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
    term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
    _, _, imgs, _, _ = load_preprocess_contours(hair_type, n, (50,50), segmented=False)
    for i in range(n):
        img = imgs[i]
        roi = img[r:r+h, c:c+w]
        hsv_roi =  cv.cvtColor(roi, cv.COLOR_BGR2HSV)
        # mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
        roi_hist = cv.calcHist([hsv_roi],[0],None,[180],[0,180])
        cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)

        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)
        ret, track_window = cv.meanShift(dst, track_window, term_crit)
        # Draw it on image
        x,y,w,h = track_window
        img2 = cv.rectangle(img, (x,y), (x+w,y+h), 255,2)
        cv.imshow('img2',img2)
        cv.waitKey(0)
        cv.destroyAllWindows()

        # applyColorMap applies a GNU Octave/MATLAB equivalent colormap on a given image
        # cv.applyColorMap(src, colormap [, dst]) -> dst
        # ret, thresh = cv.threshold(gray, 100, 255, cv.THRESH_BINARY_INV)

        # apply cv.calcHist and cv.calcBackProject on thresh first of all
        """
            ######### Model histogram and meanshift  #############
            s = [[1,2,3],[1,2,3],[1,2,3]]
            calcHist(s) returns  [3,3,3]
            calcBackProject(s, hist, 2) returns  [[6,6,6],[6,6,6],[6,6,6]]
            hist = cv2.calcHist([s], [0], None, [3], [1,4])
            dst = cv2.calcBackProject([s], [0], hist, [1,4], 2)
        """

    return imgs
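The commented toy example above can be checked directly. The sketch below (values taken from that comment) shows that calcBackProject replaces each pixel with its histogram bin count times the scale factor:

import cv2
import numpy as np

# 3x3 image whose values 1, 2, 3 each occur three times.
s = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype=np.uint8)

# 3 bins over [1, 4): one bin per value -> hist is [3, 3, 3].
hist = cv2.calcHist([s], [0], None, [3], [1, 4])
print(hist.ravel())  # [3. 3. 3.]

# Each pixel becomes its bin count times the scale (3 * 2 = 6).
dst = cv2.calcBackProject([s], [0], hist, [1, 4], 2)
print(dst)  # [[6 6 6] [6 6 6] [6 6 6]]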
    def cut_zoom_video(self, update_progress):
        """
        a method which track the speaker in the video and cut a video from this

        @param update_progress: a function which handles the progressbar countprocess
        """

        self.frame = 0
        folder = Path(self.folder_path, self.folder_name)
        speaker_filename = os.path.join(folder, 'speaker.mp4')
        video_data = self.files[0]

        reader = skvideo.io.FFmpegReader(video_data, {}, {})
        #fgbg = cv2.createBackgroundSubtractorMOG2()
        fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5 , 1)

        videometadata = skvideo.io.ffprobe(video_data)
        self.frame_rate = videometadata['video']['@avg_frame_rate']
        self.number_frames = int(videometadata['video']['@nb_frames'])

        # initial search window, stored as (x, y, w, h) as meanShift expects
        x, y, width, height = 300, 220, 850, 400
        track_window = (x, y, width, height)

        speaker_out = skvideo.io.FFmpegWriter(speaker_filename, inputdict={
            "-r": self.frame_rate
        })

        for frame in reader.nextFrame():
            gmask = fgbg.apply(frame)
            is_ok, track_window = cv2.meanShift(gmask, track_window, term_crit)
            x, y, width, height = track_window
            if self.width == 1920:
                y = 100
                speaker_out.writeFrame(frame[y:y+height, x+100:x+width])
                self.frame += 1
                if self.frame % 30 == 0:
                    update_progress((int)(self.frame/self.number_frames*100))
            else:   
                y = 250
                speaker_out.writeFrame(frame[y-100:y+height, x:x+width])
                self.frame += 1
                if self.frame % 30 == 0:
                    update_progress((int)(self.frame/self.number_frames*100))  
                
        speaker_out.close()
        self.files.append(speaker_filename)
        self.__speaker_video = SpeakerVideo(speaker_filename)
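Unlike the histogram-based examples, cut_zoom_video hands a background-subtraction mask to cv2.meanShift as the probability image, so the window drifts toward whatever is moving. Reduced to a webcam sketch (camera index, initial window, and key handling are assumptions; cv2.bgsegm needs the opencv-contrib package):

import cv2

cap = cv2.VideoCapture(0)                      # assumed camera index
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5, 1)
track_window = (100, 100, 200, 200)            # assumed initial window

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Foreground mask: high where pixels differ from the background model.
    gmask = fgbg.apply(frame)
    # meanShift climbs toward the densest foreground region.
    _, track_window = cv2.meanShift(gmask, track_window, term_crit)
    x, y, w, h = track_window
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('motion tracking', frame)
    if cv2.waitKey(30) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()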
    def update(self, frame):
        # Convert the current video frame to HSV color space
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Compute the back projection of the pedestrian's HSV histogram
        back_project = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180],
                                           1)
        # Choose the tracking algorithm from the command-line argument.
        # If the algorithm argument is "c", use CamShift to compute the
        # track window and draw it on the current frame
        if args.get("algorithm") == "c":
            # CamShift returns the position and size of the track window
            ret, self.track_window = cv2.CamShift(back_project,
                                                  self.track_window,
                                                  self.term_crit)
            # Corner points of the (rotated) track window
            pts = cv2.boxPoints(ret)
            # Convert the coordinates to integers
            pts = np.int0(pts)
            # Use the window's center as the pedestrian's center point
            self.center = center(pts)
            # Draw the tracking box
            cv2.polylines(frame, [pts], True, 255, 1)
        # If no algorithm was given, or its value is "m", use mean shift
        if not args.get("algorithm") or args.get("algorithm") == "m":
            # Mean shift returns the new position and size of the track window
            ret, self.track_window = cv2.meanShift(back_project,
                                                   self.track_window,
                                                   self.term_crit)
            # Compute and store the pedestrian's center point
            x, y, w, h = self.track_window
            self.center = center([[x, y], [x + w, y], [x, y + h],
                                  [x + w, y + h]])
            # Draw the tracking box
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
        # Correct the center point with the Kalman filter
        self.kalman.correct(self.center)
        # Predict the pedestrian's next position
        prediction = self.kalman.predict()
        # Draw a circle at the predicted point
        cv2.circle(frame, (int(prediction[0]), int(prediction[1])), 4,
                   (255, 0, 0), -1)
        # Draw the pedestrian-info text shadow on the current frame
        cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center),
                    (11, (self.id + 1) * 25 + 1), font, 0.6, (0, 0, 0), 1,
                    cv2.LINE_AA)
        # Draw the pedestrian-info text on the current frame
        cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center),
                    (10, (self.id + 1) * 25), font, 0.6, (0, 255, 0), 1,
                    cv2.LINE_AA)
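Both update variants assume a self.kalman filter with a four-dimensional state (x, y, dx, dy) and two-dimensional position measurements. A common constant-velocity setup (the noise magnitude is an assumption) looks like:

import cv2
import numpy as np

kalman = cv2.KalmanFilter(4, 2)  # state (x, y, dx, dy), measurement (x, y)
# We observe position only.
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
# Constant-velocity model: position advances by velocity each frame.
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03  # assumed noise

# correct() expects a float32 column vector, which is why center()
# should return something like np.array([[x], [y]], np.float32).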
Exemple #48
0
def run_main():
	# create a "threaded" video stream and allow the camera sensor to warm up
	vs = PiVideoStream((640,480)).start()
	time.sleep(2.0)
	
	# read the first frame of the video
	frame = vs.read()
	
	# Set the ROI
	c, r, w, h=200, 100, 70, 70
	track_window=(c, r, w, h)
	
	# Create mask and normalized histogram
	roi = frame[r:r+h, c:c+w]
	hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
	mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180.,255.,255.)))
	roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
	cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
	
	# Setup the termination criteria, either 80 iterations or move by at least 1 pt
	term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
	
	while True:
		frame = vs.read()
	
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
		
		# apply meanshift to get the new location
		ret, track_window = cv2.meanShift(dst, track_window, term_crit)
		
		# apply camshift to get the new location
		#ret, track_window = cv2.CamShift(dst, track_window, term_crit)

		# draw it on image
		x, y, w, h = track_window
		cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
		cv2.putText(frame, 'Tracked M', (x-25, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
		
		cv2.imshow('Tracking', frame)
		
		# if the 'q' key is pressed, break from the loop
		key = cv2.waitKey(1) & 0xFF
		if key == ord("q"):
			break
			
	vs.stop()
	cv2.destroyAllWindows()
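The commented-out cv2.CamShift line above is a drop-in alternative: unlike meanShift it also adapts the window size and orientation and returns a rotated rectangle, drawn via cv2.boxPoints rather than cv2.rectangle. A sketch of that branch, assuming the same dst, track_window, and term_crit as in the loop above (and numpy imported as np):

# CamShift returns a rotated rect: ((cx, cy), (w, h), angle).
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
pts = cv2.boxPoints(ret)   # four corner points of the rotated rect
pts = np.int0(pts)         # polylines needs integer coordinates
cv2.polylines(frame, [pts], True, (255, 255, 255), 2)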
def track(frame, M):
    global Mat, roi_hist, track_window
    Mat = M

    if frame == 0:
        # First frame: build the ROI histogram (r, h, c, w are module-level
        # globals defining the initial window)
        roi = M[r:r+h, c:c+w]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    else:
        if Mat is not None:
            hsv = cv2.cvtColor(Mat, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            # term_crit is assumed to be defined at module level
            ret, track_window = cv2.meanShift(dst, track_window, term_crit)
	def track(self,im):
		im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
		track_im = cv2.calcBackProject([im_hsv], [0], self.new_hist, [0, 255], 1)
		track_im_visualize = track_im.copy()
		# convert (x0, y0, x1, y1) to (x, y, w, h)
		x0, y0, x1, y1 = self.hist_last_detection
		track_roi = (x0, y0, x1 - x0, y1 - y0)

		# Termination criteria: up to max_iter iterations or move by at least 1 pt.
		# Mean shift is rerun with increasing iteration caps to plot intermediate results.
		for max_iter in range(1, 10):
			term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, 1)
			(ret, intermediate_roi) = cv2.meanShift(track_im, track_roi, term_crit)
			x, y, w, h = intermediate_roi
			cv2.rectangle(track_im_visualize, (x, y), (x + w, y + h), max_iter / 10.0, 2)

		x, y, w, h = intermediate_roi
		self.hist_last_detection = [x, y, x + w, y + h]
		cv2.imshow("histogram", track_im_visualize)
Exemple #51
0
    def apply(self, frame, context):
        if context.frameno == 1:
            self.track_window = cv.selectROI('Select ROI', frame)
            cv.destroyWindow('Select ROI')
            x, y, w, h = self.track_window
            self.roi_hist = calc_hist(frame[y:y + h, x:x + w])
            return frame

        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
        ret, self.track_window = cv.meanShift(dst, self.track_window,
                                              self.term_crit)
        x, y, w, h = self.track_window
        cv.rectangle(frame, (x, y), (x + w, y + h), 255, 2)

        return frame
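apply calls a calc_hist helper that is not shown here; a plausible hue-histogram version, consistent with the [0, 180] range used in the back projection:

import cv2 as cv
import numpy as np

def calc_hist(roi):
    # Hypothetical helper: hue histogram of the ROI, ready for calcBackProject.
    hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
    # Ignore dark and desaturated pixels, as most of the examples here do.
    mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)),
                      np.array((180., 255., 255.)))
    hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
    return hist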
	def track(self,im):
		im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
		track_im = cv2.calcBackProject([im_hsv], [0], self.query_hist, [0, 255], 1)

		track_im_visualize = track_im.copy()
		# convert (x0, y0, x1, y1) to (x, y, w, h)
		x0, y0, x1, y1 = self.last_detection
		track_roi = (x0, y0, x1 - x0, y1 - y0)
		# Termination criteria: up to max_iter iterations or move by at least 1 pt.
		# Mean shift is rerun with increasing iteration caps to plot intermediate results.
		for max_iter in range(1, 10):
			term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, 1)
			(ret, intermediate_roi) = cv2.meanShift(track_im, track_roi, term_crit)
			x, y, w, h = intermediate_roi
			cv2.rectangle(track_im_visualize, (x, y), (x + w, y + h), max_iter / 10.0, 2)

		x, y, w, h = intermediate_roi
		self.last_detection = [x, y, x + w, y + h]
		cv2.imshow("track_win", track_im_visualize)
    def run(self):
        while True:
            ret, self.frame = self.cam.read()
            self.frame = cv2.resize(self.frame, (720, 480))
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

            # define range of white color in HSV
            lower_white = np.array([0,0,220], dtype=np.uint8)
            upper_white = np.array([0,0,255], dtype=np.uint8)
            mask = cv2.inRange(hsv, lower_white, upper_white)

            if self.circle is not None:
                x_circle, y_circle, r = self.circle
                # integer corners so the slices below are valid
                x0, y0, x1, y1 = x_circle - r // 2, y_circle - r // 2, x_circle + r // 2, y_circle + r // 2
                # meanShift expects (x, y, w, h), not two corner points
                self.track_window = (x0, y0, x1 - x0, y1 - y0)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                # build the histogram from the ROI, not the whole frame
                roi_hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
                cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
                self.hist = roi_hist.reshape(-1)

                vis_roi = vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0

            if self.tracking_state == 1:
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

                ret, self.track_window = cv2.meanShift(prob, self.track_window, term_crit)

                x, y, w, h = self.track_window
                vis = cv2.rectangle(vis, (x,y), (x+w,y+h), 255,2) 
            else:
                self.search()               

            cv2.imshow('frame', vis)

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()
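The run loop depends on self.circle and a search() fallback that are not shown; presumably a circle detector seeds the tracker when tracking is lost. A hypothetical search() based on HoughCircles, under that assumption:

import cv2
import numpy as np

def search(self):
    # Hypothetical: look for a circular marker to (re)seed the tracker.
    gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1.2, minDist=50,
                               param1=100, param2=30, minRadius=5, maxRadius=60)
    if circles is not None:
        # Take the strongest detection as (x, y, r).
        x, y, r = np.around(circles[0, 0]).astype(int)
        self.circle = (x, y, r)
        self.tracking_state = 1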
    def track(self, pathid, start, stop, basepath, paths):
        frames = getframes(basepath, True)
        frame = frames[start]

        # setup initial location of window
        box = paths[pathid].boxes[start]
        initialrect = (box.xtl, box.ytl, box.xbr-box.xtl, box.ybr-box.ytl)
        c,r,w,h = initialrect
        rect = initialrect
 
        # set up the ROI for tracking
        roi = frame[r:r+h, c:c+w]
        hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
 
        # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

        ret = []
        for i in range(start, stop):
            frame = frames[i]
            if frame is None:
                break

            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

            # apply meanshift to get the new location
            _, rect = cv2.meanShift(dst, rect, term_crit)
            x, y, w, h = rect
            # record the tracked window; only the first frame comes from
            # the original annotation
            outrect = {
                'rect': (x, y, w, h),
                'frame': i,
                'generated': (i != start)
            }
            ret.append(outrect)



            # Draw it on image
            #x,y,w,h = rect
            #cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
            #cv2.imshow('img2',frame)
            #cv2.waitKey(60)


        cv2.destroyAllWindows()
        return ret
def run_main():
    cap = cv2.VideoCapture(0)
    cap.set(3, 320)
    cap.set(4, 240)

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    c, r, w, h = 160, 120, 30, 30
    track_window = (c, r, w, h)

    # Create mask and normalized histogram
    roi = frame[r:r + h, c:c + w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)),
                       np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)

    while True:
        t = cv2.getTickCount()
        ret, frame = cap.read()

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        cv2.putText(frame, 'Tracked', (x - 25, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        t = cv2.getTickCount() - t
        print("detection time = %gms" % (t / (cv2.getTickFrequency() * 1000.)))
        cv2.imshow('Tracking', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Exemple #56
0
def run_main():
    cap = cv2.VideoCapture('upabove.mp4')

    # Read the first frame of the video
    ret, frame = cap.read()

    imsave("frame0001.png",cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    imshow(frame)

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    c,r,w,h = 150,250,70,70
    track_window = (c,r,w,h)

    # Create mask and normalized histogram
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    
    while True:
        ret, frame = cap.read()

        if not ret:
            break
        
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x,y,w,h = track_window
        cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
        cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX,
            1, (255,255,255), 2)
        
        cv2.imshow('Tracking', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
def tracking(frame, roi_hist, track_window):
    # Normalization (done when roi_hist was built): cv2.NORM_MINMAX linearly
    # maps all histogram values into [0, 255], which makes the resulting
    # back projection easy to display
    # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
    term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    #cv2.imwrite('dst.jpg', dst)
    # apply meanshift to get the new location
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    # cv2.imshow("Backproject",dst)
    # Draw it on image
    x, y, w, h = track_window
    img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    #cv2.imshow('img2', img2)
    return img2, track_window
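Because tracking returns the updated window, a caller has to thread it through frame by frame. A minimal driver loop under that assumption (the camera index and initial window are made up):

import cv2

cap = cv2.VideoCapture(0)             # assumed video source
ret, frame = cap.read()
track_window = (300, 200, 100, 100)   # assumed initial (x, y, w, h)
x, y, w, h = track_window
hsv_roi = cv2.cvtColor(frame[y:y + h, x:x + w], cv2.COLOR_BGR2HSV)
roi_hist = cv2.calcHist([hsv_roi], [0], None, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    img2, track_window = tracking(frame, roi_hist, track_window)
    cv2.imshow('tracking', img2)
    if cv2.waitKey(30) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()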
Exemple #58
0
def meanShift():
    global frame, frame2, inputmode, trackWindow, roi_hist

    try:
        cap = cv2.VideoCapture("http://172.30.1.7:8891/?action=stream")
    except Exception as e:
        print(e)
        return

    ret, frame = cap.read()

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', onMouse, param=(frame, frame2))

    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if trackWindow is not None:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            ret, trackWindow = cv2.meanShift(dst, trackWindow, termination)

            x, y, w, h = trackWindow
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        
        cv2.imshow('frame', frame)
        k = cv2.waitKey(60) & 0xFF

        if k == 27:
            break
            
        if k == ord('i'):
            print('Select an area for tracking and press any key')
            inputmode = True 
            frame2 = frame.copy()

            while inputmode:
                cv2.imshow('frame', frame)
                cv2.waitKey(0)

    cap.release()
    cv2.destroyAllWindows()
Exemple #59
0
def track_object(filename):
    vc = cv.VideoCapture(filename)

    # Read the first frame
    retval, frame = vc.read()
    if not retval:
        print(f'Failed to read video frames from file {filename}')
        return

    initialize_window(frame)

    col = upper_left[0]
    row = upper_left[1]
    width = lower_right[0] - upper_left[0]
    height = lower_right[1] - upper_left[1]
    track_window = (col, row, width, height)

    # Track RoI
    roi = frame[row:row + height, col:col + width]
    roi_in_hsv = cv.cvtColor(roi, cv.COLOR_BGR2HSV)  # histogram of the ROI, not the full frame
    mask = cv.inRange(roi_in_hsv, np.array((0., 60., 32.)),
                      np.array((180., 255., 255.)))
    roi_hist = cv.calcHist([roi_in_hsv], [0], mask, [180], [0, 180])
    cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)

    term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)

    cv.namedWindow('tracked', cv.WINDOW_NORMAL)
    while True:
        retval, frame = vc.read()
        if not retval:
            break

        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        retval, track_window = cv.meanShift(dst, track_window, term_crit)
        x, y, w, h = track_window
        image = cv.rectangle(frame, (x, y), (x + w, y + h), RECTANGLE_COLOR, 2)
        cv.imshow('tracked', image)

        k = cv.waitKey(60) & 0xff
        if k == 27:
            break

    cv.destroyAllWindows()
    vc.release()
def mean_shift_object_track(frame, initalSx, initalSy, initalEx, initalEy):
    # Initial window position and size: r/c are the top-left row/column,
    # h/w the height/width (numpy indexes images as [row, col])
    r, c = initalSy, initalSx
    h, w = initalEy - initalSy, initalEx - initalSx

    print(r, c, h, w)

    # meanShift expects the window as (x, y, w, h)
    track_window = (c, r, w, h)

    # Region to track
    roi = frame[r:r + h, c:c + w]
    # HSV image of the ROI
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Keep only pixels whose HSV values lie between (0, 60, 32) and (180, 255, 255)
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    # Histogram args: images, channels, mask, histogram size, ranges
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    # Normalize into [0, 255]
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Termination criteria: at most 50 iterations, or move by at least 20 pt
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 50, 20)
    filesName = os.listdir('./images/raw/')
    for file in filesName:
        frame = cv2.imread('./images/raw/' + file, 1)
        # HSV image of the current frame
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Back projection of the ROI histogram
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        # meanShift searches dst for the target window and returns its new location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        # Draw it on image
        x, y, w, h = track_window
        # img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        # cv2.imshow('img2',img2)
        # cv2.waitKey()
        # numpy slicing is [row, col], so index with y first
        cropped = frame[y:y + h, x:x + w]
        # cv2.imshow('cropped', cropped)
        # cv2.waitKey()
        cv2.imwrite('./images/crop/' + file, cropped)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()