示例#1
0
 def _opticalFlow(self):
     '''
     Advance the current track points one frame with pyramidal
     Lucas-Kanade optical flow (cv.CalcOpticalFlowPyrLK).

     @returns: the tracked point locations in the new frame
     @rtype: list of pv.Point()
     '''
     cur = self.frame
     prev = self.prev_frame

     # Scratch pyramid buffers required by the legacy cv API.
     pyr_cur = cv.CreateImage(cv.GetSize(cur), 8, 1)
     pyr_prev = cv.CreateImage(cv.GetSize(cur), 8, 1)

     # Flatten the pv.Point tracks into plain (x, y) tuples for cv.
     seeds = [(t.X(), t.Y()) for t in self.tracks]

     new_points, _status, _err = cv.CalcOpticalFlowPyrLK(
         prev,
         cur,
         pyr_prev,
         pyr_cur,
         seeds,
         (5, 5),
         3,  # pyramid levels
         (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01),
         0)

     # Wrap the raw (x, y) results back into pv.Point objects.
     return [pv.Point(p) for p in new_points]
示例#2
0
def OpticalFlowForOrientation(imagenew,imageold,gfttar,weights=0,existence=0):
    """Track *gfttar* from imageold to imagenew with pyramidal Lucas-Kanade.

    Points the tracker could not follow (status == 0) are removed in place
    from *gfttar* and from the freshly tracked list, keeping the two lists
    aligned index-for-index.

    @param imagenew: current frame
    @param imageold: previous frame
    @param gfttar: list of (x, y) feature points in imageold (mutated in place)
    @param weights: unused; kept for interface compatibility
    @param existence: unused; kept for interface compatibility
    @returns: (tracked points in imagenew, surviving points in imageold)
    """
    # Scratch pyramid buffers for the legacy cv API.
    pyrold = cv.CreateImage((imagenew.width, imagenew.height),
                            cv.IPL_DEPTH_32F, 1)
    pyrnew = cv.CreateImage((imagenew.width, imagenew.height),
                            cv.IPL_DEPTH_32F, 1)
    (gfttarnew, status, track_error) = cv.CalcOpticalFlowPyrLK(
        imageold, imagenew, pyrold, pyrnew, gfttar, (10, 10), 5,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

    # BUGFIX: the original iterated range(len(status)) forward while
    # popping, which shifts the remaining elements — entries after a
    # removed index get skipped and the loop can raise IndexError.
    # Iterating the indices in reverse keeps every pending index valid.
    for i in reversed(range(len(status))):
        if status[i] == 0:
            gfttar.pop(i)
            gfttarnew.pop(i)

    return gfttarnew, gfttar
示例#3
0
def OpticalFlow(imagenew, imageold, gfttar, weights=0, existence=0):
    """Run pyramidal Lucas-Kanade flow from imageold to imagenew.

    @param imagenew: current frame
    @param imageold: previous frame
    @param gfttar: feature points to track (list of (x, y) tuples)
    @param weights: passed straight through to the caller
    @param existence: passed straight through to the caller
    @returns: (tracked points, weights, existence)
    """
    size = (imagenew.width, imagenew.height)
    # Scratch pyramid buffers required by the legacy cv API.
    pyr_a = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    pyr_b = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    term = (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)
    tracked, _status, _err = cv.CalcOpticalFlowPyrLK(
        imageold, imagenew, pyr_a, pyr_b, gfttar, (10, 10), 5, term, 0)
    return tracked, weights, existence
示例#4
0
def lucas_kanade(img1,
                 img2,
                 corners1,
                 win=WIN,
                 num_pyramids=PYRAMIDS,
                 criteria=FLOW_CRITERIA,
                 flags=FLAGS):
    """Pyramidal Lucas-Kanade flow of corners1 from img1 to img2.

    Returns the raw (points, status, track_error) triple from
    cv.CalcOpticalFlowPyrLK.
    """
    # Magic pyramid-buffer size formula from the old OpenCV samples.
    # NOTE(review): mixes img1.width with img2.height — presumably both
    # frames share one size; confirm with the callers.
    buf_size = (img1.width + 8, img2.height / 3)
    pyr_a = cv.CreateImage(buf_size, cv.IPL_DEPTH_32F, 1)
    pyr_b = cv.CreateImage(buf_size, cv.IPL_DEPTH_32F, 1)
    return cv.CalcOpticalFlowPyrLK(img1, img2, pyr_a, pyr_b, corners1, win,
                                   num_pyramids, criteria, flags)
示例#5
0
    def verify_still_tracking(self):
        """Re-track the four corner features with LK flow and decide
        whether we are still locked on the target.

        Sets self.tracking = False (and self.detected = 0) when fewer
        than 4 features survive, when opposite side lengths disagree by
        more than 40%, or when any side collapses below 10 pixels.
        """
        self.detected = 2

        # compute optical flow
        self.features, status, track_error = cv.CalcOpticalFlowPyrLK(
            self.prev_grey, self.grey, self.prev_pyramid, self.pyramid,
            self.features, (self.win_size, self.win_size), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), self.flags)

        # set back the points we keep (LK status flag == tracked OK)
        self.features = [p for (st, p) in zip(status, self.features) if st]

        if len(self.features) < 4:
            self.tracking = False  # we lost it, restart search
            log.info("tracking -> not tracking: len features %d < 4" %
                     len(self.features))
        else:
            # make sure that in addition the distances are consistent
            ds1 = ptdst(self.features[0], self.features[1])
            ds2 = ptdst(self.features[2], self.features[3])

            # BUGFIX: guard against division by zero when two tracked
            # points coincide (min distance == 0); the "ds < 10" check
            # below still rejects such a degenerate frame.
            if min(ds1, ds2) > 0 and max(ds1, ds2) / min(ds1, ds2) > 1.4:
                self.tracking = False
                log.info(
                    "tracking -> not tracking: max/min ds1 %s, ds2 %s > 1.4" %
                    (ds1, ds2))

            ds3 = ptdst(self.features[0], self.features[2])
            ds4 = ptdst(self.features[1], self.features[3])

            # BUGFIX: same zero guard as above.
            if min(ds3, ds4) > 0 and max(ds3, ds4) / min(ds3, ds4) > 1.4:
                self.tracking = False
                log.info(
                    "tracking -> not tracking: max/min ds3 %s, ds4 %s > 1.4" %
                    (ds3, ds4))

            if ds1 < 10 or ds2 < 10 or ds3 < 10 or ds4 < 10:
                self.tracking = False
                log.info(
                    "tracking -> not tracking: ds1 %s, ds2 %s, ds3 %s, ds4 %s"
                    % (ds1, ds2, ds3, ds4))

            if not self.tracking:
                self.detected = 0
    #features = []
    #for i in range(1, dst_img.width, 1):
    #  for j in range(1, dst_img.height, 1):
    #    features.append((i,j))

    #cornerMap = cv.CreateMat(src_im1.height, src_im1.width, cv.CV_32FC1)
    #cv.CornerHarris(src_im1,cornerMap,3)

    #features = []
    #for y in range(0, src_im1.height):
    #  for x in range(0, src_im1.width):
    #    harris = cv.Get2D(cornerMap, y, x)
    #    if harris[0] > 10e-6:
    #      features.append((x, y))

    # Single-level LK (0 pyramid levels, no pyramid buffers) over a large
    # 50x50 search window; r = (tracked points, status, track_error).
    r = cv.CalcOpticalFlowPyrLK(
        src_im1, src_im2, None, None, features, (50, 50), 0,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 64, 0.01), 0)
    # NOTE(review): "list" shadows the builtin of the same name.
    list = r[0]
    for i in range(len(list)):
        # NOTE(review): "dis" (flow magnitude) is computed but never used —
        # only the red flow line below is drawn.
        dis = math.sqrt(
            math.pow((features[i][0] - list[i][0]), 2) +
            math.pow((features[i][1] - list[i][1]), 2))
        # Draw the motion vector from the seed point to its tracked position.
        cv.Line(dst_img, (int(features[i][0]), int(features[i][1])),
                (int(list[i][0]), int(list[i][1])), cv.CV_RGB(255, 0, 0), 1,
                cv.CV_AA, 0)

    cv.NamedWindow("w", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("w", dst_img)
    cv.WaitKey()
示例#7
0
    def track_lk(self, cv_image, face):
        """Track feature points on *face* across frames with pyramidal LK.

        When feature points already exist they are advanced from the
        previous frame with cv.CalcOpticalFlowPyrLK; otherwise a fresh set
        is seeded inside face.track_box, using Good Features to Track when
        self.feature_type == 0 or SURF when it is 1.  The grey/pyramid
        buffers on *face* are swapped at the end so the current frame
        becomes the "previous" frame on the next call.

        @param cv_image: current BGR frame
        @param face: per-object tracking state (grey/pyramid buffers,
            features, track_box); mutated in place
        @returns: best-fit ellipse (CvBox2D) around the surviving points,
            or None when fewer than 7 points remain (FitEllipse2 needs >6)
        """
        feature_box = None
        """ Initialize intermediate images if necessary """
        if not face.pyramid:
            face.grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.features = []
        """ Create a grey version of the image """
        cv.CvtColor(cv_image, face.grey, cv.CV_BGR2GRAY)
        """ Equalize the histogram to reduce lighting effects """
        cv.EqualizeHist(face.grey, face.grey)

        if face.track_box and face.features != []:
            """ We have feature points, so track and display them """
            """ Calculate the optical flow """
            face.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                face.prev_grey, face.grey, face.prev_pyramid, face.pyramid,
                face.features, (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01),
                self.flags)
            """ Keep only high status points """
            face.features = [p for (st, p) in zip(status, face.features) if st]

        elif face.track_box and self.is_rect_nonzero(face.track_box):
            """ Get the initial features to track """
            """ Create a mask image to be used to select the tracked points """
            mask = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            """ Begin with all black pixels """
            cv.Zero(mask)
            """ Get the coordinates and dimensions of the track box """
            # NOTE(review): bare except — silently treats any track_box that
            # does not unpack to (x, y, w, h) as "nothing to track".
            try:
                x, y, w, h = face.track_box
            except:
                return None

            if self.auto_face_tracking:
                #                """ For faces, the detect box tends to extend beyond the actual object so shrink it slightly """
                #                x = int(0.97 * x)
                #                y = int(0.97 * y)
                #                w = int(1 * w)
                #                h = int(1 * h)
                """ Get the center of the track box (type CvRect) so we can create the
                    equivalent CvBox2D (rotated rectangle) required by EllipseBox below. """
                center_x = int(x + w / 2)
                center_y = int(y + h / 2)
                roi_box = ((center_x, center_y), (w, h), 0)
                """ Create a filled white ellipse within the track_box to define the ROI. """
                cv.EllipseBox(mask, roi_box, cv.CV_RGB(255, 255, 255),
                              cv.CV_FILLED)
            else:
                """ For manually selected regions, just use a rectangle """
                pt1 = (x, y)
                pt2 = (x + w, y + h)
                cv.Rectangle(mask, pt1, pt2, cv.CV_RGB(255, 255, 255),
                             cv.CV_FILLED)
            """ Create the temporary scratchpad images """
            # NOTE(review): these two lines use self.grey while the rest of
            # this method uses face.grey — confirm self.grey exists and has
            # the same size as the frame.
            eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
            temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

            if self.feature_type == 0:
                """ Find keypoints to track using Good Features to Track """
                face.features = cv.GoodFeaturesToTrack(
                    face.grey,
                    eig,
                    temp,
                    self.max_count,
                    self.quality,
                    self.good_feature_distance,
                    mask=mask,
                    blockSize=self.block_size,
                    useHarris=self.use_harris,
                    k=0.04)

            elif self.feature_type == 1:
                """ Get the new features using SURF """
                (surf_features, descriptors) = cv.ExtractSURF(
                    face.grey, mask, cv.CreateMemStorage(0),
                    (0, self.surf_hessian_quality, 3, 1))
                # Each SURF keypoint is ((x, y), laplacian, size, dir,
                # hessian); only the location is kept for tracking.
                for feature in surf_features:
                    face.features.append(feature[0])
            #
            if self.auto_min_features:
                """ Since the detect box is larger than the actual face
                    or desired patch, shrink the number of features by 10% """
                face.min_features = int(len(face.features) * 0.9)
                face.abs_min_features = int(0.5 * face.min_features)
        """ Swapping the images """
        face.prev_grey, face.grey = face.grey, face.prev_grey
        face.prev_pyramid, face.pyramid = face.pyramid, face.prev_pyramid
        """ If we have some features... """
        if len(face.features) > 0:
            """ The FitEllipse2 function below requires us to convert the feature array
                into a CvMat matrix """
            try:
                self.feature_matrix = cv.CreateMat(1, len(face.features),
                                                   cv.CV_32SC2)
            except:
                pass
            """ Draw the points as green circles and add them to the features matrix """
            i = 0
            for the_point in face.features:
                if self.show_features:
                    cv.Circle(self.marker_image,
                              (int(the_point[0]), int(the_point[1])), 2,
                              (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
                try:
                    cv.Set2D(self.feature_matrix, 0, i,
                             (int(the_point[0]), int(the_point[1])))
                except:
                    pass
                i = i + 1
            """ Draw the best fit ellipse around the feature points """
            if len(face.features) > 6:
                feature_box = cv.FitEllipse2(self.feature_matrix)
            else:
                feature_box = None
            """ Publish the ROI for the tracked object """
            # try:
            #     (roi_center, roi_size, roi_angle) = feature_box
            # except:
            #     logger.info("Patch box has shrunk to zeros...")
            #     feature_box = None

            # if feature_box and not self.drag_start and self.is_rect_nonzero(face.track_box):
            #     self.ROI = RegionOfInterest()
            #     self.ROI.x_offset = min(self.image_size[0], max(0, int(roi_center[0] - roi_size[0] / 2)))
            #     self.ROI.y_offset = min(self.image_size[1], max(0, int(roi_center[1] - roi_size[1] / 2)))
            #     self.ROI.width = min(self.image_size[0], int(roi_size[0]))
            #     self.ROI.height = min(self.image_size[1], int(roi_size[1]))

            # self.pubROI.publish(self.ROI)

        if feature_box is not None and len(face.features) > 0:
            return feature_box
        else:
            return None
示例#8
0
     cv.ShowImage(window1, gray_image)
 else:
     cv.ShowImage(window1, render_image)
 #Image processing
 # Frame differencing: grey, smooth, abs-diff against the previous
 # registered frame, threshold, dilate, then accumulate into sum_image
 # with a per-frame fading factor.
 cv.CvtColor(img, gray_image, cv.CV_RGB2GRAY)
 cv.Copy(gray_image, register1_image)
 cv.Smooth(register1_image, register1_image, cv.CV_GAUSSIAN, 3, 3)
 cv.AbsDiff(register1_image, register2_image, accumulator)
 cv.InRangeS(accumulator, (threshold_limit1_lower), (threshold_limit1_upper), accumulator)
 cv.Dilate(accumulator, accumulator, None, 2)
 # NOTE(review): accumulator is also passed as the mask argument here —
 # confirm that masking the Add by the motion mask itself is intended.
 cv.Add(accumulator, sum_image, sum_image, accumulator)
 cv.SubS(sum_image, (fading_factor), sum_image)
 cv.InRangeS(sum_image, (threshold_limit2_lower), (threshold_limit2_upper), accumulator)
 cv.Copy(register1_image, register2_image)
 #Motion detection
 # LK flow of the current corner set; note the termination criteria use
 # only CV_TERMCRIT_ITER with eps = 0 (iteration count alone).
 new_corners, status, track_error = cv.CalcOpticalFlowPyrLK(prev_image, gray_image, pyramid1, pyramid2, corners, (10,10), 2, (cv.CV_TERMCRIT_ITER, 10, 0), 0)
 counter = (counter + 1) % skip
 if(counter == 0):
     corners = cv.GoodFeaturesToTrack(gray_image, eigen_image, temp_image, cornerCount = corner_count, qualityLevel = quality, minDistance = min_distance) #Good features to track
     flag = True
 # NOTE(review): this Copy is immediately overwritten by the CvtColor on
 # the next line (see also the commented-out duplicate below).
 cv.Copy(img, render_image)
 cv.CvtColor(accumulator, render_image, cv.CV_GRAY2RGB)
 #cv.Copy(img, render_image)
 cv.Copy(gray_image, prev_image)
 #Drawing vectors and averaging the rotation...
 # NOTE(review): "sum" shadows the builtin of the same name.
 sum = 0
 summing_counter = 0
 # "flag" skips vector drawing for the frame right after corners were
 # re-seeded (old and new corner sets would not correspond).
 if flag:
     flag = not flag
 else:
     for i in range(len(new_corners)):
示例#9
0
                                              quality, min_distance, None, 3,
                                              0, 0.04)

            # refine the corner locations
            features = cv.FindCornerSubPix(
                grey, features, (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

        elif features != []:
            # we have points, so display them
            # cv.ShowImage ('prev_grey', prev_grey)
            # cv.ShowImage ('grey', grey)

            # calculate the optical flow
            features, status, track_error = cv.CalcOpticalFlowPyrLK(
                prev_grey, grey, prev_pyramid, pyramid, features,
                (win_size, win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), flags)
            #            print "num features: ", sum(status)
            #            print "status: ", status

            # set back the points we keep
            features = [p for (st, p) in zip(status, features) if st]

            # count = 0;
            # for feat in features:
            #     print "feature coordinates: ", feat, count
            #     count = count + 1

            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
示例#10
0
def main():                                                   
	path = "../rawData/outdoor1/GOPR0003_0/"
	files = os.listdir(path)  
	"""
		Get SIFT features for the first frame
	"""
	imname = path+files[10]
	a =  time()
	sift.process_image(imname,'empire.sift')          
	l1,d1 = sift.read_features_from_file('empire.sift')   
	b = time()
	print "Time for getting SIFT features: ",b - a
	
	refine_l = []  
	""" define the bounding box and filter the points"""
	b_ver1 = (60,91)
	b_ver2 = (148,315)  
	w = (b_ver2[0] - b_ver1[0]) 
	h = (b_ver2[1] - b_ver1[1]) 
	initalIndicator = 1        #indicate the scale of the box
	for i,p in enumerate(l1):
		center = p[:2] 
		if center[0] > 60 and center[0] < 148 and center[1] > 91 and center[1] < 315:
			refine_l.append(tuple(center))
			
	color = (0,0,255)
	""" start at frame285 """
	startIndex = 10
	#writer = cv.CreateVideoWriter("myTrack.avi",cv.CV_FOURCC('M','J','P','G'),60,cv.GetSize (cv.LoadImage(path+files[startIndex])),1)
   
	while (startIndex<=2900):
		frameStartTime =  time()
		seq = [cv.LoadImage(path+files[startIndex]),
			cv.LoadImage(path+files[startIndex+1])]
		"""
			calculate optical flow
		"""                
		newFrameImageGS_32F1 = cv.CreateImage (cv.GetSize (seq[0]), 8, 1)
		newFrameImageGS_32F2 = cv.CreateImage (cv.GetSize (seq[0]), 8, 1)
		cv.CvtColor(seq[0],newFrameImageGS_32F1,cv.CV_RGB2GRAY)
		cv.CvtColor(seq[1],newFrameImageGS_32F2,cv.CV_RGB2GRAY)
		pyramid = cv.CreateImage (cv.GetSize (seq[0]), 8, 1)
		prev_pyramid = cv.CreateImage (cv.GetSize (seq[0]), 8, 1)
		flags = 0 
		     
		points, status1, errors,= cv.CalcOpticalFlowPyrLK (
		                    newFrameImageGS_32F1, 
		                    newFrameImageGS_32F2, 
		                    prev_pyramid, 
		                    pyramid,
		                    refine_l,
		                    (10,10),
		                    3,#pyr number
		                    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01),
		                    flags)
		
		"""center of mass of points"""
		p_x = 0.0
		p_y = 0.0
		sumWeight = 0.0
		for i,point in enumerate(points):
			myweight = windowWeight(point,b_ver1,b_ver2)
			p_x += myweight*point[0]
			p_y += myweight*point[1]
			sumWeight += myweight
		# p_x = [windowWeight(point,b_ver1,b_ver2)*point[0] for point in points]
		# p_y = [windowWeight(point,b_ver1,b_ver2)*point[1] for point in points]		   
		# weight = [windowWeight(point,b_ver1,b_ver2) for point in points]
		cm_x = p_x/sumWeight
		cm_y = p_y/sumWeight
		
		""" remove far away points """ 
		diag = mynorm(b_ver1,b_ver2)
		points = [point for point in points if mynorm(point,(cm_x,cm_y)) <= 0.7*diag]
			  	  	
		"""
			detect scale change by finding the density of points
		"""                                                     
		densityIndicator = sum([mynorm(point,(cm_x,cm_y)) for point in points])/len(points)
		if startIndex == 10:
			initalIndicator = densityIndicator
			
		#print "Scale indicator ", densityIndicator/initalIndicator  
 
		""" Calculate new bounding box """
		new_w = w*densityIndicator/initalIndicator
		new_h = h*densityIndicator/initalIndicator
		b_ver1 = (int(cm_x-new_w/2), int(cm_y-new_h/2))
		b_ver2 = (int(cm_x+new_w/2), int(cm_y+new_h/2))

		frameEndTime = time()
  	    
		print "Time to process one frame: ", frameEndTime - frameStartTime
		
		""" draw images and detected points"""
		for pt in refine_l:
			cv.Circle(seq[0], (int(pt[0]), int(pt[1])), 5, color, 0, cv.CV_AA, 0)
		cv.Circle(seq[0], (int(cm_x), int(cm_y)), 10, (0,255,255), 0, cv.CV_AA, 0)	 
		cv.Rectangle(seq[0],b_ver1,b_ver2,(255,0,0))
		cv.ShowImage('First', seq[0])   
		cv.WaitKey(30)
		for pt in points:
			cv.Circle(seq[1], (int(pt[0]), int(pt[1])), 5, color, 0, cv.CV_AA, 0)
		cv.Circle(seq[1], (int(cm_x), int(cm_y)), 10, (0,255,255), 0, cv.CV_AA, 0)	
		cv.Rectangle(seq[1],b_ver1,b_ver2,(255,0,0))
		cv.ShowImage('First', seq[1])

		refine_l = points
		startIndex += 1
示例#11
0
def main():
    """Track a SIFT-seeded point set across frames with pyramidal LK flow,
    then translate the bounding box by the translation component of the
    homography estimated between consecutive point sets.  Shows each
    annotated frame in the 'First' window.
    """
    path = "../rawData/outdoor1/GOPR0003_0/"
    files = os.listdir(path)
    """Get SIFT features for the first frame."""
    imname = path + files[10]
    a = time()
    sift.process_image(imname, 'empire.sift')
    l1, d1 = sift.read_features_from_file('empire.sift')
    b = time()
    print "Time for getting SIFT features: ", b - a
    refine_l = []
    #figure()
    #gray()
    # Initial bounding box corners (top-left, bottom-right).
    b_ver1 = (60, 91)
    b_ver2 = (148, 315)
    # Keep every 30th keypoint unconditionally, plus every keypoint that
    # falls inside the bounding box.
    for i, p in enumerate(l1):
        center = p[:2]
        if i % 30 == 0:
            # if center[0] < 60 or center[0] > 148:
            # 	continue
            # if center[1] < 91 or center[1] > 315:
            # 	continue
            refine_l.append(tuple(center))
            # plot(center[0],center[1],'ob')
            # 		imshow(im1)
            # 				axis('off')
        elif center[0] > 60 and center[0] < 148 and center[1] > 91 and center[
                1] < 315:
            refine_l.append(tuple(center))
    #sift.plot_features(im1,l1,circle=True)
    #show()

    startIndex = 10

    while (startIndex <= 90):
        seq = [
            cv.LoadImage(path + files[startIndex]),
            cv.LoadImage(path + files[startIndex + 1])
        ]
        #draw a box on image 1
        # cv.Rectangle(seq[0],(60,91),(148,315),(255,0,0))
        # 	cv.ShowImage('TestOpticFlow', seq[0])
        #
        # 	cv.WaitKey()
        # 	cv.DestroyAllWindows()
        """Calculate optical flow between the two consecutive frames."""
        color = (0, 0, 255)
        newFrameImageGS_32F1 = cv.CreateImage(cv.GetSize(seq[0]), 8, 1)
        newFrameImageGS_32F2 = cv.CreateImage(cv.GetSize(seq[0]), 8, 1)
        cv.CvtColor(seq[0], newFrameImageGS_32F1, cv.CV_RGB2GRAY)
        cv.CvtColor(seq[1], newFrameImageGS_32F2, cv.CV_RGB2GRAY)
        # Scratch pyramid buffers for the legacy cv API.
        pyramid = cv.CreateImage(cv.GetSize(seq[0]), 8, 1)
        prev_pyramid = cv.CreateImage(cv.GetSize(seq[0]), 8, 1)
        flags = 0
        #a =  time()

        points, status, errors, = cv.CalcOpticalFlowPyrLK(
            newFrameImageGS_32F1,
            newFrameImageGS_32F2,
            prev_pyramid,
            pyramid,
            refine_l,
            (10, 10),
            6,  #pyr number
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01),
            flags)
        #b = time()
        #print "Time for calculating optical flow: ",b - a

        #a =  time()
        # Homogeneous coordinates of the two matched point sets, then the
        # homography mapping the old set onto the tracked one.
        fp = make_homog(transpose(array(refine_l)))
        tp = make_homog(transpose(array(points)))
        H = H_from_points(fp, tp)
        #H = Haffine_from_points(fp,tp)
        #b = time()
        #print "Time for calculating homography: ",b - a
        #print H[0:2,0:2]
        # NOTE(review): the return value of this call is discarded.
        H_solve_s_and_ang(H[0:2, 0:2])
        #print H[0:2,2]
        # Translation component of the homography; used to move the box.
        disp = (int(H[0:2, 2][0]), int(H[0:2, 2][1]))

        cv.Rectangle(seq[0], b_ver1, b_ver2, (255, 0, 0))
        for pt in refine_l:
            cv.Circle(seq[0], (int(pt[0]), int(pt[1])), 5, color, 0, cv.CV_AA,
                      0)
        cv.ShowImage('First', seq[0])
        cv.WaitKey(30)

        # Shift the box by the estimated translation.
        b_ver1 = (b_ver1[0] + disp[0], b_ver1[1] + disp[1])
        b_ver2 = (b_ver2[0] + disp[0], b_ver2[1] + disp[1])

        cv.Rectangle(seq[1], b_ver1, b_ver2, (255, 0, 0))
        #cv.DestroyWindow('First')
        for pt in points:
            cv.Circle(seq[1], (int(pt[0]), int(pt[1])), 5, color, 0, cv.CV_AA,
                      0)
        cv.ShowImage('First', seq[1])
        #sleep(0.7)
        cv.WaitKey(5)
        #cv.DestroyWindow('First')

        # NOTE(review): unlike the sibling variant of this function,
        # refine_l is never updated to the tracked points here, so every
        # iteration tracks from the original frame-10 keypoint positions —
        # confirm whether this is intentional.
        startIndex += 1
示例#12
0
    def run(self):
        image = None
        MAX_COUNT = 500
        win_size = (32, 32)
        line_draw = 2
        frame = cv.QueryFrame(self.capture)
        image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        image.origin = frame.origin
        grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
        edges = cv.CreateImage(cv.GetSize(frame), 8, 1)
        prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
        prev_grey2 = cv.CreateImage(cv.GetSize(frame), 8, 1)
        prev_grey3 = cv.CreateImage(cv.GetSize(frame), 8, 1)
        pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
        prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
        points = []
        prev_points = []
        count = 0
        criteria = (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)
        while True:
            frame = cv.QueryFrame(self.capture)
            # cv.Rectangle( frame, self.last_rect[0], self.last_rect[1], cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
            # cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 15, 0)
            cv.Copy(frame, image)
            cv.CvtColor(image, grey, cv.CV_BGR2GRAY)
            if count == 0:
                eig = cv.CreateImage(cv.GetSize(grey), 32, 1)
                temp = cv.CreateImage(cv.GetSize(grey), 32, 1)
                quality = 0.01
                min_distance = 10
                points = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT,
                                                quality, min_distance, None, 3,
                                                0, 0.04)
                points = cv.FindCornerSubPix(grey, points, win_size, (-1, -1),
                                             criteria)
            else:
                flags = 0
                points, status, track_error = cv.CalcOpticalFlowPyrLK(
                    prev_grey, grey, prev_pyramid, pyramid, prev_points,
                    win_size, 2, criteria, flags)
                diff_points = []
                for i, j in enumerate(points):
                    print j
                    if not j == prev_points[i]:
                        diff_points.append(j)
                print 'len %d' % len(diff_points)

            prev_points == points
            count = len(points)
            print count

            prev_grey = grey
            prev_pyramid = pyramid
            prev_points = points
            if line_draw:
                cv.Canny(grey, edges, 30, 150, 3)
                if line_draw == 1:
                    cv.CvtColor(edges, image, cv.CV_GRAY2BGR)
                elif line_draw > 1:
                    cv.Merge(edges, prev_grey2, prev_grey3, None, image)
                    cv.Copy(prev_grey2, prev_grey3, None)
                    cv.Copy(edges, prev_grey2, None)
            cv.ShowImage("Target", image)
            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
示例#13
0
              GAUSSIAN_BLUR_FACTOR)
    cv.ConvertImage(thumbnail, frame2_1C, cv.CV_CVTIMG_FLIP)
    t = Thread(target=motion_bbox, args=(
        output,
        motion,
    ))
    t.run()

    #obtencion de caracteristicas del primer frame
    frame1_features = cv.GoodFeaturesToTrack(frame1_1C, eig_image, temp_image,
                                             NFEATURES, 0.1, 5, None, 3, False)

    #busqueda de caracteristicas del primer frame, en el segundo usando el algoritmo de Lucas Kanade

    frame2_features, status, track_error = cv.CalcOpticalFlowPyrLK(
        frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, (3, 3), 5,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

    #recorrido de las caracteristicas y dibujo de las flechas
    #utilizando el algoritmo de http://ai.stanford.edu/~dstavens/cs223b/optical_flow_demo.cpp
    totals = []
    for pt1, pt2 in bbox_list:  #recorrido de los bounding box finales
        cv.Rectangle(output, pt1, pt2, cv.CV_RGB(255, 0, 0),
                     1)  #se dibuja un rectangulo en ese bounding box
        center = (min(pt1[0], pt2[0]) + abs(pt1[0] - pt2[0]) / 2,
                  min(pt1[1], pt2[1]) + abs(pt1[1] - pt2[1]) / 2)
        angles = []
        for i in range(len(frame2_features)):
            p = frame1_features[i]
            q = frame2_features[i]
            p = int(p[0]), int(p[1])
示例#14
0
        # Grey-scale versions of the two input frames.
        cv.CvtColor(aa, a, cv.CV_BGR2GRAY)
        cv.CvtColor(bb, b, cv.CV_BGR2GRAY)
        # map = cv.CreateMat(2, 3, cv.CV_32FC1)
        # cv.GetRotationMatrix2D((256, 256), 10, 1.0, map)
        # b = cv.CloneMat(a)
        # cv.WarpAffine(a, b, map)

        eig_image = cv.CreateMat(a.height, a.width, cv.CV_32FC1)
        temp_image = cv.CreateMat(a.height, a.width, cv.CV_32FC1)

        # Pyramid scratch buffers; (height/3, width+8) matches the buffer
        # size formula used elsewhere for the legacy CalcOpticalFlowPyrLK.
        prevPyr = cv.CreateMat(a.height / 3, a.width + 8, cv.CV_8UC1)
        currPyr = cv.CreateMat(a.height / 3, a.width + 8, cv.CV_8UC1)
        prevFeatures = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 400,
                                              0.01, 0.01)
        (currFeatures, status, track_error) = cv.CalcOpticalFlowPyrLK(
            a, b, prevPyr, currPyr, prevFeatures, (10, 10), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
        if 1:  # enable visualization
            print
            print sum(status), "Points found in curr image"
            # Draw each feature's previous and current position joined by
            # a grey motion-vector line.
            for prev, this in zip(prevFeatures, currFeatures):
                iprev = tuple([int(c) for c in prev])
                ithis = tuple([int(c) for c in this])
                cv.Circle(a, iprev, 3, 255)
                cv.Circle(a, ithis, 3, 0)
                cv.Line(a, iprev, ithis, 128)

            snapL([a, b])

        # NOTE(review): "frames" and "fc" are defined outside this view.
        fc = (fc + 1) % len(frames)
        #exit