Example #1
localMax = cv.CreateMat(dst_32f.height, dst_32f.width, cv.CV_8U)
cv.Cmp(
    dst_32f, dilated, localMax, cv.CV_CMP_EQ
)  # Compare keeps only the unmodified pixels, i.e. the local maxima, which are the corners.

threshold = 0.01 * maxv
cv.Threshold(dst_32f, dst_32f, threshold, 255, cv.CV_THRESH_BINARY)

cornerMap = cv.CreateMat(dst_32f.height, dst_32f.width, cv.CV_8U)
cv.Convert(dst_32f, cornerMap)  # Convert to 8-bit so it can be ANDed with localMax
cv.And(cornerMap, localMax, cornerMap)  # Keep only thresholded pixels that are also local maxima

radius = 3
thickness = 2

l = []
for x in range(
        cornerMap.height
):  # Build the list of corner points: every pixel that is not 0 (i.e. not black)
    for y in range(cornerMap.width):
        if cornerMap[x, y]:
            l.append((y, x))

for center in l:
    cv.Circle(im, center, radius, (255, 255, 255), thickness)

cv.ShowImage("Image", im)
cv.ShowImage("CornerHarris Result", dst_32f)
cv.ShowImage("Unique Points after Dilatation/CMP/And", cornerMap)

cv.WaitKey(0)
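
For comparison, here is a minimal sketch of the same dilate-and-compare local-maximum trick with the modern cv2 API; the input image `gray` and the parameter values are illustrative assumptions, not part of the example above:

import cv2
import numpy as np

# gray: an 8-bit single-channel image, assumed to already exist
harris = cv2.cornerHarris(np.float32(gray), 2, 3, 0.04)
dilated = cv2.dilate(harris, None)         # each pixel becomes its local maximum
local_max = harris == dilated              # pixels left unchanged are local maxima
strong = harris > 0.01 * harris.max()      # keep only strong corner responses
corners = np.argwhere(local_max & strong)  # (row, col) coordinates of the corners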
Example #2
def draw_subdiv_point(img, fp, color):
    cv.Circle(img, (cv.Round(fp[0]), cv.Round(fp[1])), 3, color, cv.CV_FILLED,
              8, 0)
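
A hedged usage sketch for draw_subdiv_point; the image size, point, and color below are made-up values:

img = cv.CreateImage((320, 240), cv.IPL_DEPTH_8U, 3)  # illustrative canvas
draw_subdiv_point(img, (160.4, 119.7), cv.CV_RGB(0, 255, 0))  # filled dot at (160, 120)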
Example #3
    def draw(self, img, pixmapper, bounds):
        '''draw the trail'''
        for p in self.points:
            (px, py) = pixmapper(p)
            if 0 <= px < img.width and 0 <= py < img.height:
                cv.Circle(img, (px, py), 1, self.colour)
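
The pixmapper callable is supplied by the caller and maps a trail point to pixel coordinates; a purely hypothetical linear implementation, assuming points are (lat, lon) pairs and bounds is (lat, lon, lat_extent, lon_extent):

def make_pixmapper(bounds, width, height):
    (lat, lon, lat_extent, lon_extent) = bounds
    def pixmapper(p):
        # map longitude to x and latitude to y (north at the top)
        px = int((p[1] - lon) * width / lon_extent)
        py = int((lat + lat_extent - p[0]) * height / lat_extent)
        return (px, py)
    return pixmapper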
Example #4
    def run(self):
        # Initialize
        log_file_name = "tracker_output.log"
        log_file = open( log_file_name, 'a' )
        #fps = 25

        #cap = cv2.VideoCapture( '../000104-.avi' )

        frame = cv.QueryFrame( self.capture )
        frame_size = cv.GetSize( frame )
        foreground = cv.CreateImage(cv.GetSize(frame),8,1)
        foremat = cv.GetMat(foreground)
        Nforemat = numpy.array(foremat, dtype=numpy.float32)
        gfilter = sys.argv[2]
        gfilter_string = gfilter
        gfilter = float(gfilter)
        print "Processing Tracker with filter: " + str(gfilter)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame( self.capture )

        # Create Background Subtractor
        fgbg = cv2.BackgroundSubtractorMOG()

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_8U, 1 )


        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_32F, 3 )
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage( display_image )


        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)


        # The difference between the running average and the current frame:
        difference = cv.CloneImage( display_image )


        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []
        fps = 25


        t0 = time.time()


        # For toggling display:
        image_list = [ "camera", "shadow", "white", "threshold", "display", "yellow" ]
        image_index = 0   # Index into image_list




        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
        text_coord = ( 5, 15 )
        text_color = cv.CV_RGB(255,255,255)
        text_coord2 = ( 5, 30 )
        text_coord3 = ( 5, 45 )

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        #haar_cascade = cv.Load( 'C:/OpenCV2.2/data/haarcascades/haarcascade_frontalface_alt.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )


        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 20


        while True:


            # Capture frame from webcam
            camera_image = cv.QueryFrame( self.capture )
            #ret, frame = cap.read()

            frame_count += 1
            frame_t0 = time.time()
            mat = cv.GetMat(camera_image)
            Nmat = numpy.array(mat, dtype=numpy.uint8)


            # Create an image with interactive feedback:
            display_image = cv.CloneImage( camera_image )

            # NEW INSERT - FGMASK
            fgmask = fgbg.apply(Nmat, Nforemat, -1)  # learning rate -1 = chosen automatically
            fgmask = cv.fromarray(fgmask)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage( display_image )


            # Smooth to get rid of false positives
            cv.Smooth( color_image, color_image, cv.CV_GAUSSIAN, 19, 0 )  # Gaussian blur, 19-wide kernel


            # Experimental Canny/Hough edge detection (disabled):

#            color_image = numpy.asarray( cv.GetMat( color_image ) )
#            (mu, sigma) = cv2.meanStdDev(color_image)
#            edges = cv2.Canny(color_image, mu - sigma, mu + sigma)
#            lines = cv2.HoughLines(edges, 1, pi / 180, 70)


            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg( color_image, running_average_image, gfilter, None )


            # Convert the scale of the moving average.
            cv.ConvertScale( running_average_image, running_average_in_display_color_depth, 1.0, 0.0 )


            # Subtract the current frame from the moving average.
            cv.AbsDiff( color_image, running_average_in_display_color_depth, difference )


            # Convert the image to greyscale.
            cv.CvtColor( difference, grey_image, cv.CV_RGB2GRAY )
            # Smooth Before thresholding
            cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 19 )
            # Threshold the image to a black and white motion mask:
            cv.Threshold( grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY )
            # Smooth and threshold again to eliminate "sparkles"
            #cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0 )  #changed from 19 - AND put smooth before threshold
            cv.Threshold( grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)


            grey_image_as_array = numpy.asarray( cv.GetMat( grey_image ) )
            non_black_coords_array = numpy.where( grey_image_as_array > 3 )
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip( non_black_coords_array[1], non_black_coords_array[0] )

            frame_hsv = cv.CreateImage(cv.GetSize(color_image),8,3)
            cv.CvtColor(color_image,frame_hsv,cv.CV_BGR2HSV)
            imgthreshold_yellow=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white2=cv.CreateImage(cv.GetSize(color_image),8,1)
            cv.InRangeS(frame_hsv,cv.Scalar(0,0,196),cv.Scalar(255,255,255),imgthreshold_white)  # changed scalar from 255,15,255 to 255,255,255
            cv.InRangeS(frame_hsv,cv.Scalar(41,43,224),cv.Scalar(255,255,255),imgthreshold_white2)
            cv.InRangeS(frame_hsv,cv.Scalar(20,100,100),cv.Scalar(30,255,255),imgthreshold_yellow)
            #cvCvtColor(color_image, yellowHSV, CV_BGR2HSV)
            #lower_yellow = np.array([10, 100, 100], dtype=np.uint8)
            #upper_yellow = np.array([30, 255, 255], dtype=np.uint8)
            #mask_yellow = cv2.inRange(yellowHSV, lower_yellow, upper_yellow)
            #res_yellow = cv2.bitwise_and(color_image, color_image, mask_yellow = mask_yellow)


            points = []   # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []


            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours( grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE )

            i = 0
            while contour:

#                c = contour[i]
#                m = cv2.moments(c)
#                Area  = m['m00']
#                print( Area )

                bounding_rect = cv.BoundingRect( list(contour) )
                point1 = ( bounding_rect[0], bounding_rect[1] )
                point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )


                bounding_box_list.append( ( point1, point2 ) )
                polygon_points = cv.ApproxPoly( list(contour), mem_storage, cv.CV_POLY_APPROX_DP )


                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)


                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly( grey_image, [ list(polygon_points), ], cv.CV_RGB(255,255,255), 0, 0 )
                cv.PolyLine( display_image, [ polygon_points, ], 0, cv.CV_RGB(255,255,255), 1, 0, 0 )
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
#        if Area > 3000:
#            cv2.drawContours(imgrgb,[cnt],0,(255,255,255),2)
#            print(Area)

                i += 1
                contour = contour.h_next()




            # Find the average size of the bboxes (targets), then
            # remove any tiny bboxes (which are probably just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                # box is (point1, point2); left = top = 0 and right = bottom = 1 are
                # assumed tuple indices defined elsewhere in the script.
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append( box_width * box_height )


                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)


            average_box_area = 0.0
            if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)


            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]


                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area*0.1: trimmed_box_list.append( box )


            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )


            bounding_box_list = merge_collided_bboxes( trimmed_box_list )


            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )


            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len( bounding_box_list )


            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.


            if frame_t0 - last_target_change_t < .35:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0


            # Clip to the user-supplied maximum:
            estimated_target_count = min( estimated_target_count, max_targets )


            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.


            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []


            if len(points):


                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max( estimated_target_count, 1 )  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook


                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans( array( points ), k_or_guess )


                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = ( int(center_point[0]), int(center_point[1]) )
                    center_points.append( center_point )
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)


            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []


            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []


                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                       center_point[1] < box[bottom][1] and center_point[1] > box[top][1]:


                        # This point is within the box.
                        center_points_in_box.append( center_point )


                # Now see if there are more than one.  If so, merge them.
                if len( center_points_in_box ) > 1:
                    # Merge them:
                    x_list, y_list = [], []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])


                    average_x = int( float(sum( x_list )) / len( x_list ) )
                    average_y = int( float(sum( y_list )) / len( y_list ) )


                    trimmed_center_points.append( (average_x, average_y) )


                    # Record that they were removed:
                    removed_center_points += center_points_in_box


                if len( center_points_in_box ) == 1:
                    trimmed_center_points.append( center_points_in_box[0] ) # Just use it.


            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                    trimmed_center_points.append( center_point )


            # Draw what we found:
            #for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)


            # Determine if there are any new (or lost) targets:
            actual_target_count = len( trimmed_center_points )
            last_target_count = actual_target_count


            # Now build the list of physical entities (objects)
            this_frame_entity_list = []


            # An entity is list: [ name, color, last_time_seen, last_known_coords ]


            for target in trimmed_center_points:


                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}


                for entity in last_frame_entity_list:


                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]


                    distance = sqrt( pow(delta_x,2) + pow( delta_y,2) )
                    entity_distance_dict[ distance ] = entity


                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()


                for distance in distance_list:


                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[ distance ]


                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] ) #Commented Out 3/20/2016
                        continue


                    #print "Target %s pixel(b,g,r) : USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1]) # Commented Out 3/20/2016
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append( nearest_possible_entity )
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_count, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break


                if not entity_found:
                    # It's a new entity.
                    color = ( random.randint(0,255), random.randint(0,255), random.randint(0,255) )
                    name = hashlib.md5( str(frame_t0) + str(color) ).hexdigest()[:6]
                    last_time_seen = frame_t0
                    if imgthreshold_white[target[1],target[0]] == 0.0:
                        # It's a real detect (not a line marker)

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
			#print "gfilter is: %.2f" + gfilter
                        cv.SaveImage("image-test%s-%3f.png"%(new_entity[0],gfilter), display_image)
                    elif imgthreshold_white[target[1],target[0]] == 255.0:
                        # It's a white line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
			#print "gfilter is: %.2f" + gfilter
                        cv.SaveImage("white-line-image-test%s-%3f.png"%(new_entity[0],gfilter), display_image)
                    elif imgthreshold_yellow[target[1],target[0]] == 255.0:
                        # It's a yellow line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
                        cv.SaveImage("yellow-line-image-test%s.png"%(new_entity[0]), camera_image)

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.


            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_count, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append( entity )


            # For next frame:
            last_frame_entity_list = this_frame_entity_list


            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)




            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break


            # Toggle which image to show
            if chr(c) == 'd':
                image_index = ( image_index + 1 ) % len( image_list )


            image_name = image_list[ image_index ]


            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
            elif image_name == "shadow":
                image = fgmask
                cv.PutText( image, "No Shadow", text_coord, text_font, text_color )
            elif image_name == "white":
                #image = difference
                image = imgthreshold_white
                cv.PutText( image, "White Threshold", text_coord, text_font, text_color )
            elif image_name == "display":
                #image = display_image
                image = display_image
                cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
                cv.PutText( image, str(frame_t0), text_coord2, text_font, text_color )
                cv.PutText( image, str(frame_count), text_coord3, text_font, text_color )
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
                image = display_image  # Re-use display image here
                cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
            elif image_name == "yellow":
                # Do face detection
                #detect_faces( camera_image, haar_cascade, mem_storage )
                image = imgthreshold_yellow  # Re-use camera image here
                cv.PutText( image, "Yellow Threshold", text_coord, text_font, text_color )


            #cv.ShowImage( "Target", image )		Commented out 3/19

#            self.writer.write( image )
#            out.write( image );
#            cv.WriteFrame( self.writer, image );
#            if self.writer:
#                cv.WriteFrame( self.writer, image );
#                video.write( image );

            log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
#            if not self.writer:
#                delta_t = frame_t1 - frame_t0
#                if delta_t < ( 1.0 / 15.0 ): time.sleep( ( 1.0 / 15.0 ) - delta_t )

            if frame_count == 155740:
                cv2.destroyWindow("Target")
#                    cv.ReleaseVideoWriter()
#                    self.writer.release()
#                    log_file.flush()
                break

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float( frame_count ) / time_delta
        print "Got %d frames. %.1f s. %f fps." % ( frame_count, time_delta, processed_fps )
Example #5
        cv.InRangeS(hsv, (0, 140, 10), (170, 180, 60), thr)

        moments = cv.Moments(cv.GetMat(thr, 1), 0)

        area = cv.GetCentralMoment(moments, 0, 0)

        cv.Line(frame, (80, 0), (80, 120), (0, 0, 255), 3, 8, 0)

        if (area > 10000):

            x = cv.GetSpatialMoment(moments, 1, 0) / area
            y = cv.GetSpatialMoment(moments, 0, 1) / area

            #			overlay = cv.CreateImage(cv.GetSize(frame),8,3)

            cv.Circle(frame, (int(x), int(y)), 2, (255, 255, 255), 20)

            #			cv.Add(frame,overlay,frame)

            #			cv.Merge(thr,None,None,None,frame)
            if (int(x) < 80):
                value = 80 - int(x)
                cv.PutText(frame, "Left[" + str(value) + "]", (int(x), int(y)),
                           font, (255, 255, 0))

                x_count += 1

                if (x_count > 10):
                    print "Left Offset=", value

                    pub_string = "l" + chr(int(value))
Example #6
    moments = cv.Moments(tr, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    #there can be noise in the video so ignore objects with small areas
    if (area > 100000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area

        print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area)

        #create an overlay to mark the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(F), 8, 3)

        cv.Circle(overlay, (int(x), int(y)), 2, (255, 255, 255), 20)
        cv.Circle(tr, (int(x), int(y)), 50, (255, 255, 255), -20)
        cv.Add(F, overlay, F)
        #add the thresholded image back to the img so we can see what was
        #left after it was applied
        cv.Merge(tr, None, None, None, F)

    cv2.imshow('Color Tracking', f)
    cv2.imshow("thresh", thresh)

    if cv2.waitKey(25) == 27:
        break

cv2.destroyAllWindows()
c.release()
Example #7
    def run(self):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )
        
        print "hello"
        
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        
        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)
        
        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        
        
        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)
        
        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)
        
        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)
        
        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []
        
        t0 = time.time()
        
        # For toggling display:
        image_list = [ "camera", "difference", "threshold", "display", "faces" ]
        image_index = 3  # Index into image_list
    
    
        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        # ## Face detection stuff
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        haar_cascade = cv.Load('E:\\Softwares\\opencv\\data\\haarcascades\\haarcascade_frontalface_alt.xml')
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )
        
        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 5
        
        while True:
            
            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)
            
            frame_count += 1
            frame_t0 = time.time()
            
            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)
            
            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)
            
            # Use the Running Average as the static background            
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)
            
            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image, running_average_in_display_color_depth, 1.0, 0.0)
            
            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth, difference)
            
            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)
            
            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1], non_black_coords_array[0])
            
            points = []  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            
            while contour:
                
                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3])
                
                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage, cv.CV_POLY_APPROX_DP)
                
                # To track polygon points only (instead of every pixel):
                # points += list(polygon_points)
                
                # Draw the contours:
                # ##cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [ list(polygon_points), ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [ polygon_points, ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                # cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()
            
            
            # Find the average size of the bboxes (targets), then
            # remove any tiny bboxes (which are probably just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append(box_width * box_height)
                
                # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)
            
            average_box_area = 0.0
            if len(box_areas): average_box_area = float(sum(box_areas)) / len(box_areas)
            
            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                
                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1: trimmed_box_list.append(box)
            
            # Draw the trimmed box list:
            # for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )
                
            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1], cv.CV_RGB(0, 255, 0), 1)
            
            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)
            
            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.
            
            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0
            
            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)
            
            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.
            
            # Using the numpy values directly (treating all pixels as points):    
            points = non_black_coords_array
            center_points = []
            
            if len(points):
                
                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count, 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count: 
                    k_or_guess = codebook
                
                # points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)
                
                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    # cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)
            
            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.  
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []
                        
            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []
                
                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                       center_point[1] < box[bottom][1] and center_point[1] > box[top][1]:
                        
                        # This point is within the box.
                        center_points_in_box.append(center_point)
                
                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    x_list, y_list = [], []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])
                    
                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))
                    
                    trimmed_center_points.append((average_x, average_y))
                    
                    # Record that they were removed:
                    removed_center_points += center_points_in_box
                    
                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(center_points_in_box[0])  # Just use it.
            
            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)
            
            # Draw what we found:
            # for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)
            
            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count
            
            # Now build the list of physical entities (objects)
            this_frame_entity_list = []
            
            # An entity is list: [ name, color, last_time_seen, last_known_coords ]
            
            for target in trimmed_center_points:
            
                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}
                
                for entity in last_frame_entity_list:
                    
                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]
            
                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[ distance ] = entity
                
                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()
                
                for distance in distance_list:
                    
                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[ distance ]
                    
                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        # print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue
                    
                    # print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    # log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break
                
                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) + str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0
                    
                    new_entity = [ name, color, last_time_seen, target ]
                    this_frame_entity_list.append(new_entity)
                    # log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
            
            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.
            
            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    # log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)
            
            # For next frame:
            last_frame_entity_list = this_frame_entity_list
            
            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5, cv.CV_RGB(c[0], c[1], c[2]), 3)
            
            
            # print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
            
            # Toggle which image to show
#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#             
#             image_name = image_list[ image_index ]
#             
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )                
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )
                
                
            image1 = display_image
            detect_faces(camera_image, haar_cascade, mem_storage)
            image2 = camera_image
            
            cv.ShowImage("Target 1", image1)
            cv.ShowImage("Target 2", image2)
            
#             if self.writer: 
#                 cv.WriteFrame( self.writer, image );
            
            # log_file.flush()
            
            # If only using a camera, then there is no time.sleep() needed, 
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()
            

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)
                
                
                
                
                
                
            (cols, rows) = cv.GetSize(frame)  # GetSize() returns (width, height)
            image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)        
            cv.Copy(frame, image)
            cv.ShowImage("camera", frame)
            leftImage = cv.CreateImage((image.width, image.height), 8, 1)
            cv.CvtColor(image, leftImage, cv.CV_BGR2GRAY)
              
            frame2 = cv.QueryFrame(self.capture2)
            print type(frame2)
            image2 = cv.CreateImage(cv.GetSize(frame2), cv.IPL_DEPTH_8U, frame2.nChannels)        
            cv.Copy(frame2, image2)
            cv.ShowImage("camera2", frame2)
            rightImage = cv.CreateImage((image2.width, image2.height), 8, 1)
            cv.CvtColor(image2, rightImage, cv.CV_BGR2GRAY)
               
               
            disparity_left = cv.CreateMat(leftImage.height, leftImage.width, cv.CV_16S)
            disparity_right = cv.CreateMat(rightImage.height, rightImage.width, cv.CV_16S)
               
            # data structure initialization
            state = cv.CreateStereoGCState(16, 2)
#             print leftImage.width 
#             print leftImage.height
#             print rightImage.width
#             print rightImage.height
            
            # running the graph-cut algorithm
            cv.FindStereoCorrespondenceGC(leftImage, rightImage, disparity_left, disparity_right, state)
               
            disp_left_visual = cv.CreateMat(leftImage.height, leftImage.width, cv.CV_8U)
            cv.ConvertScale(disparity_left, disp_left_visual, -16)
        #     cv.Save("disparity.pgm", disp_left_visual);  # save the map
        #     
            # cut off objects farther away than the disparity threshold (120)
            cut(disp_left_visual, leftImage, 120)
               
            # cv.NamedWindow('Disparity map', cv.CV_WINDOW_AUTOSIZE)
            cv.ShowImage('Disparity map', disp_left_visual)     
#                 
                
            # track the maximum intensity value (the nearest object) and its location
            maxValue = 0
            maxPoint = None
                
            # now for all the moving object centers get the average intensity value
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
#                 cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
#                 cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
#                 cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
#                 cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)
                # cv.Avg(arr)
                print center_point
                print type(disp_left_visual)
                print cv.GetSize(disp_left_visual)
                pixel = cv.Get2D(disp_left_visual, center_point[1], center_point[0])
                print "value " + str(pixel)
                value = pixel[0] + pixel[1] + pixel[2]
                if value > maxValue:
                    maxValue = value
                    maxPoint = center_point
            
            print "min value is " + str(maxValue)
            print " min point is " + str(maxPoint)
            
            if maxPoint is not None:
                cv.Circle(display_image, maxPoint, 17, cv.CV_RGB(100, 100, 100), 1)
                
                #distance = 1 / maxValue
                # find if it is right or left or in the middle
                
                if maxValue > 100:
                
                    middle = frame_size[0] / 2  # half the frame width
                    if maxPoint[0] > middle + 20:
                        message = "look left"
                    elif maxPoint[0] < middle - 20:
                        message = "look right"
                    else:
                        message = "look middle"
                        
                    print "message " + message
                
                
                
                
                
            
        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta, processed_fps)
Example #8
            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                def ptptdist(p0, p1):
                    dx = p0[0] - p1[0]
                    dy = p0[1] - p1[1]
                    return dx**2 + dy**2

                if features and min([ptptdist(pt, p) for p in features]) < 25:
                    # too close (within 5 pixels; ptptdist returns the squared distance)
                    add_remove_pt = 0

            # draw the points as green circles
            for the_point in features:
                cv.Circle(image, (int(the_point[0]), int(the_point[1])), 3,
                          (0, 255, 0, 0), -1, 8, 0)

        if add_remove_pt:
            # we want to add a point
            # refine this corner location and append it to 'features'

            features += cv.FindCornerSubPix(
                grey, [pt], (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
            # we are no longer in "add_remove_pt" mode
            add_remove_pt = False

        # swapping
        prev_grey, grey = grey, prev_grey
        prev_pyramid, pyramid = pyramid, prev_pyramid
        need_to_init = False
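
For context, the swapped buffers feed the tracking call on the next pass; this is the cv.CalcOpticalFlowPyrLK call the loop is built around, shown with the usual lkdemo defaults (the flags variable is assumed to be maintained elsewhere):

features, status, track_error = cv.CalcOpticalFlowPyrLK(
    prev_grey, grey, prev_pyramid, pyramid, features,
    (win_size, win_size), 3,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
    flags)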
Example #9
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC  # get current time in seconds
    size = cv.GetSize(img)  # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)  # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY)  # convert frame to grayscale
    idx2 = (last + 1) % N  # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh)  # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1,
                 cv.CV_THRESH_BINARY)  # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION)  # update MHI
    cv.CvtScale(mhi, mask, 255. / MHI_DURATION,
                (MHI_DURATION - timestamp) * 255. / MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100:  # reject very small components
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(
                orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(
                silh_roi, None, cv.CV_L1,
                None)  # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2),
                      (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude * 1.2), color, 3,
                      cv.CV_AA, 0)
            cv.Line(
                dst, center,
                (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                 cv.Round(center[1] -
                          magnitude * sin(angle * cv.CV_PI / 180))), color, 3,
                cv.CV_AA, 0)
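
update_mhi() relies on module-level globals that are not shown; the values below match the defaults in OpenCV's motempl sample, but treat them as assumptions for this excerpt:

CLOCKS_PER_SEC = 1.0   # time.clock() already returns seconds
MHI_DURATION = 1       # seconds of motion history to keep
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4                  # number of cyclic frame buffers
buf = [None] * N
last = 0
mhi = orient = segmask = mask = storage = None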
Example #10
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + abs(
            int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            prev_points[k] = prev_points[i]
            curr_points[k] = curr_points[i]
            k += 1

    prev_points = prev_points[:k]
    curr_points = curr_points[:k]
    #At the end, only the interesting (moving) points are kept

    #Redraw all previously kept lines; otherwise they would be lost on the next frame
    for (pt1, pt2) in lines:
        cv.Line(frame, pt1, pt2, (255, 255, 255))

    #Draw a line between each point's position at t-1 and at t
    for prevpoint, point in zip(prev_points, curr_points):
        prevpoint = (int(prevpoint[0]), int(prevpoint[1]))
        cv.Circle(frame, prevpoint, 15, 0)
        point = (int(point[0]), int(point[1]))
        cv.Circle(frame, point, 3, 255)
        cv.Line(frame, prevpoint, point, (255, 255, 255))
        lines.append(
            (prevpoint, point))  #Append the current line to the lines list

    cv.Copy(gray, prev_gray)  #Copy the current frame into prev_gray
    prev_points = curr_points

    cv.ShowImage("The Video", frame)
    #cv.WriteFrame(writer, frame)
    cv.WaitKey(wait)