Code Example #1
    def run(self):

        copy = cv.CloneImage(self.image)

        while True:
            if self.drag_start and is_rect_nonzero(self.selection):
                copy = cv.CloneImage(self.image)
                sub = cv.GetSubRect(copy, self.selection)  # Get the selected sub-region

                # Darken the whole frame, then restore the selection so it stands out
                save = cv.CloneMat(sub)

                cv.ConvertScale(copy, copy, 0.5)
                cv.Copy(save, sub)

                # Draw a temporary rectangle around the selection
                x, y, w, h = self.selection
                cv.Rectangle(copy, (x, y), (x + w, y + h), (255, 255, 255))

            cv.ShowImage("Image", copy)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603 or c == 10:  # Break on Esc (raw or with modifier bits) or Enter
                break
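
This snippet (and several below) relies on an is_rect_nonzero helper and on mouse state (self.drag_start, self.selection) that the excerpts never define. A minimal sketch of the missing pieces, modeled on the stock OpenCV camshift.py sample; the window name and exact behavior are assumptions:

def is_rect_nonzero(r):
    # A selection is usable only if it has positive width and height.
    (_, _, w, h) = r
    return (w > 0) and (h > 0)

    # Inside the demo class; assumed to be registered with
    # cv.SetMouseCallback("Image", self.on_mouse).
    def on_mouse(self, event, x, y, flags, param):
        if event == cv.CV_EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if event == cv.CV_EVENT_LBUTTONUP:
            self.drag_start = None
            self.track_window = self.selection
        if self.drag_start:
            xmin = min(x, self.drag_start[0])
            ymin = min(y, self.drag_start[1])
            xmax = max(x, self.drag_start[0])
            ymax = max(y, self.drag_start[1])
            self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)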
Code Example #2
def Harris(image):
    gray = cv.CreateImage(cv.GetSize(image), 8, 1)
    harris = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)

    # CornerHarris needs a single-channel input; without this conversion
    # the detector runs on an empty 'gray' image.
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)
    cv.CornerHarris(gray, harris, 5, 5, 0.1)

    #for y in range(0, cv.GetSize(image)[1]):
    #    for x in range(0, cv.GetSize(image)[0]):
    #        response = cv.Get2D(harris, y, x)  # corner response at (x, y)
    #        # check the corner detector response
    #        if response[0] > 10e-06:
    #            # draw a small circle on the original image
    #            cv.Circle(image, (x, y), 2, cv.RGB(155, 0, 25))

    # Use cv.MinMaxLoc to find the min and max responses, then use
    # cv.ConvertScale to shift the range [min..max] to [0..255] in an
    # 8-bit image.
    (min_val, max_val, _, _) = cv.MinMaxLoc(harris)
    print min_val, max_val
    gray2 = cv.CreateImage(cv.GetSize(image), 8, 1)
    scale = 255.0 / (max_val - min_val) if max_val > min_val else 1.0
    cv.ConvertScale(harris, gray2, scale, -min_val * scale)

    cv.NamedWindow('Harris')
    cv.ShowImage('Harris', gray2)  # show the scaled response, not the input
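
A minimal way to exercise this function; the input file name is a placeholder:

import cv  # old OpenCV 1.x Python bindings

image = cv.LoadImage('input.jpg')  # hypothetical input file
Harris(image)
cv.WaitKey(0)  # keep the window open until a key is pressed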
Code Example #3
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        i = 1
        o_x = 0
        o_y = 0
        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)
                #print track_box
                # The ellipse center drives an external VTK camera
                # (cam, ren1, renWin are assumed to be set up elsewhere).
                trace_val = track_box[0]
                f_x = trace_val[0]
                f_y = trace_val[1]
                print 'value1', f_x
                print 'value2', f_y
                # Refresh the reference point every 10 frames.
                if i % 10 == 0:
                    o_x = f_x
                    o_y = f_y
                if f_x != o_x:
                    # round() returns the rounded value; the original discarded it.
                    a = round((f_x - o_x) / 10.0)
                    cam.Azimuth(-a)
                if f_y != o_y:
                    a = round((f_y - o_y) / 10.0)
                    cam.Elevation(-a)
                ren1.ResetCameraClippingRange()
                renWin.Render()
                i += 1

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
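
The loop calls self.hue_histogram_as_image, which the excerpt does not include. A sketch of the version in the stock OpenCV camshift.py sample, which renders the hue histogram as colored bars (the original author's helper may differ):

    def hue_histogram_as_image(self, hist):
        # Render the hue histogram into a 320x200 BGR image.
        histimg_hsv = cv.CreateImage((320, 200), 8, 3)

        mybins = cv.CloneMatND(hist.bins)
        cv.Log(mybins, mybins)  # log scale so small bins stay visible
        (_, hi, _, _) = cv.MinMaxLoc(mybins)
        cv.ConvertScale(mybins, mybins, 255. / hi)

        w, h = cv.GetSize(histimg_hsv)
        hdims = cv.GetDims(mybins)[0]
        for x in range(w):
            xh = (180 * x) / (w - 1)  # hue sweeps from 0 to 180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            cv.Rectangle(histimg_hsv, (x, 0), (x, h - val), (xh, 255, 64), -1)
            cv.Rectangle(histimg_hsv, (x, h - val), (x, h), (xh, 255, 255), -1)

        histimg = cv.CreateImage((320, 200), 8, 3)
        cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
        return histimg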
Code Example #4
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        print "hitting run section"
        x = 0
        while True:
            #print x
            #x = x + 1
            frame = cv.QueryFrame(self.capture)
            cv.Flip(frame, frame, 1)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                print self.track_window
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect
                print self.track_window
            try:
                # Print the center (x, y) of the tracked ellipse and decide
                # which way to move (assumes a 640-pixel-wide frame).
                coord = track_box[0]
                print "center = {}".format(coord)
                if coord[0] < 320:
                    print "move right"
                    # ser.write("R")
                elif coord[0] == 320:
                    print "do nothing"
                else:
                    print "move left"
                    # ser.write("L")
            except UnboundLocalError:
                # track_box is only bound once tracking has started.
                print "track_box not set yet"

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                print track_box
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
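
The commented-out ser.write calls suggest the center offset was meant to drive a serially connected motor controller. A minimal pyserial setup that would make them work; the port name, baud rate, and command bytes are assumptions:

import serial  # pyserial

ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)  # hypothetical port and baud
ser.write("R")  # e.g. pan right; the command protocol is device-specific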
Code Example #5
    def run(self):
        # Capture first frame to get size
        frame = self.get_image2()
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(frame_size, 8, 3)
        grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            # GetSize returns (width, height); these two values are unused below.
            frame_width = cv.GetSize(frame)[0]
            frame_height = cv.GetSize(frame)[1]
            print "getting Image"
            color_image = self.get_image2()
            print "got image"

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.30, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Take the absolute difference between the current frame and the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            if len(points):
                # Pairwise midpoint reduction: a cheap approximation of the center, not a true centroid.
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
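
self.get_image2 is not part of the excerpt; presumably it returns one frame per call. A minimal sketch, assuming self.capture was created with cv.CaptureFromCAM in __init__:

    def get_image2(self):
        # Assumed helper: grab the next frame from the capture device.
        frame = cv.QueryFrame(self.capture)
        if frame is None:
            raise IOError("failed to read a frame from the capture device")
        return frame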
Code Example #6
import cv
import cv2
import numpy as np


def detectObject(filename):
    img = cv.LoadImage(filename)

    # Get the color histogram.
    #     im32f = np.zeros((img.shape[:2]), np.uint32)
    hist_range = [[0, 256], [0, 256], [0, 256]]
    im32f = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32F, 3)
    cv.ConvertScale(img, im32f)

    hist = cv.CreateHist([32, 32, 32], cv.CV_HIST_ARRAY, hist_range, 3)

    # Create the three color-plane images.
    b = cv.CreateImage(cv.GetSize(im32f), cv.IPL_DEPTH_32F, 1)
    g = cv.CreateImage(cv.GetSize(im32f), cv.IPL_DEPTH_32F, 1)
    r = cv.CreateImage(cv.GetSize(im32f), cv.IPL_DEPTH_32F, 1)

    # Create the back-projection images (32f and 8u).
    backproject32f = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32F, 1)
    backproject8u = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)

    # Binary image, plus a scratch image for the in-place morphology below.
    bw = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    scratch = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)

    # Elliptical structuring element (old-API constant, not cv2.MORPH_ELLIPSE).
    kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
    cv.Split(im32f, b, g, r, None)

    planes = [b, g, r]
    cv.CalcHist(planes, hist)

    # Find the min and max histogram bins.
    minval, maxval, min_idx, max_idx = cv.GetMinMaxHistValue(hist)

    # Threshold the histogram: bin values below the threshold are set to zero.
    cv.ThreshHist(hist, maxval / 32.0)

    # Back-project the thresholded histogram. The back projection should
    # contain higher values for the background and lower values for the
    # foreground.
    cv.CalcBackProject(planes, backproject32f, hist)

    # Convert to 8u, scaling by the back projection's own maximum.
    val_min, val_max, idx_min, idx_max = cv.MinMaxLoc(backproject32f)
    cv.ConvertScale(backproject32f, backproject8u, 255.0 / val_max)

    # Threshold the back-projected image; this gives us the background.
    cv.Threshold(backproject8u, bw, 10, 255, cv.CV_THRESH_BINARY)

    # Some morphology on the background.
    cv.Dilate(bw, bw, kernel, 1)
    cv.MorphologyEx(bw, bw, scratch, kernel, cv.CV_MOP_CLOSE, 2)

    # Invert to get the foreground.
    cv.SubRS(bw, cv.Scalar(255, 255, 255), bw)
    cv.MorphologyEx(bw, bw, scratch, kernel, cv.CV_MOP_OPEN, 2)
    cv.Erode(bw, bw, kernel, 1)

    # Find contours of the foreground, then refine with GrabCut.
    size = cv.GetSize(bw)
    color = np.asarray(img[:, :])
    fg = np.asarray(bw[:, :])
    #     mask = cv.CreateMat(size[1], size[0], cv2.CV_8UC1)

    # Anywhere black in the mask is treated as probable background,
    # anywhere white as definite foreground.
    rect = (0, 0, 0, 0)

    mat_mask = np.zeros((size[1], size[0]), dtype='uint8')
    mat_mask[:, :] = fg

    mat_mask[mat_mask[:, :] == 0] = 2    # probable background
    mat_mask[mat_mask[:, :] == 255] = 1  # definite foreground

    # Model containers for grabCut (13 floats per component, 5 components).
    bgdModel = np.zeros((1, 13 * 5))
    fgdModel = np.zeros((1, 13 * 5))
    # grabCut takes an iteration count before the mode flag.
    cv2.grabCut(color, mat_mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)

    # Multiply the new mask by the original image to get the cut.
    mask2 = np.where((mat_mask == 0) | (mat_mask == 2), 0, 1).astype('uint8')
    gcfg = mask2

    img_cut = color * mask2[:, :, np.newaxis]

    contours, hierarchy = cv2.findContours(gcfg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        print cnt
        rect_box = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect_box)
        box = np.int0(box)
        cv2.drawContours(color, [box], 0, (0, 0, 255), 2)
    cv2.imshow('demo', color)
    cv2.waitKey(0)
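
A minimal call; the file name is a placeholder:

detectObject('scene.jpg')  # hypothetical input image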
Code Example #7
	def detect(self):	

		self.log("Start detection thread")
		### open cam ###
		cap = vc.VideoCapture(self._videoInput, self).capture
		if cap is None:
			self.log("Can't open camera!")
			self.active = False
			return
		
		cap.setFPS(self._fps)
		cap.setVideoSize(self._videoSize)
		cap.setBin(self._bin)
		cap.setGain(self._gain)
		
		
		### init ###
		frame, ts = cap.getFrame()
		
		if frame is None:
			self.log("Error reading first frame")
			self.active = False
			return
		frameSize = cv.GetSize(frame)
		
		darkframe = None
		if self._darkframe:
			try:
				darkframe = cv.LoadImage(self._darkframe)
				if frameSize != cv.GetSize(darkframe):
					darkframe = None
					self.log("Darkframe has wrong size")
			except:
				self.log("Darkframe not found")
				
		mask = None
		'''
		if self._mask:
			try:
				tmp = cv.LoadImage(self._mask)
				mask = cv.CreateImage( frameSize, cv.IPL_DEPTH_8U, 1)
				cv.CvtColor( tmp, mask, cv.CV_RGB2GRAY )
				if frameSize != cv.GetSize(mask):
					raise Exception()
			except:
				self.log("Mask not found or wrong size")
		'''
		small, smallSize = self.workingThumb(frame, frameSize)
		runAvg = cv.CreateImage( smallSize, cv.IPL_DEPTH_32F, small.channels)
		runAvgDisplay = cv.CreateImage(smallSize, small.depth, small.channels)
		differenceImg = cv.CreateImage(smallSize, small.depth, small.channels)
		grayImg = cv.CreateImage(smallSize, cv.IPL_DEPTH_8U, 1)
		historyBuffer = imagestack.Imagestack(self._prevFrames)
		capbuf = None
		videoGap = self._maxVideoGap
		postFrames = 0
		frameCount = 1
		detect = False
		newVideo = True
		
		### testwindow
		if self._showWindow:
			self.log("Show window")
			cv.NamedWindow("Thread " + str(self._thread), 1)
			
		### server
		if self._runServer:
			self.log("Start Server on port %d" % self._serverPort)
			self._server = httpserver.httpserver(self._serverPort)

		### capture loop ###
		while self._run:
			frame, ts = cap.getFrame()
			if ts is None:
				ts = time.time()
				
			### create small image for detection
			small, smallSize = self.workingThumb(frame, frameSize)
			
			videoGap += 1

			if small:
				frameCount += 1

				# Start detecting once the running average has seen roughly
				# 1/avgLevel frames.
				if 1 / float(frameCount) < self._avgLevel and not detect:
					self.log("start detection")
					detect = True
				
				### subtract darkframe
				if darkframe:
					cv.Sub( frame, darkframe, frame )
	
				if self._deNoiseLevel > 0:
					cv.Smooth( small, small, cv.CV_MEDIAN, self._deNoiseLevel)
				cv.RunningAvg( small, runAvg, self._avgLevel, mask )
				cv.ConvertScale( runAvg, runAvgDisplay, 1.0, 0.0 )
				cv.AbsDiff( small, runAvgDisplay, differenceImg )
				
				if differenceImg.depth == grayImg.depth:
					grayImg = differenceImg
				else:
					cv.CvtColor( differenceImg, grayImg, cv.CV_RGB2GRAY )
				cv.Threshold( grayImg, grayImg, self._detectThresh, 255, cv.CV_THRESH_BINARY )
				contour = cv.FindContours( grayImg, cv.CreateMemStorage(0), cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)	
				

				## draw bounding rect
				while contour:
					bounding_rect = cv.BoundingRect( list(contour) )
					area = bounding_rect[2]*bounding_rect[3]
					if area > self._minRectArea and area < self._maxRectArea and detect:
						videoGap = 0
						#print(str(area))
						self.log("motion detected...")
						if self._drawRect:
							self.drawBoundingRect(frame, bounding_rect, smallSize = smallSize)

					contour = contour.h_next()
		
				## add text notations
				ms = "%04d" % int( (ts-int(ts)) * 10000 )
				t = time.strftime("%Y-%m-%d %H:%M:%S." + ms + " UTC", time.gmtime(ts))
				frame = self.addText(frame, self._texttl, t)

				
				## save / show frame
				if videoGap < self._maxVideoGap:
					if newVideo:
						self.log("Found motion, start capturing")
						capbuf = []
						newVideo = False						
						directory = os.path.join(self._videoDir,
											"%d/%02d/%s" % (time.gmtime(ts).tm_year, time.gmtime(ts).tm_mon, t))

						if not self.isWriteable(directory):
							self.log(directory + " is not writeable!")
							self._run = False
							continue

						capbuf.extend(historyBuffer.getImages())

					capbuf.append({'img' : frame, 'time' : t})
				else:
					if postFrames < self._postFrames and not newVideo:
						capbuf.append({'img' : frame, 'time' : t})
						postFrames += 1
					elif not newVideo:
						self.log("Stop capturing")
						### write images to hdd in new thread ###
						thread.start_new(self.saveVideo, (directory, capbuf))
						capbuf = None
						postFrames = 0
						newVideo = True


				######## Add Frame to history buffer ########
				historyBuffer.add(cv.CloneImage( frame ), t)


				######## Window ########
				if self._showWindow:
					cv.ShowImage("Thread " + str(self._thread), frame)
					cv.ShowImage("Thread %d avg" % self._thread, runAvgDisplay)
					cv.ShowImage("Thread %d diff" % self._thread, differenceImg)
					cv.WaitKey(1)
				
				######## Update Server ########
				if self._server:
					self._server.updateImage(cv.CloneImage( frame ))
				
				self.log("Proc: " + str(time.time() - ts))

			else:
				self.log("no more frames (" + str(frameCount) +" frames)")
				break

		self.log("Close camera")
		cap.close()
		
		if self._server:
			self.log("Close server")
			self._server.shutdown()
		
		self.log("end detection thread " + str(self._thread))
		self.active = False
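
self.workingThumb is called for every frame but is not shown. A plausible sketch, assuming it downsamples the frame for cheaper detection; the half-size scale factor is a guess:

	def workingThumb(self, frame, frameSize):
		# Assumed helper: shrink the frame to half size for detection.
		if frame is None:
			return None, None
		smallSize = (frameSize[0] / 2, frameSize[1] / 2)
		small = cv.CreateImage(smallSize, frame.depth, frame.channels)
		cv.Resize(frame, small, cv.CV_INTER_LINEAR)
		return small, smallSize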
Code Example #8
    def run(self):
        # Initialize
        log_file_name = "tracker_output.log"
        log_file = open( log_file_name, 'a' )
        #fps = 25

        #cap = cv2.VideoCapture( '../000104-.avi' )

        frame = cv.QueryFrame( self.capture )
        frame_size = cv.GetSize( frame )
        foreground = cv.CreateImage( cv.GetSize(frame), 8, 1 )
        foremat = cv.GetMat( foreground )
        Nforemat = numpy.array( foremat, dtype=numpy.float32 )
        gfilter = sys.argv[2]
        gfilter_string = gfilter
        gfilter = float( gfilter )
        print "Processing Tracker with filter: " + str(gfilter)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame( self.capture )

        # Create Background Subtractor
        fgbg = cv2.BackgroundSubtractorMOG()

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_8U, 1 )


        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_32F, 3 )
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage( display_image )


        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)


        # The difference between the running average and the current frame:
        difference = cv.CloneImage( display_image )


        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook=[]
        frame_count=0
        last_frame_entity_list = []
        fps = 25


        t0 = time.time()  # start time for the final FPS statistics


        # For toggling display:
        image_list = [ "camera", "shadow", "white", "threshold", "display", "yellow" ]
        image_index = 0   # Index into image_list




        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
        text_coord = ( 5, 15 )
        text_color = cv.CV_RGB(255,255,255)
        text_coord2 = ( 5, 30 )
        text_coord3 = ( 5, 45 )

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        #haar_cascade = cv.Load( 'C:/OpenCV2.2/data/haarcascades/haarcascade_frontalface_alt.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )


        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 20


        while True:


            # Capture frame from webcam
            camera_image = cv.QueryFrame( self.capture )
            #ret, frame = cap.read()

            frame_count += 1
            frame_t0 = time.time()
            mat = cv.GetMat(camera_image)
            Nmat = numpy.array(mat, dtype=numpy.uint8)


            # Create an image with interactive feedback:
            display_image = cv.CloneImage( camera_image )

            # NEW INSERT - FGMASK
            fgmask = fgbg.apply(Nmat,Nforemat,-1)
            fgmask = cv.fromarray(fgmask)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage( display_image )


            # Smooth to get rid of false positives
            cv.Smooth( color_image, color_image, cv.CV_GAUSSIAN, 19, 0 )


            # Smooth to get rid of false positives

#            color_image = numpy.asarray( cv.GetMat( color_image ) )
#            (mu, sigma) = cv2.meanStdDev(color_image)
#            edges = cv2.Canny(color_image, mu - sigma, mu + sigma)
#            lines = cv2.HoughLines(edges, 1, pi / 180, 70)


            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg( color_image, running_average_image, gfilter, None )


            # Convert the scale of the moving average.
            cv.ConvertScale( running_average_image, running_average_in_display_color_depth, 1.0, 0.0 )


            # Subtract the current frame from the moving average.
            cv.AbsDiff( color_image, running_average_in_display_color_depth, difference )


            # Convert the image to greyscale.
            cv.CvtColor( difference, grey_image, cv.CV_RGB2GRAY )
            # Smooth Before thresholding
            cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 19 )
            # Threshold the image to a black and white motion mask:
            cv.Threshold( grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY )
            # Smooth and threshold again to eliminate "sparkles"
            #cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0 )  #changed from 19 - AND put smooth before threshold
            cv.Threshold( grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)


            grey_image_as_array = numpy.asarray( cv.GetMat( grey_image ) )
            non_black_coords_array = numpy.where( grey_image_as_array > 3 )
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip( non_black_coords_array[1], non_black_coords_array[0] )

            frame_hsv = cv.CreateImage(cv.GetSize(color_image),8,3)
            cv.CvtColor(color_image,frame_hsv,cv.CV_BGR2HSV)
            imgthreshold_yellow=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white2=cv.CreateImage(cv.GetSize(color_image),8,1)
            cv.InRangeS(frame_hsv,cv.Scalar(0,0,196),cv.Scalar(255,255,255),imgthreshold_white)  # changed scalar from 255,15,255 to 255,255,255
            cv.InRangeS(frame_hsv,cv.Scalar(41,43,224),cv.Scalar(255,255,255),imgthreshold_white2)
            cv.InRangeS(frame_hsv,cv.Scalar(20,100,100),cv.Scalar(30,255,255),imgthreshold_yellow)
            #cvCvtColor(color_image, yellowHSV, CV_BGR2HSV)
            #lower_yellow = np.array([10, 100, 100], dtype=np.uint8)
            #upper_yellow = np.array([30, 255, 255], dtype=np.uint8)
            #mask_yellow = cv2.inRange(yellowHSV, lower_yellow, upper_yellow)
            #res_yellow = cv2.bitwise_and(color_image, color_image, mask_yellow = mask_yellow)


            points = []   # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []


            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours( grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE )

            i=0
            while contour:

#                c = contour[i]
#                m = cv2.moments(c)
#                Area  = m['m00']
#                print( Area )

                bounding_rect = cv.BoundingRect( list(contour) )
                point1 = ( bounding_rect[0], bounding_rect[1] )
                point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )


                bounding_box_list.append( ( point1, point2 ) )
                polygon_points = cv.ApproxPoly( list(contour), mem_storage, cv.CV_POLY_APPROX_DP )


                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)


                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly( grey_image, [ list(polygon_points), ], cv.CV_RGB(255,255,255), 0, 0 )
                cv.PolyLine( display_image, [ polygon_points, ], 0, cv.CV_RGB(255,255,255), 1, 0, 0 )
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
#        if Area > 3000:
#            cv2.drawContours(imgrgb,[cnt],0,(255,255,255),2)
#            print(Area)

                i=i+1
                contour = contour.h_next()




            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                # box is ( (x1, y1), (x2, y2) ) with (x1, y1) the top-left corner.
                box_width = box[1][0] - box[0][0]
                box_height = box[1][1] - box[0][1]
                box_areas.append( box_width * box_height )


                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)


            average_box_area = 0.0
            if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)


            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[1][0] - box[0][0]
                box_height = box[1][1] - box[0][1]


                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area*0.1: trimmed_box_list.append( box )


            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )


            bounding_box_list = merge_collided_bboxes( trimmed_box_list )


            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )


            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len( bounding_box_list )


            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.


            if frame_t0 - last_target_change_t < .35:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0


            # Clip to the user-supplied maximum:
            estimated_target_count = min( estimated_target_count, max_targets )


            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.


            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []


            if len(points):


                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max( estimated_target_count, 1 )  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook


                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans( array( points ), k_or_guess )


                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = ( int(center_point[0]), int(center_point[1]) )
                    center_points.append( center_point )
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)


            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []


            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []


                for center_point in center_points:
                    if center_point[0] < box[1][0] and center_point[0] > box[0][0] and \
                       center_point[1] < box[1][1] and center_point[1] > box[0][1]:


                        # This point is within the box.
                        center_points_in_box.append( center_point )


                # Now see if there are more than one.  If so, merge them.
                if len( center_points_in_box ) > 1:
                    # Merge them:
                    # Two separate lists; 'x_list = y_list = []' would alias one list.
                    x_list, y_list = [], []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])


                    average_x = int( float(sum( x_list )) / len( x_list ) )
                    average_y = int( float(sum( y_list )) / len( y_list ) )


                    trimmed_center_points.append( (average_x, average_y) )


                    # Record that they were removed:
                    removed_center_points += center_points_in_box


                if len( center_points_in_box ) == 1:
                    trimmed_center_points.append( center_points_in_box[0] ) # Just use it.


            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                    trimmed_center_points.append( center_point )


            # Draw what we found:
            #for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)


            # Determine if there are any new (or lost) targets:
            actual_target_count = len( trimmed_center_points )
            last_target_count = actual_target_count


            # Now build the list of physical entities (objects)
            this_frame_entity_list = []


            # An entity is list: [ name, color, last_time_seen, last_known_coords ]


            for target in trimmed_center_points:


                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}


                for entity in last_frame_entity_list:


                    entity_coords= entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]


                    distance = sqrt( pow(delta_x,2) + pow( delta_y,2) )
                    entity_distance_dict[ distance ] = entity


                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()


                for distance in distance_list:


                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[ distance ]


                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] ) #Commented Out 3/20/2016
                        continue


                    #print "Target %s pixel(b,g,r) : USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1]) # Commented Out 3/20/2016
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append( nearest_possible_entity )
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_count, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break


                if entity_found == False:
                    # It's a new entity.
                    color = ( random.randint(0,255), random.randint(0,255), random.randint(0,255) )
                    name = hashlib.md5( str(frame_t0) + str(color) ).hexdigest()[:6]
                    last_time_seen = frame_t0
                    if imgthreshold_white[target[1],target[0]] == 0.0:
                        # It's a real detect (not a line marker)

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
			#print "gfilter is: %.2f" + gfilter
                        cv.SaveImage("image-test%s-%3f.png"%(new_entity[0],gfilter), display_image)
                    elif imgthreshold_white[target[1],target[0]] == 255.0:
                        # It's a white line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
			#print "gfilter is: %.2f" + gfilter
                        cv.SaveImage("white-line-image-test%s-%3f.png"%(new_entity[0],gfilter), display_image)
                    elif imgthreshold_yellow[target[1],target[0]] == 255.0:
                        # It's a yellow line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
                        cv.SaveImage("yellow-line-image-test%s.png"%(new_entity[0]), camera_image)

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.


            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_count, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append( entity )


            # For next frame:
            last_frame_entity_list = this_frame_entity_list


            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)




            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break


            # Toggle which image to show
            if chr(c) == 'd':
                image_index = ( image_index + 1 ) % len( image_list )


            image_name = image_list[ image_index ]


            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
            elif image_name == "shadow":
                image = fgmask
                cv.PutText( image, "No Shadow", text_coord, text_font, text_color )
            elif image_name == "white":
                #image = difference
                image = imgthreshold_white
                cv.PutText( image, "White Threshold", text_coord, text_font, text_color )
            elif image_name == "display":
                #image = display_image
                image = display_image
                cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
                cv.PutText( image, str(frame_t0), text_coord2, text_font, text_color )
                cv.PutText( image, str(frame_count), text_coord3, text_font, text_color )
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
                image = display_image  # Re-use display image here
                cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
            elif image_name == "yellow":
                # Do face detection
                #detect_faces( camera_image, haar_cascade, mem_storage )
                image = imgthreshold_yellow  # Re-use camera image here
                cv.PutText( image, "Yellow Threshold", text_coord, text_font, text_color )


            #cv.ShowImage( "Target", image )		Commented out 3/19

#            self.writer.write( image )
#            out.write( image );
#            cv.WriteFrame( self.writer, image );
#            if self.writer:
#                cv.WriteFrame( self.writer, image );
#                video.write( image );

            log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
#            if not self.writer:
#                delta_t = frame_t1 - frame_t0
#                if delta_t < ( 1.0 / 15.0 ): time.sleep( ( 1.0 / 15.0 ) - delta_t )

            if frame_count == 155740:
                cv2.destroyWindow("Target")
#                    cv.ReleaseVideoWriter()
#                    self.writer.release()
#                    log_file.flush()
                break

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float( frame_count ) / time_delta
        print "Got %d frames. %.1f s. %f fps." % ( frame_count, time_delta, processed_fps )