Example #1
    def test_divider(self):
        image = cv.LoadImage(
            os.path.abspath(os.environ['BORG'] +
                            '/Brain/data/hog_test/noface.jpg'))
        subRect = (0, 0, 250, 187)
        subimage = self.divider.crop(image, subRect)
        subimage1 = image

        result = self.divider.divide(image, 2, 2)

        if cv.GetSize(subimage) != cv.GetSize(result[0]):
            self.fail(
                "The subimage sizes are not correct. Either correctly crop the image manually or check divider function"
            )

        dif = cv.CreateImage(cv.GetSize(subimage), cv.IPL_DEPTH_8U, 3)
        dif2 = cv.CreateImage(cv.GetSize(subimage), cv.IPL_DEPTH_8U, 3)

        cv.AbsDiff(subimage, result[0], dif)
        cv.Threshold(dif, dif2, 50, 255, cv.CV_THRESH_TOZERO)
        n_nonzero = 0
        for i in range(3):
            cv.SetImageCOI(dif2, i + 1)
            n_nonzero += cv.CountNonZero(dif2)  # accumulate over all three channels (the original overwrote it)

        self.assertLess(n_nonzero, 400, "The subimages are different")

        result = self.divider.divide(image, 4, 4, option="pro")
        print len(result)
        print result

        dif = cv.CreateImage(cv.GetSize(subimage1), cv.IPL_DEPTH_8U, 3)
        dif2 = cv.CreateImage(cv.GetSize(subimage1), cv.IPL_DEPTH_8U, 3)

        cv.AbsDiff(subimage1, result[0], dif)
        cv.Threshold(dif, dif2, 50, 255, cv.CV_THRESH_TOZERO)
        n_nonzero = 0
        for i in range(3):
            cv.SetImageCOI(dif2, i + 1)
            n_nonzero += cv.CountNonZero(dif2)  # accumulate over all three channels

        self.assertLess(n_nonzero, 400, "The subimages are different")

        result = self.divider.divide(image, 4, 4, option="pro", overlap=10)
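The test assumes a fixture that provides self.divider; a minimal setUp sketch, with the class name and construction purely hypothetical:

    def setUp(self):
        # Hypothetical fixture: an image divider exposing crop(image, rect)
        # and divide(image, rows, cols, option=..., overlap=...).
        self.divider = Divider()  # class name assumed; not shown in the snippet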
Example #2
File: lrf.py Project: Mnemonic7/lrf
def difference_image(img1, img2):
    print " simg1 = simplify(img1)"
    simg1 = simplify(img1)
    print " simg2 = simplify(img2)"
    simg2 = simplify(img2)

    #dbg_image('simg1',simg1)
    #dbg_image('simg2',simg2)

    #create image buffers
    img3 = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    simg3 = cv.CloneImage(img3)
    bitimage = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    eimg3 = cv.CloneImage(bitimage)

    #process
    print " cv.AbsDiff(simg2,simg1,img3)"
    cv.AbsDiff(simg2, simg1, img3)
    print " cv.Smooth(img3,simg3)"
    cv.Smooth(img3, simg3)
    #dbg_image('simg3',simg3)
    # these threshold values must be calibrated
    #cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_TOZERO_INV)
    print " cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_BINARY)"
    cv.Threshold(simg3, bitimage, 50, 255, cv.CV_THRESH_BINARY)
    #dbg_image('bitimage',bitimage)
    print " cv.Erode(bitimage,eimg3)"
    cv.Erode(bitimage, eimg3)
    #dbg_image('eimg3',eimg3)
    return eimg3
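A minimal usage sketch for difference_image; the file names are placeholders, and it assumes simplify() accepts the IplImages that LoadImage returns:

img1 = cv.LoadImage("frame_a.jpg")  # placeholder paths
img2 = cv.LoadImage("frame_b.jpg")
mask = difference_image(img1, img2)  # 8U single-channel mask, 255 where the frames differ
cv.SaveImage("difference_mask.jpg", mask)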
Example #3
    def processImage(self, curframe):
        cv.Smooth(curframe, curframe)  #Remove false positives

        if not self.absdiff_frame:  # First frame: initialize the difference, previous and average buffers
            self.absdiff_frame = cv.CloneImage(curframe)
            self.previous_frame = cv.CloneImage(curframe)
            cv.Convert(curframe, self.average_frame)  # RunningAvg works on 32F images, so convert
        else:
            cv.RunningAvg(curframe, self.average_frame,
                          0.05)  #Compute the average

        cv.Convert(self.average_frame,
                   self.previous_frame)  #Convert back to 8U frame

        cv.AbsDiff(curframe, self.previous_frame,
                   self.absdiff_frame)  # moving_average - curframe

        cv.CvtColor(self.absdiff_frame, self.gray_frame,
                    cv.CV_RGB2GRAY)  # Convert to grayscale so the result can be thresholded
        cv.Threshold(self.gray_frame, self.gray_frame, 50, 255,
                     cv.CV_THRESH_BINARY)

        cv.Dilate(self.gray_frame, self.gray_frame, None,
                  15)  #to get object blobs
        cv.Erode(self.gray_frame, self.gray_frame, None, 10)
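processImage relies on buffers created elsewhere in the class; a minimal sketch of that setup, with depths and channel counts inferred from how the buffers are used above:

    def __init__(self, first_frame):
        # Assumed initialization; names taken from processImage above.
        self.absdiff_frame = None
        self.previous_frame = None
        # RunningAvg needs a 32F accumulator; the threshold works on an 8U gray image.
        self.average_frame = cv.CreateImage(cv.GetSize(first_frame), cv.IPL_DEPTH_32F, 3)
        self.gray_frame = cv.CreateImage(cv.GetSize(first_frame), cv.IPL_DEPTH_8U, 1)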
Example #4
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
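update_mhi follows OpenCV's motempl.py Python sample, so the module-level names it references are expected to be defined roughly as in that sample:

import time
from math import sin, cos

# Globals assumed by update_mhi(), values roughly as in samples/python/motempl.py:
CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4              # number of cyclic frame buffers
buf = [None] * N
last = 0
mhi = None         # motion history image (32F)
orient = None      # orientation image
mask = None        # valid-orientation mask
segmask = None     # motion segmentation map
storage = None     # temporary memory storage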
Example #5
    def processImage(self, frame):
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

        #AbsDiff to get the difference between the two frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)

        #Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5, 5)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)
Example #6
	def process_image(self):
		"""Process the image from the camera in order to remove noise."""
		
		cv.CvtColor(self.colorFrame, self.currentGrayFrame, cv.CV_RGB2GRAY)
		
		# remove noise, etc.
		self.currentGrayFrame = self.reduce_image_noise(self.currentGrayFrame)
		
		# find the difference between the current and previous frame
		cv.AbsDiff(self.currentGrayFrame, self.previousGrayFrame, self.resultImage)
		
		# calculate the binary image that shows where there is change in the image
		cv.Threshold(self.resultImage, self.resultImage, 10, 255, cv.CV_THRESH_BINARY_INV)
		
		cv.Copy(self.currentGrayFrame, self.previousGrayFrame)
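process_image calls a reduce_image_noise helper that is not part of the snippet; a plausible stand-in (pure assumption) is a small Gaussian blur:

	def reduce_image_noise(self, frame):
		"""Hypothetical helper; the original implementation is not shown."""
		cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 0)
		return frame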
Example #7
    def motionDetect(self, img):
        cv.Smooth(img, img, cv.CV_GAUSSIAN, 3, 0)

        cv.RunningAvg(img, self.movingAvg, 0.020, None)
        cv.ConvertScale(self.movingAvg, self.tmp, 1.0, 0.0)
        cv.AbsDiff(img, self.tmp, self.diff)
        cv.CvtColor(self.diff, self.grayImage, cv.CV_RGB2GRAY)
        cv.Threshold(self.grayImage, self.grayImage, 70,255, cv.CV_THRESH_BINARY)
        cv.Dilate(self.grayImage, self.grayImage, None, 18)
        cv.Erode(self.grayImage, self.grayImage, None, 10)
        storage = cv.CreateMemStorage(0)
        contour = cv.FindContours(self.grayImage, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
#        points = []                                                                                      
        while contour:
            boundRect = cv.BoundingRect(list(contour))
            contour = contour.h_next()
            pt1 = (boundRect[0], boundRect[1])
            pt2 = (boundRect[0] + boundRect[2], boundRect[1] + boundRect[3])
            cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255,255,0), 1)

        return img
Example #8
    def processaImagem(self):
        """
        Crio uma imagem cinza a partir da atual para o programa ficar mais rapido, crio uma imagem com a
        diferenca da imagem anterior e a imagem atual, e binarizo a imagem cinza para filtrar pixels pequenos.
        """
        # Remove os falsos positivos.
        cv.Smooth(self.imagem_atual, self.imagem_atual)

        # Aqui eu coloco um tempo de execucao entre as imagens.
        cv.RunningAvg(self.imagem_atual, self.imagem_auxiliar, 0.05)

        # Covertendo de volta a imagem para poder trabalhar.
        cv.Convert(self.imagem_auxiliar, self.imagem_anterior)

        # Cria uma nova imagem com a diferenca entre a imagem anterior e a atual.
        cv.AbsDiff(self.imagem_atual, self.imagem_anterior, self.imagem_diferenca)

        # Converte a imagem atual em escala de cinza.
        cv.CvtColor(self.imagem_diferenca, self.imagem_cinza, cv.CV_RGB2GRAY)

        # Binariza a imagem. Para poder filtrar pixels pequenos.
        cv.Threshold(self.imagem_cinza, self.imagem_cinza, 50, 255, cv.CV_THRESH_BINARY)
Example #9
import cv2.cv as cv  # or simply: import cv

im = cv.LoadImage("../img/lena.jpg")
im2 = cv.LoadImage("../img/fruits-larger.jpg")
cv.ShowImage("Image1", im)
cv.ShowImage("Image2", im2)

res = cv.CreateImage(cv.GetSize(im2), 8, 3)

cv.Add(im, im2, res)  # Add the images pixelwise (dark pixels change little; bright areas saturate)
cv.ShowImage("Add", res)

cv.AbsDiff(im, im2, res)  # Per-pixel absolute difference |im(i) - im2(i)|
cv.ShowImage("AbsDiff", res)

cv.Mul(im, im2, res)  # Multiply pixelwise (result is mostly saturated white)
cv.ShowImage("Mult", res)

cv.Div(im, im2, res)  # Quotients are small, so the result is mostly black
cv.ShowImage("Div", res)

cv.And(im, im2, res)  # Bitwise AND of the pixels
cv.ShowImage("And", res)

cv.Or(im, im2, res)  # Bitwise OR of the pixels
cv.ShowImage("Or", res)

cv.Not(im, res)  # Bitwise NOT of the image
cv.ShowImage("Not", res)

cv.WaitKey(0)  # keep the windows open until a key is pressed (missing from the snippet)
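For comparison, the same per-pixel arithmetic in the array-based cv2 API; a sketch assuming the same image files (with matching sizes, as above) and an OpenCV build with numpy support:

import cv2

a = cv2.imread("../img/lena.jpg")
b = cv2.imread("../img/fruits-larger.jpg")
res = cv2.absdiff(a, b)  # same |a(i) - b(i)| as cv.AbsDiff above
cv2.imshow("AbsDiff (cv2)", res)
cv2.waitKey(0)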
Example #10
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        #nframes =+ 1

        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        def totuple(a):
            try:
                return tuple(totuple(i) for i in a)
            except TypeError:
                return a

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, .1, None)
                cv.ShowImage("BG", moving_average)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)
            #cv.ShowImage("BG",difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.ShowImage("BG1", grey_image)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 40, 255, cv.CV_THRESH_BINARY)
            #cv.ShowImage("BG2", grey_image)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 8)
            cv.Erode(grey_image, grey_image, None, 3)
            cv.ShowImage("BG3", grey_image)

            storage = cv.CreateMemStorage(0)
            global contour
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            points = []

            while contour:
                global bound_rect
                bound_rect = cv.BoundingRect(list(contour))
                polygon_points = cv.ApproxPoly(list(contour), storage,
                                               cv.CV_POLY_APPROX_DP)
                contour = contour.h_next()

                global pt1, pt2
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])

                # size control: keep only boxes larger than 10x10 px
                # (the original compared x - w and y - h, which does not measure size)
                if bound_rect[2] > 10 and bound_rect[3] > 10:

                    points.append(pt1)
                    points.append(pt2)

                    #points += list(polygon_points)
                    global box, box2, box3, box4, box5
                    box = cv.MinAreaRect2(polygon_points)
                    box2 = cv.BoxPoints(box)
                    box3 = np.int0(np.around(box2))
                    box4 = totuple(box3)
                    box5 = box4 + (box4[0], )

                    cv.FillPoly(grey_image, [
                        list(polygon_points),
                    ], cv.CV_RGB(255, 255, 255), 0, 0)
                    cv.PolyLine(color_image, [
                        polygon_points,
                    ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                    cv.PolyLine(color_image, [list(box5)], 0, (0, 0, 255), 2)
                    #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

                    if len(points):
                        #center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                        center1 = (pt1[0] + pt2[0]) / 2
                        center2 = (pt1[1] + pt2[1]) / 2
                        #print center1, center2, center_point
                        #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                        #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                        #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                        cv.Circle(color_image, (center1, center2), 5,
                                  cv.CV_RGB(0, 0, 255), -1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                #cv.DestroyAllWindows()
                break
Example #11
	def detect(self):	

		self.log("Start detection thread")
		### open cam ###
		cap = vc.VideoCapture(self._videoInput, self).capture
		if cap is None:
			self.log("Can't open camera!")
			self.active = False
			return
		
		cap.setFPS(self._fps)
		cap.setVideoSize(self._videoSize)
		cap.setBin(self._bin)
		cap.setGain(self._gain)
		
		
		### init ###
		frame, ts = cap.getFrame()
		
		if not frame:
			self.log("Error reading first frame")
			self.active = False
			return
		frameSize = cv.GetSize(frame)
		
		darkframe = None
		if self._darkframe:
			try:
				darkframe = cv.LoadImage(self._darkframe)
				if frameSize != cv.GetSize(darkframe):
					darkframe = None
					self.log("Darkframe has wrong size")
			except:
				self.log("Darkframe not found")
				
		mask = None
		'''
		if self._mask:
			try:
				tmp = cv.LoadImage(self._mask)
				mask = cv.CreateImage( frameSize, cv.IPL_DEPTH_8U, 1)
				cv.CvtColor( tmp, mask, cv.CV_RGB2GRAY )
				if frameSize != cv.GetSize(mask):
					raise Exception();
			except:
				self.log("Mask not found or wrong size")
		'''
		small, smallSize = self.workingThumb(frame, frameSize)
		runAvg = cv.CreateImage( smallSize, cv.IPL_DEPTH_32F, small.channels)
		runAvgDisplay = cv.CreateImage(smallSize, small.depth, small.channels)
		differenceImg = cv.CreateImage(smallSize, small.depth, small.channels)
		grayImg = cv.CreateImage(smallSize, cv.IPL_DEPTH_8U, 1)
		historyBuffer = imagestack.Imagestack(self._prevFrames)
		capbuf = None
		videoGap = self._maxVideoGap
		postFrames = 0
		frameCount = 1
		detect = False
		newVideo = True
		
		### testwindow
		if self._showWindow:
			self.log("Show window")
			cv.NamedWindow("Thread " + str(self._thread), 1)
			
		### server
		if self._runServer:
			self.log("Start Server on port %d" % self._serverPort)
			self._server = httpserver.httpserver(self._serverPort)

		### capture loop ###
		while self._run:
			frame, ts = cap.getFrame()
			if ts is None:
				ts = time.time()
				
			### create small image for detection
			small, smallSize = self.workingThumb(frame, frameSize)
			
			videoGap += 1

			if small:
				frameCount += 1

				if 1/float(frameCount) < self._avgLevel and not detect:
					self.log("start detection")
					detect = True
				
				### substract darkframe
				if darkframe:
					cv.Sub( frame, darkframe, frame )
	
				if self._deNoiseLevel > 0:
					cv.Smooth( small, small, cv.CV_MEDIAN, self._deNoiseLevel)
				cv.RunningAvg( small, runAvg, self._avgLevel, mask )
				cv.ConvertScale( runAvg, runAvgDisplay, 1.0, 0.0 )
				cv.AbsDiff( small, runAvgDisplay, differenceImg )
				
				if differenceImg.depth == grayImg.depth:
					grayImg = differenceImg
				else:
					cv.CvtColor( differenceImg, grayImg, cv.CV_RGB2GRAY )
				cv.Threshold( grayImg, grayImg, self._detectThresh, 255, cv.CV_THRESH_BINARY )
				contour = cv.FindContours( grayImg, cv.CreateMemStorage(0), cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)	
				

				## draw bounding rect
				while contour:
					bounding_rect = cv.BoundingRect( list(contour) )
					area = bounding_rect[2]*bounding_rect[3]
					if area > self._minRectArea and area < self._maxRectArea and detect:
						videoGap = 0
						#print(str(area))
						self.log("motion detected...")
						if self._drawRect:
							self.drawBoundingRect(frame, bounding_rect, smallSize = smallSize)

					contour = contour.h_next()
		
				## add text notations
				ms = "%04d" % int( (ts-int(ts)) * 10000 )
				t = time.strftime("%Y-%m-%d %H:%M:%S." + ms + " UTC", time.gmtime(ts))
				frame = self.addText(frame, self._texttl, t)

				
				## save / show frame
				if videoGap < self._maxVideoGap:
					if newVideo:
						self.log("Found motion, start capturing")
						capbuf = []
						newVideo = False						
						directory = os.path.join(self._videoDir,
											"%d/%02d/%s" % (time.gmtime(ts).tm_year, time.gmtime(ts).tm_mon, t))

						if not self.isWriteable(directory):
							self.log(directory + " is not writeable!")
							self._run = False
							continue

						capbuf.extend(historyBuffer.getImages())

					capbuf.append({'img' : frame, 'time' : t})
				else:
					if postFrames < self._postFrames and not newVideo:
						capbuf.append({'img' : frame, 'time' : t})
						postFrames += 1
					elif not newVideo:
						self.log("Stop capturing")
						### write images to hdd in new thread ###
						thread.start_new(self.saveVideo, (directory, capbuf))
						capbuf = None
						postFrames = 0
						newVideo = True


				######## Add Frame to history buffer ########
				historyBuffer.add(cv.CloneImage( frame ), t)


				######## Window ########
				if self._showWindow:
					cv.ShowImage("Thread " + str(self._thread), frame)
					cv.ShowImage("Thread %d avg" % self._thread, runAvgDisplay)
					cv.ShowImage("Thread %d diff" % self._thread, differenceImg)
					cv.WaitKey(1)
				
				######## Update Server ########
				if self._server:
					self._server.updateImage(cv.CloneImage( frame ))
				
				self.log("Proc: " + str(time.time() - ts))

			else:
				self.log("no more frames (" + str(frameCount) +" frames)")
				break

		self.log("Close camera")
		cap.close()
		
		if self._server:
			self.log("Close server")
			self._server.shutdown()
		
		self.log("end detection thread " + str(self._thread))
		self.active = False
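detect() also relies on a workingThumb helper that is not shown; a plausible stand-in (an assumption) returns a half-size copy of the frame for cheaper detection:

	def workingThumb(self, frame, frameSize):
		# Hypothetical helper: downscale the frame for the detection pipeline.
		smallSize = (frameSize[0] / 2, frameSize[1] / 2)
		small = cv.CreateImage(smallSize, frame.depth, frame.nChannels)
		cv.Resize(frame, small)
		return small, smallSize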
Example #12
    def run(self):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )

        print "hello"

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)

        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 3  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 5

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            cv.ShowImage("difference ", difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            #             cv.SegmentMotion(non_black_coords_array, None, storage, timestamp, seg_thresh)

            # print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show


#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#
#             image_name = image_list[ image_index ]
#
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )

            image1 = display_image

            cv.ShowImage("Target 1", image1)

            #             if self.writer:
            #                 cv.WriteFrame( self.writer, image );

            # log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
Example #13
import time
from subprocess import call
import cv2.cv as cv  # imports assumed; the snippet begins mid-script

start_time = time.time()

#take two pictures
call([
    "sudo raspistill -n -t 0 -h 324 -w 432 -o /home/pi/opencv_programs/Pictures/output.jpg"
],
     shell=True)
time.sleep(5)
call([
    "sudo raspistill -n -t 0 -h 324 -w 432 -o /home/pi/opencv_programs/Pictures/output2.jpg"
],
     shell=True)

#load two pictures and convert to greyscale
# load from the same paths raspistill wrote to (the original loaded from the working directory)
image1 = cv.CaptureFromFile("/home/pi/opencv_programs/Pictures/output.jpg")
image2 = cv.CaptureFromFile("/home/pi/opencv_programs/Pictures/output2.jpg")
query1 = cv.QueryFrame(image1)
query2 = cv.QueryFrame(image2)
grey1 = cv.CreateImage(cv.GetSize(query1), query1.depth, 1)
grey2 = cv.CreateImage(cv.GetSize(query2), query2.depth, 1)
cv.CvtColor(query1, grey1, cv.CV_RGB2GRAY)
cv.CvtColor(query2, grey2, cv.CV_RGB2GRAY)

#Absolute Difference of the two images
difference = cv.CreateImage(cv.GetSize(query1), query1.depth, 1)
cv.AbsDiff(grey1, grey2, difference)
cv.SaveImage("Difference.jpg", difference)

curr_time = time.time()
print curr_time - start_time
Example #14
# (assumed from context: the original script presumably opened a camera first,
#  e.g. capture = cv.CaptureFromCAM(0) and frame1 = cv.QueryFrame(capture))
frame1gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)
cv.CvtColor(frame1, frame1gray, cv.CV_RGB2GRAY)

res = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)

frame2gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)

w = frame2gray.width
h = frame2gray.height
nb_pixels = frame2gray.width * frame2gray.height

while True:
    frame2 = cv.QueryFrame(capture)
    cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY)

    cv.AbsDiff(frame1gray, frame2gray, res)
    cv.ShowImage("After AbsDiff", res)

    cv.Smooth(res, res, cv.CV_BLUR, 5, 5)
    element = cv.CreateStructuringElementEx(5 * 2 + 1, 5 * 2 + 1, 5, 5,
                                            cv.CV_SHAPE_RECT)
    cv.MorphologyEx(res, res, None, None, cv.CV_MOP_OPEN)
    cv.MorphologyEx(res, res, None, None, cv.CV_MOP_CLOSE)
    cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY_INV)

    cv.ShowImage("Image", frame2)
    cv.ShowImage("Res", res)

    # -----------
    nb = 0
    for y in range(h):
        for x in range(w):
            if res[y, x] == 0.0:  # pixel flagged as changed by the inverted threshold
                nb += 1  # (reconstruction: the snippet is truncated at this loop)
Example #15
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  # Surface area of the image
        cursurface = 0  # Hold the current surface that have changed

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                      0)  # Remove false positives

            if not difference:  # For the first time put values in difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020,
                              None)  # Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18)  # to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage,
                                       cv.CV_RETR_EXTERNAL,
                                       cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours  # Save contours

            while contours:  # For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            # Percentage of the total frame area covered by moving contours:
            avg = (cursurface * 100) / surface
            if avg > self.ceil:
                print("Something is moving !")
                ring = IntrusionAlarm()
                ring.run()

            # print avg,"%"
            cursurface = 0  # Put back the current surface to 0

            # Draw the contours on the image
            _red = (0, 0, 255)  # red for external contours
            _green = (0, 255, 0)  # green for internal contours
            levels = 1  # 1: contours drawn; 2: internal contours as well; 3: ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Virtual Eye", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
            elif c == 99:
                cv2.destroyWindow('Warning!!!')
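IntrusionAlarm is not part of the snippet; a trivial stand-in (purely hypothetical) that only reports the event:

class IntrusionAlarm:
    # Hypothetical stand-in; the real class presumably raises an audible or visual alarm.
    def run(self):
        print("Intrusion alarm triggered!")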
Example #16
    def run(self):
        # Capture first frame to get size
        frame = self.get_image2()
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(frame_size, 8, 3)
        grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]
            print "getting Image"
            color_image = self.get_image2()
            print "got image"

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.30, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            if len(points):
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Example #17
    def run(self):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )

        print "hello"

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)

        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 3  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 5

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            #             cv.ShowImage("background ", running_average_image)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            cv.ShowImage("difference ", difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)

            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            points = []  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            levels = 10
            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                # points += list(polygon_points)

                # Draw the contours:
                cv.DrawContours(color_image, contour, cv.CV_RGB(255, 0, 0),
                                cv.CV_RGB(0, 255, 0), levels, 3, 0, (0, 0))
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                # cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]  # y-extent (the original indexed x here)
                box_areas.append(box_width * box_height)

                # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]  # y-extent, as above

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            # for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count,
                                 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                # points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    # cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                       center_point[1] < box[bottom][1] and center_point[1] > box[top][1]:

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    x_list, y_list = [], []  # two separate lists (chained assignment would alias them)
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (center_point not in trimmed_center_points) and (
                        center_point not in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            # for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        # print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    # print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # update last_time_seen
                    nearest_possible_entity[3] = target  # update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    # log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    # log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    # log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            # print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show


#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#
#             image_name = image_list[ image_index ]
#
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )

            image1 = display_image

            cv.ShowImage("Target 1", image1)

            #             if self.writer:
            #                 cv.WriteFrame( self.writer, image );

            # log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
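Example #17 calls merge_collided_bboxes, which is defined elsewhere; a simple sketch of such a helper, assuming boxes are ((x1, y1), (x2, y2)) tuples as built above:

def merge_collided_bboxes(bbox_list):
    # Repeatedly replace any two overlapping boxes by their union
    # until no overlapping pair remains. (Sketch, not the original helper.)
    merged = True
    while merged:
        merged = False
        for i in range(len(bbox_list)):
            for j in range(i + 1, len(bbox_list)):
                (ax1, ay1), (ax2, ay2) = bbox_list[i]
                (bx1, by1), (bx2, by2) = bbox_list[j]
                if ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2:
                    bbox_list[j] = ((min(ax1, bx1), min(ay1, by1)),
                                    (max(ax2, bx2), max(ay2, by2)))
                    del bbox_list[i]
                    merged = True
                    break
            if merged:
                break
    return bbox_list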
Example #18
    def run(self):
        # Initialize
        log_file_name = "tracker_output.log"
        log_file = open(log_file_name, 'a')
        #fps = 25

        #cap = cv2.VideoCapture( '../000104-.avi' )

        frame = cv.QueryFrame( self.capture )
        frame_size = cv.GetSize( frame )
        foreground = cv.CreateImage(cv.GetSize(frame),8,1)
        foremat = cv.GetMat(foreground)
        Nforemat = numpy.array(foremat, dtype=numpy.float32)
        gfilter = sys.argv[2]
        gfilter_string = gfilter
        gfilter = float(gfilter)
        print "Processing Tracker with filter: " + str(gfilter)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame( self.capture )

        # Create Background Subtractor
        fgbg = cv2.BackgroundSubtractorMOG()

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_8U, 1 )


        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_32F, 3 )
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage( display_image )


        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)


        # The difference between the running average and the current frame:
        difference = cv.CloneImage( display_image )


        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook=[]
        frame_count=0
        last_frame_entity_list = []
        fps = 25


        t0 = time.time()  # Start time, used for the FPS summary at the end


        # For toggling display:
        image_list = [ "camera", "shadow", "white", "threshold", "display", "yellow" ]
        image_index = 0   # Index into image_list




        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
        text_coord = ( 5, 15 )
        text_color = cv.CV_RGB(255,255,255)
        text_coord2 = ( 5, 30 )
        text_coord3 = ( 5, 45 )

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        #haar_cascade = cv.Load( 'C:/OpenCV2.2/data/haarcascades/haarcascade_frontalface_alt.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )


        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 20


        while True:


            # Capture frame from webcam
            camera_image = cv.QueryFrame( self.capture )
            #ret, frame = cap.read()

            frame_count += 1
            frame_t0 = time.time()
            mat = cv.GetMat(camera_image)
            Nmat = numpy.array(mat, dtype=numpy.uint8)


            # Create an image with interactive feedback:
            display_image = cv.CloneImage( camera_image )

            # NEW INSERT - FGMASK
            fgmask = fgbg.apply(Nmat,Nforemat,-1)
            fgmask = cv.fromarray(fgmask)
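            # (fgbg.apply() returns an 8-bit foreground mask; a negative
            # learningRate, as passed here, tells OpenCV to pick the learning
            # rate automatically.)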

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage( display_image )


            # Smooth to get rid of false positives
            cv.Smooth( color_image, color_image, cv.CV_GAUSSIAN, 19, 0 )


            # (Disabled experiment: Canny edge detection + Hough line transform)

#            color_image = numpy.asarray( cv.GetMat( color_image ) )
#            (mu, sigma) = cv2.meanStdDev(color_image)
#            edges = cv2.Canny(color_image, mu - sigma, mu + sigma)
#            lines = cv2.HoughLines(edges, 1, pi / 180, 70)


            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg( color_image, running_average_image, gfilter, None )
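            # (For reference, a rough modern-cv2 equivalent of this
            # running-average step -- an assumption, not part of this script:
            #     cv2.accumulateWeighted(img, avg_f32, gfilter)
            #     background = cv2.convertScaleAbs(avg_f32)
            # where img and avg_f32 are hypothetical names for the current
            # frame and a float32 accumulator of the same size.)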


            # Convert the scale of the moving average.
            cv.ConvertScale( running_average_image, running_average_in_display_color_depth, 1.0, 0.0 )


            # Subtract the current frame from the moving average.
            cv.AbsDiff( color_image, running_average_in_display_color_depth, difference )


            # Convert the image to greyscale.
            cv.CvtColor( difference, grey_image, cv.CV_RGB2GRAY )
            # Smooth Before thresholding
            cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 19 )
            # Threshold the image to a black and white motion mask:
            cv.Threshold( grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY )
            # Smooth and threshold again to eliminate "sparkles"
            #cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0 )  #changed from 19 - AND put smooth before threshold
            cv.Threshold( grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)
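            # (After the first binary threshold the mask only holds 0 or 255,
            # so this second threshold at 240 is effectively a no-op; it only
            # mattered when a smooth ran between the two thresholds.)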


            grey_image_as_array = numpy.asarray( cv.GetMat( grey_image ) )
            non_black_coords_array = numpy.where( grey_image_as_array > 3 )
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip( non_black_coords_array[1], non_black_coords_array[0] )
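            # (numpy.where on a 2D array returns ([y0, y1, ...], [x0, x1, ...]),
            # so the zip above yields [(x0, y0), (x1, y1), ...].)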

            frame_hsv = cv.CreateImage(cv.GetSize(color_image),8,3)
            cv.CvtColor(color_image,frame_hsv,cv.CV_BGR2HSV)
            imgthreshold_yellow=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white2=cv.CreateImage(cv.GetSize(color_image),8,1)
            cv.InRangeS(frame_hsv,cv.Scalar(0,0,196),cv.Scalar(255,255,255),imgthreshold_white)  # changed scalar from 255,15,255 to 255,255,255
            cv.InRangeS(frame_hsv,cv.Scalar(41,43,224),cv.Scalar(255,255,255),imgthreshold_white2)
            cv.InRangeS(frame_hsv,cv.Scalar(20,100,100),cv.Scalar(30,255,255),imgthreshold_yellow)
            #cvCvtColor(color_image, yellowHSV, CV_BGR2HSV)
            #lower_yellow = np.array([10, 100, 100], dtype=np.uint8)
            #upper_yellow = np.array([30, 255, 255], dtype=np.uint8)
            #mask_yellow = cv2.inRange(yellowHSV, lower_yellow, upper_yellow)
            #res_yellow = cv2.bitwise_and(color_image, color_image, mask=mask_yellow)


            points = []   # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []


            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours( grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE )

            i=0
            while contour:

#                c = contour[i]
#                m = cv2.moments(c)
#                Area  = m['m00']
#                print( Area )

                bounding_rect = cv.BoundingRect( list(contour) )
                point1 = ( bounding_rect[0], bounding_rect[1] )
                point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )


                bounding_box_list.append( ( point1, point2 ) )
                polygon_points = cv.ApproxPoly( list(contour), mem_storage, cv.CV_POLY_APPROX_DP )


                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)


                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly( grey_image, [ list(polygon_points), ], cv.CV_RGB(255,255,255), 0, 0 )
                cv.PolyLine( display_image, [ polygon_points, ], 0, cv.CV_RGB(255,255,255), 1, 0, 0 )
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
#        if Area > 3000:
#            cv2.drawContours(imgrgb,[cnt],0,(255,255,255),2)
#            print(Area)

                i=i+1
                contour = contour.h_next()




            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are probably just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            # Index constants for bbox tuples: box = ( (left, top), (right, bottom) )
            left = top = 0
            right = bottom = 1
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append( box_width * box_height )


                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)


            average_box_area = 0.0
            if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)


            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]


                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area*0.1: trimmed_box_list.append( box )


            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )


            bounding_box_list = merge_collided_bboxes( trimmed_box_list )


            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )


            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len( bounding_box_list )


            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.


            if frame_t0 - last_target_change_t < .35:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0
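            # (Example: if last_target_count is 5 and a frame suddenly yields
            # one candidate, the estimate steps down 5 -> 4 -> 3 -> ... at most
            # once per 0.35 s window instead of jumping straight to 1.)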


            # Clip to the user-supplied maximum:
            estimated_target_count = min( estimated_target_count, max_targets )


            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.


            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []


            if len(points):


                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max( estimated_target_count, 1 )  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook


                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans( array( points ), k_or_guess )
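                # (scipy's vq.kmeans accepts either an int k or an initial
                # codebook array; passing last frame's codebook seeds the
                # search at the previous target positions.)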


                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = ( int(center_point[0]), int(center_point[1]) )
                    center_points.append( center_point )
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)


            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []


            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []


                for center_point in center_points:
                    if box[left][0] < center_point[0] < box[right][0] and \
                        box[top][1] < center_point[1] < box[bottom][1]:


                        # This point is within the box.
                        center_points_in_box.append( center_point )


                # Now see if there are more than one.  If so, merge them.
                if len( center_points_in_box ) > 1:
                    # Merge them:
                    x_list = []
                    y_list = []  # separate lists ("x_list = y_list = []" would alias them)
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])


                    average_x = int( float(sum( x_list )) / len( x_list ) )
                    average_y = int( float(sum( y_list )) / len( y_list ) )


                    trimmed_center_points.append( (average_x, average_y) )


                    # Record that they were removed:
                    removed_center_points += center_points_in_box


                if len( center_points_in_box ) == 1:
                    trimmed_center_points.append( center_points_in_box[0] ) # Just use it.


            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                    trimmed_center_points.append( center_point )


            # Draw what we found:
            #for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)


            # Determine if there are any new (or lost) targets:
            actual_target_count = len( trimmed_center_points )
            last_target_count = actual_target_count


            # Now build the list of physical entities (objects)
            this_frame_entity_list = []


            # An entity is list: [ name, color, last_time_seen, last_known_coords ]
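            # e.g. (illustrative values): [ "a3f21c", (12, 200, 90), 1458690882.3, (212, 148) ]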


            for target in trimmed_center_points:


                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}


                for entity in last_frame_entity_list:


                    entity_coords= entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]


                    distance = sqrt( pow(delta_x,2) + pow( delta_y,2) )
                    entity_distance_dict[ distance ] = entity


                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()


                for distance in distance_list:


                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[ distance ]


                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] ) #Commented Out 3/20/2016
                        continue


                    #print "Target %s pixel(b,g,r) : USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1]) # Commented Out 3/20/2016
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append( nearest_possible_entity )
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_count, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break


                if entity_found == False:
                    # It's a new entity.
                    color = ( random.randint(0,255), random.randint(0,255), random.randint(0,255) )
                    name = hashlib.md5( str(frame_t0) + str(color) ).hexdigest()[:6]
                    last_time_seen = frame_t0
                    if imgthreshold_white[target[1],target[0]] == 0.0:
                        # It's a real detect (not a line marker)

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count / float(fps), frame_count, new_entity[0], new_entity[3][0], new_entity[3][1] ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img" + str(name)
                        # print "gfilter is: %.2f" % gfilter
                        cv.SaveImage( "image-test%s-%.3f.png" % (new_entity[0], gfilter), display_image )
                    elif imgthreshold_white[target[1],target[0]] == 255.0:
                        # It's a white line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count / float(fps), frame_count, new_entity[0], new_entity[3][0], new_entity[3][1] ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img" + str(name)
                        # print "gfilter is: %.2f" % gfilter
                        cv.SaveImage( "white-line-image-test%s-%.3f.png" % (new_entity[0], gfilter), display_image )
                    elif imgthreshold_yellow[target[1],target[0]] == 255.0:
                        # It's a yellow line detect
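                        # (Note: imgthreshold_white is a binary mask, so one of
                        # the two branches above always matches and this yellow
                        # branch can never be reached with the checks in this
                        # order; test the yellow mask first if it should fire.)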

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count / float(fps), frame_count, new_entity[0], new_entity[3][0], new_entity[3][1] ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
                        cv.SaveImage("yellow-line-image-test%s.png"%(new_entity[0]), camera_image)

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.


            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_count, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append( entity )


            # For next frame:
            last_frame_entity_list = this_frame_entity_list


            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)




            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break


            # Toggle which image to show
            if chr(c) == 'd':
                image_index = ( image_index + 1 ) % len( image_list )


            image_name = image_list[ image_index ]


            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
            elif image_name == "shadow":
                image = fgmask
                cv.PutText( image, "No Shadow", text_coord, text_font, text_color )
            elif image_name == "white":
                #image = difference
                image = imgthreshold_white
                cv.PutText( image, "White Threshold", text_coord, text_font, text_color )
            elif image_name == "display":
                image = display_image
                cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
                cv.PutText( image, str(frame_t0), text_coord2, text_font, text_color )
                cv.PutText( image, str(frame_count), text_coord3, text_font, text_color )
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
                image = display_image  # Re-use display image here
                cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
            elif image_name == "yellow":
                # Do face detection
                #detect_faces( camera_image, haar_cascade, mem_storage )
                image = imgthreshold_yellow  # Re-use camera image here
                cv.PutText( image, "Yellow Threshold", text_coord, text_font, text_color )


            #cv.ShowImage( "Target", image )		Commented out 3/19

#            self.writer.write( image )
#            out.write( image );
#            cv.WriteFrame( self.writer, image );
#            if self.writer:
#                cv.WriteFrame( self.writer, image );
#                video.write( image );

            log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
#            if not self.writer:
#                delta_t = frame_t1 - frame_t0
#                if delta_t < ( 1.0 / 15.0 ): time.sleep( ( 1.0 / 15.0 ) - delta_t )

            if frame_count == 155740:  # stop after this many frames
                cv2.destroyWindow("Target")
#                    cv.ReleaseVideoWriter()
#                    self.writer.release()
#                    log_file.flush()
                break

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float( frame_count ) / time_delta
        print "Got %d frames. %.1f s. %f fps." % ( frame_count, time_delta, processed_fps )
Ejemplo n.º 19
0
index = 1 # frame counter; alternatively use the GetCaptureProperty method
while True:
    # 1. Current frame number
    print('frame:', index)
    if index == nbFrames:
        print('finish.')
        break

    # print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES) # frame == index

    # 2. Grab the current frame, frame2, and convert it into frame2gray
    frame2 = cv.QueryFrame(capture)
    cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY) # convert the source frame2 to the frame2gray format

    # 3. Frame differencing; the result goes to res
    cv.AbsDiff(frame1gray, frame2gray, res) # absolute difference of frame1gray and frame2gray, written to res
    # res = cv2.absdiff(frame1gray, frame2gray) # equivalent call in the newer cv2 API
    cv.ShowImage("After AbsDiff", res)

    # 4. Save res as the foreground target
    # cv.Convert(res, gray)
    # out_foreground.write(np.uint8(res)) # save as the foreground target
    # out_foreground.write(np.asarray(cv.GetMat(res)))
    cv.WriteFrame(out_foreground, cv.GetImage(res)) # res is a CvMat; convert it to an IplImage

    # 5. Smoothing
    # cv.Smooth(res, res, cv.CV_BLUR, 5, 5) # smooth res

    # 6. Morphological transform: opening and closing
    element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT) # CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)
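    # (A plausible continuation, an assumption since the example is cut off
    # here: open to drop speckle noise, then close to fill holes.)
    # temp = cv.CreateImage(cv.GetSize(res), cv.IPL_DEPTH_8U, 1)
    # cv.MorphologyEx(res, res, temp, element, cv.CV_MOP_OPEN)
    # cv.MorphologyEx(res, res, temp, element, cv.CV_MOP_CLOSE)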
Ejemplo n.º 20
0
foreground = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
output = cv.CreateImage((width, height), 8, 1)

begin = True
threshold = 10

for f in xrange(nbFrames):
    frame = cv.QueryFrame(capture)

    out_foreground.write(np.asarray(cv.GetMat(frame)))  # IplImage -> numpy array for the cv2 writer

    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    if begin:
        cv.Convert(gray, background)  #Convert gray into background format
        begin = False

    cv.Convert(background, backImage)  # convert existing background to backImage

    cv.AbsDiff(backImage, gray, foreground)  #Absdiff to get differences

    cv.Threshold(foreground, output, threshold, 255, cv.CV_THRESH_BINARY_INV)

    cv.Acc(foreground, background, output)  # accumulate foreground into background, using output as the mask
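    # (output comes from CV_THRESH_BINARY_INV above, so it is 255 exactly where
    # the frame-to-background difference is small; used as the mask here, it
    # means the background only accumulates in regions that did not change.)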

    cv.ShowImage("Output", output)
    cv.ShowImage("Gray", gray)
    c = cv.WaitKey(wait)
    if c == 27:  #Break if user enters 'Esc'.
        break