Example #1
 image_section_1 = frame[63:267, 53:267]
 image_section_2 = frame[63:277, 353:567]
 image_section_1_grey = cv2.cvtColor(image_section_1, cv2.COLOR_BGR2GRAY)
 image_section_2_grey = cv2.cvtColor(image_section_2, cv2.COLOR_BGR2GRAY)
 image_section_1_grey = cv2.GaussianBlur(image_section_1_grey, (7, 7), 50)
 image_section_2_grey = cv2.GaussianBlur(image_section_2_grey, (7, 7), 50)
 if number_of_runs < 100:
     if (bg1 is None) and (bg2 is None):
         bg1 = image_section_1_grey.copy().astype("float")
         bg2 = image_section_2_grey.copy().astype("float")
         continue
     if number_of_runs == 99:
         print("Calibrated...")
     elif number_of_runs < 99:
         print("Calibrating Background...")
     cv2.accumulateWeighted(image_section_1_grey, bg1, 0.5)
     cv2.accumulateWeighted(image_section_2_grey, bg2, 0.5)
 else:
     diff1 = cv2.absdiff(bg1.astype("uint8"), image_section_1_grey)
     diff2 = cv2.absdiff(bg2.astype("uint8"), image_section_2_grey)
     threshold1 = cv2.threshold(diff1, 30, 255, cv2.THRESH_BINARY)[1]
     threshold2 = cv2.threshold(diff2, 30, 255, cv2.THRESH_BINARY)[1]
     cv2.imshow("T1", threshold1)
     cv2.imshow("T2", threshold2)
     key_pressed = cv2.waitKey(1) & 0xFF
     if key_pressed == ord('s') or check:
         check = True
         i = 0
         while os.path.exists("online/" + "%s.jpg" % i):
             i += 1
         FILE_1 = "./online/" + str(i) + ".jpg"
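The snippet above follows a common calibrate-then-detect pattern: warm the accumulator up for a fixed number of frames, then compare each new frame against it. A minimal self-contained sketch of the same idea (the camera index, warm-up length, and threshold are illustrative assumptions, not values from the original project):

import cv2

cap = cv2.VideoCapture(0)
bg = None
NUM_CALIBRATION_FRAMES = 100  # illustrative warm-up length

# Phase 1: accumulate a background model over the first N frames
for _ in range(NUM_CALIBRATION_FRAMES):
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (7, 7), 0)
    if bg is None:
        bg = gray.astype("float")  # accumulateWeighted needs a float destination
        continue
    cv2.accumulateWeighted(gray, bg, 0.5)

# Phase 2: flag motion as the thresholded difference from the model
while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (7, 7), 0)
    diff = cv2.absdiff(cv2.convertScaleAbs(bg), gray)
    mask = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow("motion mask", mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()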
Example #2
def track(bs, img_copy, img, avg):
	x = -1
	y = -1

	img_copy = cv2.GaussianBlur(img_copy,(5,5),0)
	cv2.accumulateWeighted(img_copy,avg,0.4)
	res = cv2.convertScaleAbs(avg)

	res = bs.bg_subtractor.apply(res, None, 0.05)

	gradient = cv2.morphologyEx(res, cv2.MORPH_GRADIENT, kernel)

	processed_img = cv2.GaussianBlur(gradient,(5,5),0)

	_,threshold_img = cv2.threshold( processed_img, 20, 255, cv2.THRESH_BINARY )

	if np.count_nonzero(threshold_img) > 5:

		contours, hierarchy = cv2.findContours(threshold_img, cv2.RETR_TREE, 
			cv2.CHAIN_APPROX_SIMPLE)

		# totally not from stack overflow
		areas = [cv2.contourArea(c) for c in contours]
		# NOTE: argmin picks the *smallest* contour here; the largest-contour
		# version (argmax) is left commented out
		# max_index = np.argmax(areas)
		max_index = np.argmin(areas)
		# Make sure it's big enough
		if cv2.contourArea(contours[max_index]) >= MIN_BLOB_SIZE_ROBOT:
			# img_out = np.zeros(img_thresh.shape).astype(np.uint8)
			cv2.drawContours(img, contours, max_index, (255, 255, 255), -1)
			x, y = getCentroid(contours[max_index])

	return x, y
Example #3
def runningAverage(image, average, alpha):
    ''' Calculates running average of given pictures stream.

    Using cv2.accumulateWeighted.

    Parameters
    -----------
    image : np.array
        new image to be averaged along with past image stream,
    average : np.array
        past averaged image,
    alpha : float
        control parameter of the running average; it sets how fast
        previous images are forgotten: 1 - no averaging (only the
        newest image), 0 - the average never updates.

    Returns
    --------
    image : np.array
        averaged image as numpy array.
    '''
    average = np.float32(average)
    cv2.accumulateWeighted(image, average, alpha)
    #print 'Alpha value is {}.'.format(alpha)
    image = cv2.convertScaleAbs(average)

    return image
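A possible driver loop for runningAverage (the capture source and the alpha value are assumptions): because the function returns an 8-bit image, the result is simply fed back in as the next average.

import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
average = frame  # the first frame seeds the average
while ok:
    # small alpha = long memory; alpha near 1 tracks only the newest frame
    average = runningAverage(frame, average, 0.05)
    cv2.imshow("running average", average)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    ok, frame = cap.read()
cap.release()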
Example #4
def track2(bs, img_copy, img, avg):
	x = -1
	y = -1

	img_copy = cv2.GaussianBlur(img_copy,(5,5),0)
	cv2.accumulateWeighted(img_copy,avg,0.4)
	res = cv2.convertScaleAbs(avg)
	res = cv2.absdiff(img, res)
	_,processed_img = cv2.threshold( res, 7, 255, cv2.THRESH_BINARY )
	processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
	_,processed_img = cv2.threshold( processed_img, 240, 255, cv2.THRESH_BINARY )

	processed_img = bs.bg_subtractor.apply(processed_img, None, 0.05)
	
	# img_thresh = cv2.morphologyEx(img_thresh, cv2.MORPH_OPEN, kernel)
	
	if np.count_nonzero(processed_img) > 5:
		# Get the largest contour
		contours, hierarchy = cv2.findContours(processed_img, cv2.RETR_TREE, 
			cv2.CHAIN_APPROX_SIMPLE)
		areas = [cv2.contourArea(c) for c in contours]
		max_index = np.argmax(areas)

		# Make sure it's big enough
		if cv2.contourArea(contours[max_index]) >= MIN_BLOB_SIZE:
			cv2.drawContours(img, contours, max_index, (255, 255, 255), -1)
			x, y = getCentroid(contours[max_index])

	return x, y
Example #5
    def BackGroundSub(self, camera_imageROI):
        ## accumulated averaging
        if self.subMethod in ["Acc", "Both"]:
            # Create an image with interactive feedback:
            self.display_image = camera_imageROI.copy()

            # Create a working "color image" to modify / blur
            self.color_image = self.display_image.copy()

            # Smooth to get rid of false positives
            self.color_image = cv2.GaussianBlur(self.color_image, (3, 3), 0)

            # Use the Running Average as the static background
            cv2.accumulateWeighted(self.color_image, self.running_average_image, self.accAvg)
            self.running_average_in_display_color_depth = cv2.convertScaleAbs(self.running_average_image)

            # Subtract the current frame from the moving average.
            self.difference = cv2.absdiff(self.color_image, self.running_average_in_display_color_depth)

            # if vis: display("difference",5000,difference)

            # Convert the image to greyscale.
            self.grey_image = cv2.cvtColor(self.difference, cv2.COLOR_BGR2GRAY)

            # Threshold the image to a black and white motion mask:
            ret, self.grey_image = cv2.threshold(self.grey_image, self.threshT, 255, cv2.THRESH_BINARY)

        ## Mixture of Gaussians (MOG) or KNN subtractor
        if self.subMethod in ["MOG", "KNN", "Both"]:
            self.grey_image = self.fgbg.apply(camera_imageROI)

        # Morphological opening (erode then dilate) to remove small noise blobs
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        self.grey_image = cv2.morphologyEx(self.grey_image, cv2.MORPH_OPEN, kernel)
        return self.grey_image
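The MOG/KNN branch assumes a self.fgbg built elsewhere; the constructor is not part of this snippet, but a factory along these lines would fit (the parameter values are OpenCV's documented defaults, shown here as an assumption about the original setup):

import cv2

def make_subtractor(sub_method):
    # mirrors the branches used in BackGroundSub above
    if sub_method in ("MOG", "Both"):
        return cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16,
                                                  detectShadows=True)
    if sub_method == "KNN":
        return cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400.0,
                                                 detectShadows=True)
    return None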
Example #6
def perform_tracking(vidreader,smoothMasks,num_blocks,num_prev_frames,num_objects=1):
	start = time.time(); 
	tracker = tracker_instance(TrackerMethods.MIXTURE_BASED); 	tracker.numMixtures = num_objects;
	prev_rects = [np.array([vidreader.width/4,vidreader.height/4,\
						vidreader.width/2,vidreader.height/2],\
						dtype=np.float32) for idx in range(num_objects)];
	prev_centers =  [np.array([vidreader.width/2,vidreader.height/2],
						dtype=np.float32) for idx in range(num_objects)];
	vidreader.__reset__();	N = vidreader.frames;
	skip_frames = num_prev_frames + num_blocks/2;
	frame_idx = 0; numFrames= len(smoothMasks);
	frame_all_window_center = []; window_size = np.zeros((num_objects,2));
	while(frame_idx < numFrames):
		print 'Tracking ... {0}%\r'.format((frame_idx*100/N)),
		(cnt,frames) = vidreader.read(frame_idx+skip_frames,num_blocks);
		if cnt > 1:
			window_frames = tracker.track_object(frames,smoothMasks[frame_idx:frame_idx+cnt]);
			s = frame_idx; e = s + cnt; 
			for window_frame in window_frames:
				all_window_center = [];
				for (idx,_window) in enumerate(window_frame):
					(window,lbl) = _window
					rect = np.array([window[0],window[1],window[2]-window[0],window[3]-window[1]],dtype=np.float32);
					cv2.accumulateWeighted(rect,prev_rects[idx],0.1,None);
					window_center = prev_rects[idx][:2]+(prev_rects[idx][2:]/2); 
					cv2.accumulateWeighted(window_center,prev_centers[idx],0.3,None);
					window_center = prev_centers[idx].copy();
					window_size[idx] = np.maximum(window_size[idx],prev_rects[idx][2:]);
					all_window_center.extend([window_center]);
				frame_all_window_center.extend([all_window_center]);
		frame_idx += num_blocks;
	time_taken = time.time()-start;	
	print "Tracking .... [DONE] in ",time_taken," seconds"
	return frame_all_window_center,window_size
Example #7
    def detect_motion_new(self, winName, interval=1):
        #Implemented after talking with Dr Smart about image averaging and background extraction

        min_contour_area = 25
        max_contour_area = 1250
        retval = False
        threshold = 65
        try:
            _image_static = None
            _image_static = self.save_image(persist=False)
            _image_static = cv2.cvtColor(numpy.array(_image_static), cv2.COLOR_RGB2GRAY)
            _image_static = cv2.GaussianBlur(_image_static, (21, 21), 0)

            accumulator = numpy.float32(_image_static)
            while True:
                sleep(interval)
                _image_static = self.save_image(persist=False)
                _image_static = cv2.cvtColor(numpy.array(_image_static), cv2.COLOR_RGB2GRAY)
                _image_static = cv2.GaussianBlur(_image_static, (21, 21), 0)

                cv2.accumulateWeighted(numpy.float32(_image_static), accumulator, 0.1)

                _image_static = cv2.convertScaleAbs(accumulator)

                _image_dynamic = self.save_image(persist=False)
                _image_dynamic1 = cv2.cvtColor(numpy.array(_image_dynamic), cv2.COLOR_RGB2GRAY)
                _image_dynamic1 = cv2.GaussianBlur(_image_dynamic1, (21, 21), 0)

                # ideas from http://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html#gsc.tab=0
                _delta = cv2.absdiff(_image_dynamic1, _image_static)

                _threshold = cv2.threshold(_delta, 17, 255, cv2.THRESH_BINARY)[1]
                # dilate the thresholded image to fill in holes, then find contour on thresholded image
                _threshold = cv2.dilate(_threshold, None, iterations=5)

                (img, contours, _) = cv2.findContours(_threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                dyn = cv2.cvtColor(numpy.array(_image_dynamic), cv2.COLOR_RGB2GRAY)
                # loop over the contours
                for contour in contours:
                    # if the contour is too small, ignore it
                    _area = cv2.contourArea(contour)
                    if _area < min_contour_area: # or _area > max_contour_area:
                        continue  # skip to the next

                    # compute the bounding box for the contour, draw it on the frame,

                    (x, y, w, h) = cv2.boundingRect(contour)
                    #cv2.rectangle(dyn, (x, y), (x + w, y + h), (0, 12, 255), 2)
                    cv2.ellipse(dyn, (x+5, y+25), (10, 20), 90, 0, 360, (255, 0, 0), 2)

                cv2.imshow(winName, numpy.hstack([dyn, _threshold]))

                key = cv2.waitKey(10)
                if key == 27:
                    cv2.destroyWindow(winName)
                    break

        except Exception as ex:
            print(ex)
Example #8
def aveprocess(im):
    global globim
    if globim is None:
        # seed as float32: accumulateWeighted needs a floating-point accumulator,
        # and comparing a numpy array with "== None" is an error
        globim = np.float32(im)
    else:
        cv2.accumulateWeighted(im,globim,0.0000001)
    return globim
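Note how small that alpha is: each call folds in only 1e-7 of the new frame, so the average is effectively frozen. A quick back-of-the-envelope check of how long new content takes to dominate:

import math

alpha = 0.0000001
# frames until new content reaches half the weight of the average
n_half = math.log(0.5) / math.log(1.0 - alpha)
print(int(n_half))         # ~6.9 million frames
print(n_half / 30 / 3600)  # ~64 hours of video at 30 fps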
Example #9
  def run(self):
    stream = urllib.urlopen(config.VIDEO.INPUT)
    running_average = np.zeros((config.CONTROLLER.HEIGHT,config.CONTROLLER.WIDTH, 3), np.float64) # image to store running avg

    bytes = ''
    while True:
      bytes+=stream.read(1024)
      a = bytes.find('\xff\xd8')
      b = bytes.find('\xff\xd9')
      if a!=-1 and b!=-1:
        img = DetectorProcess.processFrame(bytes[a:b+2])
        bytes = bytes[b+2:]

        cv2.accumulateWeighted(img, running_average, .2, None)
        diff = DetectorProcess.diffImg(img, running_average.astype(np.uint8))
        contours, hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

        contourAreas = {cv2.contourArea(c):c for c in contours}
        if len(contours) != 0:
          maxarea = max(contourAreas.keys())
          if maxarea > config.VIDEO.AREA_THRESHOLD:
            largestContour = contourAreas[maxarea]
            ccenter, cradius = cv2.minEnclosingCircle(largestContour)
            self.x.value = ccenter[0]
            self.y.value = ccenter[1]
Example #10
def threshave(im):
    global threshim
    if threshim is None:
        threshim = np.float32(im)  # float accumulator for accumulateWeighted
    else:
        cv2.accumulateWeighted(im,threshim,0.1)
    return threshim
Example #11
def hough(avg1):

  image = frame.array
  image = image[240:480,0:640]
  cv2.accumulateWeighted(image,avg1,0.1)
  res1 = cv2.convertScaleAbs(avg1)

  # saturating subtraction: plain "image - res1" would wrap around on uint8
  image = cv2.subtract(image, res1)
  #gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
  #image2 = cv2.threshold(image, 0, 255, 0)
  image2 = cv2.Canny(image,50,150,apertureSize = 3)
  
  #fgmask = fgbg.apply(image)
  gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
  edges = cv2.Canny(gray,50,150,apertureSize = 3)
  lines = cv2.HoughLines(edges,1,np.pi/180,200,80)

  if lines is not None:
    for x in range(0,len(lines)):
      for rho,theta in lines[x]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0+1000*(-b))
        y1 = int(y0+1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0-1000*(a))
        
        cv2.line(image,(x1,y1),(x2,y2),(0,0,255),2)
  return res1
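The subtraction above uses cv2.subtract because plain "-" on uint8 arrays wraps around modulo 256 instead of clipping at zero; a two-line demonstration of the difference:

import cv2
import numpy as np

a = np.array([[10]], dtype=np.uint8)
b = np.array([[20]], dtype=np.uint8)
print(a - b)               # [[246]] -- numpy wraps around (modulo 256)
print(cv2.subtract(a, b))  # [[0]]   -- OpenCV saturates at zero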
Example #12
	def imgDiff(self, frame):

		# NOTE: avg is re-seeded from the current frame on every call, so the
		# running average only spans a single invocation; persist avg across
		# calls to build a real background model
		running_average_in_display = frame
		avg = np.float32(frame)
		display_image = frame.copy()

		#smooth image
		blur = cv2.GaussianBlur(display_image,(5,5),0)

		#calculate running avg
		cv2.accumulateWeighted(blur,avg,0.5)
		res = cv2.convertScaleAbs(avg)
		cv2.convertScaleAbs(avg,running_average_in_display, 1.0, 0.0)

		#get the difference between avg and image
		difference = cv2.absdiff(display_image, running_average_in_display)

		#convert image to grayscale
		img_grey = cv2.cvtColor(difference, cv2.COLOR_RGB2GRAY)

		#compute threshold
		ret,img_grey = cv2.threshold( img_grey, 2, 255, cv2.THRESH_BINARY )

		#smooth and threshold again to eliminate sparkles
		img_grey = cv2.GaussianBlur(img_grey,(5,5),0)
		ret,img_grey = cv2.threshold( img_grey, 240, 255, cv2.THRESH_BINARY )

		# the mask is presumably the result of interest; the original snippet
		# ends without returning it
		return img_grey
Example #13
def diffaccWeight(f,t, avg):
	x_pos = -1
	y_pos = -1
	
	f = cv2.GaussianBlur(f,(5,5),0)
	cv2.accumulateWeighted(f,avg,0.4)
	res = cv2.convertScaleAbs(avg)
	res2 = cv2.absdiff(t, res.copy())
	ret,img_grey2 = cv2.threshold( res2, 7, 255, cv2.THRESH_BINARY )
	img_grey2 = cv2.GaussianBlur(img_grey2,(5,5),0)
	ret2,img_grey2 = cv2.threshold( img_grey2, 240, 255, cv2.THRESH_BINARY )

	img_thresh = ctcv.bg_subtractor.apply(img_grey2, None, 0.05)

	if np.count_nonzero(img_thresh) > 5:
		# Get the largest contour
		contours, hierarchy = cv2.findContours(img_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

		# totally not from stack overflow
		max_index = ctcv.getLargestContourIndex(img_thresh)

		# Make sure it's big enough
		if cv2.contourArea(contours[max_index]) >= MIN_BLOB_SIZE:
			cv2.drawContours(t, contours, max_index, (255, 255, 255), -1)
			x_pos, y_pos = ctcv.getCentroid(contours[max_index])

	return t, x_pos, y_pos
Example #14
    def getBackground(self):
        """
        **SUMMARY**

        Get Background of the Image. For more info read 
        http://opencvpython.blogspot.in/2012/07/background-extraction-using-running.html

        **PARAMETERS**
        No Parameters

        **RETURNS**
        
        Image - SimpleCV.ImageClass.Image

        **EXAMPLE**

        >>> while (some_condition):
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> ts.getBackground().show()
        """
        from SimpleCV import Image

        imgs = self.trackImages(cv2_numpy=True)
        f = imgs[0]
        avg = np.float32(f)
        for img in imgs[1:]:
            f = img
            cv2.accumulateWeighted(f, avg, 0.01)
            res = cv2.convertScaleAbs(avg)
        return Image(res, cv2image=True)
Example #15
    def processImage(self, frame, avg_frame, frame_type=FRAME_TYPES[0]):
        # Blur and average with previous frames
        src_frame = cv.GaussianBlur(frame, (19, 19), 0)
        cv.accumulateWeighted(src_frame, avg_frame, AVG_WEIGHT)
        conv_frame = cv.convertScaleAbs(avg_frame)

        # Subtract current and average frames
        diff_frame = cv.absdiff(src_frame, conv_frame)

        # Convert to grayscale then to black/white
        gray_frame = cv.cvtColor(diff_frame, cv.COLOR_RGB2GRAY)
        _, bw_frame = cv.threshold(gray_frame, BW_THRESHOLD, 255, cv.THRESH_BINARY)

        # Calculate contours
        bw_copy = bw_frame.copy()
        contours, hier = cv.findContours(bw_copy, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

        # Draw contours and bounding boxes
        main_frame = frame.copy()
        for contour in contours:
            rect = cv.boundingRect(contour)
            top  = rect[0:2]
            bot  = (rect[0] + rect[2], rect[1] + rect[3])
            cv.rectangle(main_frame, top, bot, (255, 0, 0), 1)
        cv.drawContours(main_frame, contours, -1, (0, 255, 0), -1)

        # Select desired frame to display
        frames = dict(zip(FRAME_TYPES, (main_frame, frame, src_frame, conv_frame, gray_frame, bw_frame)))
        out_frame = frames[frame_type]

        return (out_frame, avg_frame)
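A sketch of how processImage might be driven (the capture loop is an assumption, and "processor" stands in for whatever object owns the method; FRAME_TYPES, AVG_WEIGHT, and BW_THRESHOLD come from the surrounding module): the float accumulator is threaded through every call.

import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
ok, frame = cap.read()
# seed the accumulator from a blurred first frame, matching src_frame above
avg_frame = np.float32(cv.GaussianBlur(frame, (19, 19), 0))
while ok:
    out_frame, avg_frame = processor.processImage(frame, avg_frame)
    cv.imshow("output", out_frame)
    if cv.waitKey(1) & 0xFF == 27:  # Esc
        break
    ok, frame = cap.read()
cap.release()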
Example #16
def canny(video_file, start_sec, speed, lower_threshold, higher_threshold):
    ESCAPE_KEY = 27

    video = cv2.VideoCapture(video_file)
    fps   = video.get(cv2.cv.CV_CAP_PROP_FPS)
    delay = int(1000 / (fps * speed))

    if start_sec > 0:
        video.set(cv2.cv.CV_CAP_PROP_POS_MSEC, start_sec * 1000)

    _, frame = video.read()
    average = np.float32(frame)
        
    while True:
        _, frame = video.read()
        if frame is None:
            break

        cv2.imshow('input', frame)

        cv2.accumulateWeighted(frame, average, 0.01)
        background = cv2.convertScaleAbs(average)
        cv2.imshow('background', background)

        foreground = cv2.subtract(frame, background)
        cv2.imshow('foreground', foreground)

        grayscale = cv2.cvtColor(foreground, cv2.cv.CV_BGR2GRAY)
        edges = cv2.Canny(grayscale, lower_threshold, higher_threshold)
        cv2.imshow('canny', edges)

        if cv2.waitKey(delay) == ESCAPE_KEY:
            break
Example #17
def main():
	average = Average()
	avg = None  # float32 accumulator, seeded from the first frame read below
	while(True):
		images = []
		for host in remoteHosts:	# get new images
			try:
				with cd('~/build18/images'):
					picPath = runCommand("/home/pi/build18/images/updatelatest.sh", host)
					print picPath
					pic = getFile(picPath, "~/roomal/build18/temp.jpeg", host)
			except:
				print "Error with ssh"
		# stitch images
		newImage = cv2.imread("temp.jpeg")

		if avg is None:
			avg = np.float32(newImage)
		cv2.accumulateWeighted(newImage, avg, 0.1)

		res = cv2.convertScaleAbs(avg)
		cv2.imshow('img', newImage)
		cv2.imshow('avg', res)
		k = cv2.waitKey(10)
 		
		if k == 27:
			break
Example #18
 def learnPicture(self, img):
     if self.startImgSet:
         weight = self.giveAlpha()
         cv2.accumulateWeighted(img, self.background, weight)
     else:
         self.background = np.array(img, dtype=np.float32)
         self.startImgSet = True
Example #19
def getBackgroundImage(vid,numFrames):
	
	print "\n\n\n\n-----------------------\n\ninitializing background detection\n"
	
	# set a counter
	i = 0
	_,frame = vid.read()
	# initialize an empty array the same size of the pic to update
	update = np.float32(frame)
	
	# loop through the first numFrames frames to get the background image
	while i < numFrames:
		# grab a frame
		_,frame = vid.read()
		
		# main function
		cv2.accumulateWeighted(frame,update,0.001)
		final = cv2.convertScaleAbs(update)
		# increment the counter
		i += 1
		print i
		# print something every 100 frames so the user knows the gears are grinding
		if i%100 == 0:
			print "detecting background -- on frame " + str(i) + " of " + str(numFrames)
	return final
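A quick note on the alpha above: after numFrames calls the very first frame still carries a weight of (1 - 0.001)^numFrames, so with alpha = 0.001 it takes a few thousand frames to fade. If a plain uniform mean of the first numFrames is preferred, cv2.accumulate with a final division gives it; a sketch under the same reading pattern (the function name is ours, not from the original project):

import cv2
import numpy as np

def getBackgroundMean(vid, numFrames):
	# uniform mean of the first numFrames frames, as an alternative to the
	# exponentially weighted average above
	count = 0
	acc = None
	ok, frame = vid.read()
	while ok and count < numFrames:
		if acc is None:
			acc = np.zeros(frame.shape, np.float32)
		cv2.accumulate(frame, acc)
		count += 1
		ok, frame = vid.read()
	if acc is None:
		return None
	return cv2.convertScaleAbs(acc / count)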
Example #20
def remove_bg(raw_img, avg):

	cv2.accumulateWeighted(raw_img,avg,0.0005)
	bg_img = cv2.convertScaleAbs(avg)
	#cv2.imshow('bg_img',bg_img)

	return bg_img, avg
Example #21
    def _get_initial_master(self, start, samples, alpha=0.02):
        '''
        Samples previous frames from the video to get an initial master frame
        
        start = frame number to start at
        samples = how many frames to sample
        alpha = regulates the weighting of the images
        
        '''
        # Get empty frame
        self.cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, start)
        success, initial_frame = self.cap.read()
        while not success:
            success, initial_frame = self.cap.read()
        initial_frame = np.float32(initial_frame)
        # Generate value within start and start+samples
        sam_array = np.arange(start, start+samples)
        count = 0.
        for frame_no in sam_array:
            # get frame
            success, frame = self.cap.read()
            if not success:
               continue
            cv2.accumulateWeighted(np.float32(frame), initial_frame, alpha)
            count += 1

        return initial_frame
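Incidentally, after `samples` accumulations the original seed frame retains a weight of (1 - alpha)^samples; with the values used in the next example (120 samples, the default alpha of 0.02) that is only about 9%, so the initial master is dominated by the sampled frames. A one-line check:

import math
print(math.pow(1 - 0.02, 120))  # ~0.089, i.e. ~9% residual weight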
Example #22
 def moving_ave_chi(self, max_frames=-1, fps=1, alpha=0.02, plot=False):
     '''
     Uses moving average for master frame
     Calculates fps (frames per second)
     '''
     if max_frames < 1:
         max_frames = self.frames
     # set number of steps to skip
     stride = int(round(self.f_rate / float(fps)))
     # get initial average frame
     self.moving_ave_frame = self._get_initial_master(0, 120)
    
     # outputs
     self.frame_no = []
     self.frame_chi = []
     self.frame_time = []
     # start chi square
     x,y,z = self.moving_ave_frame.shape
     ddof = float(x * y)
     for frame_no in xrange(0, max_frames, stride):
         # Get chi square
         self.cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frame_no)
         success, frame = self.cap.read()
         if not success:
             continue
         self.frame_no.append(frame_no)
         self.frame_time.append(self.cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC))
         # Cal chi
         self.frame_chi.append(chisquare(frame, self.moving_ave_frame).sum()/ddof)
         # Cal new ave frame
         cv2.accumulateWeighted(frame, self.moving_ave_frame, alpha)
         # Show moving Ave Frame and chi plot
         if plot:
             cv2.imshow('Ave Frame',self.moving_ave_frame)
Example #23
    def run(self):
        videoFrame = self.capture.read()[1]
        videoFrameSize = videoFrame.shape
        
        # Capture cam image
        _,colourImage = self.capture.read()
        #blur image to remove false positives
        colourImage = cv2.GaussianBlur(colourImage, (5,5), 0)
        
        #create moving average image with depth 32
        movingAverageImage = np.float32(colourImage)
        #create grey image with depth 8
        greyImage = np.zeros(videoFrameSize, np.uint8)
        
        differenceImage = colourImage
        tempImage = colourImage
        
        
        while True:
            
            _,colourImage = self.capture.read()
            
            # smooth colour image to get rid of false positives
            colourImage = cv2.GaussianBlur(colourImage, (5,5), 0)
            
            cv2.accumulateWeighted(colourImage, movingAverageImage, 0.02)
            
            tempImage = cv2.convertScaleAbs(movingAverageImage)
            
            differenceImage = cv2.absdiff(colourImage, tempImage)
            
            #convert differenceImage to gray scale
            greyImage = cv2.cvtColor(differenceImage, cv2.COLOR_RGB2GRAY)
            
            #convert grayImage to binary black and white
            cv2.threshold(greyImage, 70, 255, cv2.THRESH_BINARY,greyImage)
            
            
            cv2.dilate(greyImage, None, greyImage, None, 18)
            cv2.erode(greyImage, None, greyImage, None, 10)
            
            contour,_ = cv2.findContours(greyImage, cv2.cv.CV_RETR_CCOMP, cv2.cv.CV_CHAIN_APPROX_SIMPLE)
            
            
            if contour:
                # draw a bounding box around the largest contour
                areas = [cv2.contourArea(c) for c in contour]
                max_index = np.argmax(areas)
                cnt = contour[max_index]
                x,y,w,h = cv2.boundingRect(cnt)
                cv2.rectangle(colourImage,(x,y),(x+w,y+h),(0,255,0),2)

            
            cv2.imshow("Camera1",colourImage)

            # Listen for ESC key
            c = cv2.waitKey(7) % 0x100
            if c == 27:
                break
Example #24
    def _thread(cls):
        print 'Gonna try for a new cam'
        with picamera.PiCamera() as camera:
            print "New Camera"

            camera.resolution = (320, 240)
            camera.hflip = True
            camera.vflip = True
            camera.start_preview()
            stream = picamera.array.PiRGBArray(camera, size=camera.resolution)

            avg = None
            for f in camera.capture_continuous(stream, 'bgr', use_video_port=True):

                frame = f.array
                timestamp = datetime.datetime.now()
                text = "Unoccupied"

                frame = imutils.resize(frame, width=500)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                if avg is None:
                    print "starting avg"
                    avg = gray.copy().astype("float")
                    stream.truncate(0)
                    continue

                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

                thresh = cv2.threshold(frameDelta, 5, 255,
                    cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                im, cnts, other = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE)

                for c in cnts:
                    if cv2.contourArea(c) < 1000:
                        continue
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                    text = "Occupied"

                ts = timestamp.strftime("%A %d %B %y %I:%M:%S%p")
                cv2.putText(frame, "Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

                cls.frame = cv2.imencode(".png", frame)[1].tostring()

                stream.seek(0)
                stream.truncate()
                if time.time() - cls.last_access > app.config['TIMEOUT']:
                    break

        print 'Setting thread to None'
        cls.thread = None
Example #25
	def process(self,cur_frame,prev_frames):
		assert(isinstance(prev_frames,list)),"prev_frames is not a list"
		acum_frame = float32(prev_frames[0])
		for frame in prev_frames[1:]:
			accumulateWeighted(float32(frame),acum_frame,self.alpha,None);
		absScaleFrame = convertScaleAbs(acum_frame)
		diff = self.__frame_differencing__(absScaleFrame,cur_frame)
		return diff;
Example #26
	def update(self, image):
		# if the background model is None, initialize it
		if self.bg is None:
			self.bg = image.copy().astype("float")
			return

		# update the background model by accumulating the weighted average
		cv2.accumulateWeighted(image, self.bg, self.accumWeight)
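update is one half of a familiar two-method background-subtractor class; a matching detect sketch in the same style would complete it (the threshold default tVal is an assumption, modeled on the other examples here):

	def detect(self, image, tVal=25):
		# difference the current frame against the accumulated background,
		# then threshold the delta into a binary motion mask
		delta = cv2.absdiff(cv2.convertScaleAbs(self.bg), image)
		thresh = cv2.threshold(delta, tVal, 255, cv2.THRESH_BINARY)[1]
		return thresh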
Example #27
File: Fly.py Project: isk2/Flylab
    def UpdateFlyMean(self, npRoiReg):
        #global globalLock
        #with globalLock:
        #    self.rcForeground = rospy.get_param('tracking/rcForeground', 0.01)
        alphaForeground = 1 - N.exp(-self.dt.to_sec() / self.rcForeground)

        if (self.npfRoiMean is None):
            self.npfRoiMean = N.float32(npRoiReg)
        cv2.accumulateWeighted(N.float32(npRoiReg), self.npfRoiMean, alphaForeground)
Example #28
    def __init__(self, display, cam=0):
        View.__init__(self, display)
        self.c = cv2.VideoCapture(cam)
        self._,self.f = self.c.read()
 
        self.avg1 = np.float32(self.f)
        for c in range(50):
            self._,self.f = self.c.read()
            cv2.accumulateWeighted(self.f,self.avg1,0.01)
Example #29
 def getAverage(self):
   
   for i in range(20): self.ret, self.frame = self.capture.read() # dummy reads to warm up the sensor
   avg1 = np.float32(self.frame) # 32 bit accumulator, seeded after warm-up
   for i in range(self.BGsample):
     ret, frame = self.capture.read()
     cv2.accumulateWeighted(frame,avg1,0.5)
     res = cv2.convertScaleAbs(avg1)
   
   self.average = cv2.cvtColor(res, cv2.COLOR_RGBA2GRAY)
Example #30
def setbg():
    retval, im = cap.read()
    avg1 = np.float32(im)
    print "setting background"
    for i in range(100): 
         retval, im = cap.read()
         cv2.accumulateWeighted(im,avg1, 0.1)
         res1 = cv2.convertScaleAbs(avg1)
         cv2.waitKey(10)
    cv2.imshow("Background",res1)
    return res1
Example #31
 def processImage(self, image, alpha=0.05):
     self.currentFrame = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
     self.previousFrame = np.uint8(self.averageFrame)
     self.result = cv2.absdiff(self.previousFrame, self.currentFrame)
     self.result = cv2.blur(self.result, (5, 5))
     self.result = cv2.morphologyEx(self.result, cv2.MORPH_OPEN, None)
     self.result = cv2.morphologyEx(self.result, cv2.MORPH_CLOSE, None)
     retval, self.result = cv2.threshold(self.result, 10, 255, cv2.THRESH_BINARY)
     # self.averageFrame = cv2.accumulateWeighted(self.currentFrame, self.averageFrame, alpha)
     cv2.accumulateWeighted(self.currentFrame, self.averageFrame, alpha)
     self.nonZero = cv2.countNonZero(self.result)
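On the commented-out assignment above: in OpenCV's Python bindings a function's output array is also returned, so the assignment form and the plain in-place call do the same thing. A quick check:

import cv2
import numpy as np

frame = np.zeros((4, 4), np.uint8)
avg = np.zeros((4, 4), np.float32)
out = cv2.accumulateWeighted(frame, avg, 0.05)
assert np.shares_memory(out, avg)  # the return value is the accumulator itself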
Example #32
# Compute the frame area
area = width * height

# Initialize the average image
ret, frame = webcam.read()
avg = cv2.blur(frame, blur_pixel)
avg_float = np.float32(avg)

# Warm up the average over a few frames
for x in range(10):
  ret, frame = webcam.read()
  if ret == False:
    break
  blur = cv2.blur(frame, blur_pixel)
  cv2.accumulateWeighted(blur, avg_float, 0.10)
  avg = cv2.convertScaleAbs(avg_float)

while(webcam.isOpened()):
  # Read one frame
  ret, frame = webcam.read()

  # Break out at the end of the video
  if ret == False:
    break

  # Blur the frame
  blur = cv2.blur(frame, blur_pixel)

  # Compute the difference between the current frame and the average image
  diff = cv2.absdiff(avg, blur)
Example #33
def run_avg(image, alpha):
    global background

    # compute weighted avg accumulate it and update the background
    cv2.accumulateWeighted(image, background, alpha)
Example #34
    def _thread(cls):
        ind = 0  # initial count in detection Max is nd

        gray = cv2.cvtColor(cls.npframe, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        center = [cls.imsize[0] / 2, cls.imsize[1] / 2]
        scale = 0.6

        # if the average frame is None, initialize it
        if cls.avg is None:
            print " [INFO] starting background model..."
            cls.avg = gray.copy().astype("float")

        # accumulate the weighted average between the current frame and the
        # previous frames, then compute the difference between the current frame and the running average
        cv2.accumulateWeighted(gray, cls.avg, 0.50)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(cls.avg))

        #threshold the delta image, dilate the threshold image to fill in holes
        # then find contours on the threshold image
        thresh = cv2.threshold(frameDelta, cls.conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)

        #(_,cnts,_) = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL,
        #	cv2.CHAIN_APPROX_SIMPLE)

        # Loop over the Contours
        #text = "NA"
        #for c in cnts:
        #	# if the contour is too small, ignore it
        #	if cv2.contourArea(c) > cls.conf["min_area"]:
        #		# compute the bounding box for the contour, draw it on the frame, and update the
        #		(x, y, w, h) = cv2.boundingRect(c)
        #		cv2.rectangle(gray,(x,y),(x+w,y+h),(0,255,0),1)
        #		text = "MOTION"

        # Find if motion on Left or RIght of face
        LR_Motion = Left_Right(cls.imsize, cls.ffaceRects, cls.pfaceRects,
                               thresh)

        if LR_Motion == 0:
            LED_LR().Turn_LeftOn()
        elif LR_Motion == 1:
            LED_LR().Turn_RightOn()
        elif LR_Motion == 2:
            LED_LR().Turn_BothOn()
        elif LR_Motion == 3:
            LED_LR().Turn_Off()

        # Draw the Text on Frame
        # cv2.putText(cls.npframe, "Motion Status: {}".format(text),(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),2)
        #showMydetections(cls.ffaceRects,cls.pfaceRects,cls.npframe)
        #cv2.imshow("Motion Det", gray)
        #cv2.waitKey(50)

        #cv2.imshow("Motion thesh",thresh)
        #cv2.waitKey(50)

        cls.thread = None
Example #35
def motionDetection():
    motionCounter = 0
    running = True
    lastUploaded = datetime.datetime.now()
    avg = None

    camera = cv2.VideoCapture(0)
    
    while running:
        
        _, img = camera.read()
        frame = img
        timestamp = datetime.datetime.now()

        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the average frame is None, initialize it
        if avg is None:
            avg = gray.copy().astype("float")
            continue

        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        motionDetected = False

        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            motionDetected = True

        # check to see if the room is occupied
        if motionDetected:
            # check to see if enough time has passed between uploads
            if (timestamp - lastUploaded).seconds >= 3.0:
                # increment the motion counter
                motionCounter += 1

                # check to see if the number of frames with consistent motion is
                # high enough
                if motionCounter >= 8:
                    motionDetectedEvent()
                    lastUploaded = timestamp
                    motionCounter = 0

        # otherwise, the room is not occupied
        else:
            motionCounter = 0

Example #36
	# resize the frame, convert it to grayscale, and blur it
	frame = imutils.resize(frame, width=500)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (21, 21), 0)

	# if the average frame is None, initialize it
	if avg is None:
		print "[INFO] starting background model..."
		avg = gray.copy().astype("float")
		continue

	# accumulate the weighted average between the current frame and
	# previous frames, then compute the difference between the current
	# frame and running average
	cv2.accumulateWeighted(gray, avg, 0.5)
	frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

	# threshold the delta image, dilate the thresholded image to fill
	# in holes, then find contours on thresholded image
	thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
		cv2.THRESH_BINARY)[1]
	thresh = cv2.dilate(thresh, None, iterations=2)
	(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)

	# loop over the contours
	for c in cnts:
		# if the contour is too small, ignore it
		if cv2.contourArea(c) < conf["min_area"]:
			continue
Example #37
def show_video(argv):
    """
	Main Function for all video processing.  Defaults for this file are adjusted here.
	"""
    tracker = blobs.BlobTracker()

    #  Default Options for Running in Demo Mode
    video = "demo.avi"
    background = "demo_0.png"
    output = "blob"
    method = "acc"

    file_name_base = ""

    try:
        opts, args = getopt.getopt(argv, "v:b:o:m:")
    except getopt.GetoptError:
        print "Getopt Error"
        exit(2)

    for opt, arg in opts:
        if opt == "-v":
            video = arg
        elif opt == "-b":
            background = arg
        elif opt == "-o":
            output = arg
        elif opt == "-m":
            method = arg

    masks = pt_config.masks

    print video, " ", background, " ", output

    file_name_base = "results/" + video.split("/")[-1].split(
        ".")[-2] + "_" + method

    c = cv2.VideoCapture(video)

    #c.set(1, 26)
    _, f = c.read()
    #_,f = c.read()

    #cv2.imshow("back", f)

    if method == "ext":
        #  Use a predetermined background image
        c_zero = cv2.imread(background)
        #c_zero = f
    else:
        #  Use the growing accumulated average
        c_zero = np.float32(f)

    c.set(0, 000.0)
    width = int(c.get(3))
    height = int(c.get(4))
    fps = c.get(5)
    fourcc = c.get(6)
    frames = c.get(7)

    #  Print out some initial information about the video to be processed.
    print fourcc, fps, width, height, frames

    # Celtic Connection Erosion/Dilation
    # for_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(20,20))
    # for_di = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(20,40))

    # MOG Erosion/Dilation
    # for_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    # for_di = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,20))

    if method == "mog":
        #  Setup MOG element for generated background subtractions
        # bgs_mog = cv2.BackgroundSubtractorMOG(4,3,.99,5)
        bgs_mog = cv2.BackgroundSubtractorMOG2()

        # MOG Erosion/Dilation
        for_er = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (pt_config.mog_er_w, pt_config.mog_er_h))
        for_di = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (pt_config.mog_di_w, pt_config.mog_di_h))

    else:
        # ACC or EXT Erosion and Dilation
        for_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                           (pt_config.er_w, pt_config.er_h))
        for_di = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                           (pt_config.di_w, pt_config.di_h))

    orange = np.dstack(((np.zeros((height, width)), np.ones(
        (height, width)) * 128, np.ones((height, width)) * 255)))
    ones = np.ones((height, width, 3))

    trails = np.zeros((height, width, 3)).astype(np.uint8)
    start_t = time.clock()
    current_frame = 0

    while 1:

        #s = raw_input()

        #  Get the next frame of the video
        _, f = c.read()

        #  Do some calculations to determine and print out progress.
        current_frame = c.get(1)
        t = time.clock() - start_t
        remainder = 1.0 - current_frame / float(frames)
        current = current_frame / float(frames)
        remaining = int(((t / current) * remainder) / 60.0)

        if int(current_frame) % 25 == 0:
            print "Percentage: ", int(
                (current_frame / frames) * 100), " Traces: ", len(
                    tracker.traces), "Time left (m): ", remaining

        if method == "mog":
            grey_image = bgs_mog.apply(f)
            #  Turn this into a black and white image (white is movement)
            thresh, im_bw = cv2.threshold(grey_image, 225, 255,
                                          cv2.THRESH_BINARY)

        #  If using the accumulated image (basic motion detection) infer the background image for this frame
        else:

            if method == "acc":
                cv2.accumulateWeighted(f, c_zero, 0.01)

            #im_zero = cv2.convertScaleAbs(c_zero)
            im_zero = c_zero.astype(np.uint8)

            cv2.imshow("Im_zero", im_zero)

            #  Get the first diff image - this is raw motion
            d1 = cv2.absdiff(f, im_zero)

            #  Convert this to greyscale
            gray_image = cv2.cvtColor(d1, cv2.COLOR_BGR2GRAY)

            #  ksize aperture linear size, must be odd
            #gray_smooth = cv2.medianBlur(gray_image, 5)

            #  Turn this into a black and white image (white is movement)
            thresh, im_bw = cv2.threshold(gray_image, 15, 255,
                                          cv2.THRESH_BINARY)

        # TODO Add Booleans to show or hide processing images
        cv2.imshow("Threshholded Image", im_bw)
        #cv2.imshow("Background", im_zero)
        #cv2.imshow('Background Subtracted', d1)
        #cv2.imshow("Thresholded", im_bw)

        #  Erode and Dilate Image to make blobs clearer.  Adjust erosion and dilation values in pt_config
        im_er = cv2.erode(im_bw, for_er)
        im_dl = cv2.dilate(im_er, for_di)

        # mask out elliptical regions
        for mask in masks:
            cv2.ellipse(im_dl, (mask[0], mask[1]), (mask[2], mask[3]), 0, 0,
                        360, (0, 0, 0), -1)

        cv2.imshow("Eroded/Dilated Image", im_dl)

        contours, hierarchy = cv2.findContours(im_dl, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

        my_blobs = []
        for cnt in contours:
            try:
                x, y, w, h = cv2.boundingRect(cnt)

                #print "Rect: " , w , " " , h
                cv2.rectangle(f, (x, y), (x + w, y + h), (255, 0, 0), 2)

                moments = cv2.moments(cnt)
                x = int(moments['m10'] / moments['m00'])
                y = int(moments['m01'] / moments['m00'])

                my_blobs.append((x, y))
            except:
                print "Bad Rect"

        #print len(my_blobs)

        if len(my_blobs) > 0:
            tracker.track_blobs(my_blobs, [0, 0, width, height], current_frame)

            for v in tracker.virtual_blobs:
                size = 5
                if v.got_updated:
                    size = 10
                cv2.rectangle(f, (int(v.x), int(v.y)),
                              (int(v.x + size), int(v.y + size)), v.color,
                              size)

        if pt_config.draw_video:

            #total_trails = np.zeros((height,width,3), np.uint8)
            #alpha_trails = np.zeros((height,width,3), np.float64)
            #inv_alpha = np.zeros((height, width, 3), np.float64)

            for id in tracker.traces:

                ox = None
                oy = None

                #this_trail = np.zeros((height,width,3), np.uint8)
                #blank = np.zeros((height,width,3), np.uint8)
                #alpha = np.zeros((height, width, 3), np.uint8)

                if len(tracker.traces[id]) > 2:
                    for pos in tracker.traces[id][-3:]:

                        x = int(pos[0])
                        y = int(pos[1])

                        if ox and oy:
                            sx = int(0.8 * ox + 0.2 * x)
                            sy = int(0.8 * oy + 0.2 * y)

                            #  Colours are BGRA
                            cv2.line(trails, (sx, sy), (ox, oy), (0, 128, 255),
                                     1)
                            #cv2.line(alpha, (sx, sy), (ox, oy), (255,255,255), 2)

                            oy = sy
                            ox = sx
                        else:
                            ox, oy = x, y

                #alpha_trails = alpha_trails + 0.001*alpha

            # f = (1-alpha)*f + alpha*orange
            #inv_alpha = cv2.subtract(ones, alpha_trails)
            #cv2.multiply(inv_alpha, f, f, 1, cv2.CV_8UC3)
            #alpha_trails = cv2.multiply(alpha_trails, orange, alpha_trails)
            #f = cv2.add(f, alpha_trails, f, None, cv2.CV_8UC3)

            cv2.add(f, trails, f)
            cv2.drawContours(f, contours, -1, (0, 255, 0), 1)

            #  draw frame
            cv2.rectangle(f, (pt_config.FRAME_WIDTH, pt_config.FRAME_WIDTH),
                          (width - pt_config.FRAME_WIDTH,
                           height - pt_config.FRAME_WIDTH), (0, 0, 0), 2)

            #  Current output	(show the current active masks)
            for mask in masks:
                cv2.ellipse(f, (mask[0], mask[1]), (mask[2], mask[3]), 0, 0,
                            360, (0, 0, 255), -1)

            if pt_config.show_grid:
                for i in range(width / pt_config.grid_spacing):
                    cv2.line(f, (i * pt_config.grid_spacing, 0),
                             (i * pt_config.grid_spacing, height), (0, 0, 0))

                for j in range(height / pt_config.grid_spacing):
                    cv2.line(f, (0, j * pt_config.grid_spacing),
                             (width, j * pt_config.grid_spacing), (0, 0, 0))

            cv2.imshow('output', f)
            cv2.waitKey(delay=1)

        if frames == current_frame:

            #  Save the pic
            cv2.imwrite(file_name_base + "_last_frame.png", f)

            #  TODO
            #  Write a log of the values used to generate these traces

            write_traces(tracker.traces, file_name_base)
            write_log(method, file_name_base)

            #  Save tracker traces

            break

        #  Kill switch
        #if current_frame%10==0 and pt_config.draw_video:

        k = 0
        k = cv2.waitKey(1)

        #print "K is: " , k

        if k == 27:  # escape to close
            print "We're QUITING!"
            break

    cv2.destroyAllWindows()
    c.release()
Example #38
    def frame_queue_input(self, ns, frame_queues):
        no_of_frame_queues = len(frame_queues)
        last_frame_id_in_queues = [0.0] * no_of_frame_queues
        avg_frame_gap_in_queues = [0.0] * no_of_frame_queues
        logging.info('frame_queue_input - process name: '+ str(multiprocessing.current_process().name) + str(no_of_frame_queues))
        video_capture = cv2.VideoCapture(ns.conf['CAM_DEV_ID'])
        logging.info("frame_queue_input - Frame default resolution: (" + str(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)) + "; " + str(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) + ")")
        video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, ns.conf['CAM_WIDTH'])
        video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, ns.conf['CAM_HEIGHT'])
        frame_w = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_h = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        logging.info("frame_queue_input - Frame resolution set to: (" + str(frame_w) + "; " + str(frame_h) + ")")
        frame_no = 0
        max_frame_no = no_of_frame_queues * 300
        frame_discarded = 0
        fps_count_start_time = time.time()
        show_frames = ns.conf['show_frames']
        show_detection_box = ns.conf['show_detection_box']

        avg = None
        delta_thresh = 5
        # min_area_start is the starting point of the threshold; it auto-adjusts between min_area and ns.contourArea
        min_area_start = 100
        # frame_pre_afd_shrink shrinks frames to a fixed width for detection to keep performance predictable
        frame_pre_afd_shrink = frame_w / 180
        frame_no_motion_start = 0
        motion_trigger = 2
        min_area = min_area_start
        laplacian_frame_id = 0.0
        laplacian_switch_frame_gap_max = 0.4
        laplacian_values = []
        laplacian_value = 0.0

        logging.info("frame_queue_input - sleep for few seconds, wait for camera...")
        time.sleep(2)
        while True:
            ret = False
            contourArea = 9999999
            try:
                ret, frame = video_capture.read()
            except Exception as e:
                logging.error('frame_queue_input error....')
                pass

            if ret:

                frame_id = time.time()
                frame_no += 1

                q_idx = frame_no % no_of_frame_queues
                # if not frame_queues[q_idx].full():

                _start_detect = time.time()
                motion_locations = []
                motion_detected = False

                frame_pre_afd = cv2.resize(frame, (0, 0), fx=(1.0 / frame_pre_afd_shrink), fy=(1.0 / frame_pre_afd_shrink))
                gray = cv2.cvtColor(frame_pre_afd, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)
                laplacian_value = cv2.Laplacian(frame_pre_afd, cv2.CV_64F).var()

                if avg is None:
                    avg = gray.copy().astype("float")
                else:
                    cv2.accumulateWeighted(gray, avg, 0.5)
                    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
                    thresh = cv2.threshold(frameDelta, delta_thresh, 255, cv2.THRESH_BINARY)[1]
                    thresh = cv2.dilate(thresh, None, iterations=2)
                    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    # cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                    if len(cnts) > 1:
                        cnts = cnts[1]
                        if len(cnts) > 0:
                            for c in cnts:
                                ca = cv2.contourArea(c)
                                if ca > min_area:
                                    motion_detected = True
                                    contourArea = min(contourArea, ca)
                                    (x, y, w, h) = cv2.boundingRect(c)
                                    motion_locations.append((x, y, w, h))
                            if motion_detected:
                                frame_no_motion_start = frame_no
                            elif min_area > min_area_start:
                                min_area = min_area - 5

                frame_obj = {'id': frame_id, 'frame': frame,'contourArea': contourArea, 'laplacian_value': laplacian_value}

                push_frame_to_queue = False

                if frame_id - laplacian_frame_id >= laplacian_switch_frame_gap_max:
                    laplacian_frame_id = frame_id
                    laplacian_values = []
                    ns.laplacianSwitch = 0.0

                if motion_detected or (frame_no - frame_no_motion_start < motion_trigger):
                    push_frame_to_queue = True
                    if frame_id - laplacian_frame_id < laplacian_switch_frame_gap_max:
                        laplacian_values.append(laplacian_value)
                        laplacian_frame_id = frame_id
                        laplacian_values_len = len(laplacian_values)
                        if laplacian_values_len > 6:
                            laplacianSwitch = sorted(laplacian_values)[
                                int(laplacian_values_len * 2 / 3)]
                            ns.laplacianSwitch = laplacianSwitch
                            logging.debug("frame_queue_input - laplacianSwitch condition met " + str(laplacianSwitch))
                            if laplacian_values_len > 200:
                                laplacian_values = laplacian_values[-20:]
                        else:
                            ns.laplacianSwitch = 0.0
                    if ns.laplacianSwitch > 0 and laplacian_value < ns.laplacianSwitch:
                        push_frame_to_queue = False
                        logging.debug("frame_queue_input - laplacianSwitch stopped queuing this frame " + str(frame_id))

                if push_frame_to_queue:
                    logging.debug("frame_queue_input -  motion_detected or motion_triggered: " + str(motion_detected) + " " + str(frame_no - frame_no_motion_start < motion_trigger) + " "+ str(frame_id) + " put frame_obj to frame_queue: " + str(q_idx) + " id: " +  str(frame_id) +  "deplayed: " + str(time.time() - frame_id))
                    try:
                        frame_queues[q_idx].put(frame_obj, False)
                        last_frame_id_in_queues[q_idx] = frame_id
                    except Queue.Full:
                        frame_discarded += 1
                        gap_since_last_push = frame_id - \
                            last_frame_id_in_queues[q_idx]
                        avg_frame_gap_in_queues[q_idx] = (
                            gap_since_last_push + avg_frame_gap_in_queues[q_idx]) / 2.0
                        if (frame_no > max_frame_no):
                            frame_processed = frame_no - frame_discarded
                            frame_process_rate = frame_processed * 1.0 / frame_no
                            fps_count_duration = time.time() - fps_count_start_time
                            fps_raw = frame_no / fps_count_duration
                            fps_processed = frame_processed / fps_count_duration
                            frame_discarded = 0
                            frame_no = q_idx
                            frame_no_motion_start = frame_no
                            fps_count_start_time = time.time()
                            logging.info("frame_queue_input - avg_frame_gap_in_queues:"+ str(avg_frame_gap_in_queues)+ "fps_raw:"+ str(fps_raw)+ "fps_processed:" + str(fps_processed))
                        ns_contourArea = ns.contourArea
                        if min_area < ns_contourArea * 1.5:
                            adjust_rate = min(
                                max((ns_contourArea - min_area) / 3, 1), 100)
                            min_area = min(min_area + adjust_rate,
                                           ns_contourArea * 1.5)
                            if ns_contourArea > 999999:
                                min_area = min(min_area, min_area_start * 6)
                        logging.debug("frame_queue_input - frame_queues full, skipping:"+ str(q_idx) + "frame_id:" + str(frame_id) + "min_area: " + str(min_area) + " ns_contourArea: " + str(ns_contourArea))

                if show_frames and ret:
                    if show_detection_box:
                        fls = ns.face_matched['face_locations']
                        names = ns.face_matched['names']
                        frame_shape = frame.shape
                        for loc, name in zip(fls, names):
                            (top, right, bottom, left) = service.scale_location_rate(frame_shape, loc, frame_shrink_rate=ns.conf['frame_shrink_rate'], rate=1.2)
                            cv2.rectangle(frame, (left, top),(right, bottom), (0, 0, 255), 2)
                            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                            font = cv2.FONT_HERSHEY_DUPLEX
                            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
                    cv2.imshow('smartcam', frame)

                if show_frames and cv2.waitKey(1) & 0xFF == ord('q'):
                    logging.info('Key Q pressed; there is an OpenCV bug closing the window on macOS')
                    show_frames = False
                    cv2.destroyAllWindows()
Example #39
    def run_avg(self, image, accumWeight):

        if self.bg is None:
            self.bg = image.copy().astype("float")

        cv2.accumulateWeighted(image, self.bg, accumWeight)
Example #40
def show_cam():
    # Screen capture code provided by Hunter Lloyd https://ecat.montana.edu/d2l/le/content/524639/viewContent/3826523/View
    capture = cv.VideoCapture(0)
    status, image = capture.read()  # Reads in the capture
    running_average = np.float32(image)
    while True:
        status, image = capture.read()  # Reads in the capture
        hsv = cv.cvtColor(
            image, cv.COLOR_BGR2HSV)  # Converts the image from BGR to HSV
        cv.namedWindow("HSV")  # Creatues window for HSV conversion
        cv.moveWindow("HSV", 643, 20)
        cv.imshow("HSV", hsv)  # Shows the image

        cv.namedWindow('Grey Blank Image')
        grey_scale = cv.cvtColor(
            image, cv.COLOR_BGR2GRAY)  # Converts image to grey scale
        cv.imshow('Grey Blank Image', grey_scale)  # Shows grey scale

        cv.namedWindow('Blurred Image')
        # Blurred code source from https://docs.opencv.org/3.1.0/d4/d13/tutorial_py_filtering.html
        blurred = cv.GaussianBlur(
            image, (7, 7), 0)  # Blurs the image with Gaussian Blur function
        cv.imshow('Blurred Image', blurred)
        cv.namedWindow('Running Average')

        cv.accumulateWeighted(
            blurred, running_average, .320
        )  # Updates the weighted running average of the blurred frames
        cv.imshow('Running Average', running_average / 255.0)  # imshow displays float images scaled as [0, 1]

        cv.namedWindow('Convert Scale to Abs')
        eight_bit_image = cv.convertScaleAbs(
            running_average
        )  # converts the running average to an eight bit image
        cv.imshow('Convert Scale to Abs', eight_bit_image)

        cv.namedWindow('Abs Diff')
        image_difference = cv.absdiff(
            eight_bit_image, image
        )  # Gets the absolute difference between the eight bit image and the original image
        cv.imshow('Abs Diff', image_difference)

        image_difference = cv.cvtColor(
            image_difference, cv.COLOR_BGR2GRAY
        )  # converts the absolute difference image to grey scale
        cv.namedWindow('Grey Scale Abs Diff')
        cv.imshow('Grey Scale Abs Diff', image_difference)

        return_val, image_threshold = cv.threshold(
            image_difference, 25, 255, cv.THRESH_BINARY
        )  # Thresholds the image at a value of 25; binary output suits the contour step below
        threshold_blurred = cv.GaussianBlur(image_threshold, (7, 7),
                                            0)  # Blurs the Threshold image
        return_val, image_threshold = cv.threshold(
            threshold_blurred, 50, 255, cv.THRESH_BINARY
        )  # Thresholds the blurred image again at a value of 50
        cv.namedWindow("Threshold")
        cv.imshow("Threshold", image_threshold)

        # Parts of Find contours code sourced from https://docs.opencv.org/3.1.0/d4/d73/tutorial_py_contours_begin.html
        contour_image = image_threshold  # alias, not a copy: drawing below also marks image_threshold
        contours, hierarchy = cv.findContours(
            image_threshold.copy(),
            cv.RETR_TREE,
            cv.CHAIN_APPROX_SIMPLE,
        )  # RETR_EXTERNAL
        # Draw counters helped from user Mahm00d source: https://stackoverflow.com/questions/34961349/draw-contours-in-opencv-around-recognized-polygon
        cv.drawContours(contour_image, contours, -1, 255,
                        3)  # Draws the contours (scalar colour, since the image is single-channel)
        # if contours:
        #     print(contours)
        for c in contours:  # iterate through to find opposite corners for rectangle.
            if cv.contourArea(c) < 10000:
                continue
            (x, y, w, h) = cv.boundingRect(c)
            cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255),
                         3)  # draws rectangle within thresholds

        cv.namedWindow(
            "Unfiltered video")  # creates unfiltered video from webcam
        cv.moveWindow("Unfiltered video", 0, 20)
        cv.imshow("Unfiltered video", image)  # sHows the unfiltered video

        # show_unfiltered(image)
        # show_hsv(image)

        k = cv.waitKey(1)
        if k == 27:
            break
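show_cam exits its loop on ESC (key 27) but never releases the camera or closes its windows; the usual cleanup at the end of the function, using the same cv alias, would be:

    capture.release()
    cv.destroyAllWindows()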
    def run(self):
        # Capture first frame to get size
        #self.capture.open()
        _, frame = self.capture.read()

        width = self.capture.get(3)
        height = self.capture.get(4)
        surface = width * height  # Surface area of the image
        cursurface = 0  # Holds the accumulated area of the changed contours

        grey_image = np.zeros([int(height), int(width), 1], np.uint8)
        moving_average = np.zeros([int(height), int(width), 3], np.float32)
        difference = None

        while True:
            _, color_image = self.capture.read()
            color_copy = color_image.copy()
            color_image = cv.GaussianBlur(color_image, (21, 21), 0)

            if difference is None: #For the first time put values in difference, temp and moving_average
                difference = color_image.copy()
                temp = color_image.copy()
                moving_average = np.float32(color_image)
            else:
                cv.accumulateWeighted(color_image, moving_average, 0.020, None) #Compute the average

            # Convert the moving average back to 8 bits so it can be diffed against the frame.
            temp = cv.convertScaleAbs(moving_average)

            # Difference the current frame against the moving average.
            cv.absdiff(color_image, temp, difference)

            #Convert the image so that it can be thresholded
            grey_image = cv.cvtColor(difference, cv.COLOR_BGR2GRAY)
            _, grey_image = cv.threshold(grey_image, 70, 255, cv.THRESH_BINARY)
            kernel = np.ones((5, 5), np.uint8)

            # Dilate to merge changed pixels into object blobs, then erode the noise back down.
            grey_image = cv.dilate(grey_image, kernel, iterations=18)
            grey_image = cv.erode(grey_image, kernel, iterations=10)

            # Find contours

            contours = cv.findContours(grey_image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
            contours = imutils.grab_contours(contours)  # version-safe unpacking of the return value
            backcontours = contours #Save contours

            for contour in contours: #For all contours compute the area and get center point
                cursurface += cv.contourArea(contour)
                M = cv.moments(contour)
                if M["m00"] != 0:
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                else:
                    cX, cY = 0, 0

                # draw contour and midpoint circle
                cv.drawContours(color_copy, [contour], -1, (0, 255, 0), 2)
                cv.circle(color_copy, (cX, cY), 3, (255, 0, 0), -1)

            cv.imshow("Target", color_copy)

            # Listen for ESC or ENTER key
            c = cv.waitKey(7) % 0x100
            if c == 27 or c == 10:
                break
def dark_or_light_objects_only(self, color='dark'):
    if self.params['circular_mask_x'] != 'none':
        if self.image_mask is None:
            self.image_mask = np.zeros_like(self.imgScaled)
            cv2.circle(self.image_mask, (self.params['circular_mask_x'],
                                         self.params['circular_mask_y']),
                       int(self.params['circular_mask_r']), [1, 1, 1], -1)
        self.imgScaled = self.image_mask * self.imgScaled

    # If there is no background image, grab one, and move on to the next frame
    if self.backgroundImage is None:
        reset_background(self)
        return
    if self.reset_background_flag:
        reset_background(self)
        self.reset_background_flag = False
        return
    if self.add_image_to_background_flag:
        add_image_to_background(self, color)
        self.add_image_to_background_flag = False
        return

    if self.params['backgroundupdate'] != 0:
        cv2.accumulateWeighted(
            np.float32(self.imgScaled), self.backgroundImage,
            self.params['backgroundupdate']
        )  # this needs to be here, otherwise there's an accumulation of something in the background
    if self.params['medianbgupdateinterval'] != 0:
        t = rospy.Time.now().secs
        if not hasattr(self, 'medianbgimages'):
            self.medianbgimages = [self.imgScaled]
            self.medianbgimages_times = [t]
        if t - self.medianbgimages_times[-1] > self.params[
                'medianbgupdateinterval']:
            self.medianbgimages.append(self.imgScaled)
            self.medianbgimages_times.append(t)
        if len(self.medianbgimages) > 3:
            self.backgroundImage = copy.copy(
                np.float32(np.median(self.medianbgimages, axis=0)))
            self.medianbgimages.pop(0)
            self.medianbgimages_times.pop(0)
            print('reset background with median image')

    try:
        kernel = self.kernel
    except AttributeError:
        kern_d = self.params['morph_open_kernel_size']
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kern_d, kern_d))
        self.kernel = kernel

    if color == 'dark':
        self.threshed = cv2.compare(np.float32(
            self.imgScaled), self.backgroundImage - self.params['threshold'],
                                    cv2.CMP_LT)  # CMP_LT is less than
    elif color == 'light':
        self.threshed = cv2.compare(np.float32(
            self.imgScaled), self.backgroundImage + self.params['threshold'],
                                    cv2.CMP_GT)  # CMP_GT is greater than
    elif color == 'darkorlight':
        #absdiff = cv2.absdiff(np.float32(self.imgScaled), self.backgroundImage)
        #retval, self.threshed = cv2.threshold(absdiff, self.params['threshold'], 255, 0)
        #self.threshed = np.uint8(self.threshed)
        dark = cv2.compare(np.float32(self.imgScaled),
                           self.backgroundImage - self.params['threshold'],
                           cv2.CMP_LT)  # CMP_LT is less than
        light = cv2.compare(np.float32(self.imgScaled),
                            self.backgroundImage + self.params['threshold'],
                            cv2.CMP_GT)  # CMP_GT is greater than
        self.threshed = dark + light

    convert_to_gray_if_necessary(self)

    # noise removal
    self.threshed = cv2.morphologyEx(self.threshed,
                                     cv2.MORPH_OPEN,
                                     kernel,
                                     iterations=1)

    # sure background area
    #sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    #dist_transform = cv2.distanceTransform(opening,cv.CV_DIST_L2,3)
    #dist_transform = dist_transform / (np.max(dist_transform)) * 255
    #ret, sure_fg = cv2.threshold(dist_transform,0.2*dist_transform.max(),255,0)

    # Finding unknown region
    #sure_fg = np.uint8(sure_fg)

    #self.threshed = sure_fg
    erode_and_dialate(self)

    # publish the processed image
    c = cv2.cvtColor(np.uint8(self.threshed), cv2.COLOR_GRAY2BGR)
    # commented for now, because publishing unthresholded difference

    if OPENCV_VERSION == 2:  # cv_bridge is not compatible with OpenCV 3, at least at this time
        img = self.cvbridge.cv2_to_imgmsg(
            c, 'bgr8')  # might need to change to bgr for color cameras
        self.pubProcessedImage.publish(img)

    extract_and_publish_contours(self)
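The cv2.compare calls above do the thresholding without an explicit absdiff: each output pixel is set to 255 where the per-pixel comparison holds. A self-contained sketch of the same dark/light idea (the function name and the bitwise_or union are illustrative; the original sums the two masks, which is equivalent here since a pixel cannot be both darker and lighter than the background at once):

import cv2
import numpy as np

def compare_threshold(img, background, threshold, color='dark'):
    # background is a float32 running-average model; img is the current frame.
    img = np.float32(img)
    if color == 'dark':
        return cv2.compare(img, background - threshold, cv2.CMP_LT)
    if color == 'light':
        return cv2.compare(img, background + threshold, cv2.CMP_GT)
    # 'darkorlight': union of the two masks
    dark = cv2.compare(img, background - threshold, cv2.CMP_LT)
    light = cv2.compare(img, background + threshold, cv2.CMP_GT)
    return cv2.bitwise_or(dark, light)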
Example #43
0
def loop(args, camera, hat, statsd):
    avg = None
    raw_capture = PiRGBArray(camera, size=args.resolution)
    log.info("Starting capture")
    s3 = None
    if args.enable_s3:
        s3 = boto3.client('s3')

    for f in camera.capture_continuous(raw_capture,
                                       format="bgr",
                                       use_video_port=True):
        frame = f.array

        # resize, grayscale & blur out noise
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the average frame is None, initialize it
        if avg is None:
            log.info("Initialising average frame")
            avg = gray.copy().astype("float")
            raw_capture.truncate(0)
            continue

        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frame_delta, args.delta_threshold, 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (contours, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        motion = False
        for c in contours:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < args.min_area:
                continue

            motion = True
            log.info("Motion detected")

            # draw the text and timestamp on the frame
            if args.enable_annotate:
                frame = annotate_frame(frame, c)

            if args.enable_sensehat:
                hat.show_letter('X', back_colour=[255, 20, 50])

            if args.enable_statsd:
                statsd.increment('camera.motion_detected')

        if motion:
            img_name = datetime.datetime.utcnow().strftime(
                '%Y-%m-%d_%H_%M_%S.%f') + '.jpg'
            img_path = '{}/{}'.format(args.image_path, img_name)
            cv2.imwrite(img_path, frame)

            # todo enqueue upload so it doesn't block main loop
            if args.enable_s3:
                with open(img_path, 'rb') as data:
                    log.debug('Uploading to s3://{}/{}{}...'.format(
                        args.s3_bucket, args.s3_prefix, img_name))
                    s3.upload_fileobj(data, args.s3_bucket,
                                      args.s3_prefix + img_name)
                    log.debug('Uploaded')

        raw_capture.truncate(0)
        motion = False
        if args.enable_sensehat:
            hat.clear()
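Stripped of the Pi camera, S3 and Sense HAT plumbing, the detection core of this loop is the standard running-average pattern; a distilled sketch for a plain webcam (the threshold and area values are illustrative):

import cv2

cap = cv2.VideoCapture(0)
avg = None
while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if avg is None:
        avg = gray.copy().astype("float")    # seed the background model
        continue
    cv2.accumulateWeighted(gray, avg, 0.5)   # update the running average
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    if cv2.countNonZero(thresh) > 500:       # crude motion test
        print("motion")
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()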
Example #44
0
#input camera taken as default webcam
camera = cv2.VideoCapture(0)
_, frame = camera.read()
fcount = 0
count_defects = 0
avg2 = np.float32(frame)
while True:
    # grabbing the current frame
    (grabbed, frame) = camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11, 11), 0)
    temframe = frame
    check = 0
    fcount += 1  # advance the frame counter so the background calibration below eventually ends
    if (fcount < 500):
        cv2.accumulateWeighted(temframe, avg2, 0.005)
        backmask = cv2.convertScaleAbs(avg2)
        grayback = cv2.cvtColor(backmask, cv2.COLOR_BGR2GRAY)
        grayback = cv2.GaussianBlur(grayback, (7, 7), 0)

        grayback = cv2.threshold(grayback, 125, 255, cv2.THRESH_BINARY)[1]
        grayback = cv2.bitwise_not(grayback)
    gray = cv2.threshold(gray, 125, 255, cv2.THRESH_BINARY)[1]
    gray = cv2.bitwise_not(gray)

    backsub = cv2.bitwise_xor(gray, grayback)

    backsub = cv2.dilate(backsub, None, iterations=2)

    cv2.imshow("mask", backsub)
    temframe = cv2.bitwise_and(temframe, temframe, mask=backsub)
Example #45
0
def main():
    global mouse_info
    display = Display({
        'original': {},
        'gray': {
            'show': False
        },
        'background': {},
        'processed': {},
        'mask': {
            'callback': handle_click
        },
        'histogram': {},
    })

    path = 'videos/2017_11_29_hot_hand_mike/pinball_field_video.avi'
    video = pinball_types.PinballVideo(common.get_all_frames_from_video(path),
                                       all_keypoints=None)

    while True:
        for frame in video.frames:  #[33:48+1]:
            print(frame.ix)
            display.Clear()
            display.Add('original', frame.img)
            display.Add('gray', frame.img_gray)

            background = np.zeros_like(frame.img, dtype=np.float32)
            alpha = 0.9
            for ix in range(max(frame.ix - 5, 0), max(frame.ix - 1, 0)):
                cv2.accumulateWeighted(video.frames[ix].img, background, alpha)
                print(np.min(background), np.max(background))
            background = cv2.convertScaleAbs(background)
            print(np.min(background), np.max(background))
            display.Add('background', background)

            processed = cv2.absdiff(frame.img, background)
            print(np.min(processed), np.max(processed),
                  np.sum(frame.img - background))
            display.Add('processed', processed)

            mask = np.uint8(255) * np.any(processed > 25, axis=2)
            print(mask.dtype, np.sum(mask))
            display.Add('mask', mask)

            while True:
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    cv2.destroyAllWindows()
                    return
                elif key == ord('n'):
                    break
                elif mouse_info:
                    window, [start_x, start_y], [end_x, end_y] = mouse_info
                    print("Draw a histogram for the box! ", mouse_info)
                    mouse_info = None

                    # Apply the mask to the original image.
                    masked = cv2.bitwise_and(frame.img, frame.img, mask=mask)
                    # Clip down to just the ROI (array rows are y, columns are x).
                    masked = masked[start_y:end_y, start_x:end_x, :]

                    plt.figure()
                    plt.title("'Flattened' Color Histogram")
                    plt.xlabel("Bins")
                    plt.ylabel("# of Pixels")
                    features = []
                    for channel, color in zip(range(3), "bgr"):
                        # Take a histogram of the ROI of the masked image.
                        hist = cv2.calcHist([masked[:, :, channel]], [0], None,
                                            [256], [0, 256])  # upper bound of the range is exclusive
                        # Plot it.
                        print("Plotting histogram for channel ", channel)
                        plt.plot(hist, color=color)
                        plt.xlim([0, 255])
                    plt.show()

    cv2.destroyAllWindows()
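A recurring detail across these examples: cv2.accumulateWeighted requires the accumulator to be a floating-point image with the same size and channel count as the input, which is why every snippet seeds it with np.float32(frame) or frame.copy().astype("float"). A minimal demonstration:

import cv2
import numpy as np

frame = np.full((4, 4, 3), 100, np.uint8)
acc = np.float32(frame)                  # the accumulator must be float32 or float64
cv2.accumulateWeighted(frame, acc, 0.5)  # acc = (1 - 0.5) * acc + 0.5 * frame
# Passing a uint8 accumulator instead raises cv2.error.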
Example #46
0
        gray_X = cv2.cvtColor(frame_X, cv2.COLOR_BGR2GRAY)
        gray_Y = cv2.cvtColor(frame_Y, cv2.COLOR_BGR2GRAY)

        gray_X = cv2.GaussianBlur(gray_X, (7, 7), 0)
        gray_Y = cv2.GaussianBlur(gray_Y, (7, 7), 0)

        #----------------------------------------------------------------------------------

        if framesNumber_X < 30:
            #running_average(gray_X, weight_X)

            if backGround_X is None:
                backGround_X = gray_X.copy().astype("float")
            else:
                cv2.accumulateWeighted(gray_X, backGround_X, weight_X)

        else:
            diff_X = cv2.absdiff(backGround_X.astype("uint8"), gray_X)
            thresholded_X = cv2.threshold(diff_X, threshold_x, 255,
                                          cv2.THRESH_BINARY)[1]

            cv2.imshow("Thesholded_X", thresholded_X)
        # increment the number of frames
        framesNumber_X += 1

        #----------------------------------------------------------------------------------

        if framesNumber_Y < 30:
            #running_average(gray_Y, weight_Y)
Example #47
0
    def __display_image(self, lepton_frame):

        clip_lower_bound = self.clipLower.value()
        clip_upper_bound = self.clipUpper.value()
        if clip_upper_bound < clip_lower_bound:
            clip_upper_bound = clip_lower_bound
        clip_range = clip_upper_bound - clip_lower_bound

        blur_size = self.blurSize.value()
        blur_sigma = self.blurSigma.value()

        zero_cutoff = self.zeroCutoff.value()
        mask_cutoff = self.maskCutoff.value()

        detect_top = self.detectTop.value()
        detect_left = self.detectLeft.value()
        detect_bottom = self.detectBottom.value()
        detect_right = self.detectRight.value()

        detect_slice = (slice(detect_top, detect_bottom + 1), slice(detect_left, detect_right + 1))
        detect_rect = ((detect_left, detect_top), (detect_right, detect_bottom))

        ease_fn = EASING_FN[self.easeFn.value()]

        # half-assed filter that seems to work....
        np.clip(lepton_frame, clip_lower_bound, clip_upper_bound, lepton_frame)
        lepton_frame -= clip_lower_bound
        lepton_frame = cv2.GaussianBlur(lepton_frame, (blur_size, blur_size), blur_sigma)
        lepton_frame[lepton_frame < zero_cutoff] = 0
        #print('lepton_frame', lepton_frame.min(), lepton_frame.max(), np.sum(lepton_frame < 100), np.sum(lepton_frame < 250), np.sum(lepton_frame < 400))

        gray = np.float32(lepton_frame) / clip_range

        cv2.accumulateWeighted(gray, self.slow_image, 0.05)
        cv2.accumulateWeighted(gray, self.fast_image, 0.7)

        mask = np.fliplr(self.fast_image <= mask_cutoff)
        #print('mask', mask.sum())

        #eased = np.fliplr(EASING_FN[QtCore.QEasingCurve.Linear](1.0 - self.fast_image))
        eased = np.subtract(1.0, np.fliplr(self.fast_image), dtype=np.float32)
        #print('eased fi', eased.dtype, eased.min(), eased.max(), np.sum(np.isnan(eased)))
        eased_min = eased.min()
        eased_max = eased.max()
        eased_ptp = eased_max - eased_min
        if eased_ptp:
            eased -= eased_min
            eased *= (1.0 / eased_ptp)
            #print('eased s', eased.dtype, eased.min(), eased.max(), np.sum(np.isnan(eased)))
        eased = ease_fn(eased)
        eased *= 255
        #print('eased c', eased.dtype, eased.min(), eased.max(), np.sum(np.isnan(eased)))
        color = cv2.cvtColor(eased, cv2.COLOR_GRAY2RGB)
        #print('color', color.shape, color.min(), color.max())
        rgb_data = np.uint8(color)
        #print('rgb', rgb_data.shape, rgb_data.dtype, rgb_data.min(), rgb_data.max())

        rgb_data[:,:,G] = 0
        rgb_data[mask] = COLOR_WHITE

        delta = cv2.absdiff(self.slow_image[detect_slice], self.fast_image[detect_slice])
        thresh = np.sum(cv2.threshold(delta, 0.3, 1.0, cv2.THRESH_BINARY)[1])
        #print('thresh', np.sum(thresh), np.sum(delta), delta.min(), delta.max())
        if self.movement_count > 0 and thresh < 3:
            self.movement_count -= 1
            #print('dec movement', self.movement_count)
            if self.movement_count == 0 and self.movement:
                print('movement stopped')
                self.movement = False
        elif self.movement_count < 10 and thresh > 10:
            self.movement_count += 1
            #print('inc movement', self.movement_count)
            if self.movement_count == 10 and not self.movement:
                print('movement started')
                self.movement = True

        self.__update_thread.set_lepton_frame(rgb_data, self.movement)

        image_data = rgb_data.copy()
        cv2.rectangle(image_data, detect_rect[0], detect_rect[1], (0, 255, 0), 1)

        bytesPerLine = 3 * Lepton.COLS
        image = QtGui.QImage(image_data.data, Lepton.COLS, Lepton.ROWS, bytesPerLine, QtGui.QImage.Format_RGB888)
        pixmap = QtGui.QPixmap.fromImage(image)
        pixmap = pixmap.scaled(self.imageLabel.width(), self.imageLabel.height(), QtCore.Qt.KeepAspectRatio)
        self.imageLabel.setPixmap(pixmap)
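Two details in __display_image are worth calling out: the same signal feeds two accumulators with very different rates (0.05 for slow_image, 0.7 for fast_image), so their per-pixel difference measures recent change; and movement_count adds hysteresis, requiring several consistent frames before the movement state flips. A distilled sketch of that dual-rate scheme (the class name and thresholds are illustrative, and the input is assumed to be a float image in [0, 1]):

import cv2
import numpy as np

class MovementDetector:
    def __init__(self, shape):
        self.slow = np.zeros(shape, np.float32)  # long-term average
        self.fast = np.zeros(shape, np.float32)  # short-term average
        self.count = 0
        self.moving = False

    def update(self, gray_float):
        cv2.accumulateWeighted(gray_float, self.slow, 0.05)
        cv2.accumulateWeighted(gray_float, self.fast, 0.7)
        delta = cv2.absdiff(self.slow, self.fast)
        score = np.sum(cv2.threshold(delta, 0.3, 1.0, cv2.THRESH_BINARY)[1])
        # Hysteresis: several consistent frames are needed to toggle the state.
        if self.moving:
            if score < 3:
                self.count -= 1
                if self.count <= 0:
                    self.moving = False
        elif score > 10:
            self.count += 1
            if self.count >= 10:
                self.moving = True
        return self.moving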
Example #48
0
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '-c',
        '--conf',
        type=str,
        default="conf.json",
        help=
        'Path to the JSON configuration file, default is conf.json in the program folder'
    )
    arg = vars(ap.parse_args())
    try:
        conf = json.load(open(arg['conf']))
    except FileNotFoundError as error:
        print(error)
        return
    server_port = conf['upnp_port']
    device_uuid = uuid.uuid4()
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.connect(('239.255.255.250', 1900))
    local_ip = sock.getsockname()[0]
    sock.close()

    http_server = UPNPHTTPServer(
        server_port,
        uuid=device_uuid,
        presentation_url='http://{}/'.format(local_ip),
        server_PSK=conf['server_PSK'])
    http_server.start()

    ssdp = SSDPServer(device_uuid, local_ip, server_port)
    ssdp.start()

    stream = VideoStream(conf['source'], conf['resolution'],
                         conf['fps']).start()
    time.sleep(conf['camera_warmup_time'])

    background = None
    motion_counter = 0
    kernel = np.ones((3, 3), np.uint8)
    prev_status = "No motion"

    while True:

        frame = stream.read()
        timestamp = datetime.datetime.now()
        status = "No motion"
        motion = False

        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if background is None:
            background = np.float32(gray)
            continue

        cv2.accumulateWeighted(gray, background, 0.5)
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(background))
        thresh = cv2.threshold(frame_delta, conf['delta_thresh'], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, kernel, iterations=2)
        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)  # version-safe unpacking of the return value

        for c in contours:
            if cv2.contourArea(c) < conf['min_area']:
                continue

            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            motion = True

        if motion:
            motion_counter += 1

            if motion_counter >= conf['min_motion_frames']:
                status = "Motion"
        else:
            motion_counter = 0

        # Annotate after the status has been decided, so the overlay reflects this frame.
        ts = timestamp.strftime('%A %d %B %Y %I:%M:%S%p')
        cv2.putText(frame, '{} detected'.format(status), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        if status != prev_status:
            http_server.server.update_status(status)
            prev_status = status
Example #49
0
    global cap
    cap = cv2.VideoCapture(videoinput)
    ret, frame, frame_gray = get_frame()

    ### images
    average = np.float32(frame_gray)
    path_average = np.zeros(frame_gray.shape, np.float32)
    white = np.ones(frame_gray.shape, np.uint8) * 255

    frame_nr = 1
    t_last = time.time()
    current = VIS_PATH
    while ret:
        t0 = time.time()
        ### update running average
        cv2.accumulateWeighted(frame_gray, average, alpha)
        diff = cv2.absdiff(frame_gray, cv2.convertScaleAbs(average))
        ret, diff = cv2.threshold(diff, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        if ret <= 5:
            diff = np.zeros(diff.shape, dtype=diff.dtype)

        if frame_nr > 20:
            cv2.accumulateWeighted(diff, path_average, 0.1)
            path = cv2.convertScaleAbs(path_average)
            path = cv2.subtract(white, path)

            #mat = cv2.getRotationMatrix2D((path.shape[1]/2, path.shape[0]/2), r.value(), 1.0)
            #path = cv2.warpAffine(path, mat, (path.shape[1], path.shape[0]))

            #cv2.imshow("frame", frame_gray)
Example #50
0
    def capture_thread(self, IPinver):
        ap = argparse.ArgumentParser()  #OpenCV initialization
        ap.add_argument("-b",
                        "--buffer",
                        type=int,
                        default=64,
                        help="max buffer size")
        args = vars(ap.parse_args())
        pts = deque(maxlen=args["buffer"])

        font = cv2.FONT_HERSHEY_SIMPLEX

        camera = picamera.PiCamera()
        camera.resolution = (640, 480)
        camera.framerate = 20
        rawCapture = PiRGBArray(camera, size=(640, 480))

        context = zmq.Context()
        footage_socket = context.socket(zmq.PUB)
        print(IPinver)
        footage_socket.connect('tcp://%s:5555' % IPinver)

        avg = None
        motionCounter = 0
        #time.sleep(4)
        lastMovtionCaptured = datetime.datetime.now()

        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            frame_image = frame.array
            cv2.line(frame_image, (300, 240), (340, 240), (128, 255, 128), 1)
            cv2.line(frame_image, (320, 220), (320, 260), (128, 255, 128), 1)
            timestamp = datetime.datetime.now()

            if FindColorMode:
                print(FindColorMode)
                ####>>>OpenCV Start<<<####
                hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, self.colorLower, self.colorUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    cv2.putText(frame_image, 'Target Detected', (40, 60), font,
                                0.5, (255, 255, 255), 1, cv2.LINE_AA)
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))
                    X = int(x)
                    Y = int(y)
                    if radius > 10:
                        cv2.rectangle(frame_image,
                                      (int(x - radius), int(y + radius)),
                                      (int(x + radius), int(y - radius)),
                                      (255, 255, 255), 1)

                    if Y < (240 - tor):
                        error = (240 - Y) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        SpiderG.up(outv)
                        Y_lock = 0
                    elif Y > (240 + tor):
                        error = (Y - 240) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        SpiderG.down(outv)
                        Y_lock = 0
                    else:
                        Y_lock = 1

                    if X < (320 - tor * 3):
                        error = (320 - X) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        SpiderG.lookleft(outv)
                        #move.move(70, 'no', 'left', 0.6)
                        X_lock = 0
                    elif X > (320 + tor * 3):
                        error = (X - 320) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        SpiderG.lookright(outv)
                        #move.move(70, 'no', 'right', 0.6)
                        X_lock = 0
                    else:
                        #move.motorStop()
                        X_lock = 1

                    if X_lock == 1 and Y_lock == 1:
                        switch.switch(1, 1)
                        switch.switch(2, 1)
                        switch.switch(3, 1)
                    else:
                        switch.switch(1, 0)
                        switch.switch(2, 0)
                        switch.switch(3, 0)

                        # if UltraData > 0.5:
                        #     move.move(70, 'forward', 'no', 0.6)
                        # elif UltraData < 0.4:
                        #     move.move(70, 'backward', 'no', 0.6)
                        #     print(UltraData)
                        # else:
                        #     move.motorStop()

                else:
                    cv2.putText(frame_image, 'Target Detecting', (40, 60),
                                font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

                ####>>>OpenCV Ends<<<####

            if WatchDogMode:
                gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                if avg is None:
                    print("[INFO] starting background model...")
                    avg = gray.copy().astype("float")
                    rawCapture.truncate(0)
                    continue

                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

                # threshold the delta image, dilate the thresholded image to fill
                # in holes, then find contours on thresholded image
                thresh = cv2.threshold(frameDelta, 5, 255,
                                       cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = imutils.grab_contours(cnts)
                # print('x')
                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < 5000:
                        continue

                    # compute the bounding box for the contour, draw it on the frame,
                    # and update the text
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame_image, (x, y), (x + w, y + h),
                                  (128, 255, 0), 1)
                    text = "Occupied"
                    motionCounter += 1
                    #print(motionCounter)
                    #print(text)
                    LED.colorWipe(255, 16, 0)
                    lastMovtionCaptured = timestamp
                    switch.switch(1, 1)
                    switch.switch(2, 1)
                    switch.switch(3, 1)

                if (timestamp - lastMovtionCaptured).total_seconds() >= 0.5:
                    LED.colorWipe(255, 255, 0)
                    switch.switch(1, 0)
                    switch.switch(2, 0)
                    switch.switch(3, 0)

            encoded, buffer = cv2.imencode('.jpg', frame_image)
            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)

            rawCapture.truncate(0)
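In the FindColorMode branch the camera is kept pointed at the target by converting the pixel offset from the frame centre into a control error (offset / 5) and passing it through pid.GenOut. A proportional-only stand-in for that controller (the class and gain below are illustrative, not the original's PID):

class SimplePID:
    def __init__(self, kp=0.8):
        self.kp = kp

    def GenOut(self, error):
        return self.kp * error  # proportional-only: output scales with the error

pid = SimplePID()
Y, tor = 300, 20                       # e.g. target below the centre of a 640x480 frame
if Y > 240 + tor:                      # outside the vertical dead zone
    outv = int(round(pid.GenOut((Y - 240) / 5), 0))  # magnitude to tilt down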
Example #51
0
    def main_loop(self):
        self._set_up_masks()
        rate_of_influence = 0.1  ## todo rate_of_influence = 0.01
        FRAME_CROPPED = False

        while True:

            grabbed, img = self.video_source.read()
            if not grabbed:
                break
            #--------------
            frame_id = int(self.video_source.get(1))  #get current frame index
            img = cv2.resize(img, (self._vid_width, self._vid_height))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            if FRAME_CROPPED:  # disabled above; x, y, w, h are not defined in this snippet
                working_img = img[y:y + h, x:x + w]
            else:
                working_img = img.copy()
            if self.black_mask is not None:
                working_img = cv2.bitwise_and(working_img, self.black_mask)

            cv2.accumulateWeighted(working_img, self.raw_avg,
                                   rate_of_influence)
            background_avg = cv2.convertScaleAbs(
                self.raw_avg)  #reference background average image
            subtracted_img = cv2.absdiff(background_avg, working_img)

            ##-------Adding extra blur------
            subtracted_img = cv2.GaussianBlur(subtracted_img, (21, 21), 0)
            subtracted_img = cv2.GaussianBlur(subtracted_img, (21, 21), 0)
            subtracted_img = cv2.GaussianBlur(subtracted_img, (21, 21), 0)
            subtracted_img = cv2.GaussianBlur(subtracted_img, (21, 21), 0)

            ##-------Applying threshold
            _, threshold_img = cv2.threshold(subtracted_img, 30, 255, 0)

            #todo try no noise reduction
            ##-------Noise Reduction
            dilated_img = cv2.dilate(threshold_img, None)
            dilated_img = cv2.dilate(dilated_img, None)
            dilated_img = cv2.dilate(dilated_img, None)
            dilated_img = cv2.dilate(dilated_img, None)
            dilated_img = cv2.dilate(dilated_img, None)

            ##-------Drawing bounding boxes and counting
            img = cv2.cvtColor(
                img, cv2.COLOR_GRAY2BGR
            )  #Giving frame 3 channels for color (for drawing colored boxes)

            self.current_car_amount = 0
            self.bind_objects(img, dilated_img)

            ##-------Termination Conditions
            k = cv2.waitKey(25) & 0xFF
            if k == 27 or k == ord('q') or k == ord('Q'):
                break
            elif k == ord('s') or k == ord('S'):
                # pressing s/S saves a screenshot of each window's current frame to the screenshot folder
                cv2.imwrite(
                    os.path.join(self.screenshot_folder,
                                 f"{frame_id}_masked_frame.jpeg"), working_img)
                cv2.imwrite(
                    os.path.join(self.screenshot_folder,
                                 f"{frame_id}_background_subtracted.jpeg"),
                    subtracted_img)
                cv2.imwrite(
                    os.path.join(self.screenshot_folder,
                                 f"{frame_id}_threshold_applied.jpeg"),
                    dilated_img)
                cv2.imwrite(
                    os.path.join(self.screenshot_folder,
                                 f"{frame_id}_background_average.jpeg"),
                    background_avg)
                cv2.imwrite(
                    os.path.join(self.screenshot_folder,
                                 f"{frame_id}_car_counting.jpeg"), img)
                cv2.imwrite(
                    os.path.join(self.screenshot_folder,
                                 f"{frame_id}_collage.jpeg"),
                    self.collage_frame)
            if k == ord(' '):  #if spacebar is pressed
                paused_key = cv2.waitKey(
                    0) & 0xFF  #program is paused for a while
                if paused_key == ord(
                        ' '):  #pressing space again unpauses the program
                    pass

            if self.video_out:
                self.out_bg_subtracted.write(subtracted_img)
                self.out_threshold.write(dilated_img)
                self.out_bg_average.write(background_avg)
                self.out_bounding_boxes.write(img)
                self.out_collage.write(self.collage_frame)

        self.video_source.release()
        if self.video_out:
            self._release_video_writers()
        cv2.destroyAllWindows()
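A side note on the four consecutive GaussianBlur calls above: repeated Gaussian blurs compose into a single Gaussian whose variance is the sum of the individual variances, i.e. sigma = sqrt(sigma_1^2 + ... + sigma_n^2). With sigmaX = 0, OpenCV derives sigma from the kernel size as 0.3*((ksize - 1)*0.5 - 1) + 0.8, which is 3.5 for ksize 21, so the four passes approximate one blur with sigma = sqrt(4 * 3.5^2) = 7. A quick check:

import cv2
import numpy as np

img = np.random.randint(0, 256, (100, 100), np.uint8)

four = img.copy()
for _ in range(4):
    four = cv2.GaussianBlur(four, (21, 21), 0)   # implied sigma = 3.5 per pass

once = cv2.GaussianBlur(img, (0, 0), 7.0)        # roughly equivalent single pass

print(np.max(cv2.absdiff(four, once)))  # small, up to rounding and border effects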
Example #52
0
        if (img.shape[0] < ref_img.shape[0]) and (img.shape[1] <
                                                  ref_img.shape[1]):
            cropped = ref_img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
            ref_img = cropped
        if (img.shape[0] < green_img.shape[0]) and (img.shape[1] <
                                                    green_img.shape[1]):
            cropped = green_img[refPt[0][1]:refPt[1][1],
                                refPt[0][0]:refPt[1][0]]
            green_img = cropped
        if (img.shape[0] < avg1.shape[0]) and (img.shape[1] < avg1.shape[1]):
            avg1 = np.float32(ref_img)

    result = img

    if (use_time_avg):
        cv2.accumulateWeighted(img, avg1, 0.09)
        img = cv2.convertScaleAbs(avg1)

    if (use_diff):
        fgmask = find_fgmask(img,
                             ref_img,
                             thresh=thresh,
                             use_denoise=use_denoise,
                             h=denoise_h)
        bgmask = cv2.bitwise_not(fgmask)

        if (use_edges):
            bgmask = cv2.bitwise_or(bgmask, bg_edges)
            fgmask = cv2.bitwise_not(bgmask)

        fgimg = cv2.bitwise_and(img, img, mask=fgmask)
Example #53
0
if __name__ == "__main__":
    fileimg = None
    if len(sys.argv) >= 2:
        fileimg = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
    else:
        # get the reference to the webcam
        camera = cv2.VideoCapture(0)
        _, frame = camera.read()
        frame = processFrame(frame)
        background = frame.copy().astype("float")
        for _ in range(30):
            _, frame = camera.read()
            frame = processFrame(frame)
            # compute weighted average, accumulate it and update the background
            cv2.accumulateWeighted(frame, background, 0.5)
    while True:
        if fileimg is not None:
            frame = fileimg
        else:
            ret, frame = camera.read()  # BGR
        # frame = cv2.flip(frame, 1)
        cv2.imshow("initial", frame)
        frame = processFrame(frame)

        result = detect_skin(frame)
        cv2.imshow("result", result)

        if fileimg is None:
            foreground = foreground_mask(background, frame)
            result = cv2.bitwise_and(result, result, mask=foreground)
Example #54
0
    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # if the average frame is None, initialize it
    if avg is None:
        avg = gray.copy().astype("float")
        continue

    # Object Detection/Edge Detection

    # accumulate the weighted average between the current frame and (accumulated) previous
    # frames, then compute the difference between the current frame and the running average
    cv2.accumulateWeighted(
        gray, avg, 0.5
    )  # avg accumulates slight pixel differences over the frames; 0.5 is the weight of the new frame
    frameDelta = cv2.absdiff(
        gray, cv2.convertScaleAbs(avg))  # element-wise difference between the frame and the background model

    thresh = cv2.threshold(
        frameDelta, 55, 255, cv2.THRESH_BINARY
    )[1]  # thresholds out small (camera-noise) differences; the binary mask is returned as thresh

    # dilate the thresholded image to fill in holes, then find contours on the thresholded image
    thresh = cv2.dilate(
        thresh, None, iterations=6
    )  # dilation grows the white regions outward, filling small gaps in the mask
    cnts = cv2.findContours(
        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Example #55
0
    contours = contours[1] if imutils.is_cv3() else contours[0]

    for c in contours:
        # ignore regions that are too small
        if cv2.contourArea(c) < 2500:
            continue

        # object detected; you can add your own handling code here...

        # compute the bounding box of the contour
        (x, y, w, h) = cv2.boundingRect(c)

        # draw the bounding box
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # draw the contours (for debugging)
    cv2.drawContours(frame, contours, -1, (0, 255, 255), 2)

    # show the detection result image
    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    # update the average image
    cv2.accumulateWeighted(blur, avg_float, 0.01)
    avg = cv2.convertScaleAbs(avg_float)

cap.release()
cv2.destroyAllWindows()
Example #56
0
import cv2
import numpy as np

c = cv2.VideoCapture("../khadims.avi")
_, f = c.read()

avg1 = np.float32(f)
avg2 = np.float32(f)

p = 0
res2 = None
while True:
    try:
        _, f = c.read()

        # cv2.accumulateWeighted(f,avg1,0.1)
        cv2.accumulateWeighted(f, avg2, 0.01)

        # res1 = cv2.convertScaleAbs(avg1)
        res2 = cv2.convertScaleAbs(avg2)

        # cv2.imshow('img',f)
        # cv2.imshow('avg1',res1)
        # cv2.imshow('avg2',res2)
        k = cv2.waitKey(20)

        print(p, end="\r")
        p += 1
        if k == 27:
            break
    except Exception:  # stop when the video ends or a frame fails to decode
        break
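The alpha argument sets how quickly accumulateWeighted forgets: each call computes dst = (1 - alpha) * dst + alpha * src, so alpha = 0.01 gives a time constant of roughly 100 frames while 0.1 reacts ten times faster. A tiny numeric illustration:

import cv2
import numpy as np

step = np.full((2, 2), 255, np.uint8)   # the scene jumps from 0 to 255
slow = np.zeros((2, 2), np.float32)
fast = np.zeros((2, 2), np.float32)
for _ in range(100):
    cv2.accumulateWeighted(step, slow, 0.01)
    cv2.accumulateWeighted(step, fast, 0.1)
print(slow[0, 0], fast[0, 0])  # ~162 vs ~255: the slow average still lags after 100 frames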
Example #57
0
def main():
    """
    Main entry point for the detector and tracker.
    """
    subtractors = {
        'MOG': cv2.createBackgroundSubtractorMOG2,
        'GMG': cv2.bgsegm.createBackgroundSubtractorGMG
    }

    avg = None
    trackers = []
    id_num = 0
    frame_count = 0
    dropped = 0
    total_in = total_out = 0
    fgmask = thresh = dim = None
    timer = {
        'read': [],
        'detect': [],
        'track': [],
        'total': [],
        'resize': [],
        'apply': []
    }

    # Initialize the background subtractor (if being used)
    fgbg = subtractors.get(args.detect_method, None)
    if args.detect_method == 'GMG':
        fgbg = fgbg(initializationFrames=args.init_frames,
                    decisionThreshold=args.decision_thresh)
    elif args.detect_method == 'MOG':
        fgbg = fgbg(history=args.history,
                    varThreshold=args.var_thresh,
                    detectShadows=args.detect_shadows)
    else:
        fgbg = None

    vs = cv2.VideoCapture(args.input if args.input else 0)
    minute = datetime.now().minute

    while True:
        t_start = time.time()
        ti = time.time()
        ret, frame = vs.read()
        timer['read'].append(time.time() - ti)
        frame_count += 1

        if frame_count % 2 == 1:
            continue

        if frame is None:
            dropped += 1
            vs = cv2.VideoCapture(args.input if args.input else 0)
            ret, frame = vs.read()
            frame_count = 1
            id_num = 0
            trackers = []

        # compute dimensions for frame resize
        if dim is None:
            (h, w) = frame.shape[:2]
            r = args.width / float(w)
            dim = (args.width, int(h * r))

        # resize frame
        t1 = time.time()
        frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        timer['resize'].append(time.time() - t1)

        # initialize frames for display
        if fgmask is None or thresh is None:
            if args.detect_method in subtractors:
                fgmask = fgbg.apply(frame, learningRate=args.learning_rate)
            else:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (17, 17), 0)
                avg = gray.copy().astype('float')
                cv2.accumulateWeighted(gray, avg, 0.1)
                fgmask = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
            thresh = cv2.threshold(fgmask, args.threshold, 255,
                                   cv2.THRESH_BINARY)[1]
            thresh = cv2.erode(thresh, None, iterations=args.erode)
            thresh = cv2.dilate(thresh,
                                np.ones((5, 5), np.uint8),
                                iterations=args.dilate)
            continue

        # extract foreground mask
        if args.detect_method in subtractors:
            t1 = time.time()
            fgmask = fgbg.apply(frame, learningRate=args.learning_rate)
            timer['apply'].append(time.time() - t1)

        if frame_count < args.skip_frames:
            continue

        time.sleep(args.delay)

        # detect people using background subtraction method
        if frame_count % args.frame_interval == 0:
            t1 = time.time()

            if args.detect_method == 'custom':
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (17, 17), 0)
                cv2.accumulateWeighted(gray, avg, 0.1)
                fgmask = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

            # remove shadows, erode noise, dilate disparate masked pixels regions
            thresh = cv2.threshold(fgmask, args.threshold, 255,
                                   cv2.THRESH_BINARY)[1]
            thresh = cv2.erode(thresh, None, iterations=args.erode)
            thresh = cv2.dilate(thresh,
                                np.ones((5, 5), np.uint8),
                                iterations=args.dilate)

            # extract contours from the segmented image
            (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

            # process detections to match/create/destroy trackers
            contours = []
            sim_matrix = []
            for i, c in enumerate(cnts):
                # ignore contours that fall below a minimum area
                if cv2.contourArea(c) < args.min_area:
                    continue

                (x, y, w, h) = cv2.boundingRect(c)
                contours.append(np.array([x, y, x + w, y + h], np.float32))
                logger.debug("contour {} bbox: {}".format(i, contours[-1]))
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 5)
                sim_matrix.append([
                    bb_sim(contours[-1], t['box'], method=args.bb_sim_method)
                    for t in trackers
                ])
                logger.debug("IOUs with existing {} tracker(s): {}".format(
                    len(trackers), sim_matrix[-1]))

            # if there were any preexisting trackers...
            if any([x for x in sim_matrix]):
                # match the detections with existing trackers if possible; also
                # identify brand new detections and lost trackers
                matches, unmatched_detections, lost_trackers = detection_tracker_match(
                    sim_matrix, method=args.detection_tracker_match)

                # create new trackers for unmatched detections
                for d in unmatched_detections:
                    box = contours[d]
                    trackers.append({
                        'id': id_num,
                        'box': box.copy(),
                        'init': box.copy(),
                        'width': box[2] - box[0],
                        'height': box[3] - box[1],
                        'centroids': [],
                        'kalman_filter': kalman_filter(args.num_measurements),
                        'lost': 0
                    })
                    logger.debug(
                        "new tracker {} for unmatched detection".format(
                            trackers[-1]['id']))
                    trackers[-1]['kalman_filter'].predict()
                    centroid = np.array([
                        box[0] + 0.5 * (box[2] - box[0]), box[1] + 0.5 *
                        (box[3] - box[1])
                    ], np.float32)
                    trackers[-1]['centroids'].append(centroid)
                    if args.num_measurements == 4:
                        trackers[-1]['kalman_filter'].correct(box)
                    else:
                        trackers[-1]['kalman_filter'].correct(centroid)
                    id_num += 1

                # update trackers for matched detections
                for t in matches:
                    logger.debug(
                        "\ncorrecting bbox for tracker {} (detection {})".
                        format(t[1], t[0]))
                    tracker = trackers[t[1]]
                    box = contours[t[0]]
                    centroid = np.array([
                        box[0] + 0.5 * (box[2] - box[0]), box[1] + 0.5 *
                        (box[3] - box[1])
                    ], np.float32)
                    if args.num_measurements == 4:
                        tracker['kalman_filter'].correct(box - tracker['init'])
                    else:
                        init = tracker['init']
                        init_centroid = np.array([
                            init[0] + 0.5 * (init[2] - init[0]),
                            init[1] + 0.5 * (init[3] - init[1])
                        ], np.float32)
                        tracker['kalman_filter'].correct(centroid -
                                                         init_centroid)
                    tracker['centroids'].append(centroid)

                # identify the lost trackers that need to be deleted
                to_delete = set()
                new_trackers = []
                for t in lost_trackers:
                    tracker = trackers[t]
                    tracker['lost'] += 1

                    if tracker['lost'] > args.track_cutoff:
                        logger.debug("\tremoving lost tracker {}".format(
                            tracker['id']))
                        to_delete.add(tracker['id'])

                # reinitialize the list of trackers excluding the lost ones
                for t in trackers:
                    if t['id'] not in to_delete:
                        new_trackers.append(t)
                trackers = new_trackers
            else:
                # initialize trackers for first new detections
                for box in contours:
                    trackers.append({
                        'id': id_num,
                        'box': box.copy(),
                        'init': box.copy(),
                        'width': box[2] - box[0],
                        'height': box[3] - box[1],
                        'centroids': [],
                        'kalman_filter': kalman_filter(args.num_measurements),
                        'lost': 0
                    })
                    logger.debug("\tmade new tracker {}".format(
                        trackers[-1]['id']))
                    trackers[-1]['kalman_filter'].predict()
                    centroid = np.array([
                        box[0] + 0.5 * (box[2] - box[0]), box[1] + 0.5 *
                        (box[3] - box[1])
                    ], np.float32)
                    if args.num_measurements == 4:
                        trackers[-1]['kalman_filter'].correct(
                            box - trackers[-1]['init'])
                    else:
                        init = trackers[-1]['init']
                        init_centroid = np.array([
                            init[0] + 0.5 * (init[2] - init[0]),
                            init[1] + 0.5 * (init[3] - init[1])
                        ], np.float32)
                        trackers[-1]['kalman_filter'].correct(centroid -
                                                              init_centroid)
                    trackers[-1]['centroids'].append(centroid)
                    id_num += 1

                if not contours:
                    # identify the lost trackers that need to be deleted
                    to_delete = set()
                    new_trackers = []
                    for t in trackers:
                        t['lost'] += 1

                        if t['lost'] > args.track_cutoff:
                            logger.debug("\tremoving lost tracker {}".format(
                                t['id']))
                            to_delete.add(t['id'])
                        else:
                            new_trackers.append(t)
                    trackers = new_trackers
            timer['detect'].append(time.time() - t1)
        else:
            t1 = time.time()
            for t in trackers:
                pred = t['kalman_filter'].predict()
                if args.num_measurements == 4:
                    pred[:4] += t['init'].reshape(-1, 1)
                    x, y, x2, y2 = pred[:4]
                    x1, y1 = x, y
                else:
                    init_centroid = np.array([
                        t['init'][0] + 0.5 * (t['init'][2] - t['init'][0]),
                        t['init'][1] + 0.5 * (t['init'][3] - t['init'][1])
                    ], np.float32)
                    pred[:2] += init_centroid.reshape(-1, 1)
                    x, y = pred[:2]
                    x1 = x - 0.5 * t['width']
                    y1 = y - 0.5 * t['height']
                    x2 = x + 0.5 * t['width']
                    y2 = y + 0.5 * t['height']
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)  # drawing functions need integer coordinates
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                logger.debug("\tprediction for tracker {} is {}".format(
                    t['id'], [x1, y1, x2, y2]))
                cv2.putText(frame, str(t['id']), (x1, y1 - 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
            timer['track'].append(time.time() - t1)

        for t in trackers:
            if 'counted' not in t:
                if args.direction == 'vertical':
                    hist = [c[1] for c in t['centroids']]
                else:
                    hist = [c[0] for c in t['centroids']]
                centroid = t['centroids'][-1]
                direction = (centroid[1] if args.direction == 'vertical' \
                        else centroid[0]) - np.mean(hist)

                if direction < 0 and \
                        (centroid[1] if args.direction == 'vertical' \
                                else centroid[0]) <  \
                        (dim[1] if args.direction == 'vertical' \
                                else dim[0]) // 2:
                    total_in += 1
                    t['counted'] = 1

                elif direction > 0 and \
                        (centroid[1] if args.direction == 'vertical' \
                                else centroid[0]) >  \
                        (dim[1] if args.direction == 'vertical' \
                                else dim[0]) // 2:
                    total_out += 1
                    t['counted'] = 1
        cv2.putText(frame, "Headcount: {}".format(len(trackers)), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, "In: {}".format(total_in), (10, dim[1] - 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, "Out: {}".format(total_out), (10, dim[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        if args.direction == 'vertical':
            cv2.line(frame, (0, dim[1] // 2), (dim[0], dim[1] // 2),
                     (0, 255, 255), 2)
        else:
            cv2.line(frame, (dim[0] // 2, 0), (dim[0] // 2, dim[1]),
                     (0, 255, 255), 2)

        if args.display_frames:
            cv2.namedWindow("Foreground Mask")
            cv2.moveWindow("Foreground Mask", 0, 10)
            cv2.imshow("Foreground Mask", fgmask)

            cv2.namedWindow("Camera Feed")
            cv2.moveWindow("Camera Feed", 600, 450)
            cv2.imshow("Camera Feed", frame)

            cv2.namedWindow("Thresholded/Erode/Dilate")
            cv2.moveWindow("Thresholded/Erode/Dilate", 0, 450)
            cv2.imshow("Thresholded/Erode/Dilate", thresh)

        if frame_count % args.log_interval == 0:
            logger.info("read time (ms): {}".format(
                np.mean(timer['read']) * 1000))
            logger.info("detection time (ms): {}".format(
                np.mean(timer['detect']) * 1000))
            logger.info("tracking time (ms): {}".format(
                np.mean(timer['track']) * 1000))
            logger.info("resize time (ms): {}".format(
                np.mean(timer['resize']) * 1000))
            logger.info("apply time (ms): {}".format(
                np.mean(timer['apply']) * 1000))
            logger.info("total time (ms): {}".format(
                np.mean(timer['total']) * 1000))

        if frame_count % args.timer_reset_interval == 0:
            # drop the oldest 1000 samples so the rolling means stay current
            for k, v in timer.items():
                timer[k] = v[1000:]

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        if args.pdb:
            pdb.set_trace()

        now = datetime.now()
        if now.minute != minute:
            log_time = datetime(year=now.year,
                                month=now.month,
                                day=now.day,
                                hour=now.hour,
                                minute=minute).strftime('%Y-%m-%d %H:%M')
            minute = now.minute
            logger.info("{} - IN: {} OUT: {} frames: {} dropped: {}".format(
                log_time, total_in, total_out, frame_count, dropped))
            total_in = total_out = 0
        timer['total'].append(time.time() - t_start)

    if not args.input:
        vs.stop()
    else:
        vs.release()
    cv2.destroyAllWindows()
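For reference, the predict()/correct() calls above assume a cv2.KalmanFilter
constructed earlier in the script; a minimal sketch of a plausible
constant-velocity setup for the two-measurement (centroid) case follows. The
helper name make_centroid_kf and the noise values are assumptions, not taken
from the example.

import cv2
import numpy as np

def make_centroid_kf(dt=1.0):
    # state [x, y, vx, vy], measurement [x, y]
    kf = cv2.KalmanFilter(4, 2)
    kf.transitionMatrix = np.array([[1, 0, dt, 0],
                                    [0, 1, 0, dt],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
    kf.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
    kf.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-2
    kf.measurementNoiseCov = np.eye(2, dtype=np.float32) * 1e-1
    return kf

kf = make_centroid_kf()
pred = kf.predict()                               # (4, 1) float32 prediction
kf.correct(np.array([[5.0], [3.0]], np.float32))  # measured centroid offset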
Example #58
0
def running_average(img, weight):
    # maintain a global running-average background; the first call only
    # seeds it with a float copy of the frame
    global backGround
    if backGround is None:
        backGround = img.copy().astype("float")
        return
    cv2.accumulateWeighted(img, backGround, weight)
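A brief usage sketch for the helper above: diff each grayscale frame against
the accumulated background to get a binary motion mask. The motion_mask name
and the threshold of 25 are assumptions, not from the example.

import cv2

backGround = None  # module-level background, as running_average expects

def motion_mask(frame):
    # returns None while the background is being seeded, else a binary
    # mask of the pixels that changed since the running average
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    first = backGround is None
    running_average(gray, 0.5)
    if first:
        return None
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(backGround))
    return cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]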
Example #59
0
    def run(self):
        count = 0
        # This method runs in a separate thread
        while not self.terminated:
            time.sleep(conf["camera_wait_time"])
            # Wait for an image to be written to the stream
            if self.bufwriteevent.wait():
                try:
                    while True:
                        self.stream.seek(0)
                        frame = np.asarray(Image.open(self.stream))
                        framebu = frame.copy()
                        timestamp = datetime.datetime.now()
                        text = "Unoccupied"
                        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                        graybu = gray.copy()
                        gray = cv2.GaussianBlur(gray, (21, 21), 0)
                        # if the average frame is None, initialize it
                        if self.avg is None:
                            print("[INFO] starting background model...")
                            self.avg = gray.copy().astype("float")
                            break
                        # accumulate the weighted average between the current frame and
                        # previous frames, then compute the difference between the current
                        # frame and running average
                        cv2.accumulateWeighted(gray, self.avg, 0.5)
                        frameDelta = cv2.absdiff(gray,
                                                 cv2.convertScaleAbs(self.avg))
                        # threshold the delta image, dilate the thresholded image to fill
                        # in holes, then find contours on thresholded image
                        thresh = cv2.threshold(frameDelta,
                                               conf["delta_thresh"], 255,
                                               cv2.THRESH_BINARY)[1]
                        thresh = cv2.dilate(thresh, None, iterations=2)
                        cnts = cv2.findContours(thresh.copy(),
                                                cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
                        # normalize the findContours return across OpenCV
                        # versions (3.x returns three values, 2.x/4.x two)
                        cnts = imutils.grab_contours(cnts)
                        # loop over the contours
                        for c in cnts:
                            # if the contour is too small, ignore it
                            if cv2.contourArea(c) < conf["min_area"]:
                                continue
                            # compute the bounding box for the contour, draw it on the frame
                            # and update the text
                            (x, y, w, h) = cv2.boundingRect(c)
                            cv2.rectangle(frame, (x, y), (x + w, y + h),
                                          (0, 255, 0), 2)
                            text = "Occupied"
                        # draw the text and timestamp on the frame
                        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
                        cv2.putText(frame, "Room Status: {}".format(text),
                                    (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (0, 0, 255), 2)

                        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                                    (0, 0, 255), 1)
                        if text == "Occupied":
                            cv2.imwrite(
                                dir_path + '/move/move_' + str(count) + '.jpg',
                                frame)
                            count += 1
                            if count >= MAX_RECS:
                                count = 0
                            fd = self.owner.faceDetection
                            if not fd.bufwriteevent.is_set():
                                fd.frame = framebu
                                fd.gray = graybu
                                fd.bufwriteevent.set()
                        break
                except Exception as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(
                        exc_tb.tb_frame.f_code.co_filename)[1]
                    print(exc_type, fname, exc_tb.tb_lineno)
                    logging.warning('LastMovementDetected error: %s', str(e))
                finally:
                    # Reset the stream and bufwriteevent
                    self.stream.seek(0)
                    self.stream.truncate()
                    self.bufwriteevent.clear()
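The version-compatible contour handling matters because cv2.findContours
returns (image, contours, hierarchy) in OpenCV 3.x but (contours, hierarchy)
in 2.x and 4.x. A dependency-free sketch of what imutils.grab_contours does,
under that assumption:

import cv2

def find_contours_compat(binary):
    # take the second-to-last element so the call works on 2.x, 3.x and 4.x
    result = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                              cv2.CHAIN_APPROX_SIMPLE)
    return result[-2]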
Example #60
0
    def start_cam(self, show=None, save=None):
        print("start cam")
        # if the arguments are not specified, fall back to config.json
        if show is None:
            show = self.conf["show"]

        if save is None:
            save = self.conf["save"]

        avg = None
        motionCounter = 0
        lastUploaded = dt.datetime(2020, 1, 1)

        # capture frames from the camera
        self.camera = PiCamera()
        self.camera.start_preview()
        self.camera.resolution = (640, 480)
        self.rawCapture = PiRGBArray(self.camera, size=(640, 480))
        time.sleep(self.conf["camera_warmup_time"])

        for f in self.camera.capture_continuous(self.rawCapture,
                                                format="bgr",
                                                use_video_port=True):

            frame = f.array
            now = dt.datetime.now()
            self.text = "No movement"

            # prep the frame
            #frame = imutils.resize(frame, width=250)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)

            # initialize the average frame on the first pass
            if avg is None:
                avg = gray.copy().astype("float")
                self.rawCapture.truncate(0)
                # skip to the next frame after seeding the average
                continue

            cv2.accumulateWeighted(gray, avg, 0.5)
            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

            # threshold the delta image, dilate the thresholded image to fill
            # in holes, then find contours on thresholded image
            thresh = cv2.threshold(frameDelta, self.conf["delta_thresh"], 255,
                                   cv2.THRESH_BINARY)[1]
            thresh = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over the contours to decide whether anything moved
            for c in cnts:

                # only act on contours that are large enough to matter
                if cv2.contourArea(c) > self.conf["min_area"]:
                    # compute the bounding box for the contour, draw it on
                    # the frame, and update the status text
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                    self.text = "Movement detected"

            # draw the text and timestamp on the frame
            ts = now.strftime("%A %d %B %Y %I:%M:%S%p")
            cv2.putText(frame, "Room Status: {}".format(self.text), (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

            if show:
                # display the security feed
                cv2.imshow("Security Feed", frame)
                cv2.waitKey(1)  # pump the HighGUI event loop so imshow updates

            if save:
                if self.text == "Movement detected":
                    if (now - lastUploaded
                        ).seconds >= self.conf["min_upload_seconds"]:
                        motionCounter += 1
                        if motionCounter >= self.conf["min_motion_frames"]:
                            name = "{}-{:02d}-{:02d}_{:02d}h{:02d}m{:02d}s".format(
                                now.year, now.month, now.day, now.hour,
                                now.minute, now.second)
                            cv2.imwrite(self.directory + name + ".jpg", frame)
                            lastUploaded = now
                            motionCounter = 0

            self.rawCapture.truncate(0)
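All of the snippets above lean on the same update rule: per pixel,
cv2.accumulateWeighted(src, dst, alpha) computes
dst = (1 - alpha) * dst + alpha * src, so the alpha of 0.5 used here adapts
the background quickly, while values closer to 0 remember old frames much
longer. A pure-NumPy sketch of the recurrence, for intuition only (the real
function also handles masks and multi-channel images):

import numpy as np

def accumulate_weighted(src, dst, alpha):
    # same per-pixel recurrence as cv2.accumulateWeighted; dst must be a
    # float array and is updated in place
    dst *= (1.0 - alpha)
    dst += alpha * src
    return dst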