def segment_motion(self):
    """Segment the motion history image (MHI) into motion components and
    paint a filled bounding box of the largest component into self.mask.

    Leaves self.mask zeroed when no component is found or when the
    largest component's area is below ``min_area``.
    """
    min_area = 100  # reject components smaller than this many pixels
    temp = cv.CloneImage(self.mask)
    # Gradient orientation of the MHI; 3 is the aperture size.
    cv.CalcMotionGradient(self.mhi, temp, self.orient,
                          self.MAX_TIME_DELTA, self.MIN_TIME_DELTA, 3)
    if not self.storage:
        self.storage = cv.CreateMemStorage(0)
    # Split the MHI into per-motion connected components.
    seq = cv.SegmentMotion(self.mhi, self.segmask, self.storage,
                           self.timestamp, self.MAX_TIME_DELTA)
    # Find the component with the largest area.
    # (Renamed from `max`, which shadowed the builtin.)
    max_area = 0
    max_idx = -1
    for i, (area, value, comp_rect) in enumerate(seq):
        if area > max_area:
            max_area = area
            max_idx = i
    if max_idx == -1:
        cv.Zero(self.mask)
        return
    (area, value, comp_rect) = seq[max_idx]
    # Was a hard-coded literal 100; now uses the named constant defined above.
    if area < min_area:
        cv.Zero(self.mask)
        return
    cv.Zero(self.mask)
    cv.Rectangle(self.mask,
                 (comp_rect[0], comp_rect[1]),
                 (comp_rect[0] + comp_rect[2], comp_rect[1] + comp_rect[3]),
                 (255, 255, 255), cv.CV_FILLED)
def update_mhi(img, dst, diff_threshold):
    """Update the global motion-history image from a new frame and draw
    the detected motion components (circles plus direction lines) on dst.
    """
    global last, mhi, storage, mask, orient, segmask

    timestamp = time.clock() / CLOCKS_PER_SEC  # current time in seconds
    frame_size = cv.GetSize(img)               # current frame size
    prev_idx = last

    # (Re)allocate the frame ring buffer and scratch images on the first
    # call or whenever the frame size changes.
    if not mhi or cv.GetSize(mhi) != frame_size:
        for i in range(N):
            buf[i] = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)  # clear MHI at the beginning
        orient = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY)  # frame to grayscale

    next_idx = (last + 1) % N  # index of (last - (N-1))th frame
    last = next_idx
    silh = buf[next_idx]

    # Silhouette = thresholded difference between the two newest frames.
    cv.AbsDiff(buf[prev_idx], buf[next_idx], silh)
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY)
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION)  # update MHI

    # Scale the MHI into a displayable 8-bit mask and show it in dst.
    cv.CvtScale(mhi, mask, 255. / MHI_DURATION,
                (MHI_DURATION - timestamp) * 255. / MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)

    cv.CalcMotionGradient(mhi, mask, orient,
                          MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)

    for (area, value, comp_rect) in seq:
        # Reject very small components.
        if comp_rect[2] + comp_rect[3] <= 100:
            continue
        color = cv.CV_RGB(255, 0, 0)
        silh_roi = cv.GetSubRect(silh, comp_rect)
        mhi_roi = cv.GetSubRect(mhi, comp_rect)
        orient_roi = cv.GetSubRect(orient, comp_rect)
        mask_roi = cv.GetSubRect(mask, comp_rect)
        angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi,
                                               timestamp, MHI_DURATION)
        # Number of points within the silhouette ROI; skip sparse regions.
        count = cv.Norm(silh_roi, None, cv.CV_L1, None)
        if count < (comp_rect[2] * comp_rect[3] * 0.05):
            continue
        magnitude = 30.
        center = ((comp_rect[0] + comp_rect[2] / 2),
                  (comp_rect[1] + comp_rect[3] / 2))
        cv.Circle(dst, center, cv.Round(magnitude * 1.2), color,
                  3, cv.CV_AA, 0)
        cv.Line(dst, center,
                (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                 cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                color, 3, cv.CV_AA, 0)
def process_motion(self, img):
    """Detect the largest region of motion in ``img`` and return its
    center as an (x, y) tuple.

    Returns (-1, -1) when no sufficiently large motion component is
    found.  Much of this section was adapted from OpenCV's python
    sample motempl.py.
    """
    center = (-1, -1)
    timestamp = time.clock() / self.clocks_per_sec  # current time in seconds
    idx1 = self.last

    cv.CvtColor(img, self.buf[self.last], cv.CV_BGR2GRAY)  # to grayscale
    idx2 = (self.last + 1) % self.n_frames
    self.last = idx2
    silh = self.buf[idx2]

    # Silhouette = thresholded difference between the two newest frames.
    cv.AbsDiff(self.buf[idx1], self.buf[idx2], silh)
    cv.Threshold(silh, silh, 30, 1, cv.CV_THRESH_BINARY)
    cv.UpdateMotionHistory(silh, self.mhi, timestamp, self.mhi_duration)
    cv.ConvertScale(self.mhi, self.mask, 255. / self.mhi_duration,
                    (self.mhi_duration - timestamp) * 255. / self.mhi_duration)
    cv.SetZero(img)
    cv.Merge(self.mask, None, None, None, img)
    cv.CalcMotionGradient(self.mhi, self.mask, self.orient,
                          self.max_time_delta, self.min_time_delta, 3)
    seq = cv.SegmentMotion(self.mhi, self.segmask, self.storage,
                           timestamp, self.max_time_delta)

    # There may be lots of things moving around; in that case just find
    # the biggest change on the image.  enumerate() replaces the
    # original's error-prone manual `inc` counter so max_idx always
    # indexes into seq correctly.
    a_max = 0
    max_idx = -1
    for i, (area, value, comp_rect) in enumerate(seq):
        if comp_rect[2] + comp_rect[3] > 60:  # reject small changes
            if area > a_max:
                a_max = area
                max_idx = i

    # Found it; now do some processing on the area.
    if max_idx != -1:
        (area, value, comp_rect) = seq[max_idx]
        # This rectangle contains the overall motion ROI.
        cv.Rectangle(self.motion,
                     (comp_rect[0], comp_rect[1]),
                     (comp_rect[0] + comp_rect[2],
                      comp_rect[1] + comp_rect[3]),
                     (0, 0, 255), 1)
        # The goal is to report back a center of movement contained in a
        # rectangle; adjust the height based on the slider-bar value.
        h = int(comp_rect[1] +
                (comp_rect[3] * (float(self.height_value) / 100)))
        center = ((comp_rect[0] + comp_rect[2] / 2), h)
    return center
def processFrame():
    """Grab a frame from the capture, update the motion-history image,
    draw motion contours, and display the result.

    Key handling: 'n' cycles to the next camera index; Esc quits.
    """
    # Declare as globals since we are assigning to them.
    global capture
    global camera_index
    global frame_grey
    global prev_frame
    global prev_frame_grey
    global motionMask
    global mhi
    global orientation
    global prevFrameIndex

    diff_threshold = 50

    # Capture the current frame.
    frame = cv.QueryFrame(capture)

    # Create the frame buffer and all images needed for the motion
    # history algorithm on the first call.
    if not mhi:
        for i in range(N):
            buffer[i] = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
            cv.Zero(buffer[i])
        mhi = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)
        orientation = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 1)

    # Convert the frame to greyscale and store it in the buffer.
    cv.CvtColor(frame, buffer[prevFrameIndex], cv.CV_RGB2GRAY)

    # Iterate the indexes of the image ring buffer.
    index1 = prevFrameIndex
    index2 = (prevFrameIndex + 1) % N  # next index in the buffer
    prevFrameIndex = index2
    motionMask = buffer[index2]

    # Motion mask = binary difference of current and previous frames.
    cv.AbsDiff(buffer[index1], buffer[index2], motionMask)
    cv.Threshold(motionMask, motionMask, diff_threshold, 1,
                 cv.CV_THRESH_BINARY)

    # Produce the motion history image, then copy/scale it back onto
    # the motion mask for display.
    timeStamp = time.clock() / CLOCKS_PER_SEC
    cv.UpdateMotionHistory(motionMask, mhi, timeStamp, MHI_DURATION)
    cv.ConvertScale(mhi, motionMask, 255. / MHI_DURATION,
                    (MHI_DURATION - timeStamp) * 255. / MHI_DURATION)

    # Calculate the motion gradient and the global direction of motion.
    tempMask = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Merge(motionMask, None, None, None, tempMask)
    cv.CalcMotionGradient(mhi, tempMask, orientation, 0.5, 0.05, 3)
    angle = 360 - cv.CalcGlobalOrientation(orientation, tempMask, mhi,
                                           timeStamp, MHI_DURATION)

    # Contour detection: outline the moving regions on the motion mask.
    cv.Dilate(tempMask, tempMask, None, 1)
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(tempMask, storage, cv.CV_RETR_EXTERNAL,
                              cv.CV_CHAIN_APPROX_SIMPLE)
    # Guard: FindContours may find nothing; DrawContours would then fail.
    if contour:
        cv.DrawContours(motionMask, contour,
                        cv.RGB(0, 0, 255), cv.RGB(0, 255, 0), 1)

    # Display the image.
    cv.ShowImage("CS201 - Homework 3 - Tyler Boraski", motionMask)

    # If the wrong camera index is initialized, press "n" to cycle
    # through camera indexes.
    # BUG FIX: cv.WaitKey returns an int key code; the original compared
    # it with the string "n", which could never match.
    c = cv.WaitKey(10)
    if c == ord("n"):
        camera_index += 1  # try the next camera index
        capture = cv.CaptureFromCAM(camera_index)
        if not capture:  # next index did not work, reset to 0
            camera_index = 0
            capture = cv.CaptureFromCAM(camera_index)

    # If "esc" is pressed, the program ends.
    esc = cv.WaitKey(7) % 0x100
    if esc == 27:
        quit()