Example #1
    def segment_motion(self):
        min_area = 100
        size = cv.GetSize(self.mask)  # get current frame size
        temp = cv.CloneImage(self.mask)
        cv.CalcMotionGradient(self.mhi, temp, self.orient, self.MAX_TIME_DELTA,
                              self.MIN_TIME_DELTA, 3)
        #cv.ShowImage("orient", self.orient);
        if not self.storage:
            self.storage = cv.CreateMemStorage(0)
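        # SegmentMotion splits the accumulated motion history into separate moving
        # segments; each element of seq is an (area, value, comp_rect) tuple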
        seq = cv.SegmentMotion(self.mhi, self.segmask, self.storage,
                               self.timestamp, self.MAX_TIME_DELTA)
        #cv.ShowImage("segmask", self.segmask)

        max_area = 0
        max_idx = -1
        for i in range(len(seq)):
            (area, value, comp_rect) = seq[i]
            if area > max_area:
                max_area = area
                max_idx = i
        if max_idx == -1:
            cv.Zero(self.mask)
            return

        (area, value, comp_rect) = seq[max_idx]
        if area < min_area:
            cv.Zero(self.mask)
            return

        cv.Zero(self.mask)
        cv.Rectangle(
            self.mask, (comp_rect[0], comp_rect[1]),
            (comp_rect[0] + comp_rect[2], comp_rect[1] + comp_rect[3]),
            (255, 255, 255), cv.CV_FILLED)
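
The method above assumes the owning class has already created the legacy-cv images and constants it reads (self.mhi, self.orient, self.segmask, self.mask, self.storage, self.timestamp, self.MAX_TIME_DELTA, self.MIN_TIME_DELTA). A minimal setup sketch for those members could look like the following; the helper name and the constant values are assumptions, not part of the original example.

    def init_motion_buffers(self, size):
        # hypothetical helper: allocate what segment_motion() expects, given a (width, height) tuple
        self.MAX_TIME_DELTA = 0.5   # assumed value
        self.MIN_TIME_DELTA = 0.05  # assumed value
        self.mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)      # motion history image
        self.segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)  # motion segmentation mask
        self.orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)   # gradient orientation
        self.mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)      # 8-bit output mask
        cv.Zero(self.mhi)
        self.storage = None   # created lazily inside segment_motion()
        self.timestamp = 0.0  # expected to be set to the current time (seconds) before each call
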
Example #2
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
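    # (re)allocate the frame ring buffer and motion images whenever the frame size changes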
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    
    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
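    # scale the MHI to a visible 8-bit mask (recent motion bright, older motion fading out)
    # and copy it into the first channel of dst (blue for a BGR image)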
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0,0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
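
update_mhi above targets the legacy cv bindings and relies on module-level state (buf, last, mhi, orient, segmask, mask, storage) plus constants (N, MHI_DURATION, MAX_TIME_DELTA, MIN_TIME_DELTA, CLOCKS_PER_SEC) that are not shown. A rough driver in the style of OpenCV's motempl.py sample is sketched below; the constant values, camera index and threshold are assumptions.

import time
from math import cos, sin
import cv2.cv as cv   # legacy bindings; older installs use "import cv"

CLOCKS_PER_SEC = 1.0   # assumed
MHI_DURATION = 1       # assumed: how long motion is remembered, in seconds
MAX_TIME_DELTA = 0.5   # assumed
MIN_TIME_DELTA = 0.05  # assumed
N = 4                  # assumed length of the frame ring buffer

buf = [None] * N
last = 0
mhi = orient = segmask = mask = storage = None

if __name__ == "__main__":
    capture = cv.CaptureFromCAM(0)   # assumed camera index
    frame = cv.QueryFrame(capture)
    dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
    cv.NamedWindow("Motion", 1)
    while True:
        frame = cv.QueryFrame(capture)
        if frame is None:
            break
        update_mhi(frame, dst, 30)   # assumed diff threshold
        cv.ShowImage("Motion", dst)
        if cv.WaitKey(10) == 27:     # Esc quits
            break
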
Example #3
  def processMotion(self):
    """
    Take a raw input image frame from the camera and perform motion detection using
    the current frame plus considering several previous frames and return the CV
    image that should be given to the Region network.
    """
    #find motion image, then find feature corners from that
    if self._prevIplImage:
      cv.AbsDiff(self._inputImage, self._prevIplImage, self._diffImage)
    else:
      cv.Copy(self._inputImage, self._diffImage)
      
    cv.Copy(self._inputImage, self._prevIplImage) #save as t-1 image for next frame
    
    #(src, dest, threshold, maxVal, type)
    cv.Threshold(self._diffImage, self._threshImage, 16.0, 255.0, cv.CV_THRESH_BINARY)
    
    #For now, disable segmentMotion and return all motion in frame...
    if self._threshImage is not None:
      return (0,0, self._threshImage.width, self._threshImage.height)
    
    ###Experimental: segment motion to return only the most 'interesting' area
    tsec = clock()
    #(silhouette, mhi, timestamp, duration)
    cv.UpdateMotionHistory(self._threshImage, self._historyImage, tsec, 0.3)

    #(mhi, segMask, storage, timestamp, segThresh)
    #return: [tuple(area, value, rect)], (float, CvScalar, CvRect)
    seqs = cv.SegmentMotion(self._historyImage, self._segMaskImage, \
                            self._memStorage, tsec, 1.0)
    
    #cv.Copy(self._threshImage, self._inputImage)
    #cv.Threshold(self._segMaskImage, self._threshImage, 0.0, 250.0, CV_THRESH_BINARY)
    
    rects = []
    for seq in seqs:
      seqRect = seq[2] #CvRect = tuple (x, y, width, height)
      if(seqRect[2] > 4 and seqRect[3] > 4):
        rects.append(seqRect)
    
    #keep only the rect(s) with the largest area
    if len(rects) > 0:
      areas = [x[2]*x[3] for x in rects]
      areas.sort()
      minArea = areas[-1] #largest area
      
      rectsOk = [x for x in rects if x[2]*x[3] >= minArea]
      rect = rectsOk[0]
      
      #center point of the largest rect (computed but unused; the full rect is returned)
      cRect = (rect[0]+(rect[2]/2), rect[1]+(rect[3]/2))
      return rect
    
    return None #none means no motion bounding box detected
Example #4
    def process_motion(self,img):
        center = (-1, -1)
        # much of this section was adapted from motempl.py,
        #  OpenCV's Python sample code
        timestamp = time.clock() / self.clocks_per_sec # get current time in seconds
        idx1 = self.last
        cv.CvtColor(img, self.buf[self.last], cv.CV_BGR2GRAY) # convert frame to grayscale
        idx2 = (self.last + 1) % self.n_frames 
        self.last = idx2
        silh = self.buf[idx2]
        cv.AbsDiff(self.buf[idx1], self.buf[idx2], silh) # get difference between frames
        cv.Threshold(silh, silh, 30, 1, cv.CV_THRESH_BINARY) # and threshold it
        cv.UpdateMotionHistory(silh, self.mhi, timestamp, self.mhi_duration) # update MHI
        cv.ConvertScale(self.mhi, self.mask, 255./self.mhi_duration, 
                        (self.mhi_duration - timestamp)*255./self.mhi_duration)
        cv.SetZero(img)
        cv.Merge(self.mask, None, None, None, img)
        cv.CalcMotionGradient(self.mhi, self.mask, self.orient, self.max_time_delta, self.min_time_delta, 3)
        seq = cv.SegmentMotion(self.mhi, self.segmask, self.storage, timestamp, self.max_time_delta)
        inc = 0
        a_max = 0
        max_rect = -1
    
        # there are lots of things moving around
        #  in this case just find the biggest change in the image
        for (area, value, comp_rect) in seq:
            if comp_rect[2] + comp_rect[3] > 60: # reject small changes
                if area > a_max: 
                    a_max = area
                    max_rect = inc
            inc += 1

        # found it, now just do some processing on the area.
        if max_rect != -1:
            (area, value, comp_rect) = seq[max_rect]
            color = cv.CV_RGB(255, 0,0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            # calculate number of points within silhouette ROI
            count = cv.Norm(silh_roi, None, cv.CV_L1, None)
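            # note: count is computed but not used here; example #2 above uses it to reject sparse silhouettes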

            # this rectangle contains the overall motion ROI
            cv.Rectangle(self.motion, (comp_rect[0], comp_rect[1]), 
                         (comp_rect[0] + comp_rect[2], 
                          comp_rect[1] + comp_rect[3]), (0,0,255), 1)

            # the goal is to report back a center of movement contained in a rectangle
            # adjust the height based on the number generated by the slider bar
            h = int(comp_rect[1] + (comp_rect[3] * (float(self.height_value) / 100)))
            # then calculate the center
            center = ((comp_rect[0] + comp_rect[2] / 2), h)

        return center
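
A hypothetical way to use process_motion: hand it BGR camera frames and treat the returned (-1, -1) as "no dominant motion". The class name MotionTracker and the camera index are assumptions for illustration.

tracker = MotionTracker()            # hypothetical class wrapping process_motion()
capture = cv.CaptureFromCAM(0)       # assumed camera index
while True:
    frame = cv.QueryFrame(capture)
    if frame is None:
        break
    center = tracker.process_motion(frame)
    if center != (-1, -1):
        print "motion centered at", center   # Python 2 print, matching the legacy cv API
    if cv.WaitKey(10) == 27:
        break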