Example #1
    def segment(self, img):
        self.bbox = self.com = None

        # Run motion segmentation on the grayscale frame and skin-color
        # segmentation on the original BGR frame.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        self.motion = self.moseg.segment(gray)
        self.skin = self.coseg.segment(img)

        # No motion detected: nothing to fuse, so return the motion mask as-is.
        if self.moseg.bbox is None:
            self.backprojection = np.zeros_like(self.motion, dtype=float)
            return self.motion

        # Normalize the color backprojection inside the motion bounding box,
        # then blend it with the motion mask using the fusion weight alpha.
        x, y, w, h = self.moseg.bbox
        self.backprojection = np.zeros_like(self.coseg.backprojection)
        roi = self.coseg.backprojection[y:y+h, x:x+w]
        self.backprojection[y:y+h, x:x+w] = roi / roi.max()

        self.backprojection[y:y+h, x:x+w] *= self.alpha
        self.backprojection[y:y+h, x:x+w] += (1 - self.alpha) * self.motion[y:y+h, x:x+w]

        # Fuse the skin and motion masks inside the bounding box and locate
        # the resulting object's bounding box and center of mass.
        fused = np.zeros_like(gray, dtype=bool)
        fused[y:y+h, x:x+w] = self.skin[y:y+h, x:x+w] | self.motion[y:y+h, x:x+w]
        if fused.any():
            self.bbox, self.com = findBBoxCoM(fused)

        return fused
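
A minimal usage sketch for this method (the class name HandSegmenter, its zero-argument constructor, and the webcam loop are assumptions; only the segment() call itself comes from the example above):

import cv2

segmenter = HandSegmenter()  # hypothetical class exposing segment() above

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    fused = segmenter.segment(frame)
    # Display the boolean mask as an 8-bit image.
    cv2.imshow('fused', fused.astype('uint8') * 255)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()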
Example #2
    def Track(self, img):
        mask = self.segmenter.segment(img)

        if self.segmenter.moseg.bbox is not None:
            # Restrict the tracker to the motion segmenter's bounding box.
            x, y, w, h = self.segmenter.moseg.bbox
            mask.fill(0)
            mask[y:y + h, x:x + w] = True

            x, y, w, h = self.tracker.track(
                self.segmenter.coseg.converted_image, mask)

            # It's possible that there is still motion but that tracking
            # failed, so make sure the backprojection is not all zeros.
            if self.tracker.backprojection.any():
                bbox, (xc, yc) = findBBoxCoM(self.tracker.backprojection)
                self.waypts.append((int(xc), int(yc)))
                return  # success! stay in the Track state

        # If we got to this point then tracking has failed.
        if len(self.waypts) > MINWAYPTS and self.callback is not None:
            # Find the best gesture match: flip the waypoint coordinates,
            # then query the dollar recognizer (scale and samplesize are
            # module-level parameters).
            x, y = zip(*[(self.imshape[1] - x, self.imshape[0] - y)
                         for x, y in self.waypts])
            matches = dollar.query(x, y, scale, samplesize, self.template_ds)
            score, theta, clsid = matches[0]

            if score > self.match_threshold:
                ds = self.template_ds[clsid][0]
                self.callback((x, y), (ds['x'], ds['y']), score, theta, clsid)

        self.waypts = []
        return self.Wait  # transition back to the Wait state
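
The `return self.Wait` at the end suggests a state-machine design in which each handler returns the next state's handler, with None meaning "stay in the current state". A driver loop under that assumption (all names here are hypothetical) could look like:

state = tracker.Wait  # hypothetical: start in the Wait state
while True:
    ok, frame = cap.read()
    if not ok:
        break
    nxt = state(frame)   # e.g. Wait(img) or Track(img)
    if nxt is not None:
        state = nxt      # the handler requested a state transition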
Example #3
    def segment(self, img):
        '''
        Parameters
        ----------
        img : array_like
            Grayscale image
        '''
        self.bbox = self.com = None

        # Three-frame buffer: previous, current, and the newly arrived frame.
        prv, cur, nxt = self._buff + (img, )

        T = self.T
        bkgnd = self.background
        # Double-difference motion mask: a pixel is moving only if it changed
        # by more than the adaptive threshold T in both frame pairs.
        moving = (cv2.absdiff(prv, nxt) > T) & (cv2.absdiff(cur, nxt) > T)

        # If any motion was found, attempt to fill in the objects detected.
        # TODO: needs to work with multiple independent objects
        if moving.any():
            self.bbox, self.com = findBBoxCoM(moving)
            x, y, w, h = self.bbox
            # Fill holes inside the bounding box by differencing the new
            # frame against the background model.
            motionfill = cv2.absdiff(nxt[y:y + h, x:x + w],
                                     bkgnd[y:y + h, x:x + w]) > self.T0
            moving[y:y + h, x:x + w] |= motionfill

        # TODO: replace boolean indexing with boolean multiply where possible.
        # The threshold update depends on the current background model,
        # so always update T before updating the background.
        notmoving = ~moving
        T[notmoving] = self.alpha * T[notmoving] \
            + (1 - self.alpha) * 5 * cv2.absdiff(nxt, bkgnd)[notmoving]
        # T is left unchanged in moving regions.
        T[T < self.T0] = self.T0  # clamp to the minimum threshold

        # Blend static pixels into the background model; moving pixels
        # replace the background outright.
        bkgnd[notmoving] = self.alpha * bkgnd[notmoving] + (
            1 - self.alpha) * nxt[notmoving]
        bkgnd[moving] = nxt[moving]

        # Slide the frame buffer forward.
        self._buff = (cur, nxt)

        return moving
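
Where nothing is moving, the updates above are exponential moving averages: T becomes alpha*T + (1-alpha)*5*|nxt - bkgnd| (clamped below by T0), and the background becomes alpha*bkgnd + (1-alpha)*nxt, while moving pixels overwrite the background outright. A self-contained sketch of just the double-difference test (the function name and toy data are assumptions, not part of the source):

import cv2
import numpy as np

def double_difference(prv, cur, nxt, T):
    # A pixel is moving only if the newest frame differs from both of the
    # two preceding frames by more than the per-pixel threshold T.
    return (cv2.absdiff(prv, nxt) > T) & (cv2.absdiff(cur, nxt) > T)

# Toy demonstration on synthetic 4x4 frames.
prv = np.zeros((4, 4), dtype=np.uint8)
cur = prv.copy()
nxt = prv.copy()
nxt[1, 1] = 255  # a single pixel changes in the newest frame
print(double_difference(prv, cur, nxt, T=np.full((4, 4), 20.0)))
# -> True only at (1, 1)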