def track(self, pathid, start, stop, basepath, paths):
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            return Path(path.label, path.id, {})
        
        
            
        
        startbox = path.boxes[start]
        initialrect = [startbox.xtl, startbox.ytl, startbox.xbr-startbox.xtl, startbox.ybr-startbox.ytl]
        startbox = [startbox.xtl, startbox.ytl, startbox.xbr, startbox.ybr]
        frames = getframes(basepath, False)
        previmage = frames[start]
        imagesize = previmage.shape
        #print "Frame shape: "
        #print previmage.shape
        #print '=================='
        #if not self.started:
        self.tracker.start_track(frames[start], startbox)#self.visbox_to_dbox(startbox))
        self.started = True
        
        boxes = self.dlib_track(start, stop, frames, initialrect, imagesize)
        #meanshift(start, stop, initialrect, imagesize)
        # boxes need to be in vision.Box() form
        # width is computed from x's, so corresponds to the columns, y to the rows (see documentation in annotation.py
        #[dbox.left(), dbox.top(), dbox.right(), dbox.bottom()]
        return Path(path.label, path.id, boxes)
    def track(self, pathid, start, stop, basepath, paths):
        """Track a path forward from `start` to `stop` by template matching.

        Matches the annotated box at `start` frame-by-frame, then drops
        matches that drifted out of the image via filterlost.

        Returns a Path with empty boxes when `pathid` is unknown or the
        path has no annotation at `start`.
        """
        # Guard added for consistency with the other trackers; the
        # original raised KeyError on an unknown pathid.
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            return Path(path.label, path.id, {})

        print("Tracking from {0} to {1}".format(start, stop))
        startbox = path.boxes[start]
        # (x, y, width, height) template seed.
        initialrect = (startbox.xtl, startbox.ytl,
                       startbox.xbr - startbox.xtl,
                       startbox.ybr - startbox.ytl)
        frames = getframes(basepath, False)
        boxes = templatematch(start, stop, initialrect, frames)
        # Remove boxes the matcher lost (outside the frame bounds).
        boxes = filterlost(boxes, frames[0].shape)

        return Path(path.label, path.id, boxes)
    def track(self, pathid, start, stop, basepath, paths):
        """Bidirectional template matching, merged at the best-agreement frame.

        Runs template matching forward from `start` and backward from
        `stop`, scores each frame both tracks cover with mergescore, and
        stitches the tracks at the lowest-scoring (best-agreeing) frame:
        forward boxes up to and including it, backward boxes after.

        Returns a Path with empty boxes when `pathid` is unknown or
        either endpoint annotation is missing.
        """
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        # Both endpoint annotations are required; the original indexed
        # path.boxes[stop] unguarded and could raise KeyError.
        if start not in path.boxes or stop not in path.boxes:
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        stopbox = path.boxes[stop]
        # (x, y, width, height) seeds for each direction.
        initialrect = (startbox.xtl, startbox.ytl,
                       startbox.xbr - startbox.xtl,
                       startbox.ybr - startbox.ytl)
        finalrect = (stopbox.xtl, stopbox.ytl,
                     stopbox.xbr - stopbox.xtl,
                     stopbox.ybr - stopbox.ytl)

        frames = getframes(basepath, False)
        forwardboxes = templatematch(start, stop, initialrect, frames)
        backwardboxes = templatematch(stop, start, finalrect, frames)

        # Frames covered by both tracks; lowest mergescore wins.
        # NOTE(review): if the tracks share no frames, min() raises
        # ValueError here — same as the original; confirm callers never
        # hit that case.
        commonframes = list(set(forwardboxes.keys()) & set(backwardboxes.keys()))
        scores = [mergescore(forwardboxes[i], backwardboxes[i]) for i in commonframes]
        mergeframe = min(zip(commonframes, scores), key=lambda pair: pair[1])[0]

        boxes = {}
        for frame in range(start, stop):
            if frame <= mergeframe and frame in forwardboxes:
                boxes[frame] = forwardboxes[frame]
            elif frame > mergeframe and frame in backwardboxes:
                boxes[frame] = backwardboxes[frame]

        return Path(path.label, path.id, boxes)
    def track(self, pathid, start, stop, basepath, paths):
        """Visualize MOG background subtraction over frames [start, stop).

        Debug-only tracker: displays the foreground mask per frame and
        produces no boxes.
        """
        if pathid not in paths:
            return Path(None, None, {})

        # Bug fix: the original returned Path(path.label, ...) without
        # ever defining `path`, raising NameError.
        path = paths[pathid]

        bgs = cv2.BackgroundSubtractorMOG()
        frames = getframes(basepath, False)
        for frame in range(start, stop):
            fgmask = bgs.apply(frames[frame])

            cv2.imshow("Frame", fgmask)
            cv2.waitKey(40)
        cv2.destroyAllWindows()
        return Path(path.label, path.id, {})
    def track(self, pathid, start, stop, basepath, paths):
        """Track with mean shift on a hue back-projection.

        Builds a hue histogram of the annotated box at `start` (masking
        out dark/desaturated pixels whose hue is unreliable), then
        back-projects it in each later frame and lets cv2.meanShift
        relocate the window.

        NOTE(review): the original body referenced undefined names
        (`boxes[i]`, `ret`) and never returned — it could not have run.
        Reconstructed to collect vision.Box results and return a Path,
        matching the sibling CamShift tracker; confirm against callers.
        """
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            return Path(path.label, path.id, {})

        # Mean shift needs color information: request colored frames.
        frames = getframes(basepath, True)
        frame = frames[start]
        imagesize = frame.shape

        # Initial search window (x, y, width, height).
        box = path.boxes[start]
        rect = (box.xtl, box.ytl, box.xbr - box.xtl, box.ybr - box.ytl)
        c, r, w, h = rect

        # Hue histogram of the seed region.
        roi = frame[r:r + h, c:c + w]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

        # Stop after 10 iterations or when the window moves < 1 px.
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

        boxes = {}
        for i in range(start, stop):
            image = frames[i]
            if image is None:
                break

            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

            # Apply mean shift to get the new window location.
            _, rect = cv2.meanShift(dst, rect, term_crit)
            x, y, w, h = rect
            boxes[i] = vision.Box(
                max(0, x),
                max(0, y),
                min(imagesize[1], x + w),
                min(imagesize[0], y + h),
                frame=i,
                generated=True
            )

        return Path(path.label, path.id, boxes)
# Exemple #6
# 0
    def track(self, pathid, start, stop, basepath, paths):
        """Optical-flow tracking: follow feature points, then fit boxes.

        Extracts point tracks with getpoints starting from the annotated
        box at `start`, then converts the point clouds to per-frame
        boxes via meanshift.

        Returns a Path with empty boxes when `pathid` is unknown or the
        path has no annotation at `start`.
        """
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        # (x, y, width, height) seed rectangle.
        initialrect = (startbox.xtl, startbox.ytl,
                       startbox.xbr - startbox.xtl,
                       startbox.ybr - startbox.ytl)
        frames = getframes(basepath, False)
        # shape is (rows, cols[, channels]): y extent first, then x.
        imagesize = frames[start].shape

        points = getpoints(start, stop, frames, startbox)
        boxes = meanshift(start, stop, points, initialrect, imagesize)

        return Path(path.label, path.id, boxes)
	def setUpClass(cls):
		"""Build the shared list of frame fixtures once for the class."""
		update = False
		cls.tests = []
		# Same three fixtures as before, assembled from a parameter table.
		for suffix, name in (("", "syunn"), ("2", "syunn"), ("3", "velier")):
			cls.tests.append(getframes(suffix, name, update))
# Exemple #8
# 0
    def track(self, pathid, start, stop, basepath, paths):
        """Track with CamShift on a hue back-projection.

        Seeds from the annotated box at `start`, builds a hue histogram
        of that region (masking out dark/desaturated pixels whose hue is
        unreliable), back-projects it in each later frame, and lets
        cv2.CamShift adapt the window. Resulting boxes are clamped to
        the image bounds and flagged generated.

        Returns a Path with empty boxes when `pathid` is unknown or the
        path has no annotation at `start`.
        """
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        # Initial search window (x, y, width, height); +1 because the
        # box corners are inclusive.
        rect = (startbox.xtl, startbox.ytl,
                startbox.xbr - startbox.xtl + 1,
                startbox.ybr - startbox.ytl + 1)
        # CamShift needs color information: request colored frames.
        frames = getframes(basepath, True)
        previmage = frames[start]
        imagesize = previmage.shape

        # Hue histogram of the seed region.
        roi = previmage[startbox.ytl:startbox.ybr + 1,
                        startbox.xtl:startbox.xbr + 1]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

        # Stop after 10 iterations or when the window moves < 1 px.
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

        boxes = {}
        for i in range(start + 1, stop):
            nextimage = frames[i]
            if nextimage is None:
                break

            hsv = cv2.cvtColor(nextimage, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

            # CamShift adapts both the position and size of the window.
            _, rect = cv2.CamShift(dst, rect, term_crit)

            # Convert (x, y, w, h) back to inclusive corners, clamped to
            # the image: shape[1] is the x bound, shape[0] the y bound.
            x1, y1, w, h = rect
            x2 = x1 + w - 1
            y2 = y1 + h - 1
            boxes[i] = vision.Box(
                max(0, x1),
                max(0, y1),
                min(imagesize[1], x2),
                min(imagesize[0], y2),
                frame=i,
                generated=True
            )

        return Path(path.label, path.id, boxes)
# Exemple #9
# 0
    def track(self, pathid, start, stop, basepath, paths):
        """Bidirectional optical-flow tracking merged at best agreement.

        Tracks feature points forward from `start` and backward from
        `stop`, finds the frame where the two point clouds' centroids
        are closest (smallest squared distance), runs meanshift box
        fitting from each end past that merge frame, and stitches:
        forward boxes up to and including the merge frame, backward
        boxes after it.

        Returns a Path with empty boxes when `pathid` is unknown or
        either endpoint annotation is missing.
        """
        # Guard added for consistency with the other trackers; the
        # original raised KeyError on an unknown pathid.
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes or stop not in path.boxes:
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        stopbox = path.boxes[stop]
        # (x, y, width, height) seeds for each direction.
        initialrect = (startbox.xtl, startbox.ytl,
                       startbox.xbr - startbox.xtl,
                       startbox.ybr - startbox.ytl)
        finalrect = (stopbox.xtl, stopbox.ytl,
                     stopbox.xbr - stopbox.xtl,
                     stopbox.ybr - stopbox.ytl)
        frames = getframes(basepath, False)
        # shape is (rows, cols[, channels]): y extent first, then x.
        imagesize = frames[start].shape

        forwardpoints = getpoints(start, stop, frames, startbox)
        backwardpoints = getpoints(stop, start, frames, stopbox)

        # Frames covered by both tracks, in order; row index i of the
        # mean arrays below corresponds to rowtoframe[i].
        rowtoframe = sorted(set(forwardpoints.keys()) & set(backwardpoints.keys()))
        forwardmean = np.array([np.mean(forwardpoints[f], axis=0) for f in rowtoframe])
        backwardmean = np.array([np.mean(backwardpoints[f], axis=0) for f in rowtoframe])
        meandiff = np.sum(np.square(forwardmean - backwardmean), axis=1)
        mergeframe = rowtoframe[np.argmin(meandiff)]

        print("Start frame {0}".format(start))
        print("end frame {0}".format(stop))
        print("Merge frame {0}".format(mergeframe))

        # Overshoot the merge frame by 5 from each side so both tracks
        # cover it with some slack.
        startboxes = meanshift(start, mergeframe - 5, forwardpoints, initialrect, imagesize)
        stopboxes = meanshift(stop, mergeframe + 5, backwardpoints, finalrect, imagesize)

        boxes = {}
        for frame in rowtoframe:
            if frame <= mergeframe and frame in startboxes:
                boxes[frame] = startboxes[frame]
            elif frame > mergeframe and frame in stopboxes:
                boxes[frame] = stopboxes[frame]

        return Path(path.label, path.id, boxes)