def track(self, pathid, start, stop, basepath, paths):
        """Track path `pathid` forward from frame `start` to `stop` with dlib.

        Seeds the dlib correlation tracker with the annotated box at `start`
        and returns a Path whose boxes dict maps frame number -> box.  An
        empty boxes dict is returned when the path or its start box is
        missing.
        """
        if pathid not in paths:
            # Unknown path: nothing to track.
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            # No annotated box at the start frame to seed the tracker with.
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        # [x, y, width, height] rectangle handed to dlib_track.
        initialrect = [startbox.xtl, startbox.ytl,
                       startbox.xbr - startbox.xtl, startbox.ybr - startbox.ytl]
        # [xtl, ytl, xbr, ybr] corners handed to the dlib correlation tracker.
        startbox = [startbox.xtl, startbox.ytl, startbox.xbr, startbox.ybr]
        frames = getframes(basepath, False)
        imagesize = frames[start].shape

        self.tracker.start_track(frames[start], startbox)
        self.started = True

        # boxes come back keyed by frame in vision.Box() form; width is
        # computed from x's (columns), y corresponds to rows (see
        # documentation in annotation.py).
        boxes = self.dlib_track(start, stop, frames, initialrect, imagesize)
        return Path(path.label, path.id, boxes)
# Exemplo n.º 2
# 0
def bidirectional(tracker, start, stop, basepath, pathid, paths):
    """Dispatch `pathid` to a registered bidirectional tracker.

    Bidirectional trackers need annotated boxes at BOTH endpoints.  Returns
    the tracker's Path, an empty Path when the path/boxes are missing, or
    None when `tracker` is not a registered bidirectional tracker name.
    """
    if pathid not in paths:
        return Path(None, None, {})

    # Bug fix: the original referenced an undefined name `path` below,
    # raising NameError whenever an endpoint box was missing.
    path = paths[pathid]

    if start not in path.boxes or stop not in path.boxes:
        return Path(path.label, path.id, {})

    if tracker in bidirectionaltrackers:
        # Instantiate the registered tracker class and run it.
        return bidirectionaltrackers[tracker]().track(pathid, start, stop,
                                                      basepath, paths)
    return None
# Exemplo n.º 3
# 0
def online(tracker, start, stop, basepath, pathid, paths):
    """Dispatch `pathid` to a registered online (forward-only) tracker.

    Online trackers only need an annotated box at `start`.  Returns the
    tracker's Path, an empty Path when the path/start box is missing, or
    None when `tracker` is not a registered online tracker name.
    """
    if pathid not in paths:
        return Path(None, None, {})

    # Bug fix: the original referenced an undefined name `path` below,
    # raising NameError whenever the start box was missing.
    path = paths[pathid]

    if start not in path.boxes:
        return Path(path.label, path.id, {})

    if tracker in onlinetrackers:
        # Instantiate the registered tracker class and run it.
        return onlinetrackers[tracker]().track(pathid, start, stop,
                                               basepath, paths)
    return None
# Exemplo n.º 4
# 0
    def track(self, pathid, start, stop, basepath, paths):
        """Visualize MOG background subtraction over frames `start`..`stop`.

        This tracker is visualization-only: it shows the foreground mask for
        each frame and always returns a Path with an empty boxes dict.
        """
        # Bug fix: the original returned Path(path.label, ...) without ever
        # binding `path`, raising NameError on every call.  Guard and bind
        # it the same way the sibling trackers do.
        if pathid not in paths:
            return Path(None, None, {})
        path = paths[pathid]

        # OpenCV 2.x MOG background subtractor.
        bgs = cv2.BackgroundSubtractorMOG()
        frames = getframes(basepath, False)
        for frame in range(start, stop):
            fgmask = bgs.apply(frames[frame])

            cv2.imshow("Frame", fgmask)
            cv2.waitKey(40)
        cv2.destroyAllWindows()
        return Path(path.label, path.id, {})
# Exemplo n.º 5
# 0
    def track(self, pathid, start, stop, basepath, paths):
        """Track `pathid` from `start` to `stop` via optical-flow points
        plus mean shift.

        Extracts feature points seeded from the start box, runs mean shift
        over them, and returns a Path whose boxes map frame -> tracked box.
        An empty boxes dict is returned when the path or its start box is
        missing.
        """
        if pathid not in paths:
            return Path(None, None, {})

        path = paths[pathid]

        if start not in path.boxes:
            # No annotated box at the start frame to seed the tracker with.
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        # (x, y, width, height) seed rectangle for mean shift.
        initialrect = (startbox.xtl, startbox.ytl,
                       startbox.xbr - startbox.xtl, startbox.ybr - startbox.ytl)
        frames = getframes(basepath, False)
        imagesize = frames[start].shape

        # Per-frame optical-flow feature points, then mean shift over them.
        # (Removed an unused `prevpoints` local and the dead debug
        # visualization block the original carried as a string literal.)
        points = getpoints(start, stop, frames, startbox)
        boxes = meanshift(start, stop, points, initialrect, imagesize)

        cv2.destroyAllWindows()
        return Path(path.label, path.id, boxes)
# Exemplo n.º 6
# 0
    def track(self, pathid, start, stop, basepath, paths):
        """Bidirectional optical-flow/mean-shift tracking of `pathid`.

        Tracks forward from the annotated box at `start` and backward from
        the annotated box at `stop`, picks the frame where the two point
        clouds agree best (minimum squared distance between their mean
        positions), and stitches the two half-tracks together at that merge
        frame.  Returns a Path mapping frame -> box (empty when either
        endpoint box is missing).
        """
        # NOTE(review): unlike the sibling trackers, there is no
        # `pathid not in paths` guard here — a missing pathid raises KeyError.
        path = paths[pathid]

        if start not in path.boxes or stop not in path.boxes:
            # Both endpoint annotations are required for bidirectional tracking.
            return Path(path.label, path.id, {})

        startbox = path.boxes[start]
        stopbox = path.boxes[stop]
        # (x, y, width, height) seed rectangles for the two mean-shift passes.
        initialrect = (startbox.xtl, startbox.ytl, startbox.xbr-startbox.xtl, startbox.ybr-startbox.ytl)
        finalrect = (stopbox.xtl, stopbox.ytl, stopbox.xbr-stopbox.xtl, stopbox.ybr-stopbox.ytl)
        frames = getframes(basepath, False)
        previmage = frames[start]
        imagesize = previmage.shape

        # Optical-flow point tracks in each direction.
        forwardpoints = getpoints(start, stop, frames, startbox)
        backwardpoints = getpoints(stop, start, frames, stopbox)

        # Frames covered by BOTH tracks, sorted; per frame, the mean point
        # position of each track.  The merge frame is where the forward and
        # backward means are closest (min squared distance).
        rowtoframe = sorted(list(set(forwardpoints.keys()) & set(backwardpoints.keys())))
        forwardmean = np.array([np.mean(forwardpoints[frame], axis=0) for frame in rowtoframe])
        backwardmean = np.array([np.mean(backwardpoints[frame], axis=0) for frame in rowtoframe])
        meandiff = np.sum(np.square(forwardmean - backwardmean), axis=1)
        mergeframe = rowtoframe[np.argmin(meandiff)]

        print "Start frame", start
        print "end frame", stop
        print "Merge frame", mergeframe
 
        # Run mean shift from each end; the ±5 overlap around the merge
        # frame lets both passes cover the seam, then the stitch below picks
        # the forward boxes up to mergeframe and backward boxes after it.
        startboxes = meanshift(start, mergeframe - 5, forwardpoints, initialrect, imagesize)
        stopboxes = meanshift(stop, mergeframe + 5, backwardpoints, finalrect, imagesize)
        boxes = {}
        for row, frame in enumerate(rowtoframe):
            if frame <= mergeframe and frame in startboxes:
                boxes[frame] = startboxes[frame]
            elif frame > mergeframe and frame in stopboxes:
                boxes[frame] = stopboxes[frame]

        # Disabled debug visualization (kept as an inert string literal).
        """
        frametorow = {frame: row for row, frame in enumerate(rowtoframe)}
        for i in range(start, stop):
            image = frames[i]

            #if i in forwardpoints:
                #for row in forwardpoints[i]:
                #    cv2.circle(image, tuple(row), 4, 0, 1)

            #if i in backwardpoints:
                #cv2.circle(image, tuple(backwardmean[frametorow[i],:]), 6, 255, 3)
                #for row in backwardpoints[i]:
                #    cv2.circle(image, tuple(row), 4, 255, 1)

            if i in frametorow:
                cv2.putText(image, str(meandiff[frametorow[i]]), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
                cv2.putText(image, "Frame" + str(i), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
                cv2.circle(image, tuple(forwardmean[frametorow[i],:]), 6, 0, 3)
                cv2.circle(image, tuple(backwardmean[frametorow[i],:]), 6, 255, 3)

            # Draw it on image
            if i in boxes:
                box = boxes[i]
                if i < mergeframe:
                    cv2.rectangle(image, (box.xtl,box.ytl), (box.xbr,box.ybr), 0,2)
                else:
                    cv2.rectangle(image, (box.xtl,box.ytl), (box.xbr,box.ybr), 255,2)

            cv2.imshow('Optical flow tracking', image)
            cv2.waitKey()
        """

        cv2.destroyAllWindows()
        return Path(path.label, path.id, boxes)