class MainProcess(threading.Thread):
    """Video-analytics worker thread.

    Continuously reads frames from a video source, runs person and face
    detectors, feeds the detections to SORT trackers, draws overlays on
    the frame and maintains rolling statistics (FPS, per-track history
    and a filtered per-interval summary).  All shared state is guarded
    by ``self.lock``; consumers on other threads must go through the
    ``get*`` accessors.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        # Guards every attribute that run() mutates and accessors read.
        self.lock = threading.Lock()

        # Detectors (model paths are relative to the working directory).
        self.personDetector = Detector(
            'data/person-detection-retail-0013/FP32/person-detection-retail-0013.xml',
            'bin/libcpu_extension.so')
        self.faceDetector = Detector(
            'data/face-detection-retail-0004/face-detection-retail-0004.xml',
            'bin/libcpu_extension.so')

        # Overlay painter.
        self.painter = Painter()

        # Shared state (read under self.lock by the accessors below).
        self.personTrackBbIDs = []      # latest SORT output for persons
        self.faceTrackBbIDs = []        # latest SORT output for faces
        self.instantFPS = 0
        self.avgFPS = 0                 # mean FPS over the last 20 frames
        self.frame = None               # last annotated frame (or None)
        self.startTime = None           # start of current sampling window
        self.avgFilterStartTime = None
        # BUG FIX: previously created for the first time inside run();
        # define it here with the other timers so the attribute always
        # exists on the instance.
        self.startPostTime = None       # start of current posting window
        self.detectedPersonHist = {}    # trackID (str) -> TrackerBbox
        self.detectedFaceHist = {}      # trackID (str) -> TrackerBbox
        self.detectedPersonHisFiltered = []  # rolling summary dicts
        self.sampleTime = 1             # seconds per filtering window
        self.framesFactor = 0.3         # min fraction of frames a box must appear in
        self.maxIdleTime = 10           # seconds before a stale track is dropped
        self.filteredDetection = []     # tracks that passed the hit filter
        self.postFrequency = 1          # seconds between summary appends
        self.videoSize = []             # (width, height) of the capture

    # return unnormalized person bboxes as plain Python lists
    def getPersonBboxes(self):
        with self.lock:
            bboxes = self.personTrackBbIDs
            # BUG FIX: before run() produces tracker output this is the
            # plain list set in __init__, which has no .tolist() —
            # calling the accessor early used to raise AttributeError.
            return bboxes.tolist() if hasattr(bboxes, 'tolist') else list(bboxes)

    def getFrame(self):
        """Return the latest frame PNG-encoded as bytes, or None."""
        # BUG FIX: take the lock — run() rewrites self.frame under it,
        # so encoding without synchronization raced with the writer.
        with self.lock:
            if self.frame is None:
                return None
            _, encoded = cv2.imencode('.png', self.frame)
            return encoded.tobytes()

    def getPersonHis(self):
        """Return the per-track person history dict (shared reference)."""
        with self.lock:
            return self.detectedPersonHist

    def getLiveSummary(self):
        """Return the rolling per-interval summary list (shared reference)."""
        with self.lock:
            return self.detectedPersonHisFiltered

    def filterNumBoxes(self):
        # Placeholder — not implemented yet.
        pass

    def getTimeDiffinSec(self, start, end):
        """Return (end - start) in seconds for two datetime objects."""
        timeDiff = end - start
        return timeDiff.total_seconds()

    def processTracker(self, trackerDetections, trackerHist):
        """Update per-track bookkeeping and draw each tracked box.

        trackerDetections: SORT output rows [x1, y1, x2, y2, trackID].
        trackerHist: dict mapping trackID (str) -> TrackerBbox; mutated
        in place.
        """
        for trackedBbox in trackerDetections:
            trackID = str(int(trackedBbox[4]))
            if trackID in trackerHist:
                hist = trackerHist[trackID]
                hist.increaseHit()
                hist.updateAliveTime()
                hist.refreshLastUpdate()
                hist.setTrackerCoords(trackedBbox[:4])
                hist.addTrackerShadowPt()
            else:
                # NOTE(review): (0) is the int 0, not a tuple, and the
                # new entry never gets coords set on creation —
                # preserved as-is; verify against TrackerBbox.__init__.
                trackerHist[trackID] = TrackerBbox(trackID, (0))

            # Draw the tracked box and its motion shadow.
            self.frame = self.painter.DrawBox(self.frame,
                                              trackedBbox.tolist(),
                                              self.videoSize,
                                              trackerHist[trackID].getColor(),
                                              thickness=2)
            self.frame = self.painter.DrawTrackerShadow(
                self.frame, trackerHist[trackID].getTrackerShadow(),
                self.videoSize, trackerHist[trackID].getColor())

    def cleanUpBuffers(self, trackerHist):
        """Drop stale tracks and cap the rolling summary buffer."""
        # Clean-up old bboxes once the buffer grows large.
        if len(trackerHist) > 1000:
            # Iterate over a snapshot of the keys so deletion is safe.
            for bbox in list(trackerHist):
                if trackerHist[bbox].getIdleTime() > self.maxIdleTime:
                    del trackerHist[bbox]

        # Keep only the most recent summary entries.
        if len(self.detectedPersonHisFiltered) > 5:
            self.detectedPersonHisFiltered.pop(0)

    def run(self):
        """Main capture/detect/track/draw loop (thread entry point)."""
        # Initialize timers
        self.startTime = datetime.datetime.now()
        self.avgFilterStartTime = datetime.datetime.now()
        self.startPostTime = datetime.datetime.now()

        # Initialize Trackers
        personTracker = Sort()
        faceTracker = Sort()

        # Open video source (file here; webcam lines kept for reference)
        video_capturer = cv2.VideoCapture("testVideos/street360p.mp4")
        # video_capturer = cv2.VideoCapture(0)
        # video_capturer.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
        # video_capturer.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        fps = 0

        self.videoSize = (video_capturer.get(3), video_capturer.get(4))
        fpsList = []

        while video_capturer.isOpened():
            with self.lock:
                # Start time
                start = time.time()
                timeNow = datetime.datetime.now()

                # Read frame
                ret, self.frame = video_capturer.read()
                # BUG FIX: at end-of-stream read() returns (False, None)
                # and the detectors would crash on a None frame.
                if not ret or self.frame is None:
                    break

                # Person detector
                personBBoxes = self.personDetector.Detect(self.frame,
                                                          maxThresh=0.7)

                # Face detector
                faceBboxes = self.faceDetector.Detect(self.frame,
                                                      maxThresh=0.7)

                # Update Trackers
                self.personTrackBbIDs = personTracker.update(
                    np.array(personBBoxes))
                self.faceTrackBbIDs = faceTracker.update(np.array(faceBboxes))

                # Draw fps
                self.frame = self.painter.DrawFPS(self.frame, self.avgFPS)

                # Draw numBoxes
                self.frame = self.painter.DrawTotalBoxes(
                    self.frame, len(self.personTrackBbIDs))

                # Draw raw person detections (thick white boxes)
                for personBbox in personBBoxes:
                    self.frame = self.painter.DrawBox(self.frame,
                                                      personBbox,
                                                      self.videoSize,
                                                      color=([255, 255, 255]),
                                                      thickness=5)

                # Draw raw face detections (blue boxes) and blur faces
                for faceBbox in faceBboxes:
                    self.frame = self.painter.DrawBox(self.frame,
                                                      faceBbox,
                                                      self.videoSize,
                                                      color=([255, 0, 0]),
                                                      thickness=2)

                    self.frame = self.painter.ApplyGaussian(
                        self.frame, faceBbox[:4], self.videoSize)

                # Measure performance: rolling average over last 20 frames
                seconds = time.time() - start
                fps = 1 / seconds
                fpsList.append(fps)

                if len(fpsList) > 20:
                    self.avgFPS = np.array(fpsList).mean()
                    fpsList.pop(0)

                # Process person tracker
                self.processTracker(self.personTrackBbIDs,
                                    self.detectedPersonHist)

                # Process face tracker
                self.processTracker(self.faceTrackBbIDs, self.detectedFaceHist)

                # Save detection hist to be consumed later
                if self.getTimeDiffinSec(self.startTime,
                                         timeNow) > self.sampleTime:
                    # Keep only boxes present in at least framesFactor of
                    # the frames during the sampling window:
                    # seconds * average FPS * factor hits to be valid.
                    self.filteredDetection = [
                        self.detectedPersonHist[bboxID]
                        for bboxID in self.detectedPersonHist
                        if self.detectedPersonHist[bboxID].hits >
                        self.sampleTime * self.avgFPS * self.framesFactor
                    ]

                    # Reset person boxes hits for the next window
                    for bbox in self.detectedPersonHist:
                        self.detectedPersonHist[bbox].resetHits()

                    # Reset face boxes hits for the next window
                    for bbox in self.detectedFaceHist:
                        self.detectedFaceHist[bbox].resetHits()

                    # Reset timer
                    self.startTime = datetime.datetime.now()

                if self.getTimeDiffinSec(self.startPostTime,
                                         timeNow) > self.postFrequency:
                    # Append one summary record per posting interval.
                    # avgStayTime is mean alive time in minutes.
                    self.detectedPersonHisFiltered.append({
                        "datetime":
                        str(timeNow),
                        "numPerson":
                        len(self.filteredDetection),
                        "avgStayTime":
                        round(
                            np.array([
                                val.getAliveTime()
                                for val in self.filteredDetection
                            ]).mean() /
                            60, 2) if len(self.filteredDetection) > 0 else 0
                    })
                    self.startPostTime = datetime.datetime.now()

                # Clean-up buffers
                self.cleanUpBuffers(self.detectedPersonHist)
                self.cleanUpBuffers(self.detectedFaceHist)

                cv2.imshow('frame', self.frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # BUG FIX: release the capture device and close the preview
        # window on exit (previously leaked).
        video_capturer.release()
        cv2.destroyAllWindows()
# ----- Example #2: start of a second, unrelated script fragment -----
def main():
    """Entry point for the cell detection/tracking demo.

    Runs detection + Kalman tracking over the mask sequence in
    ``mask_DIC``, renders either a 3-D trajectory plot (``--traj``) or
    an animation over the original frames (``--plot``), then serves an
    interactive query loop (speed / total / net distance per cell ID).
    """
    fig = plt.figure()
    args = config_parse()
    imgs = []
    graphs = []

    if not (args.traj or args.plot):
        print("Please choose plot method")
        return

    if args.traj:
        ax = plt.axes(projection='3d')
    else:
        fig, axs = plt.subplots()

    dataset = ['dic', 'fluo', 'phc']
    if args.type not in dataset:
        print(f"Please choose from {dataset}")
        return
    if args.type == 'dic':
        # Association range used when tracing backwards to detect
        # mitosis: a new trace must link to an old trace in a previous
        # frame within [minD, maxD].
        minD = 30
        maxD = 70
        # Dissociation range of the Kalman track.
        dissassociateRange = 30
    else:
        # 'fluo' and 'phc' datasets are not supported yet.
        raise NotImplementedError

    img = plt.imread("mask_DIC/mask000.tif")
    size = img.shape
    # NOTE(review): width is taken from axis 0 and height from axis 1,
    # matching the original code — confirm against Graph's expectations.
    width = size[0]
    height = size[1]

    tracker = Tracker(dissassociateRange, 100, 0)
    detector = Detector()

    # Collect and sort the mask file names (renamed from `l`, which is
    # easily confused with `1`).
    mask_files = []
    for _root, _dirs, files in os.walk("mask_DIC"):
        mask_files.extend(files)
    mask_files.sort()

    # Original (raw) frame file names.
    ori = []
    for _root, _dirs, files in os.walk("Sequence 1"):
        ori.extend(files)
    ori.sort()

    first = -1        # previous frame index (z-axis of the 3-D plot)
    second = 0        # current frame index
    Clist = []        # detected centers per frame
    plt_trace = []    # deep-copied track snapshots for the animation

    for name in mask_files:
        imgN = os.path.join("mask_DIC", name)
        print(imgN)
        img = plt.imread(imgN)
        graph = Graph(img, height, width)

        centers = detector.Detect(graph)
        Clist.append(centers)
        tracker.Update(centers, second)

        if args.traj:
            # TODO: repair traj hasn't been done
            for track in tracker.tracks:
                if len(track.trace) > 1:
                    # Draw only the most recent segment of each trace.
                    x1, y1 = track.trace[-2][0], track.trace[-2][1]
                    x2, y2 = track.trace[-1][0], track.trace[-1][1]
                    if x1 == x2 and y1 == y2:
                        continue  # stationary — nothing to draw
                    ax.plot([x1, x2], [y1, y2], [first, second])
                    plt.draw()

        if args.plot:
            oriN = os.path.join("Sequence 1", ori[second])
            imgs.append(plt.imread(oriN))
            # Deep-copy: the tracker mutates tracks on every Update.
            plt_trace.append(copy.deepcopy(tracker.tracks))
            graphs.append(copy.deepcopy(graph))

        first += 1
        second += 1
        if second == args.num:
            break

    # Re-link traces across mitosis events.
    plt_trace = MitosisRecovery(tracker, plt_trace, minD, maxD)

    ani = None
    if args.plot:
        ani = FuncAnimation(fig,
                            update,
                            fargs=(imgs, axs, Clist, plt_trace, graphs),
                            interval=args.interval,
                            frames=second)
    plt.show()
    # BUG FIX: `ani` only exists when --plot was given; saving without
    # plotting previously raised NameError.
    if args.save and ani is not None:
        ani.save('myAnimation.gif', writer='imagemagick', fps=15)

    # Interactive query loop; 'q' exits.
    done = args.search
    qlist = ['speed', 'total', 'net']
    while done:
        choice = input("> ")
        if choice in qlist:
            cell_id = int(input("ID: "))  # renamed: `id` shadows builtin
            frame = int(input("which frame u are: "))
            if frame < 1:
                print("the frame has to be great than 0")
                continue

            t = tracker.findTrack(cell_id)
            if t is None:
                print("Dont have this cell")
                continue
            if len(t.frame) == 1:
                print("Sorry, this cell only appear once")

            # Snap to the track frame closest to the requested one.
            absolute_val_array = np.abs(np.array(t.frame) - frame)
            smallest_difference_inx = absolute_val_array.argmin()
            closet_frame = t.frame[smallest_difference_inx]
            if closet_frame != frame:
                print(
                    f"Sorry we can't find {cell_id} in frame {frame}, the closet frame is {closet_frame}"
                )
            if choice == "speed":
                # NOTE(review): this indexes t.frame by a frame *value*
                # (closet_frame - 1), not a list index — preserved
                # as-is; verify against the Track.frame contract.
                pre_frame = t.frame[closet_frame - 1]
                pre_loc = np.array(t.trace[smallest_difference_inx - 1])
                cur_loc = np.array(t.trace[smallest_difference_inx])
                dist = np.linalg.norm(cur_loc - pre_loc)
                speed = dist / (frame - pre_frame)
                print(
                    f"The {cell_id} at frame {frame} has a speed {speed} pixel/frame"
                )
            if choice == "total":
                t.printTrace(smallest_difference_inx)
                dist = t.totalDistance(smallest_difference_inx)
                print(f"It has travelled {dist} in total")
            if choice == "net":
                loc = np.array(t.trace[smallest_difference_inx])
                start = np.array(t.trace[0])
                dist = np.linalg.norm(loc - start)
                print(f"The net distance is {dist}")

        if choice == "q":
            done = False
        corners[1][1] = max(corners[1][1], corners1[1][1])
        corners[2][0] = min(corners[2][0], corners1[2][0])
        corners[2][1] = min(corners[2][1], corners1[2][1])
        corners[3][0] = max(corners[3][0], corners1[3][0])
        corners[3][1] = min(corners[3][1], corners1[3][1])

        print(corners1)
        continue

    corners = np.float32(corners)

    # Perspective transform on frame to get table only with right measurements:
    frame = getTableFromFrame(corners, frame)

    # Detect box centers and angles:
    (centers, angles) = detector.Detect(frame)

    # Track box centers:
    if (len(centers) > 0):
        tracker.Update(centers)

    for i in range(len(tracker.tracks)):
        if (len(tracker.tracks[i].trace) > 1):
            for j in range(len(tracker.tracks[i].trace) - 1):
                # Draw trace line
                x1 = tracker.tracks[i].trace[j][0][0]
                y1 = tracker.tracks[i].trace[j][1][0]
                x2 = tracker.tracks[i].trace[j + 1][0][0]
                y2 = tracker.tracks[i].trace[j + 1][1][0]
                clr = tracker.tracks[i].track_id % 9
                cv.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),