Example #1
def demo_video(video_file):
    import time
    import cv2
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    cap = common.VideoStream(video_file, queueSize=4).start()
    time.sleep(1)
    total_t, counter = 0, 0
    t = common.clock()

    while not cap.stopped:
        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            ids = range(len(detections))

            # temp = mtracker.update(imgcv, to_cvbox(detections))
            # cvboxes, ids = [], []
            # for tid,tracker in mtracker.trackers.items():
            #     if tracker.visible_count > 3 and tracker.consecutive_invisible_count<10:
            #         cvboxes.append(tracker.bbox)
            #         ids.append(tid)
            # detections = to_bbox(cvboxes)

            print(detections)
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)
Example #2
def demo_video(video_file):
    import cv2
    # with tf.device(device_id):
    cap = common.VideoStream(video_file).start()
    dmg = Demography()

    def draw_faces(img, detections):
        """Draw bounding boxes and gender/age labels for each detected face on the given image."""
        for face in detections:
            # draw rectangle
            x1, y1 = face['box']['topleft']['x'], face['box']['topleft']['y']
            x2, y2 = face['box']['bottomright']['x'], face['box']['bottomright']['y']
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

            # draw gender/age label
            text = "%s %s" % (face['classes'][0]['meta']['gender'],
                              face['classes'][0]['meta']['age'])
            common.draw_label(img, text, (x1, y1))
        return img

    while cap.more():
        img = cap.read()
        if img is not None:
            faces = dmg.run(img)
            img = draw_faces(img, faces)

            common.showImage(img)
            key = cv2.waitKey(1) & 0xff
            if key == 27:
                break
        else:
            print('Cannot read frame')
            break
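
Judging from the keys accessed in draw_faces, each detection returned by Demography.run presumably carries a nested box plus a list of classes; the dictionary below is a hypothetical illustration of that shape, not actual library output:

# Hypothetical shape of one detection, inferred from the keys used in draw_faces above.
face = {
    'box': {
        'topleft': {'x': 120, 'y': 80},
        'bottomright': {'x': 220, 'y': 200},
    },
    'classes': [
        {'meta': {'gender': 'female', 'age': 31}},
    ],
}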
Example #3
def process_frame(frame):
    detections = facedemo.detect(frame, upsamples=0)
    # frame = draw_faces(frame, detections)
    return frame, detections


if __name__ == '__main__':
    # threadn = cv2.getNumberOfCPUs()
    # print threadn
    # from multiprocessing import Pool
    # pool = Pool(processes = threadn)
    # pending = deque()
    multi = common.Multicore(process_frame)

    total_t, counter = 0, 0
    cap = common.VideoStream(url, queueSize=4).start()
    t = common.clock()

    while not cap.stopped:
        # Multi-process pipeline (kept for reference, disabled):
        # while len(pending) > 0 and pending[0].ready():
        #     frame = pending.popleft().get()
        #     common.showImage(frame)

        # if len(pending) < threadn:
        #     imgcv = cap.read()
        #     if imgcv is not None:
        #         counter += 1
        #         task = pool.apply_async(process_frame, (imgcv.copy(),))
        #         pending.append(task)
        #     else:
        #         print("Cannot read frame")

        # Single-process path so the loop has an executable body.
        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            frame, detections = process_frame(imgcv)
            common.showImage(common.drawObjects(frame, detections, range(len(detections))))
        else:
            print("Cannot read frame")
            break

        if cv2.waitKey(1) & 0xFF == 27:
            break

        t1 = common.clock()
        total_t += t1 - t
        t = t1
        print(counter / total_t)
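
The commented-out lines above sketch a multi-process pipeline: a multiprocessing.Pool with one worker per CPU and a deque of pending async results that are drained in submission order. A minimal self-contained version of that pattern, using a plain cv2.VideoCapture source and a pass-through process_frame as stand-ins for the original common.VideoStream and detector, might look like this:

# Sketch of the Pool/deque pipeline from the comments above; names here are stand-ins.
from collections import deque
from multiprocessing import Pool

import cv2


def process_frame(frame):
    # Placeholder worker; the original runs face detection here.
    return frame


if __name__ == '__main__':
    threadn = cv2.getNumberOfCPUs()
    pool = Pool(processes=threadn)
    pending = deque()

    cap = cv2.VideoCapture(0)
    while True:
        # Drain results that are ready, oldest first, to keep frames in order.
        while pending and pending[0].ready():
            frame = pending.popleft().get()
            cv2.imshow('frame', frame)

        # Keep at most one task per worker in flight.
        if len(pending) < threadn:
            ok, imgcv = cap.read()
            if ok:
                pending.append(pool.apply_async(process_frame, (imgcv.copy(),)))
            else:
                print("Cannot read frame")
                break

        if cv2.waitKey(1) & 0xFF == 27:
            break

    cap.release()
    cv2.destroyAllWindows()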
Example #4
def demo_video(video_file):
    detector = ObjectDetectorYolo(model='tiny-yolo-voc')
    mtracker = KalmanTracker(['person'], tracker='deep_sort')

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0
    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()
        imgcv = cap.read()

        if imgcv is not None:
            counter += 1
            detections = detector.run(imgcv)
            mtracker.update(imgcv, detections)
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.consecutive_invisible_count < 5:
                    state_current = get_pos(tracker.bbox)

                    try:
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current

                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)
            print(Incount, Outcount)

            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            common.drawLabel(imgcv,
                             "IN:%d  OUT:%d" % (Incount, Outcount), (10, 10),
                             size=1,
                             color=(0, 0, 255))
            common.showImage(draw_boxes(imgcv, cvboxes, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)
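
get_pos and LINE are defined elsewhere in the original module; from the counting logic above, get_pos evidently reports which side of the counting line a tracked box currently sits on, returning 'Positive' or 'Negative'. A minimal sketch, assuming bbox is an (x, y, w, h) tuple and LINE is the same dict passed to cv2.line:

# Hypothetical reconstruction of get_pos; the original implementation is not shown.
def get_pos(bbox):
    x, y, w, h = bbox
    cx, cy = x + w / 2.0, y + h / 2.0  # centre of the tracked box
    # The sign of the cross product of the line direction with the vector from
    # (x1, y1) to the centre tells us which side of the line the centre falls on.
    cross = ((LINE['x2'] - LINE['x1']) * (cy - LINE['y1'])
             - (LINE['y2'] - LINE['y1']) * (cx - LINE['x1']))
    return 'Positive' if cross > 0 else 'Negative'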
Example #5
def demo_video(video_file):
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    mtracker = MultiTracker(SingleTrackerType=CorrelationTracker)
    # mtracker = MultiTracker(SingleTrackerType=CorrelationTracker,
    #                         removalConfig=removalConfig)
    # mtracker = MultiTracker(SingleTrackerType = cv2.TrackerKCF_create)

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0

    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()

        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            mtracker.update(imgcv, common.toCvbox(detections))
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.visible_count > 3 and tracker.consecutive_invisible_count < 10:
                    state_current = get_pos(tracker.bbox)
                    try:
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current
                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)

            detections = to_bbox(cvboxes)
            print(Incount, Outcount)
            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            imgcv = common.drawLabel(imgcv,
                                     "IN:%d  OUT:%d" % (Incount, Outcount),
                                     (10, 10),
                                     color=(0, 0, 255))
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)
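
to_bbox and common.toCvbox, which convert between the tracker's plain boxes and the nested detection dictionaries, are likewise not shown. A rough sketch, assuming the detection layout from Example #2 and OpenCV-style (x, y, w, h) boxes:

# Hypothetical converters; the originals live in the author's common module.
def to_cvbox(detections):
    """Nested detection dicts -> list of (x, y, w, h) boxes."""
    cvboxes = []
    for det in detections:
        tl, br = det['box']['topleft'], det['box']['bottomright']
        cvboxes.append((tl['x'], tl['y'], br['x'] - tl['x'], br['y'] - tl['y']))
    return cvboxes


def to_bbox(cvboxes):
    """(x, y, w, h) boxes -> nested detection dicts."""
    detections = []
    for x, y, w, h in cvboxes:
        detections.append({'box': {'topleft': {'x': int(x), 'y': int(y)},
                                   'bottomright': {'x': int(x + w), 'y': int(y + h)}}})
    return detections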