def min_face_size_test(img_path):
    """Repeatedly shrink the image and rerun the detector to probe the
    minimum face size it can still pick up."""
    # img = cv2.imread(img_path)
    img = img_path  # the caller passes an already-decoded frame, not a path
    counter = 1.0
    skipped = 0
    if img is not None:
        while True:
            imgcv = cv2.resize(img, (0, 0), fx=counter, fy=counter)
            print(imgcv.shape, end=' ')
            detections = facedemo.detect_largest(imgcv, upsamples=0)
            if detections:
                skipped = 0
                common.showImage(draw_faces(imgcv, [detections]))
                print("%s %s" % (detections['box']['bottomright']['x'] -
                                 detections['box']['topleft']['x'],
                                 detections['box']['bottomright']['y'] -
                                 detections['box']['topleft']['y']))
            else:
                skipped += 1
            counter -= 0.01
            if skipped >= 5 or counter < 0.01:
                break

            key = 0xFF & cv2.waitKey(1)
            if key == 27:
                break
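
Despite its name, the img_path parameter is used as an already-decoded frame (img = img_path; the cv2.imread call is commented out). A minimal usage sketch under that assumption, provided the module-level facedemo, common, and draw_faces helpers used above are importable (the file name is a placeholder):

import cv2

frame = cv2.imread('face_sample.jpg')  # hypothetical test image with a visible face
if frame is not None:
    min_face_size_test(frame)
cv2.destroyAllWindows()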
Example #2
def demo_video(video_file):
    import time
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    cap = common.VideoStream(video_file, queueSize=4).start()
    time.sleep(1)
    total_t, counter = 0, 0
    t = common.clock()

    while not cap.stopped:
        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            ids = range(len(detections))

            # temp = mtracker.update(imgcv, to_cvbox(detections))
            # cvboxes, ids = [], []
            # for tid,tracker in mtracker.trackers.items():
            #     if tracker.visible_count > 3 and tracker.consecutive_invisible_count<10:
            #         cvboxes.append(tracker.bbox)
            #         ids.append(tid)
            # detections = to_bbox(cvboxes)

            print(detections)
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        # running average frame rate: frames processed / elapsed time
        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)
Example #3
def demo_video(video_file):
    import cv2
    # with tf.device(device_id):
    cap = common.VideoStream(video_file).start()
    dmg = Demography()

    def draw_faces(img, detections):
        """ Draws bounding boxes and gender/age labels for the detected faces """
        for face in detections:
            # draw rectangle
            x1, y1 = face['box']['topleft']['x'], face['box']['topleft']['y']
            x2, y2 = face['box']['bottomright']['x'], face['box']['bottomright']['y']
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

            # draw class text
            text = "%s %s" % (face['classes'][0]['meta']['gender'],
                              face['classes'][0]['meta']['age'])
            img = common.draw_label(img, text, (x1, y1))
        return img

    while cap.more():
        img = cap.read()
        if img is not None:
            faces = dmg.run(img)
            img = draw_faces(img, faces)

            common.showImage(img)
            key = cv2.waitKey(1) & 0xff
            if key == 27:
                break
        else:
            print('Cannot read frame')
            break
Example #4
def main():

    # read the command-line arguments
    params = sys.argv

    if len(params) == 1:
        params.append('1')
        params.append('/Users/paiconor/Downloads/数据下发')
        params.append('192.168.99.144')
        params.append('8000')

    if len(params) <= 4:
        return

    # clean up the temporary directory
    print('Cleaning temporary directory...')
    if os.path.exists(tempdir):
        import shutil
        shutil.rmtree(tempdir)
    os.mkdir(tempdir)

    # when distributing data, compress the directory to be sent
    if params[1] == '1':
        print('Compressing files for upload...')
        zipfile = tempdir + '/' + str(uuid.uuid1()) + '.zip'
        zip_dir(params[2], zipfile)
        params[2] = zipfile

    # generate the connection QR code
    print('Generating QR code...')
    qrfile = tempdir + '/' + str(uuid.uuid1()) + '.jpg'
    connectString = GeneralQRCode(qrfile, int(params[1]), params[2], params[3],
                                  params[4])
    with open(tempdir + '/connectString.txt', 'a') as fConnString:
        fConnString.write(connectString)

    showImage(qrfile)

    # start the service
    print('Starting service...')
    app.run(host=params[3], port=int(params[4]))
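
main() reads everything from sys.argv and falls back to the hard-coded defaults above when no arguments are given. A hedged invocation sketch that bypasses the shell by filling sys.argv directly (the script name and upload directory are placeholders; mode '1' means "distribute data", followed by the directory to send, the bind host, and the port):

import sys

sys.argv = ['sender.py', '1', '/path/to/upload_dir', '192.168.99.144', '8000']
main()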
Example #5
def demo_video(video_file):
    detector = ObjectDetectorYolo(model='tiny-yolo-voc')
    mtracker = KalmanTracker(['person'], tracker='deep_sort')

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0
    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()
        imgcv = cap.read()

        if imgcv is not None:
            counter += 1
            detections = detector.run(imgcv)
            mtracker.update(imgcv, detections)
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.consecutive_invisible_count < 5:
                    state_current = get_pos(tracker.bbox)

                    try:
                        # a change of side means the track has crossed the counting
                        # line; the parity of statechange gives the net direction
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current

                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)
            print(Incount, Outcount)

            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            common.drawLabel(imgcv,
                             "IN:%d  OUT:%d" % (Incount, Outcount), (10, 10),
                             size=1,
                             color=(0, 0, 255))
            common.showImage(draw_boxes(imgcv, cvboxes, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)
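
get_pos and LINE come from elsewhere in this module and are not shown here. A hedged sketch of a compatible side-of-line test, assuming tracker.bbox is an (x, y, w, h) tuple and that get_pos reports 'Positive' or 'Negative' depending on which side of the counting line the box centre lies:

# placeholder counting line; the real LINE is defined elsewhere in the module
LINE = {'x1': 0, 'y1': 240, 'x2': 640, 'y2': 240}

def get_pos(bbox):
    """Return 'Positive' or 'Negative' for the side of LINE on which the
    centre of bbox (assumed (x, y, w, h)) falls."""
    x, y, w, h = bbox
    cx, cy = x + w / 2.0, y + h / 2.0
    # sign of the 2-D cross product of the line vector and the vector from
    # the line start to the box centre
    cross = ((LINE['x2'] - LINE['x1']) * (cy - LINE['y1']) -
             (LINE['y2'] - LINE['y1']) * (cx - LINE['x1']))
    return 'Positive' if cross >= 0 else 'Negative'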
Example #6
        #     common.showImage(frame)

        # if len(pending) < threadn:
        #     imgcv = cap.read()
        #     if imgcv is not None:
        #         counter += 1
        #         task = pool.apply_async(process_frame, (imgcv.copy(),))
        #         pending.append(task)
        #     else:
        #         print "Cannot read frame"
        #         break

        imgcv = cap.read()
        if imgcv is not None:
            print(imgcv.shape, end=' ')
            counter += 1
            out = multi.run(imgcv)
            if out is not None:
                frame, detections = out
                common.showImage(common.drawObjects(frame, detections))

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break
Example #7
def demo_video(video_file):
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    mtracker = MultiTracker(SingleTrackerType=CorrelationTracker)
    # mtracker = MultiTracker(SingleTrackerType=CorrelationTracker,
    #                         removalConfig=removalConfig)
    # mtracker = MultiTracker(SingleTrackerType = cv2.TrackerKCF_create)

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0
    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()

        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            mtracker.update(imgcv, common.toCvbox(detections))
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.visible_count > 3 and tracker.consecutive_invisible_count < 10:
                    state_current = get_pos(tracker.bbox)
                    try:
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current
                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)

            detections = to_bbox(cvboxes)
            print(Incount, Outcount)
            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            imgcv = common.drawLabel(imgcv,
                                     "IN:%d  OUT:%d" % (Incount, Outcount),
                                     (10, 10),
                                     color=(0, 0, 255))
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)
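
common.toCvbox and to_bbox convert between the detection dictionaries returned by Face.detect and the plain cv-style boxes the tracker works with. Neither is shown in these snippets; a hedged sketch of the round trip, assuming the dictionary layout used in Example #3 and (x, y, w, h) tuples for the cv boxes:

def to_cvbox(detections):
    """Detection dicts -> list of (x, y, w, h) tuples (assumed cv-style layout)."""
    boxes = []
    for det in detections:
        tl, br = det['box']['topleft'], det['box']['bottomright']
        boxes.append((tl['x'], tl['y'], br['x'] - tl['x'], br['y'] - tl['y']))
    return boxes


def to_bbox(cvboxes):
    """(x, y, w, h) tuples -> detection dicts in the layout used above."""
    return [{'box': {'topleft': {'x': int(x), 'y': int(y)},
                     'bottomright': {'x': int(x + w), 'y': int(y + h)}}}
            for x, y, w, h in cvboxes]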