# Exemplo n.º 1
def realse(stack):
    """Consume frames from *stack*, run MTCNN face detection and display
    the annotated frames until the user presses 'q'.

    Args:
        stack: shared, list-like frame buffer (frames are consumed from
            the end via ``pop()``); presumably filled by a producer
            process — TODO confirm.

    Side effects:
        Pins CUDA to device 1, opens an OpenCV window, prints detections.
    """
    print('Begin to get frame......')
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    detectors = net('onet')
    # Detector is built once, outside the consume loop.
    mtcnnDetector = MtcnnDetector(detectors=detectors,
                                  min_face_size=24,
                                  threshold=[0.9, 0.6, 0.7])
    while True:
        # Wait until the producer has buffered enough frames.
        # NOTE(review): this busy-spins when the buffer is short; a small
        # sleep would reduce CPU burn but changes timing — left as-is.
        if len(stack) > 30:
            image = np.array(stack.pop())
            # (Removed: the original resized the image to its own
            # dimensions, which is a no-op.)
            boxes_c, _ = mtcnnDetector.detect_video(image)
            for bbox in boxes_c:
                x1, y1, x2, y2 = (int(v) for v in bbox[:4])
                # Draw the detection confidence just above the box corner.
                cv2.putText(image,
                            str(np.round(bbox[4], 2)),
                            (x1, y1),
                            cv2.FONT_HERSHEY_TRIPLEX,
                            0.3,
                            color=(0, 255, 0))
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255))
                print('detected face: ({},{}), ({},{})'.format(x1, y1, x2, y2))
            cv2.imshow("Detected", image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()
def recognize(stack):
    """Consume frames from *stack*, detect faces with MTCNN, match them
    against precomputed encodings and display the annotated stream until
    the user presses 'q'.

    Args:
        stack: shared, list-like frame buffer (frames are consumed from
            the end via ``pop()``); presumably filled by a producer
            process — TODO confirm.

    Side effects:
        Loads a pickle of known encodings, opens an OpenCV window, logs
        and prints recognition results.
    """
    logging.info("[INFO]:Starting video stream...")
    # Known faces: expects a dict with 'encodings' and 'names' lists.
    # Use a context manager so the file handle is closed (the original
    # leaked it).  SECURITY NOTE: pickle on untrusted data is unsafe —
    # this assumes the file is trusted local data.
    with open('/home/lxz/project/faceid/alignment.pickle', "rb") as fh:
        data = pickle.loads(fh.read())
    detectors = net('onet')
    mtcnnDetector = MtcnnDetector(detectors=detectors,
                                  min_face_size=60,
                                  threshold=[0.9, 0.6, 0.7])
    logging.info('MTCNN/KNN Model load succeeded !!!!')
    while True:
        # Wait until the producer has buffered enough frames.
        if len(stack) > 20:
            frame = stack.pop()
            image = np.array(frame)
            allBoxes, _ = mtcnnDetector.detect_video(image)
            # face_recognition wants boxes as (top, right, bottom, left);
            # pad the MTCNN box by a small margin on every side.
            boxes = []
            for box in allBoxes:
                x_1, y_1, x_2, y_2 = (int(v) for v in box[:4])
                boxes.append((y_1 - 10, x_2 + 12, y_2 + 10, x_1 - 12))
            logging.debug(boxes)
            start = time.time()
            # Higher num_jitters re-samples each face more times: better
            # accuracy at the cost of encoding speed.
            encodings = face_recognition.face_encodings(frame,
                                                        boxes,
                                                        num_jitters=6)
            end = time.time()
            logging.info('[INFO]:Encoding face costed: {} s'.format(end -
                                                                    start))
            print('encode time is {}ms'.format((end - start) * 1000))

            names = []
            for encoding in encodings:
                # tolerance is the max face distance counted as a match;
                # 0.35 is stricter than the library's 0.6 default.
                matches = face_recognition.compare_faces(data['encodings'],
                                                         encoding,
                                                         tolerance=0.35)
                name = 'Stranger'
                if True in matches:
                    # Majority vote over all matching known encodings.
                    counts = {}
                    for idx, hit in enumerate(matches):
                        if hit:
                            person = data['names'][idx]
                            counts[person] = counts.get(person, 0) + 1
                    name = max(counts, key=counts.get)
                    logging.debug(name)
                names.append(name)

            # Draw the detection box and the recognized name.
            for ((top, right, bottom, left), name) in zip(boxes, names):
                top, right = int(top), int(right)
                bottom, left = int(bottom), int(left)
                cv2.rectangle(frame, (left, top), (right, bottom),
                              (0, 0, 255), 2)
                # Red label for strangers, green for known faces.
                if name == 'Stranger':
                    label_color = (0, 0, 255)
                else:
                    print(name)
                    label_color = (0, 255, 0)
                cv2.putText(frame, name, (left, top - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, label_color, 2)
            cv2.imshow('Recognize-no-alignment', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()