Example 1
def main_loop():
    """Acquire one frame, overlay every detected face, then display it."""
    frame, detections = acquire_frame(detector, embedder, vs, recognizer, le,
                                      0.5, 0.65)
    for detection in detections:
        frame = draw_frame(frame, detection)
    show_frame(frame)
Example 2
def main_core(detector, embedder, recognizer, le, frame_queue, pframe_queue):
    """Worker-process loop: grab frames from a threaded camera reader and
    run face recognition on a time-throttled subset of them.

    Parameters
    ----------
    detector, embedder : networks passed through to ``acquire_frame``.
    recognizer, le : classifier and label encoder passed to ``acquire_frame``.
    frame_queue : queue handed to ``VideoGet`` for raw frames.
    pframe_queue : queue receiving each processed frame's ``face_data``.
    """

    print('[INFO] Starting:', mp.current_process().name)

    # Start the threaded video grabber; the sleeps give the camera sensor
    # time to warm up and the thread time to produce its first frame.
    video_getter = VideoGet(frame_queue=frame_queue,
                            src=0,
                            name='Video Getter')
    time.sleep(1.0)
    video_getter.start()
    time.sleep(1.0)

    # NOTE(review): cpt is never incremented in this variant, so the
    # ``cpt == 80`` exit below can never fire; the only way out is exitbool.
    cpt = 0
    exitbool = False
    # FPS(2) presumably throttles processing to ~2 frames/sec — TODO confirm.
    fps_count = FPS(2).start()
    while True:

        frame = video_getter.frame.copy()
        fps_count.istime(
        )  # Check whether enough time has passed to process this frame.

        if fps_count.boolFrames:

            fps_count.updateactualtime()  # Update the comparison time.

            # Thresholds: 0.5 detection, 0.65 recognition — TODO confirm
            # against acquire_frame's signature.
            face_data = acquire_frame(detector, embedder, frame, recognizer,
                                      le, 0.5, 0.65)
            pframe_queue.put(face_data)
            for item in face_data:
                frame = draw_frame(frame, item)
            fps_count.update()

        # Display every grabbed frame; presumably show_frame returns True
        # when the user presses the quit key — verify against show_frame.
        exitbool = show_frame(frame)

        if exitbool or cpt == 80:
            # Report throughput, then shut the grabber thread down.
            fps_count.stop()
            print("[INFO] elasped time fps processed: {:.2f}".format(
                fps_count.elapsed()))
            print("[INFO] approx. processed FPS: {:.2f}".format(
                fps_count.fps()))
            time.sleep(1)
            video_getter.stop()
            time.sleep(2)
            print('[INFO] Exiting :', mp.current_process().name)
            break
Example 3
    for item in face_data:
        frame = draw_frame(frame, item)
    show_frame(frame)


## INICIO ##

# Start the threaded frame grabber; the sleeps give the camera sensor time
# to warm up and the grabber thread time to produce its first frame.
video_getter = VideoGet(src=0, name='Video Getter')
time.sleep(1.0)
video_getter.start()
time.sleep(1.0)

# Recognize and annotate faces on every grabbed frame until the user quits
# (show_frame signals the exit via its return value).
exitbool = False
while not exitbool:
    frame = video_getter.frame
    face_data = acquire_frame(detector, embedder, frame, recognizer, le, 0.5,
                              0.65, fa)
    for item in face_data:
        frame = draw_frame(frame, item)
    exitbool = show_frame(frame)

video_getter.stop()
Example 4
def main_core(args, frame_queue, pframe_queue):
    """Worker-process entry point: load the detection/recognition models,
    then run face recognition on 80 frames from a threaded camera grabber
    and report throughput.

    Parameters
    ----------
    args : dict with keys "detector", "embedding_model", "embeddings"
        (filesystem paths to the model files).
    frame_queue : queue handed to ``VideoGet`` for raw frames.
    pframe_queue : unused in this variant (its ``put`` was disabled);
        kept for interface compatibility with the caller.
    """

    print('[INFO] Starting:', mp.current_process().name)

    # Load the Caffe SSD face detector.
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join(
        [args["detector"], "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    # Load the serialized face embedding model from disk.
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

    # Load the known-face embeddings and train the classifier.
    # SECURITY: pickle executes arbitrary code on load — only ever point
    # args["embeddings"] at a trusted file.
    print("[INFO] loading face embeddings...")
    # fix: use a context manager so the file handle is closed (the original
    # open(...).read() leaked it).
    with open(args["embeddings"], "rb") as f:
        data = pickle.loads(f.read())

    recognizer, le = train(data)
    time.sleep(1.0)

    # Start the threaded video grabber; the sleeps give the camera sensor
    # time to warm up and the thread time to produce its first frame.
    video_getter = VideoGet(frame_queue=frame_queue,
                            src=0,
                            name='Video Getter')
    time.sleep(1.0)
    video_getter.start()
    time.sleep(1.0)

    cpt = 0  # processed-frame counter; the loop stops after 80 frames
    exitbool = False  # never set True here (the show_frame call is disabled)
    fps_count = FPS().start()

    while True:

        frame = video_getter.frame.copy()

        # Thresholds: 0.5 detection, 0.65 recognition — TODO confirm
        # against acquire_frame's signature.
        face_data = acquire_frame(detector, embedder, frame, recognizer, le,
                                  0.5, 0.65)
        for item in face_data:
            frame = draw_frame(frame, item)
        fps_count.update()

        cpt += 1

        if exitbool or cpt == 80:
            # Report throughput, then shut the grabber thread down.
            fps_count.stop()
            print("[INFO] elasped time fps processed: {:.2f}".format(
                fps_count.elapsed()))
            print("[INFO] approx. processed FPS: {:.2f}".format(
                fps_count.fps()))
            time.sleep(1)
            video_getter.stop()
            time.sleep(2)
            print('[INFO] Exiting :', mp.current_process().name)
            break