def main():
    """Entry point of the multi-stream video recognition demo.

    Parses command-line arguments, builds the FaceSDK service, recognizer,
    capturer and face database, starts one Worker per video source, then
    runs a GUI loop that shows the latest annotated frame of every stream
    until ESC is pressed or every worker has shut down.  Any exception is
    caught at this top-level boundary and reported on the console.
    """
    try:
        args = parse_args()

        # Validate explicitly instead of `assert`, which is stripped when
        # Python runs with -O.
        if args.recognition_distance_threshold <= 0:
            raise ValueError("recognition_distance_threshold must be > 0")

        # One OpenCV-backed frame source per requested stream.
        sources = [OpenCVSource(sname) for sname in args.sources_names]

        service = FacerecService.create_service(args.dll_path, args.config_dir)

        print("Library version: ", service.get_version(), "\n")

        recognizer = service.create_recognizer(args.method_config, True, True)

        capturer_config = Config("common_capturer4_fda_singleface.xml")
        capturer = service.create_capturer(capturer_config)

        # Build the identification database from the listed images.
        database = Database(args.database_list_filepath, recognizer, capturer,
                            args.recognition_distance_threshold)

        # Configure a single VideoWorker shared by all streams.
        vw_config = Config(args.vw_config_file)
        vw_config.override_parameter("search_k", 10)
        vw_config.override_parameter("not_found_match_found_callback", 1)
        vw_config.override_parameter("downscale_rawsamples_to_preferred_size",
                                     0)

        vw_params = video_worker.Params()
        vw_params.video_worker_config = vw_config
        vw_params.recognizer_ini_file = args.method_config
        vw_params.streams_count = len(sources)
        vw_params.processing_threads_count = len(sources)
        vw_params.matching_threads_count = len(sources)
        vw_params.age_gender_estimation_threads_count = len(sources)
        vw_params.emotions_estimation_threads_count = len(sources)
        # Optional active-liveness checks; uncomment to enable:
        # vw_params.active_liveness_checks_order = [
        #     active_liveness.CheckType.SMILE,
        #     active_liveness.CheckType.TURN_RIGHT,
        #     active_liveness.CheckType.TURN_LEFT]

        vw = service.create_video_worker(vw_params)

        # Hand the database elements to the VideoWorker for matching.
        vw.set_database(database.vw_elements)

        # Create one display window per stream (optionally fullscreen).
        for sname in args.sources_names:
            if args.fullscreen == "yes":
                cv2.namedWindow(sname, cv2.WINDOW_NORMAL)
                cv2.setWindowProperty(sname, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)

            cv2.imshow(sname, np.zeros([100, 100, 3], dtype=np.uint8))

        # Buffers for the drawn results, shared with the workers.
        draw_images_mutex = Mutex(threading.Lock())
        draw_images = [np.array([])] * len(sources)

        # One worker per source; the list index doubles as the stream_id.
        workers = [
            Worker(
                database,
                vw,
                sources[i],
                i,  # stream_id
                draw_images_mutex,
                draw_images[i],
                args.frame_fps_limit)
            for i in range(len(sources))
        ]

        # Display loop: draw results until ESC is pressed.
        while True:
            draw_images_mutex.wait_one()

            # Iterate over a snapshot: the previous version removed items
            # from `workers` while iterating it, which silently skipped the
            # element following each removed worker for one pass.
            for worker in list(workers):
                idx = workers.index(worker)
                if worker._shutdown:
                    worker.dispose()
                    del workers[idx]
                    cv2.destroyWindow(args.sources_names[idx])
                    # Keep the names list parallel to the workers list.
                    del args.sources_names[idx]
                elif worker.draw_image.size:
                    cv2.imshow(args.sources_names[idx], worker.draw_image)
                    worker.draw_image = np.array([])

            draw_images_mutex.release_mutex()

            key = cv2.waitKey(20)

            if 27 == key or not workers:
                cv2.destroyAllWindows()
                break

            if ord('t') == key:
                print("resetTrackerOnStream")
                vw.reset_tracker_on_stream(0)

            if ord('r') == key:
                track_id_threshold = vw.reset_stream(0)
                print("resetStream return track_id_threshold: ",
                      track_id_threshold)

            # Re-raise any exception raised inside a VideoWorker callback.
            vw.check_exception()

        for w in workers:
            w.dispose()

    except Exception as ex:
        print('\n video_recognition_demo exception catched: "{}"'.format(ex))
Exemple #2
0
import cv2


def process_one_face(img_path):
    """Read an image, detect a face on it and return its template.

    Expects the module-level `capturer` and `recognizer` to be initialized.
    Fails (AssertionError) when the file cannot be read or no face is found.
    """
    bgr = cv2.imread(img_path)
    assert bgr is not None
    # FaceSDK expects RGB channel order, OpenCV loads BGR.
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    detected = capturer.capture(CVRawImage(rgb))
    print(f'On image detected {len(detected)} faces')
    assert len(detected) > 0
    # Build a recognition template from the first detected face.
    return recognizer.processing(detected[0])


# --- Example 2 driver: 1:1 verification of two still images. ---

# Root of the FaceSDK distribution relative to this script's working dir.
face_sdk_dir = "../.."
# Load the native library together with its configuration directory.
service = FacerecService.create_service(
    os.path.join(face_sdk_dir, "lib/libfacerec.so"),
    os.path.join(face_sdk_dir, "conf/facerec"))
print('Service created')

# Recognizer configured by method9v30_recognizer.xml; the boolean flags are
# SDK creation options — presumably processing/matching toggles, confirm
# against the FacerecService.create_recognizer reference.
recognizer = service.create_recognizer("method9v30_recognizer.xml", True, True, False)
print('Recognizer created')

# Detector/capturer tuned for frontal faces (blf_fda_front config).
capturer = service.create_capturer(Config("common_capturer_blf_fda_front.xml"))
print('Capturer created')

# Build one template per image (same file name, two different sets).
template1 = process_one_face(os.path.join(face_sdk_dir, "bin/set1", "01100.jpg"))
template2 = process_one_face(os.path.join(face_sdk_dir, "bin/set2", "01100.jpg"))

# Print the match result for the two templates.
print(recognizer.verify_match(template1, template2))
Exemple #3
0
import cv2
import numpy as np


def detect(img_path):
    """Read an image and return the list of faces found on it.

    Expects the module-level `capturer` to be initialized.  Fails
    (AssertionError) when the file cannot be read.
    """
    bgr = cv2.imread(img_path)
    assert bgr is not None
    # FaceSDK expects RGB channel order, OpenCV loads BGR.
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    detected = capturer.capture(CVRawImage(rgb))
    print(f'On image detected {len(detected)} faces')
    return detected


# --- Example 3 driver: detect faces and display each grayscale crop. ---

# Root of the FaceSDK distribution relative to this script's working dir.
face_sdk_dir = "../.."
# Load the native library together with its configuration directory.
service = FacerecService.create_service(
    os.path.join(face_sdk_dir, "lib/libfacerec.so"),
    os.path.join(face_sdk_dir, "conf/facerec"))
print('Service created')

# Detector/capturer using the uld_fda configuration.
capturer = service.create_capturer(Config("common_capturer_uld_fda.xml"))
print('Capturer created')

samples = detect(os.path.join(face_sdk_dir, "bin/set1", "01100.jpg"))
for i, sample in enumerate(samples):
    # Crop the face as a raw single-channel (grayscale) image buffer.
    raw_img = sample.cut_face_raw_image(Format.FORMAT_GRAY)
    # View the raw byte buffer as a 2-D uint8 array (height x width).
    img_crop = np.frombuffer(raw_img.data, dtype=np.uint8).reshape(
        [raw_img.height, raw_img.width])
    # Show each crop; blocks until any key is pressed.
    cv2.imshow('Image', img_crop)
    cv2.waitKey()