Example #1
def recognize_from_video():
    # net initialize
    detector = ailia.Detector(MODEL_PATH,
                              WEIGHT_PATH,
                              len(FACE_CATEGORY),
                              format=ailia.NETWORK_IMAGE_FORMAT_RGB,
                              channel=ailia.NETWORK_IMAGE_CHANNEL_FIRST,
                              range=RANGE,
                              algorithm=ALGORITHM,
                              env_id=args.env_id)

    capture = webcamera_utils.get_capture(args.video)

    if args.savepath != SAVE_IMAGE_PATH:
        writer = webcamera_utils.get_writer(
            args.savepath,
            IMAGE_HEIGHT,
            IMAGE_WIDTH,
            fps=capture.get(cv2.CAP_PROP_FPS),
        )
    else:
        writer = None

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        _, resized_img = webcamera_utils.adjust_frame_size(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH)

        img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2BGRA)
        detector.compute(img, THRESHOLD, IOU)

        detections = []
        for idx in range(detector.get_object_count()):
            obj = detector.get_object(idx)
            detections.append(obj)
        detections = nms_between_categories(detections,
                                            frame.shape[1],
                                            frame.shape[0],
                                            categories=[0, 1],
                                            iou_threshold=IOU)

        res_img = plot_results(detections, resized_img, FACE_CATEGORY, False)
        cv2.imshow('frame', res_img)

        # save results
        if writer is not None:
            writer.write(res_img)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
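Example #1 relies on module-level constants and helpers defined elsewhere in the script. The sketch below shows one plausible setup; every name and value here is an assumption for illustration, not the script's actual configuration:

import ailia

# All values below are illustrative assumptions.
WEIGHT_PATH = 'face-detector.onnx'            # hypothetical weight file
MODEL_PATH = 'face-detector.onnx.prototxt'    # hypothetical model file
FACE_CATEGORY = ['unmasked', 'masked']        # two classes, matching categories=[0, 1]
THRESHOLD = 0.4                               # detection confidence threshold
IOU = 0.45                                    # IoU threshold for NMS
RANGE = ailia.NETWORK_IMAGE_RANGE_U_FP32      # input pixel range expected by the net
ALGORITHM = ailia.DETECTOR_ALGORITHM_YOLOV3   # detector post-processing algorithm
IMAGE_HEIGHT = 416                            # assumed network input height
IMAGE_WIDTH = 416                             # assumed network input width
SAVE_IMAGE_PATH = 'output.png'                # assumed default save path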
Example #2
def get_faces(detector, frame, w, h):
    # detect face
    org_detections = []
    if args.face == "blazeface":
        org_detections = compute_blazeface(detector, frame)
    else:
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        detector.compute(img, args.face_threshold, FACE_IOU)
        count = detector.get_object_count()
        for idx in range(count):
            obj = detector.get_object(idx)
            org_detections.append(obj)

    # remove overlapping detections across categories
    org_detections = nms_between_categories(org_detections,
                                            w,
                                            h,
                                            categories=[0, 1],
                                            iou_threshold=FACE_IOU)

    detections = []
    for obj in org_detections:
        # expand the detected face box by a margin around its center
        margin = FACE_MARGIN

        cx = (obj.x + obj.w / 2) * w
        cy = (obj.y + obj.h / 2) * h
        cw = max(obj.w * w * margin, obj.h * h * margin)
        fx = max(cx - cw / 2, 0)
        fy = max(cy - cw / 2, 0)
        fw = min(cw, w - fx)
        fh = min(cw, h - fy)
        top_left = (int(fx), int(fy))
        bottom_right = (int(fx + fw), int(fy + fh))

        # crop the detected face region
        crop_img = frame[top_left[1]:bottom_right[1],
                         top_left[0]:bottom_right[0], 0:3]
        if crop_img.shape[0] <= 0 or crop_img.shape[1] <= 0:
            continue
        crop_img, resized_frame = adjust_frame_size(crop_img, IMAGE_HEIGHT,
                                                    IMAGE_WIDTH)
        detections.append({
            "resized_frame": resized_frame,
            "top_left": top_left,
            "bottom_right": bottom_right,
            "id_sim": 0,
            "score_sim": 0,
            "fe": None
        })

    return detections
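A minimal usage sketch for get_faces, assuming a detector built as in the other examples, plus module-level FACE_MARGIN and FACE_IOU constants that the function expects; all of this setup is assumed, not shown in the source:

import cv2

# Sketch only: assumes `detector`, `args`, FACE_MARGIN and FACE_IOU exist.
capture = cv2.VideoCapture(0)
ret, frame = capture.read()
if ret:
    h, w = frame.shape[:2]
    for face in get_faces(detector, frame, w, h):
        # draw the padded face crop rectangle on the original frame
        cv2.rectangle(frame, face["top_left"], face["bottom_right"], (0, 255, 0), 2)
    cv2.imshow('faces', frame)
    cv2.waitKey(0)
capture.release()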
Example #3
def recognize_from_image():
    # net initialize
    detector = ailia.Detector(MODEL_PATH,
                              WEIGHT_PATH,
                              len(FACE_CATEGORY),
                              format=ailia.NETWORK_IMAGE_FORMAT_RGB,
                              channel=ailia.NETWORK_IMAGE_CHANNEL_FIRST,
                              range=RANGE,
                              algorithm=ALGORITHM,
                              env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        img = load_image(image_path)
        logger.debug(f'input image shape: {img.shape}')

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                detector.compute(img, THRESHOLD, IOU)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            detector.compute(img, THRESHOLD, IOU)

        # nms
        detections = []
        for idx in range(detector.get_object_count()):
            obj = detector.get_object(idx)
            detections.append(obj)
        detections = nms_between_categories(
            detections,
            img.shape[1],
            img.shape[0],
            categories=[0, 1],
            iou_threshold=IOU,
        )

        # plot result
        res_img = plot_results(detections, img, FACE_CATEGORY)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, res_img)
    logger.info('Script finished successfully.')
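nms_between_categories is a helper from the repository's utilities, and its implementation is not shown here. As an illustration of the idea only (greedy non-maximum suppression applied across the listed categories, so overlapping boxes are suppressed even when their class labels differ, e.g. masked vs. unmasked faces), a simplified sketch:

def nms_between_categories_sketch(detections, w, h, categories, iou_threshold):
    # Illustrative sketch only, not the repository's implementation.
    def iou(a, b):
        # ailia detections use normalized (x, y, w, h); convert to pixel corners
        ax0, ay0, ax1, ay1 = a.x * w, a.y * h, (a.x + a.w) * w, (a.y + a.h) * h
        bx0, by0, bx1, by1 = b.x * w, b.y * h, (b.x + b.w) * w, (b.y + b.h) * h
        ix = max(0.0, min(ax1, bx1) - max(ax0, bx0))
        iy = max(0.0, min(ay1, by1) - max(ay0, by0))
        inter = ix * iy
        union = (ax1 - ax0) * (ay1 - ay0) + (bx1 - bx0) * (by1 - by0) - inter
        return inter / union if union > 0 else 0.0

    kept = []
    # Greedy pass, highest-confidence detections first.
    for det in sorted(detections, key=lambda d: d.prob, reverse=True):
        suppressed = det.category in categories and any(
            k.category in categories and iou(det, k) > iou_threshold
            for k in kept)
        if not suppressed:
            kept.append(det)
    return kept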
Example #4
def recognize_from_video():
    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    detector = ailia.Detector(
        MODEL_PATH,
        WEIGHT_PATH,
        len(FACE_CATEGORY),
        format=ailia.NETWORK_IMAGE_FORMAT_RGB,
        channel=ailia.NETWORK_IMAGE_CHANNEL_FIRST,
        range=RANGE,
        algorithm=ALGORITHM,
        env_id=env_id
    )

    if args.video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print("[ERROR] webcamera not found")
            sys.exit(1)
    else:
        if check_file_existance(args.video):
            capture = cv2.VideoCapture(args.video)
        else:
            print("[ERROR] video file not found")
            sys.exit(1)

    while True:
        ret, frame = capture.read()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if not ret:
            break

        _, resized_img = adjust_frame_size(frame, IMAGE_HEIGHT, IMAGE_WIDTH)

        img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2BGRA)
        detector.compute(img, THRESHOLD, IOU)

        detections = []
        for idx in range(detector.get_object_count()):
            obj = detector.get_object(idx)
            detections.append(obj)
        detections = nms_between_categories(detections,
                                            frame.shape[1],
                                            frame.shape[0],
                                            categories=[0, 1],
                                            iou_threshold=IOU)

        res_img = plot_results(detections, resized_img, FACE_CATEGORY, False)
        cv2.imshow('frame', res_img)

    capture.release()
    cv2.destroyAllWindows()
    print('Script finished successfully.')
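adjust_frame_size is likewise a repository helper. One plausible behavior, assumed here purely for illustration, is to scale the frame to fit the target size while keeping its aspect ratio, pad the remainder, and return both the original and the resized image:

import cv2
import numpy as np

def adjust_frame_size_sketch(frame, target_h, target_w):
    # Illustrative sketch only, not the repository's implementation.
    h, w = frame.shape[:2]
    scale = min(target_w / w, target_h / h)        # fit inside target, keep aspect
    resized = cv2.resize(frame, (int(w * scale), int(h * scale)))
    canvas = np.zeros((target_h, target_w, 3), dtype=frame.dtype)
    canvas[:resized.shape[0], :resized.shape[1]] = resized  # pad bottom/right
    return frame, canvas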
Example #5
def recognize_from_frame(net, detector, frame):
    # object detection
    detector.compute(frame, args.threshold, args.iou)

    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)

    # nms
    detections = []
    for idx in range(detector.get_object_count()):
        obj = detector.get_object(idx)
        detections.append(obj)
    detections = nms_between_categories(
        detections,
        frame.shape[1],
        frame.shape[0],
        categories=DETECT_CLASSES,
        iou_threshold=IOU,
    )

    for idx in range(len(detections)):
        obj = detections[idx]
        if obj.category not in DETECT_CLASSES:
            continue

        # cropping image
        margin = 1.0
        crop_img, top_left, bottom_right = crop(obj, margin, frame)
        # inference
        img = cv2.resize(crop_img, (IMAGE_SIZE, IMAGE_SIZE))
        img = np.expand_dims(img, axis=0)  # add batch dimension

        output = net.predict([img])
        out_typ, out_clr = output
        typ = TYPE_LIST[np.argmax(out_typ)]
        clr = COLOR_LIST[np.argmax(out_clr)]
        clr_table = COLOR_TABLE_LIST[np.argmax(out_clr)]

        # draw label
        # label bar spans the full width of the detection box
        LABEL_WIDTH = bottom_right[0] - top_left[0]
        LABEL_HEIGHT = 20
        color = clr_table  # use the predicted color's table entry
        cv2.rectangle(frame, top_left, bottom_right, color, thickness=2)
        cv2.rectangle(
            frame,
            top_left,
            (top_left[0] + LABEL_WIDTH, top_left[1] + LABEL_HEIGHT),
            color,
            thickness=-1,
        )

        text_position = (top_left[0], top_left[1] + LABEL_HEIGHT * 3 // 4)
        color = (0, 0, 0)
        fontScale = 0.7
        cv2.putText(
            frame,
            "{} {}".format(typ, clr),
            text_position,
            cv2.FONT_HERSHEY_SIMPLEX,
            fontScale,
            color,
            1,
        )

    return frame
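A minimal driver loop for recognize_from_frame might look like the sketch below; construction of `net` (the attribute classifier) and `detector` is assumed and would mirror the ailia setup in the earlier examples. Note that detector.compute is called before the BGRA-to-BGR conversion inside the function, so the loop hands in a BGRA image:

import cv2

# Sketch only: assumes `net`, `detector`, `args` and the constants exist.
capture = cv2.VideoCapture(0)
while True:
    ret, frame = capture.read()
    if not ret or cv2.waitKey(1) & 0xFF == ord('q'):
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)  # detector expects BGRA here
    res_img = recognize_from_frame(net, detector, frame)
    cv2.imshow('frame', res_img)
capture.release()
cv2.destroyAllWindows()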