Example #1
def face_parsing(_image_info, _face_box_info, _face_landmark_info):
    """
    Face semantic parsing

    Args:
        _image_info:    the full image to be recognized
        _face_box_info:  the region where the face is located
        _face_landmark_info:    face landmark coordinate information

    Returns:    the key of the mask for the different face regions

    """
    to_return_result = {
        'parsing_info': {
            'bucket_name': '',
            'path': ''
        },
    }
    oss_handler = get_oss_handler()
    img = oss_handler.download_image_file(_image_info['bucket_name'],
                                          _image_info['path'])
    cropped_image = get_rotated_box_roi_from_image(img,
                                                   _face_box_info,
                                                   _scale_ratio=1.5)
    face_parsing_result = face_parsing_handler.execute(cropped_image,
                                                       _face_landmark_info)
    parsing_result = face_parsing_result['semantic_segmentation']
    date_string = get_date_string()
    name_string = get_uuid_name()
    target_path = os.path.join(date_string, name_string)
    target_path = oss_handler.upload_numpy_array('intermediate', target_path,
                                                 parsing_result)
    to_return_result['parsing_info']['bucket_name'] = 'intermediate'
    to_return_result['parsing_info']['path'] = target_path
    return to_return_result
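
This op follows the calling convention shared by the service functions in the examples below: the caller passes an OSS locator dict ({'bucket_name': ..., 'path': ...}) together with any upstream results, and receives another OSS locator for the uploaded output. A minimal driver sketch under stated assumptions: face_detect is a hypothetical upstream op (not shown in these excerpts) that yields the box structure expected by get_rotated_box_roi_from_image, and face_landmark is the op from Example #6.

def face_parsing_pipeline(_image_info):
    """Hypothetical driver chaining detection, landmark and parsing ops."""
    # face_detect is assumed; it is not one of the ops shown in these examples
    face_box_info = face_detect(_image_info)['face_box_info']
    # face_landmark is the op from Example #6
    landmark_info = face_landmark(_image_info, face_box_info)
    # face_parsing is the op above; its result points at the uploaded mask
    return face_parsing(_image_info, face_box_info, landmark_info)['parsing_info']
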
Example #2
def text_recognize(_image_info, _box_info):
    """
    Text recognition

    Args:
        _image_info:    the full image to be recognized
        _box_info:      the location of the text region in the image

    Returns:    the recognition result for the text region

    """
    to_return_result = {'text': ''}
    oss_handler = get_oss_handler()
    img = oss_handler.download_image_file(
        _image_info['bucket_name'],
        _image_info['path']
    )
    cropped_image = get_rotated_box_roi_from_image(img, _box_info)
    get_image_rotation = text_orientation_op.execute(cropped_image)
    if get_image_rotation['orientation'] == TextImageOrientation.ORIENTATION_90:
        rotated_image, _ = rotate_degree_img(cropped_image, 90)
    elif get_image_rotation['orientation'] == TextImageOrientation.ORIENTATION_180:
        rotated_image, _ = rotate_degree_img(cropped_image, 180)
    elif get_image_rotation['orientation'] == TextImageOrientation.ORIENTATION_270:
        rotated_image, _ = rotate_degree_img(cropped_image, 270)
    else:
        rotated_image = cropped_image
    recognize_result = text_recognize_op.execute(rotated_image)
    to_return_result['text'] = recognize_result['text']
    return to_return_result
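
The four-way branch above can also be expressed as a lookup table; a behavior-preserving sketch, assuming the TextImageOrientation members and rotate_degree_img work exactly as used in this example (any orientation without an entry falls through and the crop is kept as-is).

_ORIENTATION_TO_DEGREE = {
    TextImageOrientation.ORIENTATION_90: 90,
    TextImageOrientation.ORIENTATION_180: 180,
    TextImageOrientation.ORIENTATION_270: 270,
}


def normalize_text_orientation(_cropped_image, _orientation):
    # rotate the crop upright, or return it unchanged when no rotation is needed
    degree = _ORIENTATION_TO_DEGREE.get(_orientation)
    if degree is None:
        return _cropped_image
    rotated_image, _ = rotate_degree_img(_cropped_image, degree)
    return rotated_image

With this helper, the branch in text_recognize collapses to a single call: rotated_image = normalize_text_orientation(cropped_image, get_image_rotation['orientation']).
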
Example #3
def text_recognize(_image_info, _box_info):
    """
    Text recognition

    Args:
        _image_info:    the full image to be recognized
        _box_info:      the location of the text region in the image

    Returns:    the recognition result for the text region

    """
    to_return_result = {'text': ''}
    oss_handler = get_oss_handler()
    img = oss_handler.download_image_file(_image_info['bucket_name'],
                                          _image_info['path'])
    cropped_image = get_rotated_box_roi_from_image(img, _box_info)
    recognize_result = text_recognize_op.execute(cropped_image)
    to_return_result['text'] = recognize_result['text']
    return to_return_result
Example #4
def face_liveness_detect(_image_info, _face_box_info):
    """
    Silent face liveness detection

    Args:
        _image_info:    the full image to be recognized
        _face_box_info:  the region where the face is located

    Returns:    whether the face is a spoof (fake face)

    """
    to_return_result = {"is_fake": False}
    oss_handler = get_oss_handler()
    img = oss_handler.download_image_file(_image_info['bucket_name'],
                                          _image_info['path'])
    cropped_image = get_rotated_box_roi_from_image(img, _face_box_info, 2.7)
    liveness_result = mini_fasnetv2_handler.execute(cropped_image)
    score = liveness_result['classification_scores']
    # three class scores come back from the model; this wrapper treats index 1
    # as the spoof class, so the face is flagged fake when that score is the
    # largest of the three
    to_return_result['is_fake'] = (score[1] > score[0]) and (score[1] >
                                                             score[2])
    return to_return_result
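
A typical use of this op is as a gate in front of feature extraction; a hypothetical driver sketch, reusing the op above together with face_embedding from Example #5 below and the same argument values those ops already expect.

def face_embedding_if_live(_image_info, _face_box_info, _face_landmark_info):
    """Hypothetical gate: skip embedding extraction for spoofed faces."""
    liveness_check = face_liveness_detect(_image_info, _face_box_info)
    if liveness_check['is_fake']:
        # mirror face_embedding's empty result shape for rejected faces
        return {'face_feature_vector': []}
    return face_embedding(_image_info, _face_box_info, _face_landmark_info)
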
Example #5
def face_embedding(_image_info, _face_box_info, _face_landmark_info):
    """
    Face feature vector extraction

    Args:
        _image_info:    the full image to be recognized
        _face_box_info:  the region where the face is located
        _face_landmark_info:    face landmark coordinate information

    Returns:    the face feature vector

    """
    to_return_result = {"face_feature_vector": []}
    oss_handler = get_oss_handler()
    img = oss_handler.download_image_file(_image_info['bucket_name'],
                                          _image_info['path'])
    cropped_image = get_rotated_box_roi_from_image(img, _face_box_info, 1.35)
    embedding_result = asia_face_embedding_handler.execute(
        cropped_image, _face_landmark_info)
    to_return_result['face_feature_vector'] = embedding_result[
        'feature_vector']
    return to_return_result
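
The returned vector is typically compared against a stored reference with cosine similarity; a minimal sketch, assuming 'face_feature_vector' is a plain list of floats (the 0.5 threshold is illustrative, not a value taken from this codebase).

import numpy as np


def is_same_person(_feature_vector_a, _feature_vector_b, _threshold=0.5):
    """Hypothetical comparison of two face feature vectors."""
    vector_a = np.asarray(_feature_vector_a, dtype=np.float32)
    vector_b = np.asarray(_feature_vector_b, dtype=np.float32)
    # cosine similarity with a small epsilon to avoid division by zero
    cosine_similarity = float(np.dot(vector_a, vector_b) /
                              (np.linalg.norm(vector_a) * np.linalg.norm(vector_b) + 1e-8))
    return cosine_similarity > _threshold, cosine_similarity
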
Example #6
def face_landmark(_image_info, _face_box_info):
    """
    Face landmark detection

    Args:
        _image_info:    the full image to be recognized
        _face_box_info:  the region where the face is located

    Returns:    the face landmark coordinates

    """
    to_return_result = {
        'points_count': 106,
        'x_locations': [0] * 106,
        'y_locations': [0] * 106,
    }
    oss_handler = get_oss_handler()
    img = oss_handler.download_image_file(_image_info['bucket_name'],
                                          _image_info['path'])
    cropped_image = get_rotated_box_roi_from_image(img, _face_box_info, 1.35)
    landmark_detect_result = landmark106p_detect_handler.execute(cropped_image)
    to_return_result = landmark_detect_result.copy()
    return to_return_result
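
Downstream code usually wants the landmarks as (x, y) pairs rather than two parallel lists; a tiny helper sketch using the same zip pattern that Examples #9 and #11 inline for visualization.

def landmark_dict_to_points(_landmark_info):
    # pair the parallel x/y location lists into (x, y) tuples
    return list(zip(_landmark_info['x_locations'], _landmark_info['y_locations']))
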
Example #7
                    default=8001,
                    help='triton grpc port')
    args = ag.parse_args()
    img = cv2.imread(args.image_path)
    ultra_light_face_detect_handler = GeneralUltraLightFaceDetect(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port
        }, True, 0.7, 0.5)
    landmark106p_detect_handler = GeneralLandmark106p(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port
        }, True)
    fair_handler = AgeRaceGenderWithFair(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port,
        }, True)
    # Assume there is one and only one person in the image
    all_features = []
    face_detect_result = ultra_light_face_detect_handler.execute(img)
    face_bbox_info = face_detect_result['locations'][0]['box_info']
    cropped_face = get_rotated_box_roi_from_image(img, face_bbox_info, 1.25)
    landmark_result = landmark106p_detect_handler.execute(cropped_face)
    attribute_result = fair_handler.execute(cropped_face, landmark_result)
    pprint(attribute_result)
Example #8
                    dest='triton_port',
                    type=int,
                    default=8001,
                    help='triton grpc port')
    args = ag.parse_args()
    # Assume there is only one head in the image
    img = cv2.imread(args.image_path)
    ultra_light_face_detect_handler = GeneralUltraLightFaceDetect(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port
        }, True, 0.7, 0.5)
    face_bbox = ultra_light_face_detect_handler.execute(
        img)['locations'][0]['box_info']
    cropped_image_2_7 = get_rotated_box_roi_from_image(img, face_bbox, 2.7)
    cropped_image_4_0 = get_rotated_box_roi_from_image(img, face_bbox, 4.0)
    cv2.imshow('cropped_image_4_0', cropped_image_4_0)
    cv2.imshow('cropped_image_2_7', cropped_image_2_7)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    mini_fasnetv1se_handler = GeneralMiniFASNetV1SE(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port
        }, True)
    mini_fasnetv2_handler = GeneralMiniFASNetV2(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
Example #9
        'triton_url': args.triton_url,
        'triton_port': args.triton_port
    }, True)
    ultra_light_face_detect_handler = GeneralUltraLightFaceDetect({
        'name': 'triton',
        'triton_url': args.triton_url,
        'triton_port': args.triton_port
    }, True, 0.7, 0.5)
    landmark106p_result = landmark106p_detect_handler.execute(img)
    landmark106p_result_image = img.copy()
    landmark106p_all_points = [(x, y) for x, y in
                               zip(landmark106p_result['x_locations'],
                                   landmark106p_result['y_locations'])
                               ]
    annotate_circle_on_image(landmark106p_result_image, landmark106p_all_points, (0, 255, 0), 3, -1)
    cv2.imshow('landmark106p_result_image', landmark106p_result_image)
    face_detect_result = ultra_light_face_detect_handler.execute(img)
    face_bbox = face_detect_result['locations'][0]['box_info']
    cropped_face_region = get_rotated_box_roi_from_image(img, face_bbox, 1.25)
    landmark106p_with_bbox_result = landmark106p_detect_handler.execute(cropped_face_region)
    landmark106p_with_bbox_result_image = cropped_face_region.copy()
    landmark106p_with_bbox_result_all_points = [(x, y) for x, y in
                                                zip(landmark106p_with_bbox_result['x_locations'],
                                                    landmark106p_with_bbox_result['y_locations'])
                                                ]
    annotate_circle_on_image(landmark106p_with_bbox_result_image, landmark106p_with_bbox_result_all_points,
                             (255, 0, 255), 3, -1)
    cv2.imshow('landmark106p_with_bbox_result_image', landmark106p_with_bbox_result_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #10
if __name__ == '__main__':
    from argparse import ArgumentParser
    import cv2
    from Utils.AnnotationTools import draw_rotated_bbox
    from Utils.GeometryUtils import get_rotated_box_roi_from_image

    ag = ArgumentParser('Text Detect Example')
    ag.add_argument('-i', '--image_path', dest='image_path', type=str, required=True, help='local image path')
    ag.add_argument('-u', '--triton_url', dest='triton_url', type=str, required=True, help='triton url')
    ag.add_argument('-p', '--triton_port', dest='triton_port', type=int, default=8001, help='triton grpc port')
    ag.add_argument('-b', '--backbone', dest='backbone', choices=['res18', 'mbv3'], default='res18', help='backbone for DB')
    args = ag.parse_args()
    img = cv2.imread(args.image_path)
    db_handler = GeneralDBDetect({
        'name': 'triton',
        'backbone_type': args.backbone,
        'triton_url': args.triton_url,
        'triton_port': args.triton_port},
        True, 0.3, 5, 5
    )
    db_boxes = db_handler.execute(img)['locations']
    db_result_to_show = img.copy()
    for m_box_index, m_box in enumerate(db_boxes, 1):
        draw_rotated_bbox(db_result_to_show, m_box['box_info'], (0, 0, 255), 2)
        m_roi_image = get_rotated_box_roi_from_image(img, _rotated_box=m_box['box_info'])
        cv2.imshow(f'roi No.{m_box_index}', m_roi_image)
    cv2.imshow(f'db_{args.backbone}_result_to_show', db_result_to_show)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
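
Each ROI cropped in the loop above is exactly the kind of input that the recognition op in Examples #2/#3 consumes; a hypothetical helper sketch for an end-to-end detect-then-recognize pass, assuming a handler with the same execute interface as text_recognize_op (its construction is not part of these excerpts).

def recognize_detected_text(_img, _db_boxes, _text_recognize_op):
    """Hypothetical follow-up: run recognition on every region found by db_handler."""
    to_return_texts = []
    for m_box in _db_boxes:
        m_roi_image = get_rotated_box_roi_from_image(_img, _rotated_box=m_box['box_info'])
        m_text = _text_recognize_op.execute(m_roi_image)['text']
        to_return_texts.append(m_text)
    return to_return_texts
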
Example #11
        }, True)
    ultra_light_face_detect_handler = GeneralUltraLightFaceDetect(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port
        }, True, 0.7, 0.5)
    landmark106p_detect_handler = GeneralLandmark106p(
        {
            'name': 'triton',
            'triton_url': args.triton_url,
            'triton_port': args.triton_port
        }, True)
    face_bbox = ultra_light_face_detect_handler.execute(
        img)['locations'][0]['box_info']
    cropped_image = get_rotated_box_roi_from_image(img, face_bbox, 1.35)
    landmark_info = landmark106p_detect_handler.execute(cropped_image)
    landmark106p_with_bbox_result_image = cropped_image.copy()
    landmark106p_with_bbox_result_all_points = [(x, y) for x, y in zip(
        landmark_info['x_locations'], landmark_info['y_locations'])]
    face_parsing_with_bbox_result = face_parsing_handler.execute(
        cropped_image, landmark_info)
    face_parsing_with_bbox_result_image = cropped_image.copy()
    face_parsing_with_bbox_result_image = annotate_segmentation(
        face_parsing_with_bbox_result_image,
        face_parsing_with_bbox_result['semantic_segmentation'])
    cv2.imshow('face_parsing_with_bbox_result_image',
               face_parsing_with_bbox_result_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()