Example #1
def alignment(src_img, src_pts, output_size=(96, 112)):
    """Warp a face image so that its features align with a canoncial
    reference set of landmarks. The alignment is performed with an
    affine warp

    Args:
        src_img (ndarray): an HxWx3 RGB containing a face
        src_pts (ndarray): a 5x2 array of landmark locations
        output_size (tuple): the dimensions (oH, oW) of the output image

    Returns:
        (ndarray): an (oH x oW x 3) warped RGB image.
    """
    ref_pts = [
        [30.2946, 51.6963],
        [65.5318, 51.5014],
        [48.0252, 71.7366],
        [33.5493, 92.3655],
        [62.7299, 92.2041],
    ]
    src_pts = np.array(src_pts).reshape(5, 2)
    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)
    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, output_size)
    return face_img
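A minimal usage sketch for this variant. The image path, landmark coordinates, and the `matlab_cp2tform` import are illustrative assumptions, not part of the original snippet:

import cv2
import numpy as np
from matlab_cp2tform import get_similarity_transform_for_cv2  # assumed helper module

# Hypothetical 5-point landmarks in (x, y) order:
# left eye, right eye, nose tip, left mouth corner, right mouth corner.
landmarks = np.array([[125.0, 140.0],
                      [175.0, 138.0],
                      [150.0, 170.0],
                      [130.0, 200.0],
                      [170.0, 198.0]], dtype=np.float32)

img = cv2.imread('face.jpg')          # hypothetical input image
aligned = alignment(img, landmarks)   # 112x96x3 crop with canonical landmark placement
cv2.imwrite('face_aligned.jpg', aligned)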
Example #2
def align(src_img,
          src_pts,
          ref_pts,
          image_size,
          scale=1.0,
          transpose_input=False,
          landmarks_label=None):
    w, h = image_size = tuple(image_size)

    # Actual offset = new center - old center (scaled)
    scale_ = max(w, h) * scale
    cx_ref = cy_ref = 0.
    offset_x = 0.5 * w - cx_ref * scale_
    offset_y = 0.5 * h - cy_ref * scale_

    s = np.array(src_pts).astype(np.float32).reshape([-1, 2])
    r = np.array(ref_pts).astype(np.float32) * scale_ + np.array(
        [[offset_x, offset_y]])
    if transpose_input:
        s = s.reshape([2, -1]).T

    tfm = get_similarity_transform_for_cv2(s, r)
    dst_img = cv2.warpAffine(src_img, tfm, image_size)

    # Transform the source landmarks into the aligned image's coordinate frame.
    s_new = np.concatenate([s.reshape([2, -1]), np.ones((1, s.shape[0]))])
    s_new = np.matmul(tfm, s_new)
    s_new = s_new.reshape([-1]) if transpose_input else s_new.T.reshape([-1])

    # Transform the optional extra landmarks only if they were provided;
    # the default of None would otherwise raise an AttributeError.
    lm_new = None
    if landmarks_label is not None:
        lm_new = np.concatenate(
            [landmarks_label.T,
             np.ones((1, landmarks_label.shape[0]))])
        lm_new = np.matmul(tfm, lm_new)
        lm_new = lm_new.T

    tfm = tfm.reshape([-1])
    return dst_img, lm_new, tfm
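Unlike the first example, this variant takes `ref_pts` in a coordinate frame centred on the origin (note `cx_ref = cy_ref = 0.`) and maps them into pixel space with `scale_` and the centre offset. A minimal sketch of that mapping in isolation, using made-up normalized reference points:

import numpy as np

# Hypothetical reference points in a unit frame centred at (0, 0).
ref_pts = np.array([[-0.18, -0.17], [0.18, -0.17], [0.00, 0.03],
                    [-0.15, 0.24], [0.15, 0.24]], dtype=np.float32)

w, h, scale = 112, 112, 0.9
scale_ = max(w, h) * scale                 # same scaling as in align()
offset = np.array([[0.5 * w, 0.5 * h]])    # cx_ref = cy_ref = 0, so the offset is the image centre
r = ref_pts * scale_ + offset              # pixel-space targets handed to the transform solver
print(r)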
Example #3
def alignment(src_img, src_pts):  # align the face
    of = 0
    #ref_pts = [ [30.2946+of, 51.6963+of],[65.5318+of, 51.5014+of],[48.0252+of, 71.7366+of] ]
    ref_pts = [[86 + of, 140 + of], [172 + of, 140 + of],
               [129 + of, 170 + of]]  #[29,80][159,80][96,157]
    crop_size = (304, 320)  #(96+of*2, 112+of*2)#(144+of*2,160+of*2)

    # ref_pts = [ [86+of,80+of],[300+of,80+of],[129+of,120+of]]
    # crop_size =(304,320)#src_img.shape[:2]#(160,144)

    xx1, xx2, yy1, yy2 = 76, 182, 105, 200  #20,75,46,76
    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)
    tfm = get_similarity_transform_for_cv2(s, r)

    M = cv2.getAffineTransform(s, r)

    face_img = cv2.warpAffine(src_img, tfm, crop_size)

    #face_img = cv2.warpAffine(src_img, M, crop_size)
    plt_show(face_img)

    rx, ry = 0, 0
    crop_face_img = face_img[yy1 - ry:yy2 + ry, xx1 - rx:xx2 + rx]

    plt_show(crop_face_img)

    #crop_face_img=face_img[yy1-ry:yy2+ry,xx1-rx:xx2+rx]
    resize = (160, 144)  #(96, 112)
    #plt_show(face_img)
    #plt_show(crop_face_img)
    resize_face_img = cv2.resize(crop_face_img, resize)  #crop_face_img
    #cv2.imwrite('output.jpg', resize_face_img)
    return resize_face_img
Example #4
	def alignment(self, src_img, src_pts):
		ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
		           [33.5493, 92.3655], [62.7299, 92.2041]]
		crop_size = (96, 112)
		src_pts = np.array(src_pts).reshape(5, 2)
		s = np.array(src_pts).astype(np.float32)
		r = np.array(ref_pts).astype(np.float32)
		tfm = get_similarity_transform_for_cv2(s, r)
		face_img = cv2.warpAffine(src_img, tfm, crop_size)
		return face_img
Example #5
def alignment(src_img, src_pts):
    of = 2
    ref_pts = [[30.2946 + of, 51.6963 + of], [65.5318 + of, 51.5014 + of],
               [48.0252 + of, 71.7366 + of], [33.5493 + of, 92.3655 + of],
               [62.7299 + of, 92.2041 + of]]
    crop_size = (96 + of * 2, 112 + of * 2)

    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)
    print(s)
    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, crop_size)
    return face_img
Example #6
def alignment_test(src_img, src_pts):
    of = 0
    ref_pts = [[30.2946 + of, 51.6963 + of], [65.5318 + of, 51.5014 + of],
               [48.0252 + of, 71.7366 + of]]
    crop_size = (96 + of * 2, 112 + of * 2)
    xx1, xx2, yy1, yy2 = 30, 65, 51, 92
    #print(src_img.shape[1], src_img.shape[0])
    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)
    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, crop_size)
    plt_show(face_img)
    rx, ry = 12, 14
    crop_face_img = face_img[yy1 - ry:yy2 + ry, xx1 - rx:xx2 + rx]
    resize_face_img = cv2.resize(crop_face_img, (96, 112))
    return resize_face_img
Example #7
def alignment(src_img, src_pts):
    ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
               [33.5493, 92.3655], [62.7299, 92.2041]]
    crop_size = (96, 112)

    src_pts = [
        src_pts["keypoints"]["left_eye"], src_pts["keypoints"]["right_eye"],
        src_pts["keypoints"]["nose"], src_pts["keypoints"]["mouth_left"],
        src_pts["keypoints"]["mouth_right"]
    ]
    src_pts = np.array(src_pts)

    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)

    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, crop_size)
    return face_img
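This variant consumes the detector output directly. A sketch of the expected dictionary layout, using an MTCNN-style result with hypothetical coordinates (`img` as in the earlier usage sketch):

detection = {
    "box": [100, 100, 120, 150],          # hypothetical bounding box, unused here
    "keypoints": {
        "left_eye":    (125, 140),
        "right_eye":   (175, 138),
        "nose":        (150, 170),
        "mouth_left":  (130, 200),
        "mouth_right": (170, 198),
    },
}
aligned = alignment(img, detection)       # returns the 112x96x3 aligned crop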
Example #8
def alignment(src_img, src_pts, size=None):
    ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014],
               [48.0252, 71.7366], [33.5493, 92.3655],
               [62.7299, 92.2041]]
    if size is not None:
        ref_pts = np.array(ref_pts)
        ref_pts[:,0] = ref_pts[:,0] * size/96
        ref_pts[:,1] = ref_pts[:,1] * size/96
        crop_size = (int(size), int(112/(96/size)))
    else:
        crop_size = (96, 112)
    src_pts = np.array(src_pts).reshape(5, 2)
    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)
    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, crop_size, flags=cv2.INTER_CUBIC)
    if size is not None:
        face_img = cv2.resize(face_img, dsize=(96, 112), interpolation=cv2.INTER_CUBIC)
    return face_img
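When `size` is given, both the reference points and the warp canvas are scaled by `size/96` and the result is resized back down to 96x112, which effectively supersamples the warp. A hedged usage sketch, reusing the hypothetical `img` and `landmarks` from the first example:

# size=192 doubles the reference points, warps onto a (192, 224) canvas with
# INTER_CUBIC, then resizes back to (96, 112):
#   ref_pts * 192 / 96                       -> doubled target coordinates
#   crop_size = (192, int(112 / (96 / 192))) = (192, 224)
aligned = alignment(img, landmarks, size=192)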
Example #9
def alignment(src_img, src_pts):
    min_y = min(src_pts["keypoints"]["left_eye"][1],
                src_pts["keypoints"]["right_eye"][1])
    max_y = max(src_pts["keypoints"]["mouth_left"][1],
                src_pts["keypoints"]["mouth_right"][1])
    min_x = min(src_pts["keypoints"]["left_eye"][0],
                src_pts["keypoints"]["mouth_left"][0])
    max_x = max(src_pts["keypoints"]["right_eye"][0],
                src_pts["keypoints"]["mouth_right"][0])
    # print(src_pts["keypoints"]["right_eye"])
    height = max_y - min_y
    width = max_x - min_x

    db_img = src_img[
        max(0, int(min_y - height * 1.8)):min(src_img.shape[0], int(max_y + height * 1.5)),
        max(0, int(min_x - width * 1.2)):min(src_img.shape[1], int(max_x + width * 1.2))]

    #cv2.imshow("windows", db_img)
    #cv2.waitKey(0)
    ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
               [33.5493, 92.3655], [62.7299, 92.2041]]
    crop_size = (96, 112)

    src_pts = [
        src_pts["keypoints"]["left_eye"], src_pts["keypoints"]["right_eye"],
        src_pts["keypoints"]["nose"], src_pts["keypoints"]["mouth_left"],
        src_pts["keypoints"]["mouth_right"]
    ]
    src_pts = np.array(src_pts)

    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)

    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, crop_size)
    return face_img, db_img
Example #10
def return_warp_vid(vidname, t=120):
    path = return_path(vidname)
    openposename = os.listdir(openpose_coordinates_dir+path)[0]
    openposename = openposename.split('_keypoints.json')[0][:-13]
    start_frame, end_frame = return_start_end(vidname)
    for frame_num in range(start_frame, end_frame+1):     
        ##################### read openpose
        openpose_path = openpose_coordinates_dir+path+'/'+openposename+'_'+'{:012d}'.format(frame_num)+'_keypoints.json'
        data = json.load(open(openpose_path))
        arr = data['people'][0]['face_keypoints_2d']
        coords_list = [(round(arr[i * 3]), round(arr[i * 3 + 1])) for i in range(len(arr) // 3)]
        ##################### find face coordinates
        bb_x_min = min([item[0] for item in coords_list])
        bb_x_max = max([item[0] for item in coords_list])
        bb_y_min = min([item[1] for item in coords_list])
        bb_y_max = max([item[1] for item in coords_list])
        c1, c2 = int(np.mean([bb_x_min, bb_x_max])), int(np.mean([bb_y_min, bb_y_max]))
        face_coords = [c1 - t, c1 + t, c2 - t, c2 + t]  # x_min, x_max, y_min, y_max
        ##################### crop the frame from high resolution to 240x240
        frame_path = return_highres_path(vidname)+'/frame_{}.png'.format(frame_num)       
        im = cv2.imread(frame_path)
        cropped_img = im[face_coords[2]:face_coords[3], face_coords[0]:face_coords[1]]  
        ##################### shift the coordinates
        x_org, y_org = c1 - t, c2 - t
        shifted_coords_list = [(x - x_org, y - y_org) for (x, y) in coords_list]
        coord_arr = np.array(shifted_coords_list).astype(np.float32)
        mean_coords_list = [(71, 92), (71, 105), (72, 118), (74, 131), (79, 143), (86, 153), (96, 161), (108, 167), (121, 169), (133, 167), (144, 162), (154, 154), (161, 144), (165, 132), (167, 120), (169, 108), (170, 95), (82, 77), (89, 73), (97, 71), (106, 72), (114, 75), (131, 76), (139, 73), (147, 73), (155, 76), (160, 81), (122, 87), (122, 94), (122, 100), (122, 107), (112, 116), (117, 118), (122, 119), (126, 118), (131, 117), (92, 89), (97, 86), (103, 86), (108, 89), (103, 91), (97, 91), (135, 91), (140, 88), (146, 88), (151, 91), (146, 93), (140, 92), (104, 135), (110, 130), (116, 127), (121, 128), (126, 127), (133, 130), (138, 136), (133, 140), (126, 142), (121, 142), (115, 142), (109, 139), (107, 134), (116, 132), (121, 133), (126, 133), (135, 135), (126, 135), (121, 135), (116, 135), (100, 88), (143, 90)]
        mean_arr = np.array(mean_coords_list).astype(np.float32)
        ##################### warp the cropped image
        tfm = get_similarity_transform_for_cv2(coord_arr,mean_arr)
        warped_img = cv2.warpAffine(cropped_img, tfm, (240, 240))
        ##################### crop the warped image
        crop_warped_img = warped_img[41: 169, 71:170, :]
        ##################### write the output image
        if not os.path.exists('output/' + vidname):
            os.mkdir('output/' + vidname)
        des_path = 'output/' + vidname + '/frame_{}.png'.format(frame_num)
        cv2.imwrite(des_path, crop_warped_img)
Example #11
def alignment_orig(src_img, src_pts, ncols=96, nrows=112, custom_align=None):
    """
    Original alignment function for MTCNN
    :param src_img: input image
    :param src_pts: landmark points
    :return:
    """
    from matlab_cp2tform import get_similarity_transform_for_cv2

    ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
               [33.5493, 92.3655], [62.7299, 92.2041]]

    if custom_align is not None:
        # Apply the caller-supplied (dx, dy) offset to every reference point.
        for row in ref_pts:
            row[0] += custom_align[0]
            row[1] += custom_align[1]
    elif ncols == 112:
        for row in ref_pts:
            row[0] += 8.0
    elif ncols == 128:
        for row in ref_pts:
            row[0] += 16.0

    if nrows == 128 and custom_align is None:
        for row in ref_pts:
            row[1] += 16.0

    # print(ref_pts)

    crop_size = (ncols, nrows)
    src_pts = np.array(src_pts).reshape(5, 2)

    s = np.array(src_pts).astype(np.float32)
    r = np.array(ref_pts).astype(np.float32)

    tfm = get_similarity_transform_for_cv2(s, r)
    face_img = cv2.warpAffine(src_img, tfm, crop_size)
    return face_img
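The branches above shift the 96x112 reference template so that it stays centred in wider or taller crops (8 px for 112 columns, 16 px for 128). A hedged usage sketch, reusing the hypothetical `img` and `landmarks` from the first example:

aligned_96  = alignment_orig(img, landmarks)                            # default 96x112 crop
aligned_112 = alignment_orig(img, landmarks, ncols=112, nrows=112)      # reference x shifted by 8 px
aligned_128 = alignment_orig(img, landmarks, ncols=128, nrows=128)      # shifted by 16 px on both axes
aligned_off = alignment_orig(img, landmarks, custom_align=(8.0, 8.0))   # explicit (dx, dy) offset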
Example #12
def warp_and_crop_face(src_img,
                       facial_pts,
                       reference_pts = None,
                       crop_size=(96, 112),
                       align_type='similarity'):
    """
    Function:
    ----------
        apply affine transform 'trans' to uv
    Parameters:
    ----------
        @src_img: 3x3 np.array
            input image
        @facial_pts: could be
            1)a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
                    by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform
    Returns:
    ----------
        @face_img: output face image with size (w, h) = @crop_size
    """

    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(output_size,
                                                        inner_padding_factor,
                                                        outer_padding,
                                                        default_square)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException(
            'reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException(
            'facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException(
            'facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img
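A usage sketch for `warp_and_crop_face`, assuming `REFERENCE_FACIAL_POINTS`, `get_reference_facial_points`, `get_affine_transform_matrix`, and `get_similarity_transform_for_cv2` are defined in the surrounding module; `img` and `landmarks` are the hypothetical ones from the earlier sketches:

# default: similarity transform into a 96x112 crop using the built-in reference points
face = warp_and_crop_face(img, landmarks)

# affine transform estimated from the first three points only
face_affine = warp_and_crop_face(img, landmarks, align_type='cv2_affine')

# larger crop: reference points are regenerated via get_reference_facial_points()
face_big = warp_and_crop_face(img, landmarks, crop_size=(112, 112))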
Example #13
def warp_and_crop_face(src_img,
                       facial_pts,
                       reference_pts=None,
                       crop_size=(96, 112),
                       align_type='similarity'):
    """
    Function:
    ----------
        apply affine transform 'trans' to uv

    Parameters:
    ----------
        @src_img: 3x3 np.array
            input image
        @facial_pts: could be
            1)a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
                    by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform

    Returns:
    ----------
        @face_img: output face image with size (w, h) = @crop_size
    """

    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(
                output_size, inner_padding_factor, outer_padding,
                default_square)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException(
            'reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException(
            'facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException(
            'facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img
Example #14
def warp_and_crop_face(src_img,
                       facial_pts,
                       reference_pts=None,
                       crop_size=(96, 112),
                       align_type='similarity'):
    """
    Function:
    ----------
        apply affine transform 'trans' to uv

    Parameters:
    ----------
        @src_img: 3x3 np.array
            input image
        @facial_pts: could be
            1)a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
                    by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform

    Returns:
    ----------
        @face_img: output face image with size (w, h) = @crop_size
    """

    if reference_pts is None:
        reference_pts = REFERENCE_FACIAL_POINTS

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException(
            'reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException(
            'facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException(
            'facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img