Example #1
def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama(
        np.concatenate([image_landmarks[17:49], image_landmarks[54:55]]),
        landmarks_2D_new, True)[0:2]

    # get corner points in global space
    l_p = transform_points(
        np.float32([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)]), mat, True)
    l_c = l_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (l_p[2] - l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1] - l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    padding, remove_align = FaceType_to_padding_remove_align.get(
        face_type, (0.0, False))
    mod = (1.0 / scale) * (npla.norm(l_p[0] - l_p[2]) *
                           (padding * np.sqrt(2.0) + 0.5))

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array([
            np.round(l_c - tb_diag_vec * mod),
            np.round(l_c + bt_diag_vec * mod),
            np.round(l_c + tb_diag_vec * mod)
        ])
    else:
        # remove_align - face will be centered in the frame but not aligned
        l_t = np.array([
            np.round(l_c - tb_diag_vec * mod),
            np.round(l_c + bt_diag_vec * mod),
            np.round(l_c + tb_diag_vec * mod),
            np.round(l_c - bt_diag_vec * mod),
        ])

        # get area of face square in global space
        area = mathlib.polygon_area(l_t[:, 0], l_t[:, 1])

    # calc half of the square's side (used as an offset from the center)
    side = np.float32(math.sqrt(area) / 2)

        # calc 3 points with unrotated square
        l_t = np.array([
            np.round(l_c + [-side, -side]),
            np.round(l_c + [side, -side]),
            np.round(l_c + [side, side])
        ])

    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    mat = cv2.getAffineTransform(l_t, pts2)
    return mat
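
For context, the 2x3 matrix returned above is what such a pipeline would typically hand to cv2.warpAffine, both to cut the aligned crop out of the source frame and to map it back. A minimal usage sketch; img, lmrks and FaceType.FULL are placeholder assumptions, not part of the example above:

import cv2

output_size = 256
mat = get_transform_mat(lmrks, output_size, FaceType.FULL, scale=1.0)

# forward warp: source frame -> aligned square face crop
face_img = cv2.warpAffine(img, mat, (output_size, output_size), flags=cv2.INTER_LANCZOS4)

# inverse warp: map the (possibly edited) crop back onto a frame-sized canvas
restored = cv2.warpAffine(face_img, mat, (img.shape[1], img.shape[0]),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4)
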
Example #2
def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0, full_face_align_top=True):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    # padding and alignment flag depend on the requested face type
    padding, remove_align = FaceType_to_padding_remove_align.get(face_type, (0.0, False))

    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama(np.concatenate([image_landmarks[17:49], image_landmarks[54:55]]), landmarks_2D_new, True)[0:2]

    # get corner points and the center point in global space
    l_p = transform_points(np.float32([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)]), mat, True)
    l_c = l_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (l_p[2] - l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1] - l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    mod = (1.0 / scale) * (npla.norm(l_p[0] - l_p[2]) * (padding * np.sqrt(2.0) + 0.5))

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array([np.round(l_c - tb_diag_vec * mod),
                        np.round(l_c + bt_diag_vec * mod),
                        np.round(l_c + tb_diag_vec * mod)])
    else:
        # remove_align - face will be centered in the frame but not aligned
        l_t = np.array([np.round(l_c - tb_diag_vec * mod),
                        np.round(l_c + bt_diag_vec * mod),
                        np.round(l_c + tb_diag_vec * mod),
                        np.round(l_c - bt_diag_vec * mod)])

        # get area of face square in global space
        area = mathlib.polygon_area(l_t[:, 0], l_t[:, 1])

        # calc half of the square's side (used as an offset from the center)
        side = np.float32(math.sqrt(area) / 2)

        # calc 3 points with unrotated square
        l_t = np.array([np.round(l_c + [-side, -side]),
                        np.round(l_c + [side, -side]),
                        np.round(l_c + [side, side])])

    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    mat = cv2.getAffineTransform(l_t, pts2)


    #if remove_align:
    #    bbox = transform_points ( [ (0,0), (0,output_size), (output_size, output_size), (output_size,0) ], mat, True)
    #    #import code
    #    #code.interact(local=dict(globals(), **locals()))
    #    area = mathlib.polygon_area(bbox[:,0], bbox[:,1] )
    #    side = math.sqrt(area) / 2
    #    center = transform_points ( [(output_size/2,output_size/2)], mat, True)
    #    pts1 = np.float32(( center+[-side,-side], center+[side,-side], center+[side,-side] ))
    #    pts2 = np.float32([[0,0],[output_size,0],[0,output_size]])
    #    mat = cv2.getAffineTransform(pts1,pts2)

    return mat
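
Every example here pushes corner points through the estimated matrix with a transform_points helper (optionally inverting the matrix first). The helper itself is not shown; the following is a minimal sketch of what such a helper could look like, offered as an assumption rather than the project's actual implementation:

import cv2
import numpy as np

def transform_points(points, mat, invert=False):
    # Map an (N, 2) point array through a 2x3 affine matrix; invert the matrix first if requested.
    if invert:
        mat = cv2.invertAffineTransform(mat)
    points = np.expand_dims(np.float32(points), axis=1)  # (N, 1, 2), the layout cv2.transform expects
    points = cv2.transform(points, mat)                   # apply the affine map to every point
    return np.squeeze(points)                             # back to (N, 2)
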
Example #3
def get_transform_mat_data (image_landmarks, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array (image_landmarks)

    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]
    
    # get corner points in global space
    l_p = transform_points (  np.float32([(0,0),(1,0),(1,1),(0,1),(0.5,0.5)]) , mat, True)
    l_c = l_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (l_p[2]-l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1]-l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    padding, _ = FaceType_to_padding_remove_align.get(face_type, (0.0, False))
    mod = (1.0 / scale)* ( npla.norm(l_p[0]-l_p[2])*(padding*np.sqrt(2.0) + 0.5) )
    return l_c, tb_diag_vec, bt_diag_vec, mod
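
Example #3 stops at the intermediate values, so a caller still has to assemble the affine matrix the way Examples #1 and #2 do. A sketch of such a caller for the aligned (non-remove_align) case; the wrapper name get_transform_mat_from_data is hypothetical:

import cv2
import numpy as np

def get_transform_mat_from_data(image_landmarks, output_size, face_type, scale=1.0):
    # Rebuild the aligned-square affine transform from the pieces returned above.
    l_c, tb_diag_vec, bt_diag_vec, mod = get_transform_mat_data(image_landmarks, face_type, scale)

    # three corners of the rotated square in global space
    l_t = np.array([np.round(l_c - tb_diag_vec * mod),
                    np.round(l_c + bt_diag_vec * mod),
                    np.round(l_c + tb_diag_vec * mod)])

    # map them onto the top-left, top-right and bottom-right corners of the output frame
    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    return cv2.getAffineTransform(np.float32(l_t), pts2)
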
Example #4
def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array (image_landmarks)


    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]

    # get corner points in global space
    g_p = transform_points (  np.float32([(0,0),(1,0),(1,1),(0,1),(0.5,0.5) ]) , mat, True)
    g_c = g_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (g_p[2]-g_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (g_p[1]-g_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    padding, remove_align = FaceType_to_padding_remove_align.get(face_type, (0.0, False))
    mod = (1.0 / scale)* ( npla.norm(g_p[0]-g_p[2])*(padding*np.sqrt(2.0) + 0.5) )

    if face_type == FaceType.WHOLE_FACE:
        # WHOLE_FACE: shift the crop up by 7% of the frame height so the face sits slightly lower in the frame and more of the forehead is covered
        vec = (g_p[0]-g_p[3]).astype(np.float32)
        vec_len = npla.norm(vec)
        vec /= vec_len
        g_c += vec*vec_len*0.07

    elif face_type == FaceType.HEAD:
        # assuming image_landmarks are 3D_Landmarks extracted for HEAD,
        # adjust horizontal offset according to estimated yaw
        yaw = estimate_averaged_yaw(transform_points (image_landmarks, mat, False))

        hvec = (g_p[0]-g_p[1]).astype(np.float32)
        hvec_len = npla.norm(hvec)
        hvec /= hvec_len

        yaw *= np.abs(math.tanh(yaw*2)) # Damp near zero

        g_c -= hvec * (yaw * hvec_len / 2.0)

        # HEAD: shift the crop up by 50% of the frame height so the face sits lower in the frame and the whole head is covered
        vvec = (g_p[0]-g_p[3]).astype(np.float32)
        vvec_len = npla.norm(vvec)
        vvec /= vvec_len
        g_c += vvec*vvec_len*0.50

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array( [ g_c - tb_diag_vec*mod,
                          g_c + bt_diag_vec*mod,
                          g_c + tb_diag_vec*mod ] )
    else:
        # remove_align - face will be centered in the frame but not aligned
        l_t = np.array( [ g_c - tb_diag_vec*mod,
                          g_c + bt_diag_vec*mod,
                          g_c + tb_diag_vec*mod,
                          g_c - bt_diag_vec*mod,
                         ] )

        # get area of face square in global space
        area = mathlib.polygon_area(l_t[:,0], l_t[:,1] )

        # calc half of the square's side (used as an offset from the center)
        side = np.float32(math.sqrt(area) / 2)

        # calc 3 points with unrotated square
        l_t = np.array( [ g_c + [-side,-side],
                          g_c + [ side,-side],
                          g_c + [ side, side] ] )

    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(( (0,0),(output_size,0),(output_size,output_size) ))
    mat = cv2.getAffineTransform(l_t,pts2)
    return mat
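
The HEAD branch damps the yaw-driven horizontal shift with yaw *= np.abs(math.tanh(yaw*2)), so near-frontal faces are barely moved while strongly turned heads receive almost the full shift. A quick numeric check of that damping curve (plain Python, no project code required):

import math

for yaw in (0.05, 0.1, 0.5, 1.0):
    damped = yaw * abs(math.tanh(yaw * 2))
    print(f"yaw={yaw:.2f} -> damped shift factor={damped:.3f}")
# yaw=0.05 -> damped shift factor=0.005   (almost no shift for a frontal face)
# yaw=0.10 -> damped shift factor=0.020
# yaw=0.50 -> damped shift factor=0.381
# yaw=1.00 -> damped shift factor=0.964   (close to the full shift for a strong turn)
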