def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    if face_type == FaceType.AVATAR:
        centroid = np.mean(image_landmarks, axis=0)

        mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
        a, c = mat[0, 0], mat[1, 0]
        scale = math.sqrt((a * a) + (c * c))

        padding = (output_size / 64) * 32

        mat = np.eye(2, 3)
        mat[0, 2] = -centroid[0]
        mat[1, 2] = -centroid[1]
        mat = mat * scale * (output_size / 3)
        mat[:, 2] += output_size / 2
    else:
        if face_type == FaceType.HALF:
            padding = 0
        elif face_type == FaceType.FULL:
            padding = (output_size / 64) * 12
        elif face_type == FaceType.HEAD:
            padding = (output_size / 64) * 24
        else:
            raise ValueError('wrong face_type')

        mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
        mat = mat * (output_size - 2 * padding)
        mat[:, 2] += padding
        mat *= (1 / scale)
        mat[:, 2] += -output_size * (((1 / scale) - 1.0) / 2)

    return mat
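Usage sketch (not part of the original example): how the returned 2x3 matrix is typically applied. `frame` and `landmarks_68` are placeholder inputs, and `FaceType`, `umeyama`, `landmarks_2D`, `np`, and `math` are assumed to come from the surrounding module.

import cv2

output_size = 256
mat = get_transform_mat(landmarks_68, output_size, FaceType.FULL, scale=1.0)
aligned = cv2.warpAffine(frame, mat, (output_size, output_size),
                         flags=cv2.INTER_LANCZOS4)
# cv2.invertAffineTransform(mat) gives the crop-to-frame mapping back.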
Example No. 2
def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)
    """
    if face_type == FaceType.AVATAR:
        centroid = np.mean (image_landmarks, axis=0)

        mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
        a, c = mat[0,0], mat[1,0]
        scale = math.sqrt((a * a) + (c * c))

        padding = (output_size / 64) * 32

        mat = np.eye ( 2,3 )
        mat[0,2] = -centroid[0]
        mat[1,2] = -centroid[1]
        mat = mat * scale * (output_size / 3)
        mat[:,2] += output_size / 2
    else:
    """
    remove_align = False
    if face_type == FaceType.FULL_NO_ALIGN:
        face_type = FaceType.FULL
        remove_align = True
    elif face_type == FaceType.HEAD_NO_ALIGN:
        face_type = FaceType.HEAD
        remove_align = True

    if face_type == FaceType.HALF:
        padding = 0
    elif face_type == FaceType.FULL:
        padding = (output_size / 64) * 12
    elif face_type == FaceType.HEAD:
        padding = (output_size / 64) * 21
    else:
        raise ValueError(f'wrong face_type: {face_type}')

    mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
    mat = mat * (output_size - 2 * padding)
    mat[:, 2] += padding
    mat *= (1 / scale)
    mat[:, 2] += -output_size * (((1 / scale) - 1.0) / 2)

    if remove_align:
        bbox = transform_points([(0, 0), (0, output_size - 1),
                                 (output_size - 1, output_size - 1),
                                 (output_size - 1, 0)], mat, True)
        area = mathlib.polygon_area(bbox[:, 0], bbox[:, 1])
        side = math.sqrt(area) / 2
        center = transform_points([(output_size / 2, output_size / 2)], mat,
                                  True)

        pts1 = np.float32([
            center + [-side, -side], center + [side, -side],
            center + [-side, side]
        ])
        pts2 = np.float32([[0, 0], [output_size - 1, 0], [0, output_size - 1]])
        mat = cv2.getAffineTransform(pts1, pts2)

    return mat
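The examples call `transform_points` without defining it. A minimal sketch of what such a helper usually does (map points through the 2x3 affine, or through its inverse when `invert=True`), written as an assumption rather than the project's own implementation:

import cv2
import numpy as np

def transform_points(points, mat, invert=False):
    # Map (x, y) points through the affine matrix; invert=True applies the
    # inverse transform, which is how the snippets above push crop-space
    # corners back into image space.
    if invert:
        mat = cv2.invertAffineTransform(mat)
    points = np.expand_dims(np.float32(points), axis=1)  # (N, 1, 2) for cv2.transform
    points = cv2.transform(points, mat)
    return np.squeeze(points, axis=1)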
Example No. 3
def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0, full_face_align_top=True):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    # fall back to (0.0, False) so the unpacking also works for unknown face types
    padding, remove_align = FaceType_to_padding_remove_align.get(face_type, (0.0, False))

    mat = umeyama(np.concatenate([image_landmarks[17:49], image_landmarks[54:55]]), landmarks_2D_new, True)[0:2]
    l_p = transform_points(np.float32([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)]), mat, True)
    l_c = l_p[4]

    tb_diag_vec = (l_p[2] - l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1] - l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    mod = (1.0 / scale) * (npla.norm(l_p[0] - l_p[2]) * (padding * np.sqrt(2.0) + 0.5))

    l_t = np.array([np.round(l_c - tb_diag_vec * mod),
                    np.round(l_c + bt_diag_vec * mod),
                    np.round(l_c + tb_diag_vec * mod)])

    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    mat = cv2.getAffineTransform(l_t, pts2)

    #if full_face_align_top and (face_type == FaceType.FULL or face_type == FaceType.FULL_NO_ALIGN):
    #    #lmrks2 = expand_eyebrows(image_landmarks)    
    #    #lmrks2_ = transform_points( [ lmrks2[19], lmrks2[24] ], mat, False )     
    #    #y_diff = np.float32( (0,np.min(lmrks2_[:,1])) ) 
    #    #y_diff = transform_points( [ np.float32( (0,0) ), y_diff], mat, True)
    #    #y_diff = y_diff[1]-y_diff[0]
    #    
    #    x_diff = np.float32((0,0))
    #    
    #    lmrks2_ = transform_points( [ image_landmarks[0], image_landmarks[16] ], mat, False )   
    #    if lmrks2_[0,0] < 0:
    #        x_diff = lmrks2_[0,0]        
    #        x_diff = transform_points( [ np.float32( (0,0) ), np.float32((x_diff,0)) ], mat, True)
    #        x_diff = x_diff[1]-x_diff[0]        
    #    elif lmrks2_[1,0] >= output_size:
    #        x_diff = lmrks2_[1,0]-(output_size-1)
    #        x_diff = transform_points( [ np.float32( (0,0) ), np.float32((x_diff,0)) ], mat, True)
    #        x_diff = x_diff[1]-x_diff[0]    
    #    
    #    mat = cv2.getAffineTransform( l_t+y_diff+x_diff ,pts2)
        
    if remove_align:
        bbox = transform_points([(0, 0), (0, output_size), (output_size, output_size), (output_size, 0)], mat, True)
        area = mathlib.polygon_area(bbox[:, 0], bbox[:, 1])
        side = math.sqrt(area) / 2
        center = transform_points([(output_size / 2, output_size / 2)], mat, True)
        pts1 = np.float32((center + [-side, -side], center + [side, -side], center + [-side, side]))
        mat = cv2.getAffineTransform(pts1, pts2)

    return mat
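This example (and Example No. 5 below) unpacks a `(padding, remove_align)` pair from `FaceType_to_padding_remove_align`. The table's shape is implied by that unpacking; the padding fractions below are placeholders for illustration, not the project's actual values:

FaceType_to_padding_remove_align = {
    FaceType.HALF:          (0.0,  False),
    FaceType.FULL:          (0.21, False),
    FaceType.FULL_NO_ALIGN: (0.21, True),   # same crop size, axis-aligned output
    FaceType.HEAD:          (0.40, False),
    FaceType.HEAD_NO_ALIGN: (0.40, True),
}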
Example No. 4
def get_transform_mat(image_landmarks, output_size, face_type):
    if output_size not in (64, 128, 256, 512):
        raise ValueError('get_transform_mat() output_size must be 64, 128, 256 or 512')

    if face_type == 'half_face':
        padding = 0
    elif face_type == 'full_face':
        padding = (output_size // 64) * 12
    elif face_type == 'head':
        padding = (output_size // 64) * 24
    else:
        raise ValueError(f'wrong face_type: {face_type}')

    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)
    mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
    mat = mat * (output_size - 2 * padding)
    mat[:, 2] += padding
    return mat
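All of these examples build on `umeyama`, which estimates a least-squares similarity transform between two point sets (Umeyama, 1991) and returns a 3x3 matrix that the snippets slice to 2x3 with `[0:2]`. A minimal sketch for the common full-rank 2D case, offered as an assumption about that helper rather than its actual source:

import numpy as np

def umeyama(src, dst, estimate_scale=True):
    # Similarity transform (rotation + uniform scale + translation) that maps
    # src points onto dst points in the least-squares sense.
    src, dst = np.float64(src), np.float64(dst)
    num, dim = src.shape
    src_mean, dst_mean = src.mean(axis=0), dst.mean(axis=0)
    src_c, dst_c = src - src_mean, dst - dst_mean
    A = dst_c.T @ src_c / num
    d = np.ones(dim)
    if np.linalg.det(A) < 0:
        d[-1] = -1                       # keep the transform reflection-free
    U, S, Vt = np.linalg.svd(A)
    R = U @ np.diag(d) @ Vt              # optimal rotation
    scale = (S @ d) / src_c.var(axis=0).sum() if estimate_scale else 1.0
    T = np.eye(dim + 1)
    T[:dim, :dim] = scale * R
    T[:dim, dim] = dst_mean - scale * (R @ src_mean)
    return T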
Example No. 5
def get_transform_mat(image_landmarks,
                      output_size,
                      face_type,
                      scale=1.0,
                      full_face_align_top=True):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    # fall back to (0.0, False) so the unpacking also works for unknown face types
    padding, remove_align = FaceType_to_padding_remove_align.get(
        face_type, (0.0, False))

    mat = umeyama(
        np.concatenate([image_landmarks[17:49], image_landmarks[54:55]]),
        landmarks_2D_new, True)[0:2]
    l_p = transform_points(
        np.float32([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)]), mat, True)
    l_c = l_p[4]

    tb_diag_vec = (l_p[2] - l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1] - l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    mod = (1.0 / scale) * (npla.norm(l_p[0] - l_p[2]) *
                           (padding * np.sqrt(2.0) + 0.5))

    if not remove_align:
        l_t = np.array([
            np.round(l_c - tb_diag_vec * mod),
            np.round(l_c + bt_diag_vec * mod),
            np.round(l_c + tb_diag_vec * mod)
        ])
    else:
        l_t = np.array([
            np.round(l_c - tb_diag_vec * mod),
            np.round(l_c + bt_diag_vec * mod),
            np.round(l_c + tb_diag_vec * mod),
            np.round(l_c - bt_diag_vec * mod),
        ])

        area = mathlib.polygon_area(l_t[:, 0], l_t[:, 1])
        side = np.float32(math.sqrt(area) / 2)
        l_t = np.array([
            np.round(l_c + [-side, -side]),
            np.round(l_c + [side, -side]),
            np.round(l_c + [side, side])
        ])

    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    mat = cv2.getAffineTransform(l_t, pts2)

    #if remove_align:
    #    bbox = transform_points ( [ (0,0), (0,output_size), (output_size, output_size), (output_size,0) ], mat, True)
    #    #import code
    #    #code.interact(local=dict(globals(), **locals()))
    #    area = mathlib.polygon_area(bbox[:,0], bbox[:,1] )
    #    side = math.sqrt(area) / 2
    #    center = transform_points ( [(output_size/2,output_size/2)], mat, True)
    #    pts1 = np.float32(( center+[-side,-side], center+[side,-side], center+[side,-side] ))
    #    pts2 = np.float32([[0,0],[output_size,0],[0,output_size]])
    #    mat = cv2.getAffineTransform(pts1,pts2)

    return mat
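`mathlib.polygon_area` is used in the `remove_align` branches above to size an axis-aligned square with the same area as the rotated crop. A sketch of the standard shoelace formula such a helper computes (an assumption, not the project's `mathlib` module):

import numpy as np

def polygon_area(x, y):
    # Shoelace formula: area of a simple polygon from its vertex coordinates.
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))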