Example #1
def _load_tri(bfm_fp):
    if osp.split(bfm_fp)[-1] == 'bfm_noneck_v3.pkl':
        tri = _load(make_abs_path('../configs/tri.pkl'))  # this tri/face is re-built for bfm_noneck_v3
    else:
        tri = _load(bfm_fp).get('tri')

    tri = _to_ctype(tri.T).astype(np.int32)
    return tri
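A hedged usage sketch (not part of the excerpt; the pickle path below is an assumption, any BFM pickle with a 'tri' entry would do):

tri = _load_tri('configs/bfm_noneck_v3.pkl')  # hypothetical path
print(tri.dtype, tri.flags['C_CONTIGUOUS'])   # int32, True: one index triple per face, ready for the rasterizer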
Example #2
    def __init__(self, **kvs):
        # torch.set_grad_enabled(False)

        # config
        self.gpu_mode = kvs.get('gpu_mode', False)
        self.gpu_id = kvs.get('gpu_id', 0)
        self.size = kvs.get('size', 120)

        param_mean_std_fp = kvs.get(
            'param_mean_std_fp',
            make_abs_path(
                f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl'))

        onnx_fp = kvs.get('onnx_fp',
                          kvs.get('checkpoint_fp').replace('.pth', '.onnx'))

        # convert to onnx on the fly if it does not exist yet
        if onnx_fp is None or not osp.exists(onnx_fp):
            print(
                f'{onnx_fp} does not exist, try to convert the `.pth` version to `.onnx` online'
            )
            onnx_fp = convert_to_onnx(**kvs)

        self.session = onnxruntime.InferenceSession(onnx_fp, None)

        # params normalization config
        r = _load(param_mean_std_fp)
        self.param_mean = r.get('mean')
        self.param_std = r.get('std')
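For context, a minimal construction sketch; the class name (TDDFA_ONNX) and the checkpoint path are assumptions, while the keyword names are exactly the ones read via kvs.get above:

tddfa = TDDFA_ONNX(               # hypothetical class name for the __init__ above
    gpu_mode=False,
    size=120,
    checkpoint_fp='weights/mb1_120x120.pth',  # assumed path; '.pth' is swapped for '.onnx' internally
)
print([i.name for i in tddfa.session.get_inputs()])  # inspect the ONNX graph inputs via onnxruntime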
Example #3
    def __init__(self, bfm_fp, shape_dim=40, exp_dim=10):
        bfm = _load(bfm_fp)
        self.u = bfm.get('u').astype(np.float32)  # fix bug
        self.w_shp = bfm.get('w_shp').astype(np.float32)[..., :shape_dim]
        self.w_exp = bfm.get('w_exp').astype(np.float32)[..., :exp_dim]
        if osp.split(bfm_fp)[-1] == 'bfm_noneck_v3.pkl':
            self.tri = _load(make_abs_path('../configs/tri.pkl'))  # this tri/face is re-built for bfm_noneck_v3
        else:
            self.tri = bfm.get('tri')

        self.tri = _to_ctype(self.tri.T).astype(np.int32)
        self.keypoints = bfm.get('keypoints').astype(np.int64)  # integer indices used to slice out the landmark rows below
        w = np.concatenate((self.w_shp, self.w_exp), axis=1)
        self.w_norm = np.linalg.norm(w, axis=0)

        self.u_base = self.u[self.keypoints].reshape(-1, 1)
        self.w_shp_base = self.w_shp[self.keypoints]
        self.w_exp_base = self.w_exp[self.keypoints]
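A hedged sketch of the sparse 3DMM reconstruction these *_base arrays support, assuming a 62-d parameter vector laid out as 12 (pose) + 40 (shape) + 10 (expression) as stated in the other excerpts, already de-normalized with param_mean/param_std, and an instance of the class above named bfm:

def sparse_recon(param, bfm):
    # split the 62-d vector: flattened 3x4 pose matrix [R | t], then shape / expression coefficients
    R_ = param[:12].reshape(3, 4)
    R, offset = R_[:, :3], R_[:, 3:]
    alpha_shp = param[12:52].reshape(-1, 1)
    alpha_exp = param[52:].reshape(-1, 1)
    # mean landmarks plus shape/expression deformation, then the rigid transform
    pts3d = bfm.u_base + bfm.w_shp_base @ alpha_shp + bfm.w_exp_base @ alpha_exp
    return R @ pts3d.reshape(3, -1, order='F') + offset  # 3 x 68 landmark coordinates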
Example #4
    def __init__(self, bfm_fp):
        bfm = _load(bfm_fp)
        self.u = bfm.get('u').astype(np.float32)  # fix bug
        self.w_shp = bfm.get('w_shp').astype(np.float32)
        self.w_exp = bfm.get('w_exp').astype(np.float32)
        self.tri = bfm.get('tri')
        self.keypoints = bfm.get('keypoints').astype(np.int64)  # integer indices used to slice out the landmark rows below
        w = np.concatenate((self.w_shp, self.w_exp), axis=1)
        self.w_norm = np.linalg.norm(w, axis=0)

        self.u_base = self.u[self.keypoints].reshape(-1, 1)
        self.w_shp_base = self.w_shp[self.keypoints]
        self.w_exp_base = self.w_exp[self.keypoints]
Example #5
    def __init__(self, bfm_fp, shape_dim=40, exp_dim=10):
        super(BFMModel_ONNX, self).__init__()

        _to_tensor = _numpy_to_tensor

        # load bfm
        bfm = _load(bfm_fp)

        u = _to_tensor(bfm.get('u').astype(np.float32))
        self.u = u.view(-1, 3).transpose(1, 0)
        w_shp = _to_tensor(bfm.get('w_shp').astype(np.float32)[..., :shape_dim])
        w_exp = _to_tensor(bfm.get('w_exp').astype(np.float32)[..., :exp_dim])
        w = torch.cat((w_shp, w_exp), dim=1)
        self.w = w.view(-1, 3, w.shape[-1]).contiguous().permute(1, 0, 2)
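        # after the reshape/permute above, self.u is (3, n_vertices) and
        # self.w is (3, n_vertices, shape_dim + exp_dim): the x/y/z components are
        # separated into rows so the forward pass can work on 3 x N vertex tensors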
Example #6
    def __init__(self, **kvs):
        torch.set_grad_enabled(False)

        # load BFM
        self.bfm = BFMModel(
            bfm_fp=kvs.get('bfm_fp', make_abs_path('configs/bfm_noneck_v3.pkl')),
            shape_dim=kvs.get('shape_dim', 40),
            exp_dim=kvs.get('exp_dim', 10))
        self.tri = self.bfm.tri

        # config
        self.gpu_mode = kvs.get('gpu_mode', False)
        self.gpu_id = kvs.get('gpu_id', 0)
        self.size = kvs.get('size', 120)
        if self.gpu_mode:
            print(f"Loading 3DDFA_v2 to GPU (id={self.gpu_id})...")

        param_mean_std_fp = kvs.get(
            'param_mean_std_fp',
            make_abs_path(
                f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl'))

        # load model; the default output is a 62-d parameter vector: 12 (pose) + 40 (shape) + 10 (expression)
        model = getattr(models, kvs.get('arch'))(
            num_classes=kvs.get('num_params', 62),
            widen_factor=kvs.get('widen_factor', 1),
            size=self.size,
            mode=kvs.get('mode', 'small'))
        model = load_model(model, kvs.get('checkpoint_fp'))

        if self.gpu_mode:
            print("Loading 3DDFA_v2 model to GPU...")
            cudnn.benchmark = True
            model = model.cuda(device=self.gpu_id)

        self.model = model
        self.model.eval()  # eval mode: freezes BatchNorm running statistics

        # data normalization
        transform_normalize = NormalizeGjz(mean=127.5, std=128)
        transform_to_tensor = ToTensorGjz()
        transform = Compose([transform_to_tensor, transform_normalize])
        self.transform = transform

        # params normalization config
        r = _load(param_mean_std_fp)
        self.param_mean = r.get('mean')
        self.param_std = r.get('std')
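For orientation, a hedged construction sketch; the class name (TDDFA) and the concrete arch/checkpoint values are assumptions, while the keyword names are the ones read via kvs.get above:

tddfa = TDDFA(                     # hypothetical class name for the __init__ above
    arch='mobilenet',              # assumed: must name an attribute of `models`
    checkpoint_fp='weights/mb1_120x120.pth',  # assumed checkpoint path
    num_params=62,
    widen_factor=1,
    size=120,
    gpu_mode=False,
)
# tddfa.model is now an eval-mode network; tddfa.transform normalizes crops with (x - 127.5) / 128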
Example #7
def gen_3d_vertex():
    filelists = 'test.data/AFLW2000-3D_crop.list'
    root = 'AFLW-2000-3D/'
    fns = open(filelists).read().strip().split('\n')
    params = _load('res/params_aflw2000.npy')

    sel = ['00427', '00439', '00475', '00477', '00497', '00514', '00562', '00623', '01045', '01095', '01104', '01506',
           '01621', '02214', '02244', '03906', '04157']
    sel = list(map(lambda x: f'image{x}.jpg', sel))
    for i in range(2000):
        fn = fns[i]
        if fn in sel:
            vertex = reconstruct_vertex(params[i], dense=True)
            wfp = osp.join('res/AFLW-2000-3D_vertex/', fn.replace('.jpg', '.mat'))
            print(wfp)
            sio.savemat(wfp, {'vertex': vertex})
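Once generated, the saved .mat files can be read back with scipy.io; a small sketch using one of the file names from the sel list above:

import scipy.io as sio
vertex = sio.loadmat('res/AFLW-2000-3D_vertex/image00427.mat')['vertex']
print(vertex.shape)  # dense reconstructed vertices, one x/y/z triple per mesh vertex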
Example #8
    def __init__(self, **kvs):
        # torch.set_grad_enabled(False)

        # load onnx version of BFM
        bfm_fp = kvs.get('bfm_fp',
                         make_abs_path(__file__, 'configs/bfm_noneck_v3.pkl'))
        bfm_onnx_fp = bfm_fp.replace('.pkl', '.onnx')
        if not osp.exists(bfm_onnx_fp):
            convert_bfm_to_onnx(bfm_onnx_fp,
                                shape_dim=kvs.get('shape_dim', 40),
                                exp_dim=kvs.get('exp_dim', 10))
        self.bfm_session = onnxruntime.InferenceSession(bfm_onnx_fp, None)

        # load for optimization
        bfm = BFMModel(bfm_fp,
                       shape_dim=kvs.get('shape_dim', 40),
                       exp_dim=kvs.get('exp_dim', 10))
        self.tri = bfm.tri
        self.u_base, self.w_shp_base, self.w_exp_base = bfm.u_base, bfm.w_shp_base, bfm.w_exp_base

        # config
        self.gpu_mode = kvs.get('gpu_mode', False)
        self.gpu_id = kvs.get('gpu_id', 0)
        self.size = kvs.get('size', 120)

        param_mean_std_fp = kvs.get(
            'param_mean_std_fp',
            make_abs_path(
                __file__,
                f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl'))

        onnx_fp = kvs.get('onnx_fp',
                          kvs.get('checkpoint_fp').replace('.pth', '.onnx'))

        # convert to onnx on the fly if it does not exist yet
        if onnx_fp is None or not osp.exists(onnx_fp):
            print(
                f'{onnx_fp} does not exist, try to convert the `.pth` version to `.onnx` online'
            )
            onnx_fp = convert_to_onnx(**kvs)

        self.session = onnxruntime.InferenceSession(onnx_fp, None)

        # params normalization config
        r = _load(param_mean_std_fp)
        self.param_mean = r.get('mean')
        self.param_std = r.get('std')
Example #9
def pncc(img, ver_lst, show_flag=False, wfp=None, with_bg_flag=True):
    ncc_code = _load(make_abs_path('../configs/ncc_code.npy'))

    if with_bg_flag:
        overlap = img.copy()
    else:
        overlap = np.zeros_like(img)

    # rendering pncc
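    # NOTE: `tri` (the triangle index array) and `rasterize` are module-level names
    # in the original file; they are not shown in this excerpt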
    for ver_ in ver_lst:
        ver = _to_ctype(ver_.T)  # transpose
        overlap = rasterize(ver, tri, ncc_code.T, bg=overlap)  # m x 3

    if wfp is not None:
        cv2.imwrite(wfp, overlap)
        print(f'Save visualization result to {wfp}')

    if show_flag:
        plot_image(overlap)

    return overlap
Example #10
    def __init__(self, **kvs):
        torch.set_grad_enabled(False)

        # config
        self.gpu_mode = kvs.get('gpu_mode', False)
        self.gpu_id = kvs.get('gpu_id', 0)
        self.size = kvs.get('size', 120)

        param_mean_std_fp = kvs.get(
            'param_mean_std_fp',
            make_abs_path(
                f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl'))

        # load model; 62 = 12 (pose) + 40 (shape) + 10 (expression)
        model = getattr(models, kvs.get('arch'))(
            num_classes=kvs.get('num_params', 62),
            widen_factor=kvs.get('widen_factor', 1),
            size=self.size,
            mode=kvs.get('mode', 'small'))
        model = load_model(model, kvs.get('checkpoint_fp'))

        if self.gpu_mode:
            cudnn.benchmark = True
            model = model.cuda(device=self.gpu_id)

        self.model = model
        self.model.eval()  # eval mode: freezes BatchNorm running statistics

        # data normalization
        transform_normalize = NormalizeGjz(mean=127.5, std=128)
        transform_to_tensor = ToTensorGjz()
        transform = Compose([transform_to_tensor, transform_normalize])
        self.transform = transform

        # params normalization config
        r = _load(param_mean_std_fp)
        self.param_mean = r.get('mean')
        self.param_std = r.get('std')
Example #11
    uv_coords = C['UV'].copy(order='C').astype(np.float32)
    return uv_coords


def process_uv(uv_coords, uv_h=256, uv_w=256):
    uv_coords[:, 0] = uv_coords[:, 0] * (uv_w - 1)
    uv_coords[:, 1] = uv_coords[:, 1] * (uv_h - 1)
    uv_coords[:, 1] = uv_h - uv_coords[:, 1] - 1
    uv_coords = np.hstack(
        (uv_coords, np.zeros((uv_coords.shape[0], 1),
                             dtype=np.float32)))  # add z
    return uv_coords
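A quick sanity check of the mapping above (normalized UV to pixel coordinates, v flipped, zero z column appended), assuming the default 256 x 256 UV map:

uv_demo = process_uv(np.array([[0.5, 0.0]], dtype=np.float32))
print(uv_demo)  # [[127.5 255.   0. ]]: u scaled by 255, v flipped, z column appended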


g_uv_coords = load_uv_coords(make_abs_path('../configs/BFM_UV.mat'))
indices = _load(make_abs_path('../configs/indices.npy'))
g_uv_coords = g_uv_coords[indices, :]


def get_colors(img, ver):
    # nearest-neighbor sampling
    [h, w, _] = img.shape
    ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1)  # x
    ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1)  # y
    ind = np.round(ver).astype(np.int32)
    colors = img[ind[1, :], ind[0, :], :]  # n x 3

    return colors


def bilinear_interpolate(img, x, y):
Example #12
"""
Notation (2019.09.15): two versions of spliting AFLW2000-3D:
 1) AFLW2000-3D.pose.npy: according to the fitted pose
 2) AFLW2000-3D-new.pose: according to AFLW labels 
There is no obvious difference between these two splits.
"""

import os.path as osp
import numpy as np
from math import sqrt
from utils.io import _load

d = 'test.configs'

# [1312, 383, 305], current version
yaws_list = _load(osp.join(d, 'AFLW2000-3D.pose.npy'))

# [1306, 462, 232], same as paper
# yaws_list = _load(osp.join(d, 'AFLW2000-3D-new.pose.npy'))

# original annotations
pts68_all_ori = _load(osp.join(d, 'AFLW2000-3D.pts68.npy'))

# re-annotated
pts68_all_re = _load(osp.join(d, 'AFLW2000-3D-Reannotated.pts68.npy'))
roi_boxs = _load(osp.join(d, 'AFLW2000-3D_crop.roi_box.npy'))


def ana(nme_list):
    yaw_list_abs = np.abs(yaws_list)
    ind_yaw_1 = yaw_list_abs <= 30
Example #13
#!/usr/bin/env python3
# coding: utf-8

import os.path as osp
import numpy as np
from math import sqrt
from utils.io import _load

d = 'test.configs'
yaw_list = _load(osp.join(d, 'AFLW_GT_crop_yaws.npy'))
roi_boxs = _load(osp.join(d, 'AFLW_GT_crop_roi_box.npy'))
pts68_all = _load(osp.join(d, 'AFLW_GT_pts68.npy'))
pts21_all = _load(osp.join(d, 'AFLW_GT_pts21.npy'))


def ana(nme_list):
    yaw_list_abs = np.abs(yaw_list)
    ind_yaw_1 = yaw_list_abs <= 30
    ind_yaw_2 = np.bitwise_and(yaw_list_abs > 30, yaw_list_abs <= 60)
    ind_yaw_3 = yaw_list_abs > 60

    nme_1 = nme_list[ind_yaw_1]
    nme_2 = nme_list[ind_yaw_2]
    nme_3 = nme_list[ind_yaw_3]

    mean_nme_1 = np.mean(nme_1) * 100
    mean_nme_2 = np.mean(nme_2) * 100
    mean_nme_3 = np.mean(nme_3) * 100
    # mean_nme_all = np.mean(nme_list) * 100

    std_nme_1 = np.std(nme_1) * 100
Example #14
    uv_coords = C['UV'].copy(order='C').astype(np.float32)
    return uv_coords


def process_uv(uv_coords, uv_h=256, uv_w=256):
    uv_coords[:, 0] = uv_coords[:, 0] * (uv_w - 1)
    uv_coords[:, 1] = uv_coords[:, 1] * (uv_h - 1)
    uv_coords[:, 1] = uv_h - uv_coords[:, 1] - 1
    uv_coords = np.hstack(
        (uv_coords, np.zeros((uv_coords.shape[0], 1),
                             dtype=np.float32)))  # add z
    return uv_coords


g_uv_coords = load_uv_coords(make_abs_path('../configs/BFM_UV.mat'))
indices = _load(
    make_abs_path('../configs/indices.npy'))  # todo: handle bfm_slim
g_uv_coords = g_uv_coords[indices, :]


def get_colors(img, ver):
    # nearest-neighbor sampling
    [h, w, _] = img.shape
    ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1)  # x
    ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1)  # y
    ind = np.round(ver).astype(np.int32)
    colors = img[ind[1, :], ind[0, :], :]  # n x 3

    return colors


def bilinear_interpolate(img, x, y):
Example #15
import sys

sys.path.append('..')

import os.path as osp
import numpy as np
from utils.io import _load

make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)


class BFMModel(object):
    def __init__(self, bfm_fp):
        bfm = _load(bfm_fp)
        self.u = bfm.get('u').astype(np.float32)  # fix bug
        self.w_shp = bfm.get('w_shp').astype(np.float32)
        self.w_exp = bfm.get('w_exp').astype(np.float32)
        self.tri = bfm.get('tri')
        self.keypoints = bfm.get('keypoints').astype(np.int64)  # integer indices used to slice out the landmark rows below
        w = np.concatenate((self.w_shp, self.w_exp), axis=1)
        self.w_norm = np.linalg.norm(w, axis=0)

        self.u_base = self.u[self.keypoints].reshape(-1, 1)
        self.w_shp_base = self.w_shp[self.keypoints]
        self.w_exp_base = self.w_exp[self.keypoints]


cfg_path = make_abs_path('../configs')
bfm = BFMModel(osp.join(cfg_path, 'bfm_noneck_v3.pkl'))
tri = _load(osp.join(cfg_path, 'tri.pkl'))
Example #16
sys.path.append('..')

import os.path as osp
import numpy as np
from utils.io import _load

make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)


class BFMModel(object):
    def __init__(self, bfm_fp):
        bfm = _load(bfm_fp)
        self.u = bfm.get('u').astype(np.float32)  # fix bug
        self.w_shp = bfm.get('w_shp').astype(np.float32)
        self.w_exp = bfm.get('w_exp').astype(np.float32)
        self.tri = bfm.get('tri')
        self.keypoints = bfm.get('keypoints').astype(np.int64)  # integer indices used to slice out the landmark rows below
        w = np.concatenate((self.w_shp, self.w_exp), axis=1)
        self.w_norm = np.linalg.norm(w, axis=0)

        self.u_base = self.u[self.keypoints].reshape(-1, 1)
        self.w_shp_base = self.w_shp[self.keypoints]
        self.w_exp_base = self.w_exp[self.keypoints]


cfg_path = make_abs_path('../configs')
bfm = BFMModel(osp.join(
    cfg_path, 'bfm_noneck_v3.pkl'))  # you can change the bfm pkl path
tri = _load(osp.join(cfg_path,
                     'tri.pkl'))  # this tri is re-built for bfm_noneck_v3
Example #17
def draw_landmarks():
    filelists = 'test.data/AFLW2000-3D_crop.list'
    root = 'AFLW-2000-3D/'
    fns = open(filelists).read().strip().split('\n')
    params = _load('res/params_aflw2000.npy')

    for i in range(2000):
        plt.close()
        img_fp = osp.join(root, fns[i])
        img = io.imread(img_fp)
        lms = reconstruct_vertex(params[i], dense=False)
        lms = convert_to_ori(lms, i)

        # print(lms.shape)
        fig = plt.figure(figsize=plt.figaspect(.5))
        # fig = plt.figure(figsize=(8, 4))
        ax = fig.add_subplot(1, 2, 1)
        ax.imshow(img)

        alpha = 0.8
        markersize = 4
        lw = 1.5
        color = 'w'
        markeredgecolor = 'black'

        nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
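        # segment boundaries of the standard 68-point markup:
        # jaw, right/left brow, nose bridge, nose base, right/left eye, outer/inner lips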
        for ind in range(len(nums) - 1):
            l, r = nums[ind], nums[ind + 1]
            ax.plot(lms[0, l:r], lms[1, l:r], color=color, lw=lw, alpha=alpha - 0.1)

            ax.plot(lms[0, l:r], lms[1, l:r], marker='o', linestyle='None', markersize=markersize, color=color,
                    markeredgecolor=markeredgecolor, alpha=alpha)

        ax.axis('off')

        # 3D
        ax = fig.add_subplot(1, 2, 2, projection='3d')
        lms[1] = img.shape[1] - lms[1]
        lms[2] = -lms[2]

        # print(lms)
        ax.scatter(lms[0], lms[2], lms[1], c="cyan", alpha=1.0, edgecolor='b')

        for ind in range(len(nums) - 1):
            l, r = nums[ind], nums[ind + 1]
            ax.plot3D(lms[0, l:r], lms[2, l:r], lms[1, l:r], color='blue')

        ax.view_init(elev=5., azim=-95)
        # ax.set_xlabel('x')
        # ax.set_ylabel('y')
        # ax.set_zlabel('z')

        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])

        plt.tight_layout()
        # plt.show()

        wfp = f'res/AFLW-2000-3D/{osp.basename(img_fp)}'
        plt.savefig(wfp, dpi=200)