Example #1
def main_s(args):
    global tddfa
    global face_boxes
    # Given a still image path, load it in BGR channel order
    img = cv2.imread(args.img_fp)

    # Detect faces, get 3DMM params and roi boxes
    boxes = face_boxes(img)
    n = len(boxes)
    if n == 0:
        print('No face detected, exit')
        return
    print(f'Detected {n} faces')

    param_lst, roi_box_lst = tddfa(img, boxes)

    # Visualization and serialization
    dense_flag = args.opt in ('2d_dense', '3d', 'depth', 'pncc', 'uv_tex', 'ply', 'obj')
    old_suffix = get_suffix(args.img_fp)
    new_suffix = f'.{args.opt}' if args.opt in ('ply', 'obj') else '.jpg'

    wfp = f'examples/results/{args.img_fp.split("/")[-1].replace(old_suffix, "")}_{args.opt}' + new_suffix

    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)
    # print(len(ver_lst),ver_lst[0].shape)
    if args.opt == '3d':
        viz_pose(img, param_lst, ver_lst, show_flag=args.show_flag, wfp=wfp)
        return render(img, ver_lst, tddfa.tri, alpha=0.6, show_flag=args.show_flag, wfp=wfp)
    else:
        raise ValueError(f'Unknown opt {args.opt}')
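Example #1 reads the module-level globals tddfa and face_boxes instead of constructing them itself. A minimal driver sketch, assuming the 3DDFA_V2 repo layout and the config path used by the other examples here (the argument defaults are illustrative):

import argparse

import yaml
from FaceBoxes import FaceBoxes
from TDDFA import TDDFA

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='still-image demo driver (sketch)')
    parser.add_argument('-c', '--config', default='configs/mb1_120x120.yml')
    parser.add_argument('-i', '--img_fp', default='examples/inputs/emma.jpg')
    parser.add_argument('-m', '--mode', default='cpu', help='gpu or cpu')
    parser.add_argument('-o', '--opt', default='3d')
    parser.add_argument('--show_flag', action='store_true')
    args = parser.parse_args()

    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    tddfa = TDDFA(gpu_mode=(args.mode == 'gpu'), **cfg)  # module-level global read by main_s
    face_boxes = FaceBoxes()                             # module-level global read by main_s
    main_s(args)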
Example #2
def main(args):
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    gpu_mode = args.mode == 'gpu'
    tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)

    # Initialize FaceBoxes
    face_boxes = FaceBoxes()

    # Given a video path
    fn = args.video_fp.split('/')[-1]
    reader = imageio.get_reader(args.video_fp)

    fps = reader.get_meta_data()['fps']

    suffix = get_suffix(args.video_fp)
    video_wfp = f'examples/results/videos/{fn.replace(suffix, "")}_{args.opt}.mp4'
    writer = imageio.get_writer(video_wfp, fps=fps)

    # run
    dense_flag = args.opt in ('3d',)
    pre_ver = None
    for i, frame in tqdm(enumerate(reader)):
        frame_bgr = frame[..., ::-1]  # RGB->BGR

        if i == 0:
            # first frame: detect faces; only the first face is used here, change as needed
            boxes = face_boxes(frame_bgr)
            boxes = [boxes[0]]
            param_lst, roi_box_lst = tddfa(frame_bgr, boxes)
            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]

            # refine
            param_lst, roi_box_lst = tddfa(frame_bgr, [ver], crop_policy='landmark')
            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]
        else:
            param_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver], crop_policy='landmark')

            roi_box = roi_box_lst[0]
            # TODO: add a confidence threshold to judge whether tracking has failed;
            # for now, a tiny tracked roi box triggers re-detection
            if abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] - roi_box[1]) < 2020:
                boxes = face_boxes(frame_bgr)
                boxes = [boxes[0]]
                param_lst, roi_box_lst = tddfa(frame_bgr, boxes)

            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]

        pre_ver = ver  # for tracking

        if args.opt == '2d':
            res = cv_draw_landmark(frame_bgr, ver)
        elif args.opt == '3d':
            res = render(frame_bgr, [ver])
        else:
            raise ValueError(f'Unknown opt {args.opt}')

        writer.append_data(res[..., ::-1])  # BGR->RGB

    writer.close()
    print(f'Dump to {video_wfp}')
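The per-frame logic above tries cheap landmark-based tracking first and falls back to full detection when the tracked roi box collapses. The fallback in isolation, as a sketch (the 2020 px² threshold is taken verbatim from the example; tddfa and face_boxes are the objects initialized above):

def roi_area(roi_box):
    """Area in pixels of an (x1, y1, x2, y2) roi box."""
    return abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] - roi_box[1])

def track_or_redetect(frame_bgr, pre_ver, tddfa, face_boxes, min_area=2020):
    # cheap path: crop around the previous frame's landmarks
    param_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver], crop_policy='landmark')
    if roi_area(roi_box_lst[0]) < min_area:  # tracking likely failed
        boxes = face_boxes(frame_bgr)        # expensive path: full detection
        param_lst, roi_box_lst = tddfa(frame_bgr, [boxes[0]])
    return param_lst, roi_box_lst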
Example #3
def main(args):
    # Init TDDFA
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    gpu_mode = args.mode == 'gpu'
    tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)

    # Init FaceBoxes
    face_boxes = FaceBoxes()

    # Given a still image path, load it in BGR channel order
    img = cv2.imread(args.img_fp)

    # Detect faces, get 3DMM params and roi boxes
    boxes = face_boxes(img)
    n = len(boxes)
    if n == 0:
        print('No face detected, exit')
        sys.exit(-1)
    print(f'Detected {n} faces')

    param_lst, roi_box_lst = tddfa(img, boxes)

    # Visualization and serialization
    dense_flag = args.opt in ('2d_dense', '3d', 'depth', 'pncc', 'uv_tex', 'ply', 'obj')
    old_suffix = get_suffix(args.img_fp)
    new_suffix = f'.{args.opt}' if args.opt in ('ply', 'obj') else '.jpg'

    wfp = f'examples/results/{args.img_fp.split("/")[-1].replace(old_suffix, "")}_{args.opt}' + new_suffix

    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)

    if args.opt in ('2d_sparse', '2d_dense'):
        draw_landmarks(img, ver_lst, show_flag=args.show_flag, dense_flag=dense_flag, wfp=wfp)
    elif args.opt == '3d':
        render(img, ver_lst, alpha=0.6, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'depth':
        # if `with_bg_flag` is False, the background is black
        depth(img, ver_lst, show_flag=args.show_flag, wfp=wfp, with_bg_flag=True)
    elif args.opt == 'pncc':
        pncc(img, ver_lst, show_flag=args.show_flag, wfp=wfp, with_bg_flag=True)
    elif args.opt == 'uv_tex':
        uv_tex(img, ver_lst, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'pose':
        viz_pose(img, param_lst, ver_lst, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'ply':
        ser_to_ply(ver_lst, height=img.shape[0], wfp=wfp)
    elif args.opt == 'obj':
        ser_to_obj(img, ver_lst, height=img.shape[0], wfp=wfp)
    else:
        raise ValueError(f'Unknown opt {args.opt}')
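The wfp construction above splits on '/' and strips the suffix with str.replace, which misfires if the suffix string also occurs earlier in the file name and breaks on Windows separators. A pathlib sketch that derives the same output name (result directory as in the examples):

from pathlib import Path

def make_output_path(img_fp, opt):
    stem = Path(img_fp).stem  # file name without its extension
    ext = f'.{opt}' if opt in ('ply', 'obj') else '.jpg'
    return Path('examples/results') / f'{stem}_{opt}{ext}'

# e.g. make_output_path('examples/inputs/emma.jpg', 'obj')
# -> examples/results/emma_obj.obj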
Example #4
def main(args):
    # Init TDDFA
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    gpu_mode = args.mode == 'gpu'
    tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)

    # Init FaceBoxes
    face_boxes = FaceBoxes()

    # Given a still image path, load it in BGR channel order
    img = cv2.imread(args.img_fp)

    # Detect faces, get 3DMM params and roi boxes
    boxes = face_boxes(img)
    n = len(boxes)
    if n == 0:
        print('No face detected, exit')
        sys.exit(-1)
    print(f'Detected {n} faces')

    param_lst, roi_box_lst = tddfa(img, boxes)

    # Visualization and serialization
    dense_flag = args.opt in (
        '2d_dense', '3d', 'depth'
    )  # these options need the dense vertices reconstructed
    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)

    suffix = get_suffix(args.img_fp)
    wfp = f'examples/results/{args.img_fp.split("/")[-1].replace(suffix, "")}_{args.opt}.jpg'

    if args.opt in ('2d_sparse', '2d_dense'):
        draw_landmarks(img,
                       ver_lst,
                       show_flag=args.show_flag,
                       dense_flag=dense_flag,
                       wfp=wfp)
    elif args.opt == '3d':
        render(img, ver_lst, alpha=0.6, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'depth':
        depth(img, ver_lst, show_flag=args.show_flag, wfp=wfp)
    else:
        raise ValueError(f'Unknown opt {args.opt}')
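dense_flag controls what recon_vers returns per face: the sparse 68-landmark set or the full dense mesh. A continuation sketch, assuming the objects from the example above are in scope (the dense vertex count, 38365 for the default BFM, may vary with the config):

ver_sparse = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=False)[0]
print(ver_sparse.shape)  # (3, 68): x/y/z rows, 68 landmark columns
ver_dense = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=True)[0]
print(ver_dense.shape)   # (3, 38365) with the default BFM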
Example #5
def get_yaw(img):
    parser = argparse.ArgumentParser(description='Still-image demo of 3DDFA_V2')
    parser.add_argument('-c', '--config', type=str, default='configs/mb1_120x120.yml')
    parser.add_argument('-i', '--img_fp', type=str, default='/home/wei/gy/code/3DDFA_V2/examples/results/72.png_448_ori.jpg')
    parser.add_argument('-r', '--img_ref', type=str, default='/home/wei/gy/code/3DDFA_V2/examples/results/268.png_448_ori.jpg')
    parser.add_argument('-m', '--mode', type=str, default='gpu', help='gpu or cpu mode')
    parser.add_argument('-o', '--opt', type=str, default='3d',
                        choices=['2d_sparse', '2d_dense', '3d', 'depth', 'pncc', 'uv_tex', 'pose', 'ply', 'obj'])
    parser.add_argument('--show_flag', type=str2bool, default='true', help='whether to show the visualization result')
    parser.add_argument('--onnx', action='store_true', default=False)

    args = parser.parse_args()


    global tddfa
    global face_boxes
    # Given a still image path and load to BGR channel
  #  img = cv2.imread(args.img_fp)

    # Detect faces, get 3DMM params and roi boxes

    boxes = face_boxes(img)
    n = len(boxes)
    if n == 0:
        print(f'No face detected, exit')
        return None
        #sys.exit(-1)
   # print(f'Detect {n} faces')
    param_lst, roi_box_lst = tddfa(img, boxes)   ##this 0.1
    # Visualization and serialization
    dense_flag = args.opt in ('2d_dense', '3d', 'depth', 'pncc', 'uv_tex', 'ply', 'obj')
    old_suffix = get_suffix(args.img_fp)
    new_suffix = f'.{args.opt}' if args.opt in ('ply', 'obj') else '.jpg'

    wfp = f'examples/results/{args.img_fp.split("/")[-1].replace(old_suffix, "")}_{args.opt}' + new_suffix

    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)
    # print(len(ver_lst),ver_lst[0].shape)

    if args.opt == '3d':
        pose = get_pose(img, param_lst, ver_lst, show_flag=args.show_flag, wfp=wfp)

        return pose
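get_pose is assumed to recover yaw/pitch/roll from the 3DMM pose parameters. For reference, a generic sketch of reading yaw off a 3x3 rotation matrix under one common Euler convention (3DDFA_V2's own angle conversion may differ in axis order and sign):

import numpy as np

def yaw_from_rotation(R):
    # rotation about the vertical axis, valid away from gimbal lock
    return np.degrees(np.arctan2(-R[2, 0], np.hypot(R[2, 1], R[2, 2])))

print(yaw_from_rotation(np.eye(3)))  # 0.0 for the identity rotation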
Example #6
def main(args):
    # Init TDDFA or TDDFA_ONNX
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    if args.onnx:
        from TDDFA_ONNX import TDDFA_ONNX
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)

    # Initialize FaceBoxes
    face_boxes = FaceBoxes()

    # Given a video path
    fn = args.video_fp.split('/')[-1]
    reader = imageio.get_reader(args.video_fp)

    fps = reader.get_meta_data()['fps']
    suffix = get_suffix(args.video_fp)
    video_wfp = f'examples/results/videos/{fn.replace(suffix, "")}_{args.opt}_smooth.mp4'
    writer = imageio.get_writer(video_wfp, fps=fps)

    # a simple average-smoothing scheme that looks ahead by n_next frames;
    # assumes the video has at least n = n_pre + n_next + 1 frames
    n_pre, n_next = args.n_pre, args.n_next
    n = n_pre + n_next + 1
    queue_ver = deque()
    queue_frame = deque()

    # run
    dense_flag = args.opt in (
        '2d_dense',
        '3d',
    )
    pre_ver = None
    for i, frame in tqdm(enumerate(reader)):
        if args.start > 0 and i < args.start:
            continue
        if args.end > 0 and i > args.end:
            break

        frame_bgr = frame[..., ::-1]  # RGB->BGR

        if i == 0:
            # detect
            boxes = face_boxes(frame_bgr)
            boxes = [boxes[0]]
            param_lst, roi_box_lst = tddfa(frame_bgr, boxes)
            ver = tddfa.recon_vers(param_lst,
                                   roi_box_lst,
                                   dense_flag=dense_flag)[0]

            # refine
            param_lst, roi_box_lst = tddfa(frame_bgr, [ver],
                                           crop_policy='landmark')
            ver = tddfa.recon_vers(param_lst,
                                   roi_box_lst,
                                   dense_flag=dense_flag)[0]

            # padding queue
            for _ in range(n_pre):
                queue_ver.append(ver.copy())
            queue_ver.append(ver.copy())

            for _ in range(n_pre):
                queue_frame.append(frame_bgr.copy())
            queue_frame.append(frame_bgr.copy())

        else:
            param_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver],
                                           crop_policy='landmark')

            roi_box = roi_box_lst[0]
            # TODO: add a confidence threshold to judge whether tracking has failed;
            # for now, a tiny tracked roi box triggers re-detection
            if abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] -
                                                  roi_box[1]) < 2020:
                boxes = face_boxes(frame_bgr)
                boxes = [boxes[0]]
                param_lst, roi_box_lst = tddfa(frame_bgr, boxes)

            ver = tddfa.recon_vers(param_lst,
                                   roi_box_lst,
                                   dense_flag=dense_flag)[0]

            queue_ver.append(ver.copy())
            queue_frame.append(frame_bgr.copy())

        pre_ver = ver  # for tracking

        # smoothing: enqueue and dequeue ops
        if len(queue_ver) >= n:
            ver_ave = np.mean(queue_ver, axis=0)

            if args.opt == '2d_sparse':
                img_draw = cv_draw_landmark(queue_frame[n_pre],
                                            ver_ave)  # since we use padding
            elif args.opt == '2d_dense':
                img_draw = cv_draw_landmark(queue_frame[n_pre],
                                            ver_ave,
                                            size=1)
            elif args.opt == '3d':
                img_draw = render(queue_frame[n_pre], [ver_ave],
                                  tddfa.bfm.tri,
                                  alpha=0.7)
            else:
                raise ValueError(f'Unknown opt {args.opt}')

            writer.append_data(img_draw[:, :, ::-1])  # BGR->RGB

            queue_ver.popleft()
            queue_frame.popleft()

    # without this flush we would lose the last n_next frames; keep padding with the final frame
    for _ in range(n_next):
        queue_ver.append(ver.copy())
        queue_frame.append(frame_bgr.copy())  # the last frame

        ver_ave = np.mean(queue_ver, axis=0)

        if args.opt == '2d_sparse':
            img_draw = cv_draw_landmark(queue_frame[n_pre],
                                        ver_ave)  # since we use padding
        elif args.opt == '2d_dense':
            img_draw = cv_draw_landmark(queue_frame[n_pre], ver_ave, size=1)
        elif args.opt == '3d':
            img_draw = render(queue_frame[n_pre], [ver_ave],
                              tddfa.bfm.tri,
                              alpha=0.7)
        else:
            raise ValueError(f'Unknown opt {args.opt}')

        writer.append_data(img_draw[..., ::-1])  # BGR->RGB

        queue_ver.popleft()
        queue_frame.popleft()

    writer.close()
    print(f'Dump to {video_wfp}')
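The queue logic above is a centered moving average: each written frame uses vertices averaged over n_pre past and n_next future frames, with copies of the first and last frames as padding at the ends. The scheme in isolation on dummy 1-D "vertices" (self-contained; n_pre = n_next = 1):

from collections import deque

import numpy as np

n_pre, n_next = 1, 1
n = n_pre + n_next + 1
queue_ver = deque()
stream = [np.array([float(t)]) for t in range(5)]  # stand-in per-frame vertices

smoothed = []
for i, ver in enumerate(stream):
    if i == 0:
        for _ in range(n_pre):            # pad so frame 0 sits at the window center
            queue_ver.append(ver.copy())
    queue_ver.append(ver.copy())
    if len(queue_ver) >= n:
        smoothed.append(np.mean(queue_ver, axis=0))
        queue_ver.popleft()

for _ in range(n_next):                   # flush: pad with copies of the last frame
    queue_ver.append(stream[-1].copy())
    smoothed.append(np.mean(queue_ver, axis=0))
    queue_ver.popleft()

print([s[0] for s in smoothed])           # [0.33.., 1.0, 2.0, 3.0, 3.66..], one per frame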
Example #7
def main(args):
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)

    # Init FaceBoxes and TDDFA; using the onnx flag is recommended
    if args.onnx:
        import os
        os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
        os.environ['OMP_NUM_THREADS'] = '4'

        from FaceBoxes.FaceBoxes_ONNX import FaceBoxes_ONNX
        from TDDFA_ONNX import TDDFA_ONNX

        face_boxes = FaceBoxes_ONNX()
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)
        face_boxes = FaceBoxes()

    # Given a still image path, load it in BGR channel order

    print(args.img_fp)

    if not os.path.isfile(args.img_fp):
        raise ValueError(
            '--face argument must be a valid path to video/image file')

    elif args.img_fp.split('.')[-1] in ['jpg', 'png', 'jpeg']:
        full_frames = [cv2.imread(args.img_fp)]

    else:
        video_stream = cv2.VideoCapture(args.img_fp)
        fps = video_stream.get(cv2.CAP_PROP_FPS)

        print('Reading video frames...')

        full_frames = []
        while True:
            still_reading, frame = video_stream.read()
            if not still_reading:
                video_stream.release()
                break

            full_frames.append(frame)
        print("num of frames : ", len(full_frames))

    for i, img in enumerate(full_frames):

        # Detect faces, get 3DMM params and roi boxes
        boxes = face_boxes(img)
        n = len(boxes)
        if n == 0:
            print('No face detected, exit')
            sys.exit(-1)
        print(f'Detected {n} faces')

        param_lst, roi_box_lst = tddfa(img, boxes)

        # Visualization and serialization
        dense_flag = args.opt in ('2d_dense', '3d', 'depth', 'pncc', 'uv_tex',
                                  'ply', 'obj')
        old_suffix = get_suffix(args.img_fp)
        new_suffix = f'.{args.opt}' if args.opt in ('ply', 'obj') else '.jpg'

        idx = str(i).zfill(4)  # zero-pad the frame index to 4 digits

        wfp = f'examples/results/obj/{args.img_fp.split("/")[-1].replace(old_suffix, "")}_{args.opt}' + idx + new_suffix

        ver_lst = tddfa.recon_vers(param_lst,
                                   roi_box_lst,
                                   dense_flag=dense_flag)

        if args.opt == 'obj':
            ser_to_obj(img, ver_lst, tddfa.tri, height=img.shape[0], wfp=wfp)
        # elif args.opt == '2d_sparse':
        #     draw_landmarks(img, ver_lst, show_flag=args.show_flag, dense_flag=dense_flag, wfp=wfp)
        # elif args.opt == '2d_dense':
        #     draw_landmarks(img, ver_lst, show_flag=args.show_flag, dense_flag=dense_flag, wfp=wfp)
        # elif args.opt == '3d':
        #     render(img, ver_lst, tddfa.tri, alpha=0.6, show_flag=args.show_flag, wfp=wfp)
        # elif args.opt == 'depth':
        #     # if `with_bf_flag` is False, the background is black
        #     depth(img, ver_lst, tddfa.tri, show_flag=args.show_flag, wfp=wfp, with_bg_flag=True)
        elif args.opt == 'pncc':
            pncc(img,
                 ver_lst,
                 tddfa.tri,
                 show_flag=args.show_flag,
                 wfp=wfp,
                 with_bg_flag=True)
        elif args.opt == 'uv_tex':
            uv_tex(img, ver_lst, tddfa.tri, show_flag=args.show_flag, wfp=wfp)
        # elif args.opt == 'pose':
        #     viz_pose(img, param_lst, ver_lst, show_flag=args.show_flag, wfp=wfp)
        # elif args.opt == 'ply':
        #     ser_to_ply(ver_lst, tddfa.tri, height=img.shape[0], wfp=wfp)
        else:
            raise ValueError(f'Unknown opt {args.opt}')
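Collecting every decoded frame in full_frames can exhaust memory on long videos. A lazy-generator sketch using the same cv2 calls (assumption: frames can be processed one at a time, as the loop above already does):

import cv2

def iter_frames(path):
    cap = cv2.VideoCapture(path)
    try:
        while True:
            still_reading, frame = cap.read()
            if not still_reading:  # end of stream or decode error
                break
            yield frame            # BGR, as OpenCV decodes it
    finally:
        cap.release()

# usage: for i, img in enumerate(iter_frames(args.img_fp)): ...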
Example #8
def main(args):
    # Init TDDFA or TDDFA_ONNX
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    if args.onnx:
        from TDDFA_ONNX import TDDFA_ONNX
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)

    # Initialize FaceBoxes
    face_boxes = FaceBoxes()

    # Given a video path
    fn = args.video_fp.split('/')[-1]
    reader = imageio.get_reader(args.video_fp)

    fps = reader.get_meta_data()['fps']
    dense_flag = args.opt in ('3d', )
    nick_orig = cv2.imread('Assets4FacePaper/nick.bmp')
    tv_neutral, tc_neutral, tv_set, tc_set, nick_param = load_vertices_set(
        'Assets4FacePaper', [
            'nick.bmp', 'nick_crop_1.jpg', 'nick_crop_2.jpg',
            'nick_crop_3.jpg', 'nick_crop_4.jpg', 'nick_crop_5.jpg',
            'nick_crop_6.png'
        ], face_boxes, tddfa)
    sv_neutral, sc_neutral, sv_set, sc_set, thanh_param = load_vertices_set(
        'Assets4FacePaper', [
            'Thanh.jpg', 'Thanh1.jpg', 'Thanh2.jpg', 'Thanh3.jpg',
            'Thanh4.jpg', 'Thanh5.jpg', 'Thanh6.jpg'
        ], face_boxes, tddfa)

    _, _, nick_alpha_shp, nick_alpha_exp = _parse_param(nick_param)
    nick_act_masks = calc_activation_masks(tv_neutral, tv_set)
    delta_set_nick = get_delta_vertices_set(tv_neutral, tv_set)

    _, _, thanh_alpha_shp, thanh_alpha_exp = _parse_param(thanh_param)
    thanh_act_masks = calc_activation_masks(sv_neutral, sv_set)
    delta_set_thanh = get_delta_vertices_set(sv_neutral, sv_set)

    nick_ver = tc_neutral
    nick_color = tc_neutral

    suffix = get_suffix(args.video_fp)
    video_wfp = f'examples/results/videos/{fn.replace(suffix, "")}_{args.opt}.mp4'

    N = tv_neutral.size(0)  # vertex count (tv_neutral is N x 3)
    E = tv_set.size(0)      # number of expression exemplars
    # run
    pre_ver = None
    total_timer = Timer()
    optimizing_timer = Timer()
    for i, frame in tqdm(enumerate(reader)):
        total_timer.tic()
        frame_bgr = frame[..., ::-1]  # RGB->BGR

        if i == 0:
            # first frame: detect faces; only the first face is used here, change as needed
            boxes = face_boxes(frame_bgr)
            boxes = [boxes[0]]
            param_lst, roi_box_lst = tddfa(frame_bgr, boxes)
            ver = tddfa.recon_vers(param_lst,
                                   roi_box_lst,
                                   dense_flag=dense_flag)[0]

            # refine
            param_lst, roi_box_lst = tddfa(frame_bgr, [ver],
                                           crop_policy='landmark')
            R, offset, alpha_shp, alpha_exp = _parse_param(param_lst[0])
            roi_box = roi_box_lst[0]

            ver = recon_dense_explicit(R, offset, nick_alpha_shp, alpha_exp,
                                       roi_box, tddfa.size)
            ver_base = recon_dense_base(nick_alpha_shp, alpha_exp)
        else:
            param_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver],
                                           crop_policy='landmark')

            roi_box = roi_box_lst[0]
            # TODO: add a confidence threshold to judge whether tracking has failed;
            # for now, a tiny tracked roi box triggers re-detection
            if abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] -
                                                  roi_box[1]) < 2020:
                boxes = face_boxes(frame_bgr)
                boxes = [boxes[0]]
                param_lst, roi_box_lst = tddfa(frame_bgr, boxes)

            R, offset, alpha_shp, alpha_exp = _parse_param(param_lst[0])
            ver_base = recon_dense_base(nick_alpha_shp, alpha_exp)
            ver = recon_dense_explicit(R, offset, nick_alpha_shp, alpha_exp,
                                       roi_box, tddfa.size)

        # Write object
        pre_ver = ver  # for tracking
        ver = torch.from_numpy(ver_base.transpose()).float().to(device)
        optimizing_timer.tic()
        a_set = fit_coeffs_aio(ver, tv_neutral, delta_set_nick, 100)
        print(a_set)

        tver = torch.matmul(delta_set_nick.view(E, -1).transpose(0, 1),
                            a_set).view(N, 3) + tv_neutral
        tver = transform_vertices(R, offset,
                                  tver.cpu().numpy().transpose(), roi_box,
                                  tddfa.size)  # 3 N
        cver = calc_colors_w_act_mask(a_set, nick_act_masks, tc_neutral,
                                      tc_set).cpu().numpy()
        optimizing_time = optimizing_timer.toc()
        execution_time = total_timer.toc()
        print("Execution time: {}, Total time: {}".format(
            optimizing_time, execution_time))
        '''
        tver = torch.matmul(delta_set_thanh.view(E, -1).transpose(0, 1), a_set).view(
            N, 3) + sv_neutral
        tver = transform_vertices(R, offset, tver.numpy().transpose(),
                                  roi_box, tddfa.size)         # 3 N
        cver = calc_colors_w_act_mask(a_set, thanh_act_masks, sc_neutral,
                                      sc_set).numpy()

        print(cver.shape)
        '''
        ser_to_obj_multiple_colors(cver, [tver],
                                   height=int(nick_orig.shape[0] * 1.5),
                                   wfp='nick_' + str(i) + '.obj')
        print(f'Dump to nick_{i}.obj')
        if i > 1000:
            break
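The expression transfer above hinges on the 3DMM's linear split, vertices = mean + shape_basis · alpha_shp + expression_basis · alpha_exp: keeping the target's alpha_shp fixed while streaming alpha_exp from the driving video re-animates the target face. A toy sketch with dummy bases (3DDFA_V2's default uses 40 shape and 10 expression dimensions; everything below is random stand-in data):

import numpy as np

n_ver = 10                              # stand-in for the dense vertex count
u = np.zeros((3 * n_ver, 1))            # mean shape
w_shp = np.random.randn(3 * n_ver, 40)  # identity (shape) basis
w_exp = np.random.randn(3 * n_ver, 10)  # expression basis

def recon(alpha_shp, alpha_exp):
    ver = u + w_shp @ alpha_shp + w_exp @ alpha_exp
    return ver.reshape(-1, 3).T         # 3 x N, like recon_vers

target_id = np.random.randn(40, 1)      # fixed: fitted once to the target photo
driving_exp = np.random.randn(10, 1)    # refreshed every driving-video frame
print(recon(target_id, driving_exp).shape)  # (3, 10)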
Example #9
def main(args):
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)

    # Init FaceBoxes and TDDFA; using the onnx flag is recommended
    if args.onnx:
        import os
        os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
        os.environ['OMP_NUM_THREADS'] = '4'

        from FaceBoxes.FaceBoxes_ONNX import FaceBoxes_ONNX
        from TDDFA_ONNX import TDDFA_ONNX

        face_boxes = FaceBoxes_ONNX()
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)
        face_boxes = FaceBoxes()

    # Given a still image path, load it in BGR channel order
    img = cv2.imread(args.img_fp)

    # Detect faces, get 3DMM params and roi boxes
    boxes = face_boxes(img)
    n = len(boxes)
    if n == 0:
        print('No face detected, exit')
        sys.exit(-1)
    print(f'Detected {n} faces')

    param_lst, roi_box_lst = tddfa(img, boxes)

    # Visualization and serialization
    dense_flag = args.opt in ('2d_dense', '3d', 'depth', 'pncc', 'uv_tex',
                              'ply', 'obj')
    old_suffix = get_suffix(args.img_fp)
    new_suffix = f'.{args.opt}' if args.opt in ('ply', 'obj') else '.jpg'

    wfp = f'{args.img_fp.split("/")[-1].replace(old_suffix, "")}_{args.opt}' +\
        new_suffix
    wfp = base_path / 'examples' / 'results' / wfp

    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)

    if args.opt in ('2d_sparse', '2d_dense'):
        draw_landmarks(img,
                       ver_lst,
                       show_flag=args.show_flag,
                       dense_flag=dense_flag,
                       wfp=wfp)
    elif args.opt == '3d':
        render(img,
               ver_lst,
               tddfa.tri,
               alpha=0.6,
               show_flag=args.show_flag,
               wfp=wfp)
    elif args.opt == 'depth':
        # if `with_bg_flag` is False, the background is black
        depth(img,
              ver_lst,
              tddfa.tri,
              show_flag=args.show_flag,
              wfp=wfp,
              with_bg_flag=True)
    elif args.opt == 'pncc':
        pncc(img,
             ver_lst,
             tddfa.tri,
             show_flag=args.show_flag,
             wfp=wfp,
             with_bg_flag=True)
    elif args.opt == 'uv_tex':
        uv_tex(img, ver_lst, tddfa.tri, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'pose':
        viz_pose(img, param_lst, ver_lst, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'ply':
        ser_to_ply(ver_lst, tddfa.tri, height=img.shape[0], wfp=wfp)
    elif args.opt == 'obj':
        ser_to_obj(img, ver_lst, tddfa.tri, height=img.shape[0], wfp=wfp)
    else:
        raise ValueError(f'Unknown opt {args.opt}')
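The if/elif ladder on args.opt recurs in every still-image example here. A table-driven sketch of the same dispatch (same helper functions assumed in scope; the lambdas capture the locals computed above):

handlers = {
    '2d_sparse': lambda: draw_landmarks(img, ver_lst, show_flag=args.show_flag,
                                        dense_flag=dense_flag, wfp=wfp),
    '3d': lambda: render(img, ver_lst, tddfa.tri, alpha=0.6,
                         show_flag=args.show_flag, wfp=wfp),
    'pose': lambda: viz_pose(img, param_lst, ver_lst,
                             show_flag=args.show_flag, wfp=wfp),
    # ... one entry per remaining opt ...
}
if args.opt not in handlers:
    raise ValueError(f'Unknown opt {args.opt}')
handlers[args.opt]()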
Example #10
def main(args):
    # Init TDDFA or TDDFA_ONNX
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)
    if args.onnx:
        from TDDFA_ONNX import TDDFA_ONNX
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)

    # Initialize FaceBoxes
    face_boxes = FaceBoxes()

    # Given a video path
    fn = args.video_fp.split('/')[-1]
    reader = imageio.get_reader(args.video_fp)

    fps = reader.get_meta_data()['fps']
    dense_flag = args.opt in ('3d',)
    nick_orig = cv2.imread('Assets4FacePaper/nick.bmp')
    tv_neutral, tc_neutral, tv_set, tc_set, nick_param = load_vertices_set(
        'Assets4FacePaper',
        ['sam_0_0.jpg', 'sam_0_1.jpg', 'sam_0_2.jpg',
         'sam_0_3.jpg', 'sam_0_4.jpg', 'sam_0_5.jpg',
         'sam_0_6.jpg'], face_boxes, tddfa
    )
    sv_neutral, sc_neutral, sv_set, sc_set, thanh_param = load_vertices_set(
        'Assets4FacePaper',
        ['Thanh.jpg', 'Thanh1.jpg', 'Thanh2.jpg',
         'Thanh3.jpg', 'Thanh4.jpg', 'Thanh5.jpg',
         'Thanh6.jpg'], face_boxes, tddfa
    )

    _, _, nick_alpha_shp, nick_alpha_exp = _parse_param(nick_param)
    nick_act_masks = calc_activation_masks(tv_neutral, tv_set)
    delta_set_nick = get_delta_vertices_set(tv_neutral, tv_set)

    _, _, thanh_alpha_shp, thanh_alpha_exp = _parse_param(thanh_param)
    thanh_act_masks = calc_activation_masks(sv_neutral, sv_set)
    delta_set_thanh = get_delta_vertices_set(sv_neutral, sv_set)

    nick_ver = tc_neutral
    nick_color = tc_neutral

    suffix = get_suffix(args.video_fp)
    video_wfp = f'examples/results/videos/{fn.replace(suffix, "")}_{args.opt}.mp4'

    # run
    pre_ver = None
    for i, frame in tqdm(enumerate(reader)):
        frame_bgr = frame[..., ::-1]  # RGB->BGR

        if i == 0:
            # first frame: detect faces; only the first face is used here, change as needed
            boxes = face_boxes(frame_bgr)
            boxes = [boxes[0]]
            param_lst, roi_box_lst = tddfa(frame_bgr, boxes)
            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]

            # refine
            param_lst, roi_box_lst = tddfa(frame_bgr, [ver],
                                           crop_policy='landmark')
            R, offset, _, alpha_exp = _parse_param(param_lst[0])
            ver = recon_dense_explicit(R, offset, nick_alpha_shp, alpha_exp,
                                       roi_box_lst[0], tddfa.size)

        else:
            param_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver],
                                           crop_policy='landmark')

            roi_box = roi_box_lst[0]
            # TODO: add a confidence threshold to judge whether tracking has failed;
            # for now, a tiny tracked roi box triggers re-detection
            if abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] - roi_box[1]) < 2020:
                boxes = face_boxes(frame_bgr)
                boxes = [boxes[0]]
                param_lst, roi_box_lst = tddfa(frame_bgr, boxes)

            R, offset, _, alpha_exp = _parse_param(param_lst[0])
            ver = recon_dense_explicit(R, offset, nick_alpha_shp, alpha_exp,
                                       roi_box_lst[0], tddfa.size)


        # Write object
        pre_ver = ver  # for tracking

        ser_to_obj_multiple_colors(nick_color, [ver],
                                   height=int(nick_orig.shape[0]*1.5),
                                   wfp='./sam_exp_only_obj/' + str(i) + '.obj')
        print(f'Dump to sam_exp_only_obj/{i}.obj')
        if i > 1000:
            break
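One nit on the per-frame dump above: names like 10.obj sort before 2.obj lexicographically, which confuses tools that glob the output directory. Zero-padding the index keeps the files in frame order (a one-line sketch; the directory is assumed to exist):

wfp = f'./sam_exp_only_obj/{i:05d}.obj'  # 00000.obj, 00001.obj, ...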
Example #11
def main(args):
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)

    # Init FaceBoxes and TDDFA; using the onnx flag is recommended
    if args.onnx:
        import os
        os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
        os.environ['OMP_NUM_THREADS'] = '4'

        from FaceBoxes.FaceBoxes_ONNX import FaceBoxes_ONNX
        from TDDFA_ONNX import TDDFA_ONNX

        face_boxes = FaceBoxes_ONNX()
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)
        face_boxes = FaceBoxes()

    # Given a still image path, load it in BGR channel order
    img = cv2.imread(args.img_fp)

    # Detect faces, get 3DMM params and roi boxes
    boxes = face_boxes(img)
    n = len(boxes)
    if n == 0:
        print('No face detected, exit')
        sys.exit(-1)
    print(f'Detected {n} faces')

    param_lst, roi_box_lst = tddfa(img, boxes)

    # Visualization and serialization
    dense_flag = args.opt in ('2d_dense', '3d', 'depth', 'pncc', 'uv_tex',
                              'ply', 'obj')
    old_suffix = get_suffix(args.img_fp)
    new_suffix = f'.{args.opt}' if args.opt in ('ply', 'obj') else '.jpg'

    wfp = f'examples/results/{args.img_fp.split("/")[-1].replace(old_suffix, "")}_{args.opt}' + new_suffix

    ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)

    print(ver_lst[0].shape)

    print(tddfa.bfm.u.shape)

    lm68 = np.reshape(
        np.reshape(ver_lst[0].T, (-1, 1))[tddfa.bfm.keypoints], (-1, 3))
    print(lm68.shape)

    # flip the y axis: image coordinates run top-down, mesh viewers expect bottom-up
    lm68[:, 1] = img.shape[0] - lm68[:, 1]
    ver_lst[0][1, :] = img.shape[0] - ver_lst[0][1, :]

    # reverse the triangle winding order and convert to 1-based indices
    useful_tri = tddfa.tri[:, ::-1] + 1

    np.save("asd_lm.npy", lm68)
    np.save("asd_v.npy", ver_lst[0].T)
    np.save("asd_f.npy", useful_tri)

    if args.opt in ('2d_sparse', '2d_dense'):
        draw_landmarks(img,
                       ver_lst,
                       show_flag=args.show_flag,
                       dense_flag=dense_flag,
                       wfp=wfp)
    elif args.opt == '3d':
        render(img,
               ver_lst,
               tddfa.tri,
               alpha=0.6,
               show_flag=args.show_flag,
               wfp=wfp)
    elif args.opt == 'depth':
        # if `with_bg_flag` is False, the background is black
        depth(img,
              ver_lst,
              tddfa.tri,
              show_flag=args.show_flag,
              wfp=wfp,
              with_bg_flag=True)
    elif args.opt == 'pncc':
        pncc(img,
             ver_lst,
             tddfa.tri,
             show_flag=args.show_flag,
             wfp=wfp,
             with_bg_flag=True)
    elif args.opt == 'uv_tex':
        uv_tex(img, ver_lst, tddfa.tri, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'pose':
        viz_pose(img, param_lst, ver_lst, show_flag=args.show_flag, wfp=wfp)
    elif args.opt == 'ply':
        ser_to_ply(ver_lst, tddfa.tri, height=img.shape[0], wfp=wfp)
    elif args.opt == 'obj':
        ser_to_obj(img, ver_lst, tddfa.tri, height=img.shape[0], wfp=wfp)
    else:
        raise ValueError(f'Unknown opt {args.opt}')
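Why Example #11 flips y and reverses the triangle winding before saving: image coordinates put the origin at the top-left with y pointing down, while most mesh formats expect a bottom-left origin and counter-clockwise front faces, and OBJ indices are 1-based. Both fixes on dummy data, self-contained:

import numpy as np

img_h = 480
ver = np.array([[10.0, 20.0, 30.0],   # x
                [ 5.0, 15.0, 25.0],   # y, image convention (down is +)
                [ 1.0,  2.0,  3.0]])  # z
tri = np.array([[0, 1, 2]])           # one triangle, 0-based indices

ver[1] = img_h - ver[1]               # flip y to a bottom-left origin
tri_out = tri[:, ::-1] + 1            # reverse winding, 1-based for OBJ
print(ver[1])                         # [475. 465. 455.]
print(tri_out)                        # [[3 2 1]]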