Code Example #1
def main(args):
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)

    # load test images
    testdata = datasets.TestData(args.inputpath,
                                 iscrop=args.iscrop,
                                 face_detector=args.detector)

    # run DECA
    deca_cfg.model.use_tex = args.useTex
    deca = DECA(config=deca_cfg, device=device)
    # for i in range(len(testdata)):
    for i in tqdm(range(len(testdata))):
        name = testdata[i]['imagename']
        images = testdata[i]['image'].to(device)[None, ...]
        codedict = deca.encode(images)
        opdict, visdict = deca.decode(codedict)  #tensor
        if args.saveDepth or args.saveKpt or args.saveObj or args.saveMat or args.saveImages:
            os.makedirs(os.path.join(savefolder, name), exist_ok=True)
        # -- save results
        if args.saveDepth:
            depth_image = deca.render.render_depth(
                opdict['transformed_vertices']).repeat(1, 3, 1, 1)
            visdict['depth_images'] = depth_image
            cv2.imwrite(os.path.join(savefolder, name, name + '_depth.jpg'),
                        util.tensor2image(depth_image[0]))
        if args.saveKpt:
            np.savetxt(os.path.join(savefolder, name, name + '_kpt2d.txt'),
                       opdict['landmarks2d'][0].cpu().numpy())
            np.savetxt(os.path.join(savefolder, name, name + '_kpt3d.txt'),
                       opdict['landmarks3d'][0].cpu().numpy())
        if args.saveObj:
            deca.save_obj(os.path.join(savefolder, name, name + '.obj'),
                          opdict)
        if args.saveMat:
            opdict = util.dict_tensor2npy(opdict)
            savemat(os.path.join(savefolder, name, name + '.mat'), opdict)
        if args.saveVis:
            cv2.imwrite(os.path.join(savefolder, 'vis_' + name + '.jpg'),
                        deca.visualize(visdict))
            cv2.imwrite(
                os.path.join(savefolder, 'detailed_images_' + name + '.jpg'),
                deca.visualize_detailed_image(visdict))

        if args.saveImages:
            for vis_name in [
                    'inputs', 'rendered_images', 'albedo_images',
                    'shape_images', 'shape_detail_images'
            ]:
                if vis_name not in visdict.keys():
                    continue
                image = util.tensor2image(visdict[vis_name][0])
                cv2.imwrite(
                    os.path.join(savefolder, name,
                                 name + '_' + vis_name + '.jpg'), image)
    print(f'-- please check the results in {savefolder}')
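Usage note for Code Example #1: the function only reads attributes off args, so it can be driven by a small argparse wrapper. The sketch below is a hypothetical driver, not the project's actual CLI; the flag names mirror the attributes used above, but the defaults and the str2bool helper are assumptions.

# Hypothetical driver for main(args) above; flag names mirror the attributes
# the function reads, defaults are assumptions.
import argparse

def str2bool(v):
    # minimal boolean flag parser (assumption; the real demo may differ)
    return str(v).lower() in ('true', '1', 'yes')

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DECA reconstruction demo (sketch)')
    parser.add_argument('-i', '--inputpath', default='TestSamples/examples')
    parser.add_argument('-s', '--savefolder', default='TestSamples/examples/results')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--iscrop', default=True, type=str2bool)
    parser.add_argument('--detector', default='fan')
    parser.add_argument('--useTex', default=False, type=str2bool)
    for flag in ['saveDepth', 'saveKpt', 'saveObj', 'saveMat', 'saveImages']:
        parser.add_argument('--' + flag, default=False, type=str2bool)
    parser.add_argument('--saveVis', default=True, type=str2bool)
    main(parser.parse_args())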
Code Example #2
def main(args):
    # if args.rasterizer_type != 'standard':
    #     args.render_orig = False
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)

    # load test images 
    testdata = datasets.TestData(args.inputpath, iscrop=args.iscrop, face_detector=args.detector, sample_step=args.sample_step)

    # run DECA
    deca_cfg.model.use_tex = args.useTex
    deca_cfg.rasterizer_type = args.rasterizer_type
    deca_cfg.model.extract_tex = args.extractTex
    deca = DECA(config = deca_cfg, device=device)
    # for i in range(len(testdata)):
    for i in tqdm(range(len(testdata))):
        name = testdata[i]['imagename']
        images = testdata[i]['image'].to(device)[None,...]
        with torch.no_grad():
            codedict = deca.encode(images)
            opdict, visdict = deca.decode(codedict) #tensor
            if args.render_orig:
                tform = testdata[i]['tform'][None, ...]
                tform = torch.inverse(tform).transpose(1,2).to(device)
                original_image = testdata[i]['original_image'][None, ...].to(device)
                _, orig_visdict = deca.decode(codedict, render_orig=True, original_image=original_image, tform=tform)    
                orig_visdict['inputs'] = original_image            

        if args.saveDepth or args.saveKpt or args.saveObj or args.saveMat or args.saveImages:
            os.makedirs(os.path.join(savefolder, name), exist_ok=True)
        # -- save results
        if args.saveDepth:
            depth_image = deca.render.render_depth(opdict['trans_verts']).repeat(1,3,1,1)
            visdict['depth_images'] = depth_image
            cv2.imwrite(os.path.join(savefolder, name, name + '_depth.jpg'), util.tensor2image(depth_image[0]))
        if args.saveKpt:
            np.savetxt(os.path.join(savefolder, name, name + '_kpt2d.txt'), opdict['landmarks2d'][0].cpu().numpy())
            np.savetxt(os.path.join(savefolder, name, name + '_kpt3d.txt'), opdict['landmarks3d'][0].cpu().numpy())
        if args.saveObj:
            deca.save_obj(os.path.join(savefolder, name, name + '.obj'), opdict)
        if args.saveMat:
            opdict = util.dict_tensor2npy(opdict)
            savemat(os.path.join(savefolder, name, name + '.mat'), opdict)
        if args.saveVis:
            cv2.imwrite(os.path.join(savefolder, name + '_vis.jpg'), deca.visualize(visdict))
            if args.render_orig:
                cv2.imwrite(os.path.join(savefolder, name + '_vis_original_size.jpg'), deca.visualize(orig_visdict))
        if args.saveImages:
            for vis_name in ['inputs', 'rendered_images', 'albedo_images', 'shape_images', 'shape_detail_images', 'landmarks2d']:
                if vis_name not in visdict.keys():
                    continue
                image = util.tensor2image(visdict[vis_name][0])
                cv2.imwrite(os.path.join(savefolder, name, name + '_' + vis_name + '.jpg'), image)
                if args.render_orig:
                    image = util.tensor2image(orig_visdict[vis_name][0])
                    cv2.imwrite(os.path.join(savefolder, name, 'orig_' + name + '_' + vis_name + '.jpg'), image)
    print(f'-- please check the results in {savefolder}')
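Note on the render_orig branch above: testdata[i]['tform'] is consumed as a 3x3 crop-to-original similarity transform that the demo inverts and transposes before passing it to deca.decode. The snippet below is a hedged sketch of how such a matrix could be estimated with scikit-image; how the dataset actually builds it is an assumption, only the tensor shape and the invert/transpose step are taken from the code above.

# Sketch: estimating a crop similarity transform compatible with the
# invert/transpose usage above (the dataset's real implementation may differ).
import numpy as np
import torch
from skimage.transform import estimate_transform

def build_crop_tform(src_pts, crop_size=224):
    # src_pts: (3, 2) corner points of the detected face box in the original image
    dst_pts = np.array([[0, 0], [0, crop_size - 1], [crop_size - 1, 0]], dtype=np.float32)
    tform = estimate_transform('similarity', src_pts, dst_pts)
    return torch.tensor(tform.params, dtype=torch.float32)  # 3x3 matrix

# usage mirroring the demo: batch, invert, transpose, move to device
# tform = build_crop_tform(src_pts)[None, ...]
# tform = torch.inverse(tform).transpose(1, 2).to(device)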
Code Example #3
File: demo_transfer.py (Project: qiaone/DECA)
def main(args):
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)

    # load test images
    testdata = datasets.TestData(args.image_path,
                                 iscrop=args.iscrop,
                                 face_detector=args.detector)
    expdata = datasets.TestData(args.exp_path,
                                iscrop=args.iscrop,
                                face_detector=args.detector)

    # run DECA
    i = 0
    deca = DECA(device=device)
    name = testdata[i]['imagename']
    savepath = '{}/{}.jpg'.format(savefolder, name)
    images = testdata[i]['image'].to(device)[None, ...]
    codedict = deca.encode(images)
    _, visdict = deca.decode(codedict)
    visdict = {x: visdict[x] for x in ['inputs', 'shape_detail_images']}

    # -- expression transfer
    # exp code from image
    exp_images = expdata[i]['image'].to(device)[None, ...]
    exp_codedict = deca.encode(exp_images)
    # transfer exp code
    codedict['pose'][:, 3:] = exp_codedict['pose'][:, 3:]
    codedict['exp'] = exp_codedict['exp']
    _, exp_visdict = deca.decode(codedict)
    visdict['transferred_shape'] = exp_visdict['shape_detail_images']
    cv2.imwrite(os.path.join(savefolder, name + '_animation.jpg'),
                deca.visualize(visdict))

    print(f'-- please check the results in {savefolder}')
Code Example #4
File: demo_transfer.py (Project: wuxiaolianggit/DECA)
def main(args):
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)

    # load test images 
    testdata = datasets.TestData(args.image_path, iscrop=args.iscrop, face_detector=args.detector)
    expdata = datasets.TestData(args.exp_path, iscrop=args.iscrop, face_detector=args.detector)

    # run DECA
    deca_cfg.model.use_tex = args.useTex
    deca = DECA(config = deca_cfg, device=device)
    # identity reference
    i = 0
    name = testdata[i]['imagename']
    savepath = '{}/{}.jpg'.format(savefolder, name)
    images = testdata[i]['image'].to(device)[None,...]
    id_codedict = deca.encode(images)
    id_opdict, id_visdict = deca.decode(id_codedict)
    id_visdict = {x:id_visdict[x] for x in ['inputs', 'shape_detail_images']}   

    # -- expression transfer
    # exp code from image
    exp_images = expdata[i]['image'].to(device)[None,...]
    exp_codedict = deca.encode(exp_images)
    # transfer exp code
    id_codedict['pose'][:,3:] = exp_codedict['pose'][:,3:]
    id_codedict['exp'] = exp_codedict['exp']
    transfer_opdict, transfer_visdict = deca.decode(id_codedict)
    id_visdict['transferred_shape'] = transfer_visdict['shape_detail_images']
    cv2.imwrite(os.path.join(savefolder, name + '_animation.jpg'), deca.visualize(id_visdict))

    transfer_opdict['uv_texture_gt'] = id_opdict['uv_texture_gt']
    if args.saveDepth or args.saveKpt or args.saveObj or args.saveMat or args.saveImages:
        os.makedirs(os.path.join(savefolder, name, 'reconstruction'), exist_ok=True)
        os.makedirs(os.path.join(savefolder, name, 'animation'), exist_ok=True)
    
    # -- save results
    image_name = name
    for save_type in ['reconstruction', 'animation']:
        if save_type == 'reconstruction':
            visdict = id_visdict; opdict = id_opdict
        else:
            visdict = transfer_visdict; opdict = transfer_opdict
        if args.saveDepth:
            depth_image = deca.render.render_depth(opdict['transformed_vertices']).repeat(1,3,1,1)
            visdict['depth_images'] = depth_image
            cv2.imwrite(os.path.join(savefolder, name, save_type, name + '_depth.jpg'), util.tensor2image(depth_image[0]))
        if args.saveKpt:
            np.savetxt(os.path.join(savefolder, name, save_type, name + '_kpt2d.txt'), opdict['landmarks2d'][0].cpu().numpy())
            np.savetxt(os.path.join(savefolder, name, save_type, name + '_kpt3d.txt'), opdict['landmarks3d'][0].cpu().numpy())
        if args.saveObj:
            deca.save_obj(os.path.join(savefolder, name, save_type, name + '.obj'), opdict)
        if args.saveMat:
            opdict = util.dict_tensor2npy(opdict)
            savemat(os.path.join(savefolder, name, save_type, name + '.mat'), opdict)
        if args.saveImages:
            for vis_name in ['inputs', 'rendered_images', 'albedo_images', 'shape_images', 'shape_detail_images']:
                if vis_name not in visdict.keys():
                    continue
                image = util.tensor2image(visdict[vis_name][0])
                cv2.imwrite(os.path.join(savefolder, name, save_type, name + '_' + vis_name + '.jpg'), image)
    print(f'-- please check the results in {savefolder}')
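Both transfer demos (#3 and #4) swap expression by copying two fields of the FLAME code dictionary: 'exp' (expression coefficients) and the jaw part of 'pose' (columns 3 onward), while the identity keeps its shape, texture and global rotation (pose columns 0 to 2). A small hypothetical helper that isolates that step:

# Hypothetical helper: copy expression and jaw pose from one code dict into
# another, leaving identity, camera and global pose untouched.
import torch

def transfer_expression(id_codedict, exp_codedict):
    out = {k: (v.clone() if torch.is_tensor(v) else v) for k, v in id_codedict.items()}
    out['exp'] = exp_codedict['exp']                  # expression coefficients
    out['pose'][:, 3:] = exp_codedict['pose'][:, 3:]  # jaw pose only
    return out

# usage (mirrors the demos): transfer_codedict = transfer_expression(id_codedict, exp_codedict)
#                            _, transfer_visdict = deca.decode(transfer_codedict)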
Code Example #5
File: demo_teaser.py (Project: vedanthpadigelwar/DECA)
def main(args):
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)

    # load test images
    testdata = datasets.TestData(args.inputpath,
                                 iscrop=args.iscrop,
                                 face_detector=args.detector)
    expdata = datasets.TestData(args.exp_path,
                                iscrop=args.iscrop,
                                face_detector=args.detector)
    # DECA
    deca_cfg.rasterizer_type = args.rasterizer_type
    deca = DECA(config=deca_cfg, device=device)

    visdict_list_list = []
    for i in range(len(testdata)):
        name = testdata[i]['imagename']
        images = testdata[i]['image'].to(device)[None, ...]
        with torch.no_grad():
            codedict = deca.encode(images)
            opdict, visdict = deca.decode(codedict)  #tensor
        ### show shape with different views and expressions
        visdict_list = []
        max_yaw = 30
        yaw_list = list(range(0, max_yaw, 5)) + list(
            range(max_yaw, -max_yaw, -5)) + list(range(-max_yaw, 0, 5))
        for k in yaw_list:  # yaw angle sweep: 0 -> +30 -> -30 -> 0 degrees
            ## yaw angle
            euler_pose = torch.randn((1, 3))
            euler_pose[:, 1] = k
            euler_pose[:, 0] = 0
            euler_pose[:, 2] = 0
            global_pose = batch_euler2axis(deg2rad(euler_pose[:, :3].cuda()))
            codedict['pose'][:, :3] = global_pose
            codedict['cam'][:, :] = 0.
            codedict['cam'][:, 0] = 8
            _, visdict_view = deca.decode(codedict)
            visdict = {
                x: visdict[x]
                for x in ['inputs', 'shape_detail_images']
            }
            visdict['pose'] = visdict_view['shape_detail_images']
            visdict_list.append(visdict)

        euler_pose = torch.zeros((1, 3))
        global_pose = batch_euler2axis(deg2rad(euler_pose[:, :3].cuda()))
        codedict['pose'][:, :3] = global_pose
        for (i, k) in enumerate(range(0, 31, 2)):  # jaw angle from 0 to 30 degrees
            # expression: jaw pose
            euler_pose = torch.randn((1, 3))
            euler_pose[:, 0] = k
            euler_pose[:, 1] = 0
            euler_pose[:, 2] = 0
            jaw_pose = batch_euler2axis(deg2rad(euler_pose[:, :3].cuda()))
            codedict['pose'][:, 3:] = jaw_pose
            _, visdict_view = deca.decode(codedict)
            visdict_list[i]['exp'] = visdict_view['shape_detail_images']
            count = i

        for (i, k) in enumerate(range(len(expdata))):
            # expression transfer: copy exp and jaw pose from each expression image
            exp_images = expdata[i]['image'].to(device)[None, ...]
            exp_codedict = deca.encode(exp_images)
            # transfer exp code
            codedict['pose'][:, 3:] = exp_codedict['pose'][:, 3:]
            codedict['exp'] = exp_codedict['exp']
            _, exp_visdict = deca.decode(codedict)
            visdict_list[i + count]['exp'] = exp_visdict['shape_detail_images']

        visdict_list_list.append(visdict_list)

    ### write gif
    writer = imageio.get_writer(os.path.join(savefolder, 'teaser.gif'),
                                mode='I')
    for i in range(len(yaw_list)):
        grid_image_list = []
        for j in range(len(testdata)):
            grid_image = deca.visualize(visdict_list_list[j][i])
            grid_image_list.append(grid_image)
        grid_image_all = np.concatenate(grid_image_list, 0)
        grid_image_all = rescale(
            grid_image_all, 0.6,
            multichannel=True)  # resize for showing in github
        writer.append_data(grid_image_all[:, :, [2, 1, 0]])

    print(f'-- please check the teaser figure in {savefolder}')
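The teaser demo drives the head rotation through batch_euler2axis(deg2rad(...)), i.e. Euler angles given in degrees are converted to the axis-angle vector that the 'pose' code expects. As a point of reference, the same conversion can be done with SciPy; the 'xyz' angle order below is an assumption about what batch_euler2axis uses, and the helper name is hypothetical.

# Reference conversion (sketch): Euler degrees -> axis-angle (rotation vector),
# comparable to batch_euler2axis(deg2rad(euler_pose)) in the demo.
import torch
from scipy.spatial.transform import Rotation

def euler_deg_to_axis_angle(euler_deg):
    # euler_deg: (B, 3) tensor of [pitch, yaw, roll] in degrees ('xyz' order assumed)
    rotvec = Rotation.from_euler('xyz', euler_deg.cpu().numpy(), degrees=True).as_rotvec()
    return torch.tensor(rotvec, dtype=torch.float32)

# e.g. a 30-degree yaw, as in the yaw_list sweep above:
# global_pose = euler_deg_to_axis_angle(torch.tensor([[0., 30., 0.]]))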
Code Example #6
def main(args):
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)
    print(str(args.iscrop))

    # load test images
    testdata = datasets.TestData(args.inputpath,
                                 iscrop=args.iscrop,
                                 face_detector=args.detector)
    test_data_loader = DataLoader(testdata,
                                  batch_size=3,
                                  num_workers=4,
                                  shuffle=True,
                                  pin_memory=True,
                                  drop_last=True)

    # run DECA
    deca_cfg.model.use_tex = args.useTex
    if args.model_path != '':
        deca_cfg.pretrained_modelpath = args.model_path

    deca = DECA(config=deca_cfg, device=device, eval_detail=False)
    # for i in range(len(testdata)):
    for i, sample in enumerate(test_data_loader):
        names = sample['imagename']
        images = sample['image'].to(device)
        codedict = deca.encode(images)
        opdict, visdict = deca.decode(codedict)  #tensor
        if args.saveDepth or args.saveKpt or args.saveObj or args.saveMat or args.saveImages:
            for name in names:
                os.makedirs(os.path.join(savefolder, name), exist_ok=True)
        # -- save results
        if args.saveDepth:
            depth_images = deca.render.render_depth(
                opdict['transformed_vertices']).repeat(1, 3, 1, 1)
            visdict['depth_images'] = depth_images
            for j in range(len(names)):
                cv2.imwrite(
                    os.path.join(savefolder, names[j],
                                 names[j] + '_depth.jpg'),
                    util.tensor2image(depth_images[j]))
        if args.saveKpt:
            for j in range(len(names)):
                np.savetxt(
                    os.path.join(savefolder, names[j],
                                 names[j] + '_kpt2d.txt'),
                    opdict['landmarks2d'][j].cpu().numpy())
                np.savetxt(
                    os.path.join(savefolder, names[j],
                                 names[j] + '_kpt3d.txt'),
                    opdict['landmarks3d'][j].cpu().numpy())
        if args.saveObj:
            deca.save_obj(savefolder, names, opdict)
        if args.saveMat:
            opdict = util.dict_tensor2npy(opdict)
            for j in range(len(names)):
                # note: opdict holds the full batch; each .mat contains all samples
                savemat(os.path.join(savefolder, names[j], names[j] + '.mat'), opdict)
        if args.saveVis:
            vis_imgs = deca.visualize(visdict)
            for j in range(len(names)):
                cv2.imwrite(os.path.join(savefolder, names[j] + '_vis.jpg'),
                            vis_imgs[j])
        if args.saveImages:
            for vis_name in [
                    'inputs', 'rendered_images', 'albedo_images',
                    'shape_images', 'shape_detail_images'
            ]:
                if vis_name not in visdict.keys():
                    continue
                for j in range(len(names)):
                    image = util.tensor2image(visdict[vis_name][j])
                    cv2.imwrite(
                        os.path.join(savefolder, names[j],
                                     names[j] + '_' + vis_name + '.jpg'),
                        image)
    print(f'-- please check the results in {savefolder}')
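Code Example #6 runs DECA on batches of 3 images through a DataLoader, so per-image saving has to pick sample j out of every batched tensor. A hypothetical helper that does this uniformly (assuming all dict values are batch-first tensors):

# Hypothetical helper: take sample j from every batch-first tensor in a dict,
# keeping the batch dimension so downstream code (e.g. rendering) still works.
import torch

def slice_batch(tensor_dict, j):
    return {k: (v[j:j + 1] if torch.is_tensor(v) else v) for k, v in tensor_dict.items()}

# e.g. saving one .mat per image instead of the whole batch:
# for j, nm in enumerate(names):
#     savemat(os.path.join(savefolder, nm, nm + '.mat'),
#             util.dict_tensor2npy(slice_batch(opdict, j)))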