Code example #1
    def _init_model(self, g):
        pose = None
        betas = None
        trans = None

        if self.model is not None:
            pose = self.model.pose.r
            betas = self.model.betas.r
            trans = self.model.trans.r

        if g == 'f':
            self.model = load_model(
                'smpl/models/basicModel_f_lbs_10_207_0_v1.1.0.pkl')
        else:
            self.model = load_model(
                'smpl/models/basicmodel_m_lbs_10_207_0_v1.1.0.pkl')
        self._loaded_gender = g

        if pose is not None:
            self.model.pose[:] = pose
            self.model.betas[:] = betas
            self.model.trans[:] = trans

        self.light.set(v=self.model, f=self.model.f, num_verts=len(self.model))
        self.rn.set(v=self.model, f=self.model.f)
        self.camera.set(v=self.model)
        self.joints2d.set(v=self.model.J_transformed)

        self.draw()
Code example #2
def test_our(img_path,
             joint_path,
             out_path="test.pkl",
             use_interpenetration=True,
             n_betas=10,
             flength=5000.,
             pix_thsh=25.,
             use_neutral=False,
             viz=True):
    with open('lsp_gender.csv') as f:
        genders = f.readlines()
    model_female = load_model(MODEL_FEMALE_PATH)
    model_male = load_model(MODEL_MALE_PATH)

    if not out_path.endswith(".pkl"):
        out_path = out_path + ".pkl"
    print img_path
    img = cv2.imread(img_path)

    gender = 'male'

    data = np.load(joint_path)['pose']
    joint = []
    for i in range(len(data[0])):
        joint.append([data[0][i], data[1][i]])
    conf = data[2]
    do_degrees = [0.]
    joint = np.array(joint)
    sph_regs = None
    params, vis = run_single_fit(img,
                                 joint,
                                 conf,
                                 model_male,
                                 regs=sph_regs,
                                 n_betas=n_betas,
                                 flength=flength,
                                 pix_thsh=pix_thsh,
                                 scale_factor=1,
                                 viz=viz,
                                 do_degrees=do_degrees)
    if viz:
        import matplotlib.pyplot as plt
        plt.ion()
        plt.show()
        plt.subplot(121)
        plt.imshow(img[:, :, ::-1])
        if do_degrees is not None:
            for di, deg in enumerate(do_degrees):
                plt.subplot(122)
                plt.cla()
                plt.imshow(vis[di])
                plt.draw()
                plt.title('%d deg' % deg)
                plt.pause(1)
    with open(out_path, 'w') as outf:
        pickle.dump(params, outf)

    # This only saves the first rendering.
    if do_degrees is not None:
        cv2.imwrite(out_path.replace('.pkl', '.png'), vis[0])
Code example #3
File: Main_Window.py Project: johndpope/Dance-Helper
    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)
        self.setWindowTitle('DANCE HELPER')

        self.file_name = ''
        self.frame = -1

        self.processOn = False

        self.file_paths = []
        self.isFirst = True

        self.compared = False
        self.mode1 = 0
        self.mode2 = 0
        self.m = load_model(
            './models/smpl/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
        self.m2 = load_model(
            './models/smpl/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
        self.pkl_paths = []
        self.pkl_paths2 = []
        self.op_joints = []
        self.op_joints2 = []
        self.ind = 0
        self.ind2 = 0

        self.rotx = 0.0
        self.roty = 0.0
        self.rotz = 0.0
        self.rotx2 = 0.0
        self.roty2 = 0.0
        self.rotz2 = 0.0

        self.res = None
        self.res2 = None
        self.img = None
        self.img2 = None

        self.w = None
        self.h = None
        self.w2 = None
        self.h2 = None

        self.label_sim_title.hide()

        model_temp = QStandardItemModel()
        RESULT_DIR = 'opmlify/result'
        for dirpath, dirname, filename in os.walk(RESULT_DIR):
            if self.isFirst:
                self.isFirst = False
                for a in dirname:
                    model_temp.appendRow(QStandardItem(a))
                    self.file_paths.append(RESULT_DIR + '/' + a)
                self.listView1.setModel(model_temp)
                self.listView2.setModel(model_temp)
        atexit.register(self.exit_handler)
Code example #4
def load_smpl(gender='female'):
    from os.path import join
    path = '/home/alfred/smpl/models/'
    if gender == 'female':
        fname_female = 'basicModel_f_lbs_10_207_0_v1.0.0.pkl'
        m = load_model(join(path, fname_female))
    else:
        fname_male = 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl'
        m = load_model(join(path, fname_male))
    return m
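
A minimal usage sketch for load_smpl() (the hard-coded model directory above is assumed to exist; the vertex and face counts are the standard SMPL ones):

import numpy as np

m = load_smpl(gender='female')
m.pose[:] = np.zeros(m.pose.size)    # rest pose
m.betas[:] = np.zeros(m.betas.size)  # mean shape
print(m.r.shape, m.f.shape)          # (6890, 3) vertices, (13776, 3) faces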
Code example #5
    def visualize_result_single(self, start, end, res_dir, render_util):
        from smpl_webuser.serialization import load_model
        smpl_origin = load_model(
            osp.join(self.model_root, 'smpl_cocoplus_neutral.pkl'))

        for i, result in enumerate(self.pred_results[start:end]):
            # get result subdir path and image file path
            img_path = self.data_list[result['data_idx']]['image_path']
            img_name = img_path.split('/')[-1]
            subdir = eval_utils.get_subdir(img_path)
            res_subdir = osp.join(res_dir, subdir)
            res_img_path = osp.join(res_subdir, img_name)
            # render predicted smpl to image
            img = eval_utils.pad_and_resize(cv2.imread(img_path))
            render_img = render_util.render_smpl_to_image(
                img.copy(), result['vis_verts'], result['cam'], self.renderer)
            render_img = np.concatenate((img, render_img), axis=1)
            blank_img = np.ones((224, 224, 3), dtype=np.uint8)
            render_img = np.concatenate((render_img, blank_img), axis=1)
            # render predicted smpl in multi-view
            smpl = copy.deepcopy(smpl_origin)
            render_pred_smpl = render_util.render_image(
                smpl, result['smpl_pose'], result['smpl_shape'])
            # save image
            res_img = np.concatenate((render_img, render_pred_smpl), axis=0)
            cv2.imwrite(res_img_path, res_img)

            if i % 10 == 0:
                print("{} Processed:{}/{}".format(
                    os.getpid(), i, len(self.pred_results[start:end])))
Code example #6
def make_predicted_mesh_neutral(predicted_params_path, flame_model_path):
    params = np.load(predicted_params_path,
                     allow_pickle=True,
                     encoding='latin1')
    #print(params)
    params = params[()]
    pose = np.zeros(15)
    #expression = np.zeros(100)
    shape = np.hstack(
        (params['shape'], np.zeros(300 - params['shape'].shape[0])))
    #pose = np.hstack((params['pose'], np.zeros(15-params['pose'].shape[0])))
    expression = np.hstack(
        (params['expression'], np.zeros(100 - params['expression'].shape[0])))
    flame_general_model = load_model(flame_model_path)
    generated_neutral_mesh = verts_decorated(
        #ch.array([0.0,0.0,0.0]),
        ch.array(params['cam']),
        ch.array(pose),
        ch.array(flame_general_model.r),
        flame_general_model.J_regressor,
        ch.array(flame_general_model.weights),
        flame_general_model.kintree_table,
        flame_general_model.bs_style,
        flame_general_model.f,
        bs_type=flame_general_model.bs_type,
        posedirs=ch.array(flame_general_model.posedirs),
        betas=ch.array(
            np.hstack((shape, expression))
        ),  #betas=ch.array(np.concatenate((theta[0,75:85], np.zeros(390)))), #
        shapedirs=ch.array(flame_general_model.shapedirs),
        want_Jtr=True)
    # neutral_mesh = Mesh(v=generated_neutral_mesh.r, f=generated_neutral_mesh.f)
    neutral_mesh = trimesh.Trimesh(vertices=generated_neutral_mesh.r,
                                   faces=generated_neutral_mesh.f)
    return neutral_mesh
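
A short, hedged usage sketch for the helper above; both input paths are hypothetical, and trimesh infers the output format from the file extension:

mesh = make_predicted_mesh_neutral('./output/params.npy',
                                   './models/generic_model.pkl')
mesh.export('./output/neutral_mesh.obj')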
Code example #7
def main():
    args = parse_args()

    #dataset_size = 1000
    #dataloader = load_data(args.dataset_size, args)
    device = torch.device("cuda:%d" %
                          args.gpu if torch.cuda.is_available() else "cpu")
    num_output = 82
    model = myresnet50(device,
                       num_output=num_output,
                       use_pretrained=True,
                       num_views=args.num_views)
    gender = 'male'
    m = load_model('../../models/basicModel_%s_lbs_10_207_0_v1.0.0.pkl' %
                   gender[0])
    #parent_dic = "/home/yifu/workspace/data_smpl/A_pose_3/new_vertexes"
    #parent_dic = "/home/yifu/workspace/data_smpl/A_pose_4/scaled"
    parent_dic = "/home/yifu/workspace/Data/MPI-FAUST/training/registrations_obj/male/test_model_2"
    #path = "./trained_resnet.pth"
    save_name = 'trained_resnet_%d_%d.pth' % (num_output, 100000)
    path = os.path.join(parent_dic, save_name)
    model.load_state_dict(torch.load(path))

    #parent_dic = "/home/yifu/workspace/data_smpl/A_pose_3"
    #path = os.path.join(parent_dic, 'test')
    '''
    parent_dic = "/home/yifu/workspace/data_smpl/real_human"
    path = os.path.join(parent_dic, 'scaled')
    '''
    path = parent_dic

    evaluate_model(m, model, args.num_views, path, device, args)
Code example #8
    def __init__(self, model_path):
        # Load the SMPL model from the path given by the caller
        self.body = load_model(model_path)

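        # SMPL parameter sizes below: 72 pose values (24 joints x 3
        # axis-angle) and 10 shape betas. num_cam = 3 presumably refers to a
        # weak-perspective camera (scale plus 2D translation), as in
        # HMR-style pipelines (an assumption, not stated in this snippet).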
        self.num_cam = 3
        self.num_theta = 72
        self.num_beta = 10
Code example #9
def output_FLAME_meshes(flame_model_fname, params_fname, out_path):
    '''
    Reconstruct meshes given a sequence of FLAME parameters
    :param flame_model_fname:   path of the FLAME model
    :param params_fname:        path of the FLAME parameters file
    :param out_path:            output path of the FLAME meshes
    '''

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    model = load_model(flame_model_fname)
    params = np.load(params_fname, allow_pickle=True).item()

    shape = params['shape']
    pose = params['pose']
    exp = params['expression']

    model.betas[:300] = shape

    num_frames = pose.shape[0]
    for frame_idx in range(num_frames):
        model.pose[:] = pose[frame_idx, :]
        model.betas[300:] = exp[frame_idx, :]
        out_fname = os.path.join(out_path, '%05d_FLAME.obj' % frame_idx)
        Mesh(model.r, model.f).write_obj(out_fname)
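
A hedged call sketch for the reconstruction helper above (both data paths are hypothetical); the parameter file must contain 'shape', 'pose', and 'expression' arrays, e.g. as saved by compute_FLAME_params() in code example #16:

output_FLAME_meshes('./models/generic_model.pkl',
                    './data/flame_params.npy',
                    './output/meshes')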
Code example #10
File: hello_world.py Project: zZay/flame-fitting
def generate_pose_data():
    # Load FLAME model (here we load the female model)
    # Make sure path is correct
    model_path = './models/female_model.pkl'
    model = load_model(
        model_path
    )  # the loaded model object is a 'chumpy' object, check https://github.com/mattloper/chumpy for details
    print "loaded model from:", model_path

    # Zero the pose and assign random shape parameters
    model.pose[:] = np.zeros(model.pose.size)
    model.betas[:] = np.random.randn(model.betas.size) * 1.0
    # model.trans[:] = np.random.randn( model.trans.size ) * 0.01   # you may also manipulate the translation of mesh

    outmesh_dir = './output'
    safe_mkdir(outmesh_dir)

    # Save zero pose
    outmesh_path = join(outmesh_dir, 'pose_0.obj')
    write_simple_obj(mesh_v=model.r, mesh_f=model.f, filepath=outmesh_path)

    # Write to an .obj file
    model.pose[3:6] = np.random.randn(3) * 0.3
    outmesh_path = join(outmesh_dir, 'pose_t.obj')
    write_simple_obj(mesh_v=model.r, mesh_f=model.f, filepath=outmesh_path)
    np.savetxt('./output/pose_t.txt', model.pose.r)

    # Print message
    print 'output mesh saved to: ', outmesh_path
Code example #11
File: evaluator.py Project: liuguoyou/DCT_ICCV-2019
    def visualize_result(self, model_dir, res_dir):
        assert sys.version_info[0] == 2, "This code could only run in Python 2"
        from models.renderer import SMPLRenderer
        from smpl_webuser.serialization import load_model

        smpl_model_path = osp.join(model_dir, 'smpl_cocoplus_neutral.pkl')
        self.renderer = SMPLRenderer(img_size=224,
                                     face_path=osp.join(
                                         model_dir, 'smpl_faces.npy'))
        # build result subdirs first
        self.build_dirs()
        # start processing
        num_process = min(8, len(self.pred_results))
        num_each = len(self.pred_results) // num_process
        process_list = list()
        for i in range(num_process):
            start = i * num_each
            end = (i + 1) * num_each if i < num_process - 1 else len(
                self.pred_results)
            smpl = load_model(smpl_model_path)
            p = mp.Process(target=self.visualize_result_single,
                           args=(start, end, smpl))
            p.start()
            process_list.append(p)
        for p in process_list:
            p.join()
Code example #12
def main(name):
    ## Load SMPL model (here we load the male model)
    ## Make sure path is correct
    m = load_model(
        '/Users/akase/smpl/smpl_webuser/hello_world/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
    )

    ### Assign random pose and shape parameters
    #m.pose[:] = np.random.rand(m.pose.size) * .2
    #m.betas[:] = np.random.rand(m.betas.size) * .03

    # self-made theta parameters
    #    theta = np.loadtxt('/Users/akase/smpl/smpl_webuser/hello_world/theta/theta.csv',delimiter=',')
    theta = np.loadtxt('exp/theta/theta' + name + '.csv', delimiter=',')
    m.pose[:] = theta[3:75]
    m.betas[:] = theta[75:]

    ## Write to an .obj file
    outmesh_path = 'exp/objdata/' + name + '.obj'
    with open(outmesh_path, 'w') as fp:
        for v in m.r:
            fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))

        for f in m.f + 1:  # Faces are 1-based, not 0-based in obj files
            fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))

    ## Print message
    print '..Output mesh saved to: ', outmesh_path
Code example #13
File: io.py Project: xcyan/smalr_online
def load_animal_model(model_name='my_smpl_15.pkl'):
    from smpl_webuser.serialization import load_model
    from os.path import exists, join

    model_dir = '../../'
    model_path = join(model_dir, 'smpl_models', model_name)
    model = load_model(model_path)
    return model
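
For reference, a one-line usage sketch (the relative path layout above is assumed):

model = load_animal_model('my_smpl_15.pkl')
print(model.r.shape)  # posed vertices; model.f holds the faces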
Code example #14
def load_smal_model(model_name='my_smpl_00781_4_all.pkl'):
    model_path = os.path.join(model_dir, model_name)

    model = load_model(model_path)
    v = align_smal_template_to_symmetry_axis(model.r.copy())

    return v, model.f
Code example #15
def alter_sequence_shape(source_path,
                         out_path,
                         flame_model_fname,
                         pc_idx=0,
                         pc_range=(0, 3),
                         uv_template_fname='',
                         texture_img_fname=''):
    '''
    Load an existing animation sequence in "zero pose" and change the identity-dependent shape over time.
    :param source_path:         path of the animation sequence (files must be provided in OBJ format)
    :param out_path:            output path of the altered sequence
    :param flame_model_fname:   path of the FLAME model
    :param pc_idx:              identity shape parameter to be varied, in [0,300), as FLAME provides 300 shape parameters
    :param pc_range:            tuple (start/end, max/min) defining the range of the shape variation,
                                i.e. (0,3) varies the shape from 0 to 3 stdev and back to 0
    '''

    if pc_idx < 0 or pc_idx >= 300:
        print('shape parameter index out of range [0,300)')
        return

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Load sequence files
    sequence_fnames = sorted(glob.glob(os.path.join(source_path, '*.obj')))
    num_frames = len(sequence_fnames)
    if num_frames == 0:
        print('No sequence meshes found')
        return

    # Load FLAME head model
    model = load_model(flame_model_fname)
    model_parms = np.zeros((num_frames, 300))

    # Generate interpolated shape parameters for each frame
    # Integer midpoint so the two halves concatenate to exactly num_frames values
    x1, y1 = [0, num_frames // 2], pc_range
    x2, y2 = [num_frames // 2, num_frames], pc_range[::-1]

    xsteps1 = np.arange(0, num_frames // 2)
    xsteps2 = np.arange(num_frames // 2, num_frames)

    model_parms[:, pc_idx] = np.hstack(
        (np.interp(xsteps1, x1, y1), np.interp(xsteps2, x2, y2)))

    predicted_vertices = np.zeros(
        (num_frames, model.v_template.shape[0], model.v_template.shape[1]))

    for frame_idx in range(num_frames):
        model.v_template[:] = Mesh(filename=sequence_fnames[frame_idx]).v
        model.betas[:300] = model_parms[frame_idx]
        predicted_vertices[frame_idx] = model.r

    output_sequence_meshes(predicted_vertices,
                           Mesh(model.v_template, model.f),
                           out_path,
                           uv_template_fname=uv_template_fname,
                           texture_img_fname=texture_img_fname)
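
A hedged usage sketch (all paths are hypothetical); this varies the first identity component from 0 to 3 standard deviations and back over the sequence:

alter_sequence_shape(source_path='./data/sequence_meshes',
                     out_path='./output/shape_variation',
                     flame_model_fname='./models/generic_model.pkl',
                     pc_idx=0,
                     pc_range=(0, 3))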
Code example #16
def compute_FLAME_params(source_path, params_out_fname, flame_model_fname,
                         template_fname):
    '''
    Load a template and an existing animation sequence in "zero pose" and compute the FLAME shape, jaw pose, and expression parameters.
    Outputs one set of shape parameters for the entire sequence, and pose and expression parameters for each frame.
    :param source_path:         path of the animation sequence (files must be provided in OBJ format)
    :param params_out_fname:    output path of the FLAME parameters file
    :param flame_model_fname:   path of the FLAME model
    :param template_fname:      "zero pose" template used to generate the sequence
    '''

    if not os.path.exists(os.path.dirname(params_out_fname)):
        os.makedirs(os.path.dirname(params_out_fname))

    # Load sequence files
    sequence_fnames = sorted(glob.glob(os.path.join(source_path, '*.obj')))
    num_frames = len(sequence_fnames)
    if num_frames == 0:
        print('No sequence meshes found')
        return

    model = load_model(flame_model_fname)

    print('Optimize for template identity parameters')
    template_mesh = Mesh(filename=template_fname)
    ch.minimize(template_mesh.v - model,
                x0=[model.betas[:300]],
                options={
                    'sparse_solver': lambda A, x: cg(A, x, maxiter=2000)[0]
                })

    betas = model.betas.r[:300].copy()
    model.betas[:] = 0.

    model.v_template[:] = template_mesh.v

    model_pose = np.zeros((num_frames, model.pose.shape[0]))
    model_exp = np.zeros((num_frames, 100))

    for frame_idx in range(num_frames):
        print('Process frame %d/%d' % (frame_idx + 1, num_frames))
        model.betas[:] = 0.
        model.pose[:] = 0.
        frame_vertices = Mesh(filename=sequence_fnames[frame_idx]).v
        # Optimize for jaw pose and facial expression
        ch.minimize(frame_vertices - model,
                    x0=[model.pose[6:9], model.betas[300:]],
                    options={
                        'sparse_solver': lambda A, x: cg(A, x, maxiter=2000)[0]
                    })
        model_pose[frame_idx] = model.pose.r.copy()
        model_exp[frame_idx] = model.betas.r[300:].copy()

    np.save(params_out_fname, {
        'shape': betas,
        'pose': model_pose,
        'expression': model_exp
    })
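
A hedged usage sketch (paths are hypothetical); the saved .npy can be fed back into output_FLAME_meshes() from code example #9 to reconstruct the sequence:

compute_FLAME_params(source_path='./data/sequence_meshes',
                     params_out_fname='./output/flame_params.npy',
                     flame_model_fname='./models/generic_model.pkl',
                     template_fname='./data/template.obj')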
Code example #17
def run_fitting_demo():

    # input landmarks
    lmk_path = './data/landmark_3d.pkl'
    lmk_3d = load_binary_pickle(lmk_path)
    print "loaded 3d landmark from:", lmk_path

    # model
    model_path = './models/male_model.pkl'  # change to 'female_model.pkl' or 'generic_model.pkl', if needed
    model = load_model(
        model_path
    )  # the loaded model object is a 'chumpy' object, check https://github.com/mattloper/chumpy for details
    print "loaded model from:", model_path

    # landmark embedding
    lmk_emb_path = './data/lmk_embedding_intraface_to_flame.pkl'
    lmk_face_idx, lmk_b_coords = load_embedding(lmk_emb_path)
    print "loaded lmk embedding"

    # output
    output_dir = './output'
    safe_mkdir(output_dir)

    # weights
    weights = {}
    weights['lmk'] = 1.0
    weights['shape'] = 0.001
    weights['expr'] = 0.001
    weights['pose'] = 0.1

    # optimization options
    import scipy.sparse as sp
    opt_options = {}
    opt_options['disp'] = 1
    opt_options['delta_0'] = 0.1
    opt_options['e_3'] = 1e-4
    opt_options['maxiter'] = 100
    sparse_solver = lambda A, x: sp.linalg.cg(
        A, x, maxiter=opt_options['maxiter'])[0]
    opt_options['sparse_solver'] = sparse_solver

    # run fitting
    mesh_v, mesh_f, parms = fit_lmk3d(
        lmk_3d=lmk_3d,  # input landmark 3d
        model=model,  # model
        lmk_face_idx=lmk_face_idx,
        lmk_b_coords=lmk_b_coords,  # landmark embedding
        weights=weights,  # weights for the objectives
        shape_num=300,
        expr_num=100,
        opt_options=opt_options)  # options

    # write result
    output_path = join(output_dir, 'fit_lmk3d_result.obj')
    write_simple_obj(mesh_v=mesh_v,
                     mesh_f=mesh_f,
                     filepath=output_path,
                     verbose=False)
Code example #18
def alter_sequence_head_pose(source_path,
                             out_path,
                             flame_model_fname,
                             pose_idx=3,
                             rot_angle=np.pi / 6):
    '''
    Load an existing animation sequence in "zero pose" and change the head pose (i.e. rotation around the neck) over time.
    :param source_path:         path of the animation sequence (files must be provided in OBJ file format)
    :param out_path:            output path of the altered sequence
    :param flame_model_fname:   path of the FLAME model
    :param pose_idx:            head pose parameter to be varied in [3,6)
    :param rot_angle:           maximum rotation angle in [0,2pi)
    '''

    if pose_idx < 3 or pose_idx >= 6:
        print('pose parameter index out of range [3,6)')
        return

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Load sequence files
    sequence_fnames = sorted(glob.glob(os.path.join(source_path, '*.obj')))
    num_frames = len(sequence_fnames)
    if num_frames == 0:
        print('No sequence meshes found')
        return

    # Load FLAME head model
    model = load_model(flame_model_fname)
    model_parms = np.zeros((num_frames, model.pose.shape[0]))

    # Generate interpolated pose parameters for each frame
    x1, y1 = [0, num_frames // 4], [0, rot_angle]
    x2, y2 = [num_frames // 4, num_frames // 2], [rot_angle, 0]
    x3, y3 = [num_frames // 2, 3 * num_frames // 4], [0, -rot_angle]
    x4, y4 = [3 * num_frames // 4, num_frames], [-rot_angle, 0]

    xsteps1 = np.arange(0, num_frames // 4)
    xsteps2 = np.arange(num_frames // 4, num_frames // 2)
    xsteps3 = np.arange(num_frames // 2, 3 * num_frames // 4)
    xsteps4 = np.arange(3 * num_frames // 4, num_frames)

    model_parms[:, pose_idx] = np.hstack(
        (np.interp(xsteps1, x1, y1), np.interp(xsteps2, x2, y2),
         np.interp(xsteps3, x3, y3), np.interp(xsteps4, x4, y4)))

    predicted_vertices = np.zeros(
        (num_frames, model.v_template.shape[0], model.v_template.shape[1]))

    for frame_idx in range(num_frames):
        model.v_template[:] = Mesh(filename=sequence_fnames[frame_idx]).v
        model.pose[:] = model_parms[frame_idx]
        predicted_vertices[frame_idx] = model.r

    output_sequence_meshes(predicted_vertices, Mesh(model.v_template, model.f),
                           out_path)
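
A hedged usage sketch (paths are hypothetical); pose index 3 is the first of the three head-rotation components described in the docstring, and np.pi / 6 rotates the head up to 30 degrees and back:

import numpy as np

alter_sequence_head_pose(source_path='./data/sequence_meshes',
                         out_path='./output/head_pose_variation',
                         flame_model_fname='./models/generic_model.pkl',
                         pose_idx=3,
                         rot_angle=np.pi / 6)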
Code example #19
def main():
    """Set up paths to image and joint data, saves results.
    :param base_dir: folder containing LSP images and data
    :param out_dir: output folder
    :param use_interpenetration: boolean, if True enables the interpenetration term
    :param n_betas: number of shape coefficients considered during optimization
    :param flength: camera focal length (an estimate)
    :param pix_thsh: threshold (in pixels); if the distance between the shoulder joints in 2D
                     is lower than pix_thsh, the body orientation is considered ambiguous
                     (so a fit is run on both the estimated one and its flip)
    :param use_neutral: boolean, if True uses the neutral-gender SMPL model
    :param viz: boolean, if True enables visualization during optimization
    """

    # Render degrees: List of degrees in azimuth to render the final fit.
    # Note that rendering many views can take a while.
    import glob
    import os

    seqPath = '/home/xiul/databag/dbfusion/record0'
    allImgs = glob.glob(os.path.join(seqPath, 'xiu/*.png'))
    allImgs.sort()
    allPoses = np.loadtxt(
        os.path.join(seqPath, 'pose_parameters_per_frame.txt'))
    allBetas = np.loadtxt(os.path.join(seqPath, 'shape_parameters.txt'))
    camK = np.loadtxt(os.path.join(seqPath, 'cam_params.txt'), delimiter=',')

    allTrans = allPoses[:, 3:6]
    allPose = np.hstack((allPoses[:, 0:3], allPoses[:, 6:]))
    model = load_model(MODEL_MALE_PATH)

    useIds = range(len(allImgs))

    allJson = glob.glob('/home/xiul/databag/dbfusion/record0/openpose/*.json')
    allJson.sort()

    for idx in useIds[10::10]:
        tarIUV = cv2.imread(allImgs[idx])
        tarIUV = cv2.resize(tarIUV, (1280, 720))
        tarIUV = tarIUV[:, :, ::-1]
        tarIUV[tarIUV[:, :, 0] == 0, :] = 255
        tarIUV = tarIUV.astype(np.float64)
        tarIUV = tarIUV / 255.0

        cJ2d, wJ2d = load_pose(allJson[idx])

        run_single_fit(tarIUV,
                       cJ2d,
                       wJ2d,
                       allPose[idx, 0:3],
                       allTrans[idx, :],
                       allPose[idx, 3:],
                       model,
                       camK,
                       viz=True,
                       ids=idx)
Code example #20
def get_height(pose, betas):
    m = load_model('../../models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
    m.pose[:] = pose
    m.betas[:] = betas[:]
    # Height is measured along y between template vertices 411 and 6677
    # (presumably the top of the head and a foot).
    height = m.v_shaped[411][1] - m.v_shaped[6677][1]
    return height
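
A minimal sketch: with zero pose and betas this measures the mean male template (the relative model path inside get_height() is assumed valid):

import numpy as np

height = get_height(np.zeros(72), np.zeros(10))
print(height)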
Code example #21
def run_fitting():
    # input landmarks
    lmk_path = './data/scan_lmks.npy'
    # measurement unit of landmarks ['m', 'cm', 'mm']
    unit = 'mm' 

    scale_factor = get_unit_factor('m') / get_unit_factor(unit)
    lmk_3d = scale_factor*np.load(lmk_path)
    print("loaded 3d landmark from:", lmk_path)

    # model
    model_path = './models/generic_model.pkl' # change to 'female_model.pkl' or 'male_model.pkl', if gender is known
    model = load_model(model_path)       # the loaded model object is a 'chumpy' object, check https://github.com/mattloper/chumpy for details
    print("loaded model from:", model_path)

    # landmark embedding
    lmk_emb_path = './models/flame_static_embedding.pkl' 
    lmk_face_idx, lmk_b_coords = load_embedding(lmk_emb_path)
    print("loaded lmk embedding")

    # output
    output_dir = './output'
    safe_mkdir(output_dir)

    # weights
    weights = {}
    # landmark term
    weights['lmk']   = 1.0   
    # shape regularizer (weight higher to regularize face shape more towards the mean)
    weights['shape'] = 0.001
    # expression regularizer (weight higher to regularize facial expression more towards the mean)
    weights['expr']  = 0.001
    # regularization of head rotation around the neck and jaw opening (weight higher for more regularization)
    weights['pose']  = 0.1
    
    # optimization options
    import scipy.sparse as sp
    opt_options = {}
    opt_options['disp']    = 1
    opt_options['delta_0'] = 0.1
    opt_options['e_3']     = 1e-4
    opt_options['maxiter'] = 100
    sparse_solver = lambda A, x: sp.linalg.cg(A, x, maxiter=opt_options['maxiter'])[0]
    opt_options['sparse_solver'] = sparse_solver

    # run fitting
    mesh_v, mesh_f, parms = fit_lmk3d( lmk_3d=lmk_3d,                                         # input landmark 3d
                                       model=model,                                           # model
                                       lmk_face_idx=lmk_face_idx, lmk_b_coords=lmk_b_coords,  # landmark embedding
                                       weights=weights,                                       # weights for the objectives
                                       shape_num=300, expr_num=100, opt_options=opt_options ) # options

    # write result
    output_path = join( output_dir, 'fit_lmk3d_result.obj' )
    write_simple_obj( mesh_v=mesh_v, mesh_f=mesh_f, filepath=output_path, verbose=False )
Code example #22
    def __init__(self, obj_path, model_path, w=224, h=224):
        self.m = get_body_mesh(obj_path, trans=ch.array([0, 0, 4]), rotation=ch.array([np.pi / 2, 0, 0]))
        # Load the SMPL model from the path given by the caller
        self.body = load_model(model_path)
        self.w = w
        self.h = h
        self.img_size = min(self.w, self.h)

        self.num_cam = 3
        self.num_theta = 72
        self.num_beta = 10
Code example #23
def get_horse_template(model_name='my_smpl_00781_4_all.pkl', data_name='my_smpl_data_00781_4_all.pkl'):

    model_path = os.path.join(model_dir, model_name)
    model = load_model(model_path)
    nBetas = len(model.betas.r)
    data_path = os.path.join(model_dir, data_name)
    data = pkl.load(open(data_path))
    # Select average zebra/horse
    betas = data['cluster_means'][2][:nBetas]
    model.betas[:] = betas
    v = model.r.copy()
    return v
Code example #24
def load_animal_model(model_name='my_smpl_15.pkl'):
    from smpl_webuser.serialization import load_model
    from os.path import exists, join

    model_dir = '../../'
    model_path = join(model_dir, 'smpl_models', model_name)
    model = load_model(model_path)
    return model
Code example #25
def generate_exp_sequence(output_dir):
    # Load FLAME model (here we load the generic model)
    # Make sure path is correct
    model_path = './models/generic_model.pkl'
    model = load_model(
        model_path
    )  # the loaded model object is a 'chumpy' object, check https://github.com/mattloper/chumpy for details
    print "loaded model from:", model_path

    safe_mkdir(output_dir)
    safe_mkdir(output_dir + '/gtcoeff')
    safe_mkdir(output_dir + '/gtcoeff/pose_coeff')
    safe_mkdir(output_dir + '/gtcoeff/exp_coeff')

    # Initialize pose and shape parameters to zero
    model.pose[:] = np.zeros(model.pose.size)
    model.betas[:] = np.zeros(model.betas.size)

    save_model_joints_info(model, output_dir)
    save_model_pose_bs(model, output_dir + "/pose_bs.txt")

    model.betas[0:300] = np.random.randn(300) * 0.5
    save_model_joints_info(model, output_dir + "/gtcoeff")

    # model.trans[:] = np.random.randn( model.trans.size ) * 0.01   # you may also manipulate the translation of mesh

    # Save zero pose
    outmesh_path = join(output_dir, '0000.obj')
    write_simple_obj(mesh_v=model.r, mesh_f=model.f, filepath=outmesh_path)
    np.savetxt(output_dir + '/gtcoeff/beta.txt', model.betas.r, fmt='%.8f')
    save_model_pose_info(model, output_dir + "/gtcoeff/pose_coeff/0000.txt")
    save_model_exp_info(model, output_dir + '/gtcoeff/exp_coeff/0000.txt')

    # Write to an .obj file
    for idx in range(1, 10):
        model.pose[0:5] = np.random.randn(5) * 0.01
        model.pose[6] = abs(np.random.randn(1)) * 0.3
        model.pose[7:9] = np.random.randn(2) * 0.01
        model.trans[:] = np.random.randn(model.trans.size) * 0.01
        model.betas[300:] = np.random.randn(100) * 1
        outmesh_path = join(output_dir, '{:04d}.obj'.format(idx))
        write_simple_obj(mesh_v=model.r, mesh_f=model.f, filepath=outmesh_path)
        save_model_pose_info(
            model, output_dir + "/gtcoeff/pose_coeff/{:04d}.txt".format(idx))
        save_model_exp_info(
            model, output_dir + "/gtcoeff/exp_coeff/{:04d}.txt".format(idx))

        # Print message
        print 'output mesh saved to: ', outmesh_path
Code example #26
def main():
    """Set up paths to image and joint data, saves results.
    :param base_dir: folder containing LSP images and data
    :param out_dir: output folder
    :param use_interpenetration: boolean, if True enables the interpenetration term
    :param n_betas: number of shape coefficients considered during optimization
    :param flength: camera focal length (an estimate)
    :param pix_thsh: threshold (in pixels); if the distance between the shoulder joints in 2D
                     is lower than pix_thsh, the body orientation is considered ambiguous
                     (so a fit is run on both the estimated one and its flip)
    :param use_neutral: boolean, if True uses the neutral-gender SMPL model
    :param viz: boolean, if True enables visualization during optimization
    """

    # Render degrees: List of degrees in azimuth to render the final fit.
    # Note that rendering many views can take a while.
    import glob
    import os

    allImgs = glob.glob(os.path.join(ROOT_PATH, 'images/*'))
    allImgs.sort()

    model = load_model(MODEL_MALE_PATH)

    all_ids = range(len(allImgs))

    #only the last
    for idx in all_ids[::1]:
        cImg = allImgs[idx]
        cImgBase = os.path.basename(cImg)
        cImgName = os.path.splitext(cImgBase)[0]

        img_raw, img, uv, j2d, j2d_w, pose, trans, betas, cam, cam_old = prepare_data(
            ROOT_PATH, cImg)

        run_single_fit(img_raw,
                       img,
                       uv,
                       j2d,
                       j2d_w,
                       trans,
                       pose,
                       betas,
                       model,
                       cam,
                       cam_old,
                       viz=True,
                       imgname=cImgName)
Code example #27
def main():
    # <===== PARSE ARGUMENTS
    import argparse
    parser = argparse.ArgumentParser(description='Fit SMPL body to mesh predictions.')
    parser.add_argument('--dataname', type=str, help='name of the data')

    args = parser.parse_args()
    dataname = args.dataname

    print('------- Option -------')
    print('\tdataname: %s' % dataname)
    # ======>

    # <========= LOAD SMPL MODEL
    m = load_model(join(SMPL_PATH, 'models/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'))
    # Init upright t-pose
    initial_model = m.copy()
    initial_model.pose[0:3] = np.array((np.pi, 0, 0))
Code example #28
File: hello_world.py Project: zZay/flame-fitting
def hello_world():
    # Load FLAME model (here we load the female model)
    # Make sure path is correct
    model_path = './models/female_model.pkl'
    model = load_model(
        model_path
    )  # the loaded model object is a 'chumpy' object, check https://github.com/mattloper/chumpy for details
    print "loaded model from:", model_path

    # Show component number
    print "\nFLAME coefficients:"
    print "shape (identity) coefficient shape =", model.betas[
        0:300].shape  # valid shape component range in "betas": 0-299
    print "expression coefficient shape       =", model.betas[
        300:].shape  # valid expression component range in "betas": 300-399
    print "pose coefficient shape             =", model.pose.shape

    print "\nFLAME model components:"
    print "shape (identity) component shape =", model.shapedirs[:, :,
                                                                0:300].shape
    print "expression component shape       =", model.shapedirs[:, :,
                                                                300:].shape
    print "pose corrective blendshape shape =", model.posedirs.shape
    print ""

    # -----------------------------------------------------------------------------

    # Assign random pose and shape parameters
    model.pose[:] = np.random.randn(model.pose.size) * 0.05
    model.pose[3:6] = np.random.randn(3) * 0.5
    model.betas[:] = np.random.randn(model.betas.size) * 1.0
    # model.trans[:] = np.random.randn( model.trans.size ) * 0.01   # you may also manipulate the translation of mesh

    # Write to an .obj file
    outmesh_dir = './output'
    safe_mkdir(outmesh_dir)
    outmesh_path = join(outmesh_dir, 'hello_flame.obj')
    write_simple_obj(mesh_v=model.r, mesh_f=model.f, filepath=outmesh_path)

    # Print message
    print 'output mesh saved to: ', outmesh_path
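
The index ranges printed above can be used directly to slice the coefficient vector; a short sketch reusing the model loaded above:

shape_coeffs = model.betas.r[:300]       # identity coefficients
expression_coeffs = model.betas.r[300:]  # expression coefficients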
Code example #29
def main():
    """Set up paths to image and joint data, saves results.
    data is stored as 
    root_path
    --images
    --hmr
    --densepose
    --openpose 
    --smplify: for result store
    """

    import glob
    import os

    allImgs = glob.glob(os.path.join(ROOT_PATH, 'images/*'))
    allImgs.sort()

    model = load_model(MODEL_MALE_PATH)

    all_ids = range(len(allImgs))

    for idx in all_ids[::5]:
        cImg = allImgs[idx]

        img_old, img, uv, j2d, j2d_w, pose, trans, betas, cam, cam_old = smplify_prepare_data(
            ROOT_PATH, cImg)
        run_single_fit(img_old,
                       img,
                       uv,
                       j2d,
                       j2d_w,
                       trans,
                       pose,
                       betas,
                       model,
                       cam,
                       cam_old,
                       viz=True,
                       ids=idx)
Code example #30
    def __init__(self, parent=None):
        super(domeGLWidget, self).__init__(parent)

        self.xRot = 0
        self.yRot = 0
        self.zRot = 0
        self.z_trans = 300
        self.x_trans = 0
        self.y_trans = 0
        self.z_near = 0.01
        self.z_far = 5000.
        self.frame_id = 0
        self.last_frameid = 0
        self.g_ambientLight = (0.35, 0.35, 0.35, 1.0)
        self.g_diffuseLight = (0.75, 0.75, 0.75, 0.7)
        self.g_specular = (1.0, 1.0, 1.0, 1.0)
        self.lastPos = QPoint()
        self.hdCams = []
        self.smpl_Lists = []
        self.ske_Lists = []

        self.smpl = load_model(
            '/home/xiul/workspace/SMPL_python/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'
        )
        self.smpl_facenum = 0
        self.smpl_params = []
        self.vts_buffer = []
        self.vns_buffer = []
        self.inds_buffer = []
        self.face_num = []
        self.skeletons = []

        self.render_lock = QMutex()
        self.meshlib = meshWrapper(
            lib_file=
            '/home/xiul/workspace/PanopticDome/build/libPythonWrapper.so'
        )  # change this to your built shared library
        self.meshlib.load_totalmodel()
Code example #31
File: show_humaneva.py Project: mehameha998/simplify
    seq = args.seq + '_1_C1'

    data_dir = join(base_dir, 'results/human_eva', seq)

    results_path = join(data_dir, 'all_results.pkl')
    joints_path = join(data_dir, 'est_joints.npz')

    if 'S1' in seq:
        model_path = join(model_dir, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    else:
        model_path = join(model_dir, 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    # Load everything:
    # SMPL model
    model = load_model(model_path)
    # detected joints
    est = np.load(joints_path)['est_joints']

    # SMPL parameters + camera
    print('opening %s' % results_path)
    with open(results_path, 'r') as f:
        res = pickle.load(f)
    poses = res['poses']
    betas = res['betas']

    # Camera rotation is always at identity;
    # the global rotation of the body is encoded by the first 3 elements of poses.
    cam_ts = res['cam_ts']
    focal_length = res['focal_length']
    principal_pt = res['principal_pt']
Code example #32
File: fit_3d.py Project: mehameha998/simplify
def main(base_dir,
         out_dir,
         use_interpenetration=True,
         n_betas=10,
         flength=5000.,
         pix_thsh=25.,
         use_neutral=False,
         viz=True):
    """Set up paths to image and joint data, saves results.
    :param base_dir: folder containing LSP images and data
    :param out_dir: output folder
    :param use_interpenetration: boolean, if True enables the interpenetration term
    :param n_betas: number of shape coefficients considered during optimization
    :param flength: camera focal length (an estimate)
    :param pix_thsh: threshold (in pixels); if the distance between the shoulder joints in 2D
                     is lower than pix_thsh, the body orientation is considered ambiguous
                     (so a fit is run on both the estimated one and its flip)
    :param use_neutral: boolean, if True uses the neutral-gender SMPL model
    :param viz: boolean, if True enables visualization during optimization
    """

    img_dir = join(abspath(base_dir), 'images/')
    print img_dir
    data_dir = join(abspath(base_dir), 'results/lsp')

    if not exists(out_dir):
        makedirs(out_dir)
        _LOGGER.info("make out dir")

    # Render degrees: List of degrees in azimuth to render the final fit.
    # Note that rendering many views can take a while.
    do_degrees = [0.]

    sph_regs = None
    if not use_neutral:
        _LOGGER.info("Reading genders...")
        # File storing information about gender in LSP
        with open(join(data_dir, 'lsp_gender.csv')) as f:
            genders = f.readlines()
        model_female = load_model(MODEL_FEMALE_PATH)
        model_male = load_model(MODEL_MALE_PATH)
        if use_interpenetration:
            sph_regs_male = np.load(SPH_REGS_MALE_PATH)
            sph_regs_female = np.load(SPH_REGS_FEMALE_PATH)
    else:
        gender = 'neutral'
        model = load_model(MODEL_NEUTRAL_PATH)
        if use_interpenetration:
            sph_regs = np.load(SPH_REGS_NEUTRAL_PATH)

    # Load joints
    est = np.load(join(data_dir, 'est_joints.npz'))['est_joints']

    # Load images
    img_paths = sorted(glob(join(img_dir, '*[0-9].jpg')))
    for ind, img_path in enumerate(img_paths):
        out_path = '%s/%04d.pkl' % (out_dir, ind)
        if not exists(out_path):
            _LOGGER.info('Fitting 3D body on `%s` (saving to `%s`).', img_path,
                         out_path)
            img = cv2.imread(img_path)
            if img.ndim == 2:
                _LOGGER.warn("The image is grayscale!")
                img = np.dstack((img, img, img))

            joints = est[:2, :, ind].T
            conf = est[2, :, ind]

            if not use_neutral:
                gender = 'male' if int(genders[ind]) == 0 else 'female'
                if gender == 'female':
                    model = model_female
                    if use_interpenetration:
                        sph_regs = sph_regs_female
                elif gender == 'male':
                    model = model_male
                    if use_interpenetration:
                        sph_regs = sph_regs_male

            params, vis = run_single_fit(
                img,
                joints,
                conf,
                model,
                regs=sph_regs,
                n_betas=n_betas,
                flength=flength,
                pix_thsh=pix_thsh,
                scale_factor=2,
                viz=viz,
                do_degrees=do_degrees)
            if viz:
                import matplotlib.pyplot as plt
                plt.ion()
                plt.show()
                plt.subplot(121)
                plt.imshow(img[:, :, ::-1])
                if do_degrees is not None:
                    for di, deg in enumerate(do_degrees):
                        plt.subplot(122)
                        plt.cla()
                        plt.imshow(vis[di])
                        plt.draw()
                        plt.title('%d deg' % deg)
                        plt.pause(1)
                raw_input('Press any key to continue...')

            with open(out_path, 'w') as outf:
                pickle.dump(params, outf)

            # This only saves the first rendering.
            if do_degrees is not None:
                cv2.imwrite(out_path.replace('.pkl', '.png'), vis[0])
Code example #33
'''
=============================
Inside Terminal, navigate to the smpl/webuser/hello_world directory. You can run
the hello world script now by typing the following:
>	python render_smpl.py
'''

import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
from smpl_webuser.serialization import load_model

## Load SMPL model (here we load the female model)
m = load_model('../../models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')

## Assign random pose and shape parameters
m.pose[:] = np.random.rand(m.pose.size) * .2
m.betas[:] = np.random.rand(m.betas.size) * .03
m.pose[0] = np.pi

## Create OpenDR renderer
rn = ColoredRenderer()

## Assign attributes to renderer
w, h = (640, 480)

rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w,w])/2., c=np.array([w,h])/2., k=np.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
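
The excerpt stops before shading and display; a hedged completion in the spirit of the script's hello-world setup (the light position and colors are assumptions, and the display could equally be done with cv2):

## Construct point light source and retrieve the rendered image
rn.vc = LambertianPointLight(
    f=m.f,
    v=rn.v,
    num_verts=len(m),
    light_pos=np.array([-1000, -1000, -2000]),
    vc=np.ones_like(m) * .9,
    light_color=np.array([1., 1., 1.]))

import matplotlib.pyplot as plt
plt.imshow(rn.r)  # rn.r is the rendered h x w x 3 float image
plt.show()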