Example No. 1
def generate_trimesh(npoints, device='cpu'):
    # Dummy mesh for benchmarking: vertices form a ramp in (0, 1], and the
    # faces cycle through the vertex indices 0..npoints-1 row by row.
    verts = torch.ones([npoints, 3], dtype=torch.float32, requires_grad=True)
    verts = verts.cumsum(dim=0) / npoints
    faces = torch.arange(npoints, dtype=torch.int64).repeat(3).view(npoints, 3)
    m = M.from_tensors(verts, faces)
    m.to(device)
    return m
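A minimal smoke test for the generator above (a sketch; it assumes `M` is a kaolin-style TriangleMesh class, e.g. `from kaolin.rep import TriangleMesh as M`):

import torch

mesh = generate_trimesh(128)            # 128 vertices, 128 faces, on CPU
assert mesh.vertices.shape == (128, 3)
assert mesh.faces.shape == (128, 3)
assert int(mesh.faces.max()) < 128      # every face index references a real vertex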
Example No. 2
def ball_pivot_surface_reconstruction(points: torch.Tensor) -> TriangleMesh:
    points = points.detach().cpu() if points.requires_grad else points.cpu()

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points.numpy())
    pcd.estimate_normals()

    distances = pcd.compute_nearest_neighbor_distance()
    avg_dist = np.mean(distances)
    radius = avg_dist  # on the order of the average nearest-neighbor spacing

    recon_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        pcd,
        o3d.utility.DoubleVector([radius, radius * 2, radius * 4, radius * 8]))

    vertices = torch.tensor(np.asarray(recon_mesh.vertices), dtype=torch.float)
    faces = torch.tensor(np.asarray(recon_mesh.triangles), dtype=torch.long)

    # Duplicate every face with reversed winding so the mesh is double-sided.
    faces_ex = faces.clone()
    faces_ex[..., 1] = faces[..., 2]
    faces_ex[..., 2] = faces[..., 1]
    faces = torch.cat([faces, faces_ex], 0)

    recon_mesh = TriangleMesh.from_tensors(vertices, faces)

    return recon_mesh
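A hedged usage sketch for the reconstruction above; it assumes `open3d` is installed and imported as `o3d`, `numpy` as `np`, and that `TriangleMesh` is kaolin's mesh class:

import torch

pts = torch.nn.functional.normalize(torch.randn(2048, 3), dim=1)  # toy points on a sphere
recon = ball_pivot_surface_reconstruction(pts)
print(recon.vertices.shape, recon.faces.shape)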
Example No. 3
def forward_step(th_scan_meshes, smpl, init_smpl_meshes):
    """
    Performs a forward step, given smpl and scan meshes.
    Then computes the losses.
    """

    # forward

    verts, _, _, _ = smpl()
    th_smpl_meshes = [
        tm.from_tensors(vertices=v, faces=smpl.faces) for v in verts
    ]
    # p3d_meshes = Meshes(verts=verts, faces=smpl.faces.expand(1,-1,-1))
    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(
        [sm.vertices for sm in th_scan_meshes], th_smpl_meshes)
    loss['m2s'] = batch_point_to_surface(
        [sm.vertices for sm in th_smpl_meshes], th_scan_meshes)
    loss['lap'] = torch.stack([
        laplacian_loss(sc, sm)
        for sc, sm in zip(init_smpl_meshes, th_smpl_meshes)
    ])
    # lap1 = init_smpl_meshes[0].compute_laplacian()
    # lap2 = th_smpl_meshes[0].compute_laplacian()
    # loss['lap'] = (torch.sum((lap1 - lap2) ** 2, 1) * lap_weight).sum().unsqueeze(0)
    loss['offsets'] = torch.mean(torch.mean(smpl.offsets**2, axis=1), axis=1)
    # loss['edge'] = (edge_length(th_smpl_meshes[0]) - edge_length(init_smpl_meshes[0])).unsqueeze(0)
    # loss['normal'] = mesh_normal_consistency(p3d_meshes).unsqueeze(0)
    # loss['pen'] = interpenetration_loss(verts, smpl.faces.expand(1,-1,-1), search_tree, pen_distance, tri_filtering_module)
    return loss
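A sketch of how a loss dict like this one is typically collapsed into a single scalar for an optimizer step; the weights below are hypothetical placeholders, not values from the source:

weights = {'s2m': 10.0, 'm2s': 10.0, 'lap': 100.0, 'offsets': 1.0}  # hypothetical

loss_dict = forward_step(th_scan_meshes, smpl, init_smpl_meshes)
total_loss = sum(weights[k] * v.mean() for k, v in loss_dict.items())
total_loss.backward()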
Example No. 4
def forward_step(th_scan_meshes, smpl, th_pose_3d=None):
    """
    Performs a forward step, given smpl and scan meshes.
    Then computes the losses.
    """
    # Get pose prior
    prior = get_prior(smpl.gender)

    # forward
    verts, _, _, _ = smpl()
    th_smpl_meshes = [
        tm.from_tensors(vertices=v, faces=smpl.faces) for v in verts
    ]

    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(
        [sm.vertices for sm in th_scan_meshes], th_smpl_meshes)
    loss['m2s'] = batch_point_to_surface(
        [sm.vertices for sm in th_smpl_meshes], th_scan_meshes)
    loss['betas'] = torch.mean(smpl.betas**2, axis=1)
    loss['pose_pr'] = prior(smpl.pose)
    if th_pose_3d is not None:
        loss['pose_obj'] = batch_get_pose_obj(th_pose_3d, smpl)
    return loss
Example No. 5
def forward_step(th_scan_meshes, smplx, init_smplx_meshes, search_tree,
                 pen_distance, tri_filtering_module):
    """
    Performs a forward step, given smplx and scan meshes.
    Then computes the losses.
    """

    # forward
    # verts, _, _, _ = smplx()
    verts = smplx()
    th_SMPLX_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]
    p3d_meshes = Meshes(verts=verts, faces=smplx.faces.expand(1, -1, -1))
    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(
        [sm.vertices for sm in th_scan_meshes], th_SMPLX_meshes)
    loss['m2s'] = batch_point_to_surface(
        [sm.vertices for sm in th_SMPLX_meshes], th_scan_meshes)
    loss['lap'] = torch.stack([
        laplacian_loss(sc, sm)
        for sc, sm in zip(init_smplx_meshes, th_SMPLX_meshes)
    ])
    loss['offsets'] = torch.mean(torch.mean(smplx.offsets**2, axis=1), axis=1)
    # loss['normal'] = mesh_normal_consistency(p3d_meshes).unsqueeze(0)
    # loss['interpenetration'] = interpenetration_loss(verts, smplx.faces, search_tree, pen_distance, tri_filtering_module, 1.0)
    return loss
Example No. 6
def generate_trimesh(npoints, device='cpu'):
    verts = torch.ones([npoints, 3], dtype=torch.float32)
    verts = verts.cumsum(dim=0) / npoints
    # Wrap indices into range: a bare arange(npoints * 3) would reference
    # vertices beyond the npoints that actually exist.
    faces = torch.arange(npoints * 3, dtype=torch.int64).view(npoints, 3) % npoints
    m = M.from_tensors(verts, faces)
    m.to(device)
    return m
Example No. 7
def split_meshes(meshes, features, index, angle=70):
    # compute faces to split
    faces_to_split = compute_splitting_faces(meshes,
                                             index,
                                             angle,
                                             show=(index == 1))
    # split mesh with selected faces
    new_verts, new_faces, new_face_archive, new_face_list, new_features = split_info(
        meshes, faces_to_split, features, index)
    new_mesh = TriangleMesh.from_tensors(new_verts, new_faces)
    new_mesh_i = TriangleMesh.from_tensors(new_verts, new_faces)
    # compute new adj matrix
    new_adj = new_mesh.compute_adjacency_matrix_full().clone()
    new_adj = normalize_adj(new_adj)
    # update the meshes dictionary
    meshes['init'].append(new_mesh)
    meshes['update'].append(new_mesh_i)
    meshes['adjs'].append(new_adj)
    meshes['face_lists'].append(new_face_list)
    meshes['face_archive'].append(new_face_archive)

    return new_features
Example No. 8
def forward_step(th_scan_meshes,
                 smplx,
                 scan_part_labels,
                 smplx_part_labels,
                 search_tree=None,
                 pen_distance=None,
                 tri_filtering_module=None):
    """
    Performs a forward step, given smplx and scan meshes.
    Then computes the losses.
    """
    # Get pose prior
    prior = get_prior(smplx.gender, precomputed=True)

    # forward
    # verts, _, _, _ = smplx()
    verts = smplx()
    th_smplx_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]

    scan_verts = [sm.vertices for sm in th_scan_meshes]
    smplx_verts = [sm.vertices for sm in th_smplx_meshes]

    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(scan_verts, th_smplx_meshes)
    loss['m2s'] = batch_point_to_surface(smplx_verts, th_scan_meshes)
    loss['betas'] = torch.mean(smplx.betas**2, axis=1)
    # loss['pose_pr'] = prior(smplx.pose)
    loss['interpenetration'] = interpenetration_loss(verts, smplx.faces,
                                                     search_tree, pen_distance,
                                                     tri_filtering_module, 1.0)
    loss['part'] = []
    for n, (sc_v, sc_l) in enumerate(zip(scan_verts, scan_part_labels)):
        tot = 0
        for i in range(NUM_PARTS):  # we currently use 14 parts
            if i not in sc_l:
                continue
            ind = torch.where(sc_l == i)[0]
            sc_part_points = sc_v[ind].unsqueeze(0)
            sm_part_points = smplx_verts[n][torch.where(
                smplx_part_labels[n] == i)[0]].unsqueeze(0)
            dist = chamfer_distance(sc_part_points,
                                    sm_part_points,
                                    w1=1.,
                                    w2=1.)
            tot += dist
        loss['part'].append(tot / NUM_PARTS)
    loss['part'] = torch.stack(loss['part'])
    return loss
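For shape reference, the part loss above expects one integer label per vertex, aligned with each mesh's vertices. A hypothetical construction (random labels, useful only to check shapes; `th_smplx_meshes` stands for the SMPL-X meshes built inside the function):

import torch

NUM_PARTS = 14  # assumed, per the comment in the loop above
scan_part_labels = [torch.randint(0, NUM_PARTS, (m.vertices.shape[0],))
                    for m in th_scan_meshes]
smplx_part_labels = [torch.randint(0, NUM_PARTS, (m.vertices.shape[0],))
                     for m in th_smplx_meshes]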
Example No. 9
def split_mesh(mesh):
    faces = mesh.faces.clone()
    tracker = dict()
    vertex_count = mesh.vertices.shape[0]
    constant_vertex_count = vertex_count
    columns = np.zeros((vertex_count, 0))
    new_faces = []

    for face in faces:
        x, y, z = face.int()
        new_verts = []
        edges = [[x, y], [y, z], [z, x]]

        for a, b in edges:

            key = [a, b]
            key.sort()
            key = str(key)
            if key in tracker:
                new_verts.append(tracker[key])
            else:
                new_verts.append(vertex_count)
                column = np.zeros((constant_vertex_count, 1))
                column[a] = .5
                column[b] = .5
                columns = np.concatenate((columns, column), axis=1)
                tracker[key] = vertex_count
                vertex_count += 1

        v1, v2, v3 = new_verts
        new_faces.append([x, v1, v3])
        new_faces.append([v1, y, v2])
        new_faces.append([v2, z, v3])
        new_faces.append([v1, v2, v3])

    # Use the mesh's device rather than the `face` variable leaked from the loop.
    split_mx = torch.FloatTensor(columns).to(mesh.vertices.device)

    new_faces = torch.LongTensor(new_faces).to(mesh.vertices.device)

    new_verts = split_features(split_mx, mesh.vertices)
    updated_mesh = TriangleMesh.from_tensors(new_verts,
                                             new_faces,
                                             enable_adjacency=True)

    return updated_mesh, split_mx
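A quick property check for the 1-to-4 subdivision above (a sketch, assuming a kaolin-style TriangleMesh named `mesh` is in scope):

new_mesh, split_mx = split_mesh(mesh)
assert new_mesh.faces.shape[0] == 4 * mesh.faces.shape[0]
# Each column of split_mx holds two 0.5 entries (the edge endpoints it averages).
assert (split_mx.sum(dim=0) == 1.0).all()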
Example No. 10
def test_from_tensors(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj',
                                 with_vt=True,
                                 texture_res=4)
    if device == 'cuda':
        mesh.cuda()

    verts = mesh.vertices.clone()
    faces = mesh.faces.clone()
    uvs = mesh.uvs.clone()
    face_textures = mesh.face_textures.clone()
    textures = mesh.textures.clone()

    mesh = TriangleMesh.from_tensors(verts,
                                     faces,
                                     uvs=uvs,
                                     face_textures=face_textures,
                                     textures=textures)
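Running the round-trip test above on both devices (a sketch):

import torch

test_from_tensors('cpu')
if torch.cuda.is_available():
    test_from_tensors('cuda')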
Example No. 11
def forward_step(th_scan_meshes, smpl, init_smpl_meshes):
    """
    Performs a forward step, given smpl and scan meshes.
    Then computes the losses.
    """

    # forward
    verts, _, _, _ = smpl()
    th_smpl_meshes = [tm.from_tensors(vertices=v,
                                      faces=smpl.faces) for v in verts]

    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface([sm.vertices for sm in th_scan_meshes], th_smpl_meshes)
    loss['m2s'] = batch_point_to_surface([sm.vertices for sm in th_smpl_meshes], th_scan_meshes)
    loss['lap'] = torch.stack([laplacian_loss(sc, sm) for sc, sm in zip(init_smpl_meshes, th_smpl_meshes)])
    loss['offsets'] = torch.mean(torch.mean(smpl.offsets**2, axis=1), axis=1)
    return loss
Example No. 12
def forward_step_SMPL(th_scan_meshes, smpl, scan_part_labels, smpl_part_labels, args):
    """
    Performs a forward step, given smpl and scan meshes.
    Then computes the losses.
    """
    # Get pose prior
    prior = get_prior(smpl.gender, precomputed=True)

    # forward
    verts, _, _, _ = smpl()
    th_smpl_meshes = [tm.from_tensors(vertices=v,
                                      faces=smpl.faces) for v in verts]

    scan_verts = [sm.vertices for sm in th_scan_meshes]
    smpl_verts = [sm.vertices for sm in th_smpl_meshes]

    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(scan_verts, th_smpl_meshes)
    loss['m2s'] = batch_point_to_surface(smpl_verts, th_scan_meshes)
    loss['betas'] = torch.mean(smpl.betas ** 2, axis=1)
    loss['pose_pr'] = prior(smpl.pose)

    # if args.num_joints == 14:
    if args.use_parts:
        loss['part'] = []
        for n, (sc_v, sc_l) in enumerate(zip(scan_verts, scan_part_labels)):
            tot = 0
            # for i in range(args.num_joints):  # we currently use 14 parts
            for i in range(14):  # we currently use 14 parts
                if i not in sc_l:
                    continue
                ind = torch.where(sc_l == i)[0]
                sc_part_points = sc_v[ind].unsqueeze(0)
                sm_part_points = smpl_verts[n][torch.where(smpl_part_labels[n] == i)[0]].unsqueeze(0)
                dist = chamfer_distance(sc_part_points, sm_part_points, w1=1., w2=1.)
                tot += dist
            # loss['part'].append(tot / args.num_joints)
            loss['part'].append(tot / 14)

        loss['part'] = torch.stack(loss['part'])

    return loss
Example No. 13
def compose_meshes(meshes: list) -> TriangleMesh:
    vertices = []
    faces = []

    vertices_num = 0

    for mesh in meshes:
        vertices_now = mesh.vertices.clone()
        faces_now = mesh.faces.clone()

        vertices.append(vertices_now)
        faces.append(faces_now + vertices_num)

        vertices_num += vertices_now.size(0)

    result_mesh = TriangleMesh.from_tensors(vertices=torch.cat(vertices),
                                            faces=torch.cat(faces))
    result_mesh.to(DEVICE)

    return result_mesh
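A hypothetical usage of compose_meshes, checking that face indices are offset correctly when meshes are concatenated (`mesh_a` and `mesh_b` stand for any two TriangleMesh instances):

combined = compose_meshes([mesh_a, mesh_b])
expected_verts = mesh_a.vertices.size(0) + mesh_b.vertices.size(0)
assert combined.vertices.size(0) == expected_verts
assert int(combined.faces.max()) < expected_verts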
Example No. 14
def visualize_refine_vp_meshes(image: torch.Tensor, vp_meshes: list, save_name: str, predict_vertices: torch.Tensor):
    image = to_pil(image.cpu()).resize((256, 256), Image.BILINEAR)
    mesh, uv, texture = merge_meshes(vp_meshes)
    deformed_mesh = TriangleMesh.from_tensors(predict_vertices, mesh.faces)

    tmp_uv = torch.rand(uv.size()).cuda()
    tmp_texture = torch.full_like(texture, 0.5)

    gif_imgs = []
    render_img_direct_pose = phong_render(deformed_mesh, uv, texture, 1, 0, 0)

    for azim in range(0, 360, 30):
        predict_img = phong_render(deformed_mesh, uv, texture, 1, 0, azim)
        vp_img = phong_render(mesh, uv, texture, 1, 0, azim)
        single_color_img = phong_render(deformed_mesh, tmp_uv, tmp_texture, 1, 0, azim)

        imgs = [image, render_img_direct_pose, vp_img, predict_img, single_color_img]
        gif_imgs.append(concat_pil_image(imgs))

    gif_imgs[0].save(save_name, format='GIF', append_images=gif_imgs[1:], save_all=True, duration=300, loop=0)
Example No. 15
def compose_vp_meshes(vp_meshes: list):
    vertices = []
    faces = []

    last_vertices_num = 0

    for i in range(len(vp_meshes)):
        vp_vertices = vp_meshes[i].vertices
        vertices.append(vp_vertices)

        # Offset a copy so the source mesh's faces are not mutated in place.
        vp_faces = vp_meshes[i].faces + last_vertices_num
        faces.append(vp_faces)

        last_vertices_num += vp_vertices.size(0)

    vertices = torch.cat(vertices)
    faces = torch.cat(faces)

    mesh = TriangleMesh.from_tensors(vertices=vertices, faces=faces)
    mesh.cuda()

    return mesh
Example No. 16
def merge_meshes(meshes: list) -> (TriangleMesh, torch.Tensor, torch.Tensor):
    vertex_num = 0
    vertices, faces, texture, uv = [], [], [], []

    for i, mesh in enumerate(meshes):
        vertices.append(mesh.vertices)
        faces.append(mesh.faces + vertex_num)
        uv.append(
            torch.full((mesh.vertices.size(0), 2), i / len(meshes) + 0.01))
        texture.append(get_random_colors())

        vertex_num += mesh.vertices.size(0)

    vertices = torch.cat(vertices)
    faces = torch.cat(faces)

    uv = torch.cat(uv)[None].to(DEVICE)
    texture = torch.cat(texture, 2)[None].to(DEVICE)

    merged_mesh = TriangleMesh.from_tensors(vertices, faces)
    merged_mesh.to(DEVICE)

    return merged_mesh, uv, texture
Example No. 17
def SMPLD_register(args):
    cfg = config.load_config(args.config, 'configs/default.yaml')
    out_dir = cfg['training']['out_dir']
    generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])
    is_cuda = (torch.cuda.is_available() and not args.no_cuda)
    device = torch.device("cuda" if is_cuda else "cpu")

    if args.subject_idx >= 0 and args.sequence_idx >= 0:
        logger, _ = create_logger(generation_dir, phase='reg_subject{}_sequence{}'.format(args.subject_idx, args.sequence_idx), create_tf_logs=False)
    else:
        logger, _ = create_logger(generation_dir, phase='reg_all', create_tf_logs=False)

    # Get dataset
    if args.subject_idx >= 0 and args.sequence_idx >= 0:
        dataset = config.get_dataset('test', cfg, sequence_idx=args.sequence_idx, subject_idx=args.subject_idx)
    else:
        dataset = config.get_dataset('test', cfg)

    batch_size = cfg['generation']['batch_size']

    # Loader
    test_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, num_workers=1, shuffle=False)

    model_counter = defaultdict(int)

    # Set optimization hyper parameters
    iterations, pose_iterations, steps_per_iter, pose_steps_per_iter = 3, 2, 30, 30

    inner_dists = []
    outer_dists = []

    for it, data in enumerate(tqdm(test_loader)):
        idxs = data['idx'].cpu().numpy()
        loc = data['points.loc'].cpu().numpy()
        batch_size = idxs.shape[0]
        # Directories to load the corresponding information
        mesh_dir = os.path.join(generation_dir, 'meshes')   # directory for posed and (optionally) unposed implicit outer/inner meshes
        label_dir = os.path.join(generation_dir, 'labels')   # directory for part labels
        register_dir = os.path.join(generation_dir, 'registrations')   # directory for registered meshes

        if args.use_raw_scan:
            scan_dir = dataset.dataset_folder   # this is the folder that contains CAPE raw scans
        else:
            scan_dir = None

        all_posed_minimal_meshes = []
        all_posed_cloth_meshes = []
        all_posed_vertices = []
        all_unposed_vertices = []
        scan_part_labels = []

        for idx in idxs:
            model_dict = dataset.get_model_dict(idx)

            subset = model_dict['subset']
            subject = model_dict['subject']
            sequence = model_dict['sequence']
            gender = model_dict['gender']
            filebase = os.path.basename(model_dict['data_path'])[:-4]

            folder_name = os.path.join(subset, subject, sequence)
            # TODO: we assume batch size stays the same if one resumes the job
            # can be more flexible to support different batch sizes before and
            # after resume
            register_file = os.path.join(register_dir, folder_name, filebase + 'minimal.registered.ply')
            if os.path.exists(register_file):
                # batch already computed, break
                break

            # points_dict = np.load(model_dict['data_path'])
            # gender = str(points_dict['gender'])

            mesh_dir_ = os.path.join(mesh_dir, folder_name)
            label_dir_ = os.path.join(label_dir, folder_name)

            if scan_dir is not None:
                scan_dir_ = os.path.join(scan_dir, subject, sequence)

            # Load part labels and vertex translations
            label_file_name = filebase + '.minimal.npz'
            label_dict = dict(np.load(os.path.join(label_dir_, label_file_name)))
            labels = torch.tensor(label_dict['part_labels'].astype(np.int64)).to(device)   # part labels for each vertex (14 or 24)
            scan_part_labels.append(labels)

            # Load minimal implicit surfaces
            mesh_file_name = filebase + '.minimal.posed.ply'
            # posed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
            posed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)
            posed_vertices = np.array(posed_mesh.vertices)
            all_posed_vertices.append(posed_vertices)

            posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32'), requires_grad=False, device=device),
                    torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
            all_posed_minimal_meshes.append(posed_mesh)

            mesh_file_name = filebase + '.minimal.unposed.ply'
            if os.path.exists(os.path.join(mesh_dir_, mesh_file_name)) and args.init_pose:
                # unposed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
                unposed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)
                unposed_vertices = np.array(unposed_mesh.vertices)
                all_unposed_vertices.append(unposed_vertices)

            if args.use_raw_scan:
                # Load raw scans
                mesh_file_name = filebase + '.ply'
                # posed_mesh = Mesh(filename=os.path.join(scan_dir_, mesh_file_name))
                posed_mesh = trimesh.load(os.path.join(scan_dir_, mesh_file_name), process=False)

                posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32') / 1000, requires_grad=False, device=device),
                        torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
                all_posed_cloth_meshes.append(posed_mesh)
            else:
                # Load clothed implicit surfaces
                mesh_file_name = filebase + '.cloth.posed.ply'
                # posed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
                posed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)

                posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32'), requires_grad=False, device=device),
                        torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
                all_posed_cloth_meshes.append(posed_mesh)

        if args.num_joints == 24:
            bm = BodyModel(bm_path='body_models/smpl/male/model.pkl', num_betas=10, batch_size=batch_size).to(device)
            parents = bm.kintree_table[0].detach().cpu().numpy()
            labels = bm.weights.argmax(1)
            # Convert 24 parts to 14 parts
            smpl2ipnet = torch.from_numpy(SMPL2IPNET_IDX).to(device)
            labels = smpl2ipnet[labels].clone().unsqueeze(0)
            del bm
        elif args.num_joints == 14:
            with open('body_models/misc/smpl_parts_dense.pkl', 'rb') as f:
                part_labels = pkl.load(f)

            labels = np.zeros((6890,), dtype=np.int64)
            for n, k in enumerate(part_labels):
                labels[part_labels[k]] = n
            labels = torch.tensor(labels).to(device).unsqueeze(0)
        else:
            raise ValueError('Got {} joints but the number of joints can only be either 14 or 24'.format(args.num_joints))

        th_faces = torch.tensor(smpl_faces.astype('int64'), dtype=torch.long).to(device)

        # We assume loaded meshes are properly scaled and offset to the original SMPL space.
        if len(all_posed_minimal_meshes) > 0 and len(all_unposed_vertices) == 0:
            # IPNet optimization without vertex translation
            # raise NotImplementedError('Optimization for IPNet is not implemented yet.')
            if args.num_joints == 24:
                for idx in range(len(scan_part_labels)):
                    scan_part_labels[idx] = smpl2ipnet[scan_part_labels[idx]].clone()

            prior = get_prior(gender=gender, precomputed=True)
            pose_init = torch.zeros((batch_size, 72))
            pose_init[:, 3:] = prior.mean
            betas, pose, trans = torch.zeros((batch_size, 10)), pose_init, torch.zeros((batch_size, 3))

            # Init SMPL, pose with mean smpl pose, as in ch.registration
            smpl = th_batch_SMPL(batch_size, betas, pose, trans, faces=th_faces, gender=gender).to(device)
            smpl_part_labels = torch.cat([labels] * batch_size, axis=0)

            # Optimize pose first
            optimize_pose_only(all_posed_minimal_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels,
                               smpl_part_labels, None, args)

            # Optimize pose and shape
            optimize_pose_shape(all_posed_minimal_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                                None, args)

            inner_vertices, _, _, _ = smpl()

            # Optimize vertices for SMPLD
            init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                                faces=smpl.faces) for v in inner_vertices]
            optimize_offsets(all_posed_cloth_meshes, smpl, init_smpl_meshes, 5, 10, args)

            outer_vertices, _, _, _ = smpl()
        elif len(all_posed_minimal_meshes) > 0:
            # NASA+PTFs optimization with vertex translations
            # Compute poses from implicit surfaces and correspondences
            # TODO: we could also compute bone-lengths if we train PTFs to predict A-pose with a global translation
            # that equals to the centroid of the pointcloud
            poses = compute_poses(all_posed_vertices, all_unposed_vertices, scan_part_labels, parents, args)
            # Convert 24 parts to 14 parts
            for idx in range(len(scan_part_labels)):
                scan_part_labels[idx] = smpl2ipnet[scan_part_labels[idx]].clone()

            pose_init = torch.from_numpy(poses).float()
            betas, pose, trans = torch.zeros((batch_size, 10)), pose_init, torch.zeros((batch_size, 3))

            # Init SMPL, pose with mean smpl pose, as in ch.registration
            smpl = th_batch_SMPL(batch_size, betas, pose, trans, faces=th_faces, gender=gender).to(device)
            smpl_part_labels = torch.cat([labels] * batch_size, axis=0)

            # Optimize pose first
            optimize_pose_only(all_posed_minimal_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels,
                               smpl_part_labels, None, args)

            # Optimize pose and shape
            optimize_pose_shape(all_posed_minimal_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                                None, args)

            inner_vertices, _, _, _ = smpl()

            # Optimize vertices for SMPLD
            init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                                faces=smpl.faces) for v in inner_vertices]
            optimize_offsets(all_posed_cloth_meshes, smpl, init_smpl_meshes, 5, 10, args)

            outer_vertices, _, _, _ = smpl()
        else:
            inner_vertices = outer_vertices = None

        if args.use_raw_scan:
            for i, idx in enumerate(idxs):
                model_dict = dataset.get_model_dict(idx)

                subset = model_dict['subset']
                subject = model_dict['subject']
                sequence = model_dict['sequence']
                filebase = os.path.basename(model_dict['data_path'])[:-4]

                folder_name = os.path.join(subset, subject, sequence)
                register_dir_ = os.path.join(register_dir, folder_name)
                if not os.path.exists(register_dir_):
                    os.makedirs(register_dir_)

                if not os.path.exists(os.path.join(register_dir_, filebase + 'minimal.registered.ply')):
                    registered_mesh = trimesh.Trimesh(inner_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'minimal.registered.ply'))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'cloth.registered.ply')):
                    registered_mesh = trimesh.Trimesh(outer_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'cloth.registered.ply'))
        else:
            # Evaluate registered mesh
            gt_smpl_mesh = data['points.minimal_smpl_vertices'].to(device)
            gt_smpld_mesh = data['points.smpl_vertices'].to(device)
            if inner_vertices is None:
                # if vertices are None, we assume they already exist due to previous runs
                inner_vertices = []
                outer_vertices = []
                for i, idx in enumerate(idxs):

                    model_dict = dataset.get_model_dict(idx)

                    subset = model_dict['subset']
                    subject = model_dict['subject']
                    sequence = model_dict['sequence']
                    filebase = os.path.basename(model_dict['data_path'])[:-4]

                    folder_name = os.path.join(subset, subject, sequence)
                    register_dir_ = os.path.join(register_dir, folder_name)

                    # registered_mesh = Mesh(filename=os.path.join(register_dir_, filebase + 'minimal.registered.ply'))
                    registered_mesh = trimesh.load(os.path.join(register_dir_, filebase + 'minimal.registered.ply'), process=False)
                    registered_v = torch.tensor(registered_mesh.vertices.astype(np.float32), requires_grad=False, device=device)
                    inner_vertices.append(registered_v)

                    # registered_mesh = Mesh(filename=os.path.join(register_dir_, filebase + 'cloth.registered.ply'))
                    registered_mesh = trimesh.load(os.path.join(register_dir_, filebase + 'cloth.registered.ply'), process=False)
                    registered_v = torch.tensor(registered_mesh.vertices.astype(np.float32), requires_grad=False, device=device)
                    outer_vertices.append(registered_v)

                inner_vertices = torch.stack(inner_vertices, dim=0)
                outer_vertices = torch.stack(outer_vertices, dim=0)

            inner_dist = torch.norm(gt_smpl_mesh - inner_vertices, dim=2).mean(-1)
            outer_dist = torch.norm(gt_smpld_mesh - outer_vertices, dim=2).mean(-1)

            for i, idx in enumerate(idxs):
                model_dict = dataset.get_model_dict(idx)

                subset = model_dict['subset']
                subject = model_dict['subject']
                sequence = model_dict['sequence']
                filebase = os.path.basename(model_dict['data_path'])[:-4]

                folder_name = os.path.join(subset, subject, sequence)
                register_dir_ = os.path.join(register_dir, folder_name)
                if not os.path.exists(register_dir_):
                    os.makedirs(register_dir_)

                logger.info('Inner distance for input {}: {} cm'.format(filebase, inner_dist[i].item()))
                logger.info('Outer distance for input {}: {} cm'.format(filebase, outer_dist[i].item()))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'minimal.registered.ply')):
                    registered_mesh = trimesh.Trimesh(inner_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'minimal.registered.ply'))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'cloth.registered.ply')):
                    registered_mesh = trimesh.Trimesh(outer_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'cloth.registered.ply'))

            inner_dists.extend(inner_dist.detach().cpu().numpy())
            outer_dists.extend(outer_dist.detach().cpu().numpy())

    logger.info('Mean inner distance: {} cm'.format(np.mean(inner_dists)))
    logger.info('Mean outer distance: {} cm'.format(np.mean(outer_dists)))
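The evaluation above reduces to this per-example metric, isolated here as a sketch of the lines that compute inner_dist and outer_dist:

import torch

def mean_vertex_distance(gt: torch.Tensor, pred: torch.Tensor) -> torch.Tensor:
    # gt, pred: (batch, num_vertices, 3); returns one mean distance per example.
    return torch.norm(gt - pred, dim=2).mean(dim=-1)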
Example No. 18
def optimize_sequence(true_poses, gt_translation, args, save_path: str):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    smpl_file_name = "../SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
    uv_map_file_name = "../textures/smpl_uv_map.npy"
    uv = np.load(uv_map_file_name)
    texture_file_name = "../textures/female1.jpg"
    with open(texture_file_name, 'rb') as file:
        texture = Image.open(BytesIO(file.read()))
    results = []
    model = smplx.create(smpl_file_name, model_type='smpl')
    model = model.to(device)
    gaussian_filter = get_gaussian_filter(args.kernel_size, args.sigma)
    gaussian_filter = gaussian_filter.to(device)
    betas = torch.tensor([[
        -0.3596, -1.0232, -1.7584, -2.0465, 0.3387, -0.8562, 0.8869, 0.5013,
        0.5338, -0.0210
    ]]).to(device)

    true_images = []
    init_images = []
    result_images = []
    losses_frames = []
    pose_losses_frames = []
    iterations = args.iterations
    # for f_id, true_pose in tqdm(enumerate(true_poses[150:170:10])):
    for f_id, true_pose in tqdm(enumerate(true_poses[150:300:20])):
        if f_id > 0:
            iterations = 200
        losses = []
        pose_losses = []
        output_true = model(betas=betas,
                            return_verts=True,
                            body_pose=true_pose)
        faces = torch.tensor(model.faces * 1.0).to(device)
        mesh_true = TriangleMesh.from_tensors(output_true.vertices[0], faces)
        vertices_true = mesh_true.vertices.unsqueeze(0)
        faces = mesh_true.faces.unsqueeze(0)
        textures = torch.ones(1,
                              faces.shape[1],
                              args.texture_size,
                              args.texture_size,
                              args.texture_size,
                              3,
                              dtype=torch.float32,
                              device='cuda')
        renderer_full = Renderer(camera_mode='look_at',
                                 image_size=args.image_size)
        renderer_full.eye = get_points_from_angles(args.camera_distance,
                                                   args.elevation,
                                                   args.azimuth)
        images, _, _ = renderer_full(vertices_true, faces, textures)
        true_image = images[0].permute(1, 2, 0)
        true_images.append(
            (255 * true_image.detach().cpu().numpy()).astype(np.uint8))
        if args.gaussian_blur:
            true_image = gaussian_filter(
                true_image.unsqueeze(0).permute(0, 3, 2,
                                                1)).permute(0, 3, 2, 1)[0]
        true_image = true_image.detach()
        if f_id == 0 or args.init_pose == "zero":
            perturbed_pose = torch.zeros(69).view(1, -1).to(device)
        # else: keep the previous frame's optimized pose as the initialization
        perturbed_pose = Variable(perturbed_pose, requires_grad=True)
        optim = torch.optim.Adam([perturbed_pose], lr=1e-2)
        image_size = args.image_size
        if args.coarse_to_fine:
            # Start coarse; the resolution is doubled at each stage in the loop.
            image_size = int(image_size / 2**args.coarse_to_fine_steps)
        kernel_size = args.kernel_size
        renderer = renderer_full
        for i in range(iterations):
            # print("Iter: ", i, "kernel size: ", kernel_size)
            if args.blur_to_no_blur and i % int(
                    iterations / args.blur_to_no_blur_steps) == 0:
                gaussian_filter = get_gaussian_filter(kernel_size,
                                                      sigma=args.sigma)
                gaussian_filter = gaussian_filter.to(device)
                images, _, _ = renderer(vertices_true, faces, textures)
                true_image = images[0].permute(1, 2, 0)
                true_image = gaussian_filter(
                    true_image.unsqueeze(0).permute(0, 3, 2,
                                                    1)).permute(0, 3, 2, 1)[0]
                true_image = true_image.detach()
                kernel_size = int(kernel_size / 2)
            if args.coarse_to_fine and i % int(
                    iterations / args.coarse_to_fine_steps) == 0:
                renderer = Renderer(camera_mode='look_at',
                                    image_size=image_size)
                renderer.eye = get_points_from_angles(args.camera_distance,
                                                      args.elevation,
                                                      args.azimuth)
                images, _, _ = renderer(vertices_true, faces, textures)
                true_image = images[0].permute(1, 2, 0)
                if args.gaussian_blur:
                    true_image = gaussian_filter(
                        true_image.unsqueeze(0).permute(0, 3, 2, 1)).permute(
                            0, 3, 2, 1)[0]
                true_image = true_image.detach()
                image_size *= 2
            optim.zero_grad()
            output = model(betas=betas,
                           return_verts=True,
                           body_pose=perturbed_pose)
            vertices_goal = output.vertices[0]
            mesh = TriangleMesh.from_tensors(vertices_goal, faces)
            vertices = vertices_goal.unsqueeze(0)
            images, _, _ = renderer(vertices, faces, textures)
            image = images[0].permute(1, 2, 0)
            if i == 0:
                perturbed_images, _, _ = renderer_full(vertices, faces,
                                                       textures)
                perturbed_image = perturbed_images[0].permute(1, 2, 0)
                perturbed_image = (
                    255 * perturbed_image.detach().cpu().numpy()).astype(
                        np.uint8)
                init_images.append(perturbed_image)
            if args.gaussian_blur:
                image = gaussian_filter(
                    image.unsqueeze(0).permute(0, 3, 2,
                                               1)).permute(0, 3, 2, 1)[0]
            if i == iterations - 1:
                images, _, _ = renderer_full(vertices, faces, textures)
                result_image = images[0].permute(1, 2, 0)
            if args.photo_loss == "L1":
                loss = (image - true_image).abs().mean()
            else:
                loss = ((image - true_image)**2).mean().sqrt()
            pose_loss = (perturbed_pose - true_pose).abs().mean()
            # angle prior for elbow and knees
            if args.angle_prior:
                # Angle prior for knees and elbows
                angle_prior_loss = (args.angle_prior_weight**
                                    2) * angle_prior(perturbed_pose).sum(
                                        dim=-1)[0]
                print("Angle Prior: ", angle_prior_loss.item())
                # Pose prior loss
                pose_prior = MaxMixturePrior(prior_folder='SPIN/data',
                                             num_gaussians=8,
                                             dtype=torch.float32).to(device)
                pose_prior_loss = (args.pose_prior_weight**2) * pose_prior(
                    perturbed_pose, betas)[0]
                print("Pose Prior: ", pose_prior_loss.item())
                loss += angle_prior_loss + pose_prior_loss
                # Pose prior loss
                # pose_prior_loss = (pose_prior_weight ** 2) * pose_prior(body_pose, betas)
            print("Iter: {} Loss: {}".format(i, loss.item()))
            loss.backward()
            optim.step()
            losses.append(loss.item())
            pose_losses.append(pose_loss.item())
            imageio.imwrite("{}/iteration_{:03d}.png".format(save_path, i),
                            (255 * image.detach().cpu().numpy()).astype(
                                np.uint8))
        result_images.append(
            (255 * result_image.detach().cpu().numpy()).astype(np.uint8))
        losses_frames.append(losses)
        pose_losses_frames.append(pose_losses)
    return losses_frames, pose_losses_frames, result_images, init_images, true_images
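The two photometric losses selected by args.photo_loss in the loop above, isolated for clarity (a sketch):

def photo_loss(image, target, kind="L1"):
    # "L1": mean absolute pixel error; anything else: RMSE, as in the loop above.
    if kind == "L1":
        return (image - target).abs().mean()
    return ((image - target) ** 2).mean().sqrt()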
Example No. 19
def optimize(args, save_path):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    smpl_file_name = "../SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
    uv_map_file_name = "../textures/smpl_uv_map.npy"
    uv = np.load(uv_map_file_name)
    texture_file_name = "../textures/female1.jpg"
    with open(texture_file_name, 'rb') as file:
        texture = Image.open(BytesIO(file.read()))
    model = smplx.create(smpl_file_name, model_type='smpl')
    model = model.to(device)

    gaussian_filter = get_gaussian_filter(args.kernel_size, args.sigma)
    gaussian_filter = gaussian_filter.to(device)

    betas = torch.tensor([[
        -0.3596, -1.0232, -1.7584, -2.0465, 0.3387, -0.8562, 0.8869, 0.5013,
        0.5338, -0.0210
    ]]).to(device)
    if args.perturb_betas:
        perturbed_betas = Variable(torch.tensor(
            [[3, -1.0232, 1.8, 2.0465, -0.3387, 0.9, 0.8869, -0.5013, -1,
              2]]).to(device),
                                   requires_grad=True)
    else:
        perturbed_betas = betas
    expression = torch.tensor([[
        2.7228, -1.8139, 0.6270, -0.5565, 0.3251, 0.5643, -1.2158, 1.4149,
        0.4050, 0.6516
    ]]).to(device)
    perturbed_pose = torch.ones(69).view(1, -1).to(device) * np.deg2rad(4)
    #perturbed_pose[0, 38] = -np.deg2rad(60)
    #perturbed_pose[0, 41] = np.deg2rad(60)

    perturbed_pose = Variable(perturbed_pose, requires_grad=True)
    canonical_pose0 = torch.zeros(2).view(1, -1).to(device)
    canonical_pose1 = torch.zeros(35).view(1, -1).to(device)
    canonical_pose2 = torch.zeros(2).view(1, -1).to(device)
    canonical_pose3 = torch.zeros(27).view(1, -1).to(device)
    arm_angle_l = Variable(torch.tensor([-np.deg2rad(65)
                                         ]).float().view(1, -1).to(device),
                           requires_grad=True)
    arm_angle_r = Variable(torch.tensor([np.deg2rad(65)
                                         ]).float().view(1, -1).to(device),
                           requires_grad=True)
    leg_angle_l = Variable(torch.tensor([np.deg2rad(20)
                                         ]).float().view(1, -1).to(device),
                           requires_grad=True)

    output_true = model(betas=betas,
                        expression=expression,
                        return_verts=True,
                        body_pose=None)

    # Normalize vertices
    # output = model(betas=betas, expression=expression,
    #               return_verts=True, body_pose=perturbed_pose)

    # vertices_goal = output.vertices[0]
    # vertices_abs_max = torch.abs(vertices_goal).max().detach()
    # vertices_min = vertices_goal.min(0)[0][None, :].detach()
    # vertices_max = vertices_goal.max(0)[0][None, :].detach()

    faces = torch.tensor(model.faces * 1.0).to(device)

    mesh_true = TriangleMesh.from_tensors(output_true.vertices[0], faces)
    vertices_true = mesh_true.vertices.unsqueeze(0)
    # vertices = pre_normalize_vertices(mesh.vertices, vertices_min, vertices_max,
    #                                  vertices_abs_max).unsqueeze(0)

    faces = mesh_true.faces.unsqueeze(0)

    textures = torch.ones(1,
                          faces.shape[1],
                          args.texture_size,
                          args.texture_size,
                          args.texture_size,
                          3,
                          dtype=torch.float32,
                          device='cuda')
    renderer_full = Renderer(camera_mode='look_at', image_size=args.image_size)
    renderer_full.eye = get_points_from_angles(args.camera_distance,
                                               args.elevation, args.azimuth)
    images, _, _ = renderer_full(vertices_true, faces, textures)
    true_image = images[0].permute(1, 2, 0)
    if args.gaussian_blur:
        true_image = gaussian_filter(
            true_image.unsqueeze(0).permute(0, 3, 2, 1)).permute(0, 3, 2, 1)[0]
    true_image = true_image.detach()
    imageio.imwrite(save_path + "/true_image.png",
                    (255 * true_image.detach().cpu().numpy()).astype(np.uint8))

    if args.specific_angles_only and args.perturb_betas:
        optim = torch.optim.Adam(
            [arm_angle_l, arm_angle_r, leg_angle_l, perturbed_betas], lr=1e-2)
    elif args.specific_angles_only:
        optim = torch.optim.Adam([arm_angle_l, arm_angle_r, leg_angle_l],
                                 lr=1e-2)
    elif args.perturb_betas:
        optim = torch.optim.Adam([perturbed_pose, perturbed_betas], lr=1e-2)
    else:
        optim = torch.optim.Adam([perturbed_pose], lr=1e-2)
    results = []
    arm_parameters_l = []
    arm_parameters_r = []
    beta_diffs = []
    losses = []
    image_size = args.image_size
    if args.coarse_to_fine:
        image_size = int(image_size / 2**args.coarse_to_fine_steps)
    renderer = renderer_full
    for i in range(args.iterations):
        if args.coarse_to_fine and i % int(
                args.iterations / args.coarse_to_fine_steps) == 0:
            renderer = Renderer(camera_mode='look_at', image_size=image_size)
            renderer.eye = get_points_from_angles(args.camera_distance,
                                                  args.elevation, args.azimuth)
            images, _, _ = renderer(vertices_true, faces, textures)
            true_image = images[0].permute(1, 2, 0)
            if args.gaussian_blur:
                true_image = gaussian_filter(
                    true_image.unsqueeze(0).permute(0, 3, 2,
                                                    1)).permute(0, 3, 2, 1)[0]
            true_image = true_image.detach()
            image_size *= 2
        optim.zero_grad()
        if args.specific_angles_only:
            perturbed_pose = torch.cat([
                canonical_pose0, leg_angle_l, canonical_pose1, arm_angle_l,
                canonical_pose2, arm_angle_r, canonical_pose3
            ],
                                       dim=-1)
        output = model(betas=perturbed_betas,
                       expression=expression,
                       return_verts=True,
                       body_pose=perturbed_pose)

        vertices_goal = output.vertices[0]

        mesh = TriangleMesh.from_tensors(vertices_goal, faces)

        vertices = vertices_goal.unsqueeze(0)
        # vertices = pre_normalize_vertices(mesh.vertices, vertices_min, vertices_max,
        #                              vertices_abs_max).unsqueeze(0)

        images, _, _ = renderer(vertices, faces, textures)
        image = images[0].permute(1, 2, 0)
        if i == 0:
            perturbed_images, _, _ = renderer_full(vertices, faces, textures)
            perturbed_image = perturbed_images[0].permute(1, 2, 0)
            perturbed_image = perturbed_image.detach()
            imageio.imwrite(save_path + "/perturbed_image.png",
                            (255 *
                             perturbed_image.detach().cpu().numpy()).astype(
                                 np.uint8))
        if args.gaussian_blur:
            image = gaussian_filter(image.unsqueeze(0).permute(
                0, 3, 2, 1)).permute(0, 3, 2, 1)[0]
        loss = (image - true_image).abs().mean()
        loss.backward()
        optim.step()

        results.append((255 * image.detach().cpu().numpy()).astype(np.uint8))
        if args.specific_angles_only:
            arm_parameters_l.append(arm_angle_l.item())
            arm_parameters_r.append(arm_angle_r.item())
        if args.perturb_betas:
            beta_diffs.append((betas - perturbed_betas).abs().mean().item())
        losses.append(loss.item())
        print("Loss: ", loss.item())
    return losses, results, arm_parameters_l, arm_parameters_r, beta_diffs
Example No. 20
def fit_SMPLXD(scans,
               smplx_pkl,
               gender='male',
               save_path=None,
               scale_file=None,
               interpenetration=True):

    search_tree = None
    pen_distance = None
    tri_filtering_module = None
    max_collisions = 128
    df_cone_height = 0.0001
    point2plane = False
    penalize_outside = True
    part_segm_fn = '/home/chen/IPNet_SMPLX/assets/smplx_parts_segm.pkl'
    ign_part_pairs = ["9,16", "9,17", "6,16", "6,17", "1,2", "12,22"]
    if interpenetration:
        from mesh_intersection.bvh_search_tree import BVH
        import mesh_intersection.loss as collisions_loss
        from mesh_intersection.filter_faces import FilterFaces
        search_tree = BVH(max_collisions=max_collisions)

        pen_distance = collisions_loss.DistanceFieldPenetrationLoss(
            sigma=df_cone_height,
            point2plane=point2plane,
            vectorized=True,
            penalize_outside=penalize_outside)
        if part_segm_fn:
            part_segm_fn = os.path.expandvars(part_segm_fn)
            with open(part_segm_fn, 'rb') as faces_parents_file:
                face_segm_data = pkl.load(faces_parents_file,
                                          encoding='latin1')
            faces_segm = face_segm_data['segm']
            faces_parents = face_segm_data['parents']
            tri_filtering_module = FilterFaces(
                faces_segm=faces_segm,
                faces_parents=faces_parents,
                ign_part_pairs=ign_part_pairs).cuda()

    # Get SMPLX faces
    # spx = SmplPaths(gender=gender)
    spx = SMPLX(model_path="/home/chen/SMPLX/models/smplx",
                batch_size=1,
                gender=gender)
    smplx_faces = spx.faces
    th_faces = torch.tensor(smplx_faces.astype('int64'),
                            dtype=torch.long).cuda()

    # Batch size
    batch_sz = len(scans)

    # Init SMPLX
    global_pose, body_pose, left_hand_pose, right_hand_pose = [], [], [], []
    expression, jaw_pose, leye_pose, reye_pose = [], [], [], []
    betas, trans = [], []
    for spkl in smplx_pkl:
        smplx_dict = pkl.load(open(spkl, 'rb'))
        g, bp, lh, rh, e, j, le, re, b, t = (
            smplx_dict['global_pose'], smplx_dict['body_pose'],
            smplx_dict['left_hand_pose'], smplx_dict['right_hand_pose'],
            smplx_dict['expression'], smplx_dict['jaw_pose'],
            smplx_dict['leye_pose'], smplx_dict['reye_pose'],
            smplx_dict['betas'], smplx_dict['trans'])
        global_pose.append(g)
        body_pose.append(bp)
        left_hand_pose.append(lh)
        right_hand_pose.append(rh)
        expression.append(e)
        jaw_pose.append(j)
        leye_pose.append(le)
        reye_pose.append(re)
        if len(b) == 10:
            # temp = np.zeros((300,))
            temp = np.zeros((10, ))
            temp[:10] = b
            b = temp.astype('float32')
        betas.append(b)
        trans.append(t)
    global_pose, body_pose, left_hand_pose, right_hand_pose = np.array(global_pose), np.array(body_pose), \
                                                              np.array(left_hand_pose), np.array(right_hand_pose)
    expression, jaw_pose, leye_pose, reye_pose = np.array(expression), np.array(jaw_pose), \
                                                 np.array(leye_pose), np.array(reye_pose)
    betas, trans = np.array(betas), np.array(trans)

    global_pose, body_pose, left_hand_pose, right_hand_pose = torch.tensor(global_pose), torch.tensor(body_pose), \
                                                              torch.tensor(left_hand_pose), torch.tensor(right_hand_pose)
    expression, jaw_pose, leye_pose, reye_pose = torch.tensor(expression), torch.tensor(jaw_pose), \
                                                 torch.tensor(leye_pose), torch.tensor(reye_pose)
    betas, trans = torch.tensor(betas), torch.tensor(trans)
    # smplx = th_batch_SMPLX(batch_sz, betas, pose, trans, faces=th_faces, gender=gender).cuda()
    smplx = th_batch_SMPLX(batch_sz,
                           betas,
                           global_pose,
                           body_pose,
                           left_hand_pose,
                           right_hand_pose,
                           trans,
                           expression,
                           jaw_pose,
                           leye_pose,
                           reye_pose,
                           faces=th_faces,
                           gender=gender).to(DEVICE)
    # verts, _, _, _ = smplx()
    verts = smplx()
    init_smplx_meshes = [
        tm.from_tensors(vertices=v.clone().detach(), faces=smplx.faces)
        for v in verts
    ]

    # Load scans
    th_scan_meshes = []
    for scan in scans:
        print('scan path ...', scan)
        temp = Mesh(filename=scan)
        th_scan = tm.from_tensors(
            torch.tensor(temp.v.astype('float32'),
                         requires_grad=False,
                         device=DEVICE),
            torch.tensor(temp.f.astype('int32'),
                         requires_grad=False,
                         device=DEVICE).long())
        th_scan_meshes.append(th_scan)

    if scale_file is not None:
        for n, sc in enumerate(scale_file):
            dat = np.load(sc, allow_pickle=True)
            th_scan_meshes[n].vertices += torch.tensor(dat[1]).to(DEVICE)
            th_scan_meshes[n].vertices *= torch.tensor(dat[0]).to(DEVICE)

    # Optimize
    optimize_offsets(th_scan_meshes, smplx, init_smplx_meshes, 5, 10,
                     search_tree, pen_distance, tri_filtering_module)
    # optimize_offsets_only(th_scan_meshes, smplx, init_smplx_meshes, 5, 8, search_tree, pen_distance, tri_filtering_module)
    print('Done')

    # verts, _, _, _ = smplx()
    verts = smplx.get_vertices_clean_hand()
    th_smplx_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]

    if save_path is not None:
        if not exists(save_path):
            os.makedirs(save_path)

        names = ['full.ply']  # [split(s)[1] for s in scans]

        # Save meshes
        save_meshes(
            th_smplx_meshes,
            [join(save_path, n.replace('.ply', '_smplxd.obj')) for n in names])
        save_meshes(th_scan_meshes, [join(save_path, n) for n in names])
        # Save params
        for g, bp, lh, rh, e, j, le, re, b, t, d, n in zip(
                smplx.global_pose.cpu().detach().numpy(),
                smplx.body_pose.cpu().detach().numpy(),
                smplx.left_hand_pose.cpu().detach().numpy(),
                smplx.right_hand_pose.cpu().detach().numpy(),
                smplx.expression.cpu().detach().numpy(),
                smplx.jaw_pose.cpu().detach().numpy(),
                smplx.leye_pose.cpu().detach().numpy(),
                smplx.reye_pose.cpu().detach().numpy(),
                smplx.betas.cpu().detach().numpy(),
                smplx.trans.cpu().detach().numpy(),
                smplx.offsets_clean_hand.cpu().detach().numpy(), names):
            smplx_dict = {
                'global_pose': g,
                'body_pose': bp,
                'left_hand_pose': lh,
                'right_hand_pose': rh,
                'expression': e,
                'jaw_pose': j,
                'leye_pose': le,
                'reye_pose': re,
                'betas': b,
                'trans': t,
                'offsets': d
            }
            pkl.dump(
                smplx_dict,
                open(join(save_path, n.replace('.ply', '_smplxd.pkl')), 'wb'))

    return (smplx.global_pose.cpu().detach().numpy(),
            smplx.body_pose.cpu().detach().numpy(),
            smplx.left_hand_pose.cpu().detach().numpy(),
            smplx.right_hand_pose.cpu().detach().numpy(),
            smplx.expression.cpu().detach().numpy(),
            smplx.jaw_pose.cpu().detach().numpy(),
            smplx.leye_pose.cpu().detach().numpy(),
            smplx.reye_pose.cpu().detach().numpy(),
            smplx.betas.cpu().detach().numpy(),
            smplx.trans.cpu().detach().numpy(),
            smplx.offsets_clean_hand.cpu().detach().numpy())
Example No. 21
def generate_acd_mix_dataset(dataset_path, args):
    dataset = ShapeNetDataset(args.type)
    obj_paths = []
    dataset_obj_num = args.obj_num
    n = 0

    for data in tqdm(dataset.shapenet_datas):
        if data.canonical_obj_path in obj_paths:
            continue
        obj_paths.append(data.canonical_obj_path)

    for i in tqdm(range(dataset_obj_num)):
        rand_two_obj_paths = random.sample(obj_paths, 2)

        obj1_path = rand_two_obj_paths[0]
        obj2_path = rand_two_obj_paths[1]

        k_m1 = TriangleMesh.from_obj(obj1_path)
        k_m2 = TriangleMesh.from_obj(obj2_path)

        k_m1.vertices /= k_m1.vertices.max()
        k_m2.vertices /= k_m2.vertices.max()

        t_m1 = get_trimesh_from_kaolinmesh(k_m1)
        t_m2 = get_trimesh_from_kaolinmesh(k_m2)

        t_hulls1 = acd(t_m1)
        k_hulls1 = [get_kaolinmesh_from_trimesh(t_hull) for t_hull in t_hulls1]

        t_hulls2 = acd(t_m2)
        k_hulls2 = [get_kaolinmesh_from_trimesh(t_hull) for t_hull in t_hulls2]

        a_k_hulls1 = augment(k_hulls1)
        a_k_hulls2 = augment(k_hulls2)

        a_t_hulls1 = [
            get_trimesh_from_kaolinmesh(a_k_hull) for a_k_hull in a_k_hulls1
        ]
        a_t_hulls2 = [
            get_trimesh_from_kaolinmesh(a_k_hull) for a_k_hull in a_k_hulls2
        ]

        k_result = merge_meshes(a_t_hulls1 + a_t_hulls2)

        mesh, uv, texture = approximate_convex_decomposition(k_result,
                                                             hull_num=8)

        for j in range(20):
            now_mesh = TriangleMesh.from_tensors(mesh.vertices.clone(),
                                                 mesh.faces.clone())

            dist = 3.0 + torch.rand(1).item() * 2
            elev = (torch.rand(1).item() - 0.5) * 90
            azim = torch.rand(1).item() * 360

            rgb, silhouette, _ = PhongRenderer.render(now_mesh, dist, elev,
                                                      azim, uv, texture)

            rgb = rgb[0].cpu().permute(2, 0, 1)
            silhouette = silhouette[0].cpu().permute(2, 0, 1)
            img = torch.cat([rgb, silhouette], 0)

            pil_img = to_pil(img)
            now_mesh.vertices = ShapeNetDataset.transform_to_view_center(
                now_mesh.vertices, dist=dist, elev=elev, azim=azim)

            pil_img.save(os.path.join(dataset_path, 'img_%.6d.png' % n))
            now_mesh.save_mesh(os.path.join(dataset_path, 'mesh_%.6d.obj' % n))
            json_data = json.dumps({'dist': dist, 'elev': elev, 'azim': azim})
            with open(os.path.join(dataset_path, 'meta_%.6d.json' % n),
                      'w') as f:
                f.write(json_data)
            n += 1
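
A small usage sketch for the generated samples, assuming the `img_*/mesh_*/meta_*` naming produced above (the dataset path is a placeholder):

import json
import os
from PIL import Image

dataset_path = './acd_mix_dataset'  # hypothetical output directory
n = 0
img = Image.open(os.path.join(dataset_path, 'img_%.6d.png' % n))   # 4 channels: RGB + silhouette
mesh = TriangleMesh.from_obj(os.path.join(dataset_path, 'mesh_%.6d.obj' % n))
with open(os.path.join(dataset_path, 'meta_%.6d.json' % n)) as f:
    meta = json.load(f)  # {'dist': ..., 'elev': ..., 'azim': ...}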
Example No. 22
def fit_SMPLX(scans,
              pose_files=None,
              gender='male',
              save_path=None,
              display=None):
    """
    :param save_path:
    :param scans: list of scan paths
    :param pose_files:
    :return:
    """
    # Get SMPL faces
    sp = SmplPaths(gender=gender)
    smpl_faces = sp.get_faces()
    th_faces = torch.tensor(smpl_faces.astype('float32'),
                            dtype=torch.long).cuda()

    # Batch size
    batch_sz = len(scans)

    # Set optimization hyper parameters
    iterations, pose_iterations, steps_per_iter, pose_steps_per_iter = 3, 2, 30, 30

    if False:
        """Test by loading GT SMPL params"""
        betas, pose, trans = torch.tensor(
            GT_SMPL['betas'].astype('float32')).unsqueeze(0), torch.tensor(
                GT_SMPL['pose'].astype('float32')).unsqueeze(0), torch.zeros(
                    (batch_sz, 3))
    else:
        prior = get_prior(gender=gender)
        pose_init = torch.zeros((batch_sz, 72))
        pose_init[:, 3:] = prior.mean
        betas, pose, trans = torch.zeros(
            (batch_sz, 300)), pose_init, torch.zeros((batch_sz, 3))

    # Init SMPL, pose with mean smpl pose, as in ch.registration
    smpl = th_batch_SMPL(batch_sz, betas, pose, trans, faces=th_faces).cuda()

    # Load scans and center them. Once smpl is registered, move it accordingly.
    # Do not forget to change the location of 3D joints/ landmarks accordingly.
    th_scan_meshes, centers = [], []
    for scan in scans:
        print('scan path ...', scan)
        th_scan = tm.from_obj(scan)
        # cent = th_scan.vertices.mean(axis=0)
        # centers.append(cent)
        # th_scan.vertices -= cent
        th_scan.vertices = th_scan.vertices.cuda()
        th_scan.faces = th_scan.faces.cuda()
        th_scan.vertices.requires_grad = False
        th_scan.cuda()
        th_scan_meshes.append(th_scan)

    # Load pose information if pose file is given
    # Bharat: Shouldn't we structure th_pose_3d as [key][batch, ...] as opposed to current [batch][key]? See batch_get_pose_obj() in body_objectives.py
    th_pose_3d = None
    if pose_files is not None:
        th_no_right_hand_visible, th_no_left_hand_visible, th_pose_3d = [], [], []
        for pose_file in pose_files:
            with open(pose_file) as f:
                pose_3d = json.load(f)
                th_no_right_hand_visible.append(
                    np.max(
                        np.array(pose_3d['hand_right_keypoints_3d']).reshape(
                            -1, 4)[:, 3]) < HAND_VISIBLE)
                th_no_left_hand_visible.append(
                    np.max(
                        np.array(pose_3d['hand_left_keypoints_3d']).reshape(
                            -1, 4)[:, 3]) < HAND_VISIBLE)

                pose_3d['pose_keypoints_3d'] = torch.from_numpy(
                    np.array(pose_3d['pose_keypoints_3d']).astype(
                        np.float32).reshape(-1, 4))
                pose_3d['face_keypoints_3d'] = torch.from_numpy(
                    np.array(pose_3d['face_keypoints_3d']).astype(
                        np.float32).reshape(-1, 4))
                pose_3d['hand_right_keypoints_3d'] = torch.from_numpy(
                    np.array(pose_3d['hand_right_keypoints_3d']).astype(
                        np.float32).reshape(-1, 4))
                pose_3d['hand_left_keypoints_3d'] = torch.from_numpy(
                    np.array(pose_3d['hand_left_keypoints_3d']).astype(
                        np.float32).reshape(-1, 4))
            th_pose_3d.append(pose_3d)

        prior_weight = get_prior_weight(th_no_right_hand_visible,
                                        th_no_left_hand_visible).cuda()

        # Optimize pose first
        optimize_pose_only(th_scan_meshes,
                           smpl,
                           pose_iterations,
                           pose_steps_per_iter,
                           th_pose_3d,
                           prior_weight,
                           display=None if display is None else 0)

    # Optimize pose and shape
    optimize_pose_shape(th_scan_meshes,
                        smpl,
                        iterations,
                        steps_per_iter,
                        th_pose_3d,
                        display=None if display is None else 0)

    verts, _, _, _ = smpl()
    th_smpl_meshes = [
        tm.from_tensors(vertices=v, faces=smpl.faces) for v in verts
    ]

    if save_path is not None:
        if not exists(save_path):
            os.makedirs(save_path)

        names = [split(s)[1] for s in scans]

        # Save meshes
        save_meshes(
            th_smpl_meshes,
            [join(save_path, n.replace('.obj', '_smpl.obj')) for n in names])
        save_meshes(th_scan_meshes, [join(save_path, n) for n in names])

        # Save params
        for p, b, t, n in zip(smpl.pose.cpu().detach().numpy(),
                              smpl.betas.cpu().detach().numpy(),
                              smpl.trans.cpu().detach().numpy(), names):
            smpl_dict = {'pose': p, 'betas': b, 'trans': t}
            pkl.dump(
                smpl_dict,
                open(join(save_path, n.replace('.obj', '_smpl.pkl')), 'wb'))

    # Return outside the save block so callers always receive the fitted params
    return smpl.pose.cpu().detach().numpy(), \
           smpl.betas.cpu().detach().numpy(), \
           smpl.trans.cpu().detach().numpy()
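
A hypothetical invocation of the fitter above; the scan and keypoint paths are placeholders:

pose, betas, trans = fit_SMPLX(
    scans=['./data/person_0001.obj'],
    pose_files=['./data/person_0001_keypoints.json'],  # OpenPose-style 3D keypoints
    gender='male',
    save_path='./fit_output')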
Example No. 23
def fit_SMPLD(scans, smpl_pkl, gender='male', save_path=None, scale_file=None):
    # Get SMPL faces
    sp = SmplPaths(gender=gender)
    smpl_faces = sp.get_faces()
    th_faces = torch.tensor(smpl_faces.astype('float32'), dtype=torch.long).cuda()

    # Batch size
    batch_sz = len(scans)

    # Init SMPL
    pose, betas, trans = [], [], []
    for spkl in smpl_pkl:
        smpl_dict = pkl.load(open(spkl, 'rb'))
        p, b, t = smpl_dict['pose'], smpl_dict['betas'], smpl_dict['trans']
        pose.append(p)
        if len(b) == 10:
            temp = np.zeros((300,))
            temp[:10] = b
            b = temp.astype('float32')
        betas.append(b)
        trans.append(t)
    pose, betas, trans = np.array(pose), np.array(betas), np.array(trans)

    betas, pose, trans = torch.tensor(betas), torch.tensor(pose), torch.tensor(trans)
    smpl = th_batch_SMPL(batch_sz, betas, pose, trans, faces=th_faces).cuda()

    verts, _, _, _ = smpl()
    init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                        faces=smpl.faces) for v in verts]

    # Load scans
    th_scan_meshes = []
    for scan in scans:
        print('scan path ...', scan)
        temp = Mesh(filename=scan)
        th_scan = tm.from_tensors(torch.tensor(temp.v.astype('float32'), requires_grad=False, device=DEVICE),
                                  torch.tensor(temp.f.astype('int32'), requires_grad=False, device=DEVICE).long())
        th_scan_meshes.append(th_scan)

    if scale_file is not None:
        for n, sc in enumerate(scale_file):
            dat = np.load(sc, allow_pickle=True)
            th_scan_meshes[n].vertices += torch.tensor(dat[1]).to(DEVICE)
            th_scan_meshes[n].vertices *= torch.tensor(dat[0]).to(DEVICE)

    # Optimize
    optimize_offsets(th_scan_meshes, smpl, init_smpl_meshes, 5, 10)
    print('Done')

    verts, _, _, _ = smpl()
    th_smpl_meshes = [tm.from_tensors(vertices=v,
                                      faces=smpl.faces) for v in verts]

    if save_path is not None:
        if not exists(save_path):
            os.makedirs(save_path)

        names = [split(s)[1] for s in scans]

        # Save meshes
        save_meshes(th_smpl_meshes, [join(save_path, n.replace('.ply', '_smpld.obj')) for n in names])
        save_meshes(th_scan_meshes, [join(save_path, n) for n in names])
        # Save params
        for p, b, t, d, n in zip(smpl.pose.cpu().detach().numpy(), smpl.betas.cpu().detach().numpy(),
                                 smpl.trans.cpu().detach().numpy(), smpl.offsets.cpu().detach().numpy(), names):
            smpl_dict = {'pose': p, 'betas': b, 'trans': t, 'offsets': d}
            pkl.dump(smpl_dict, open(join(save_path, n.replace('.ply', '_smpld.pkl')), 'wb'))

    return smpl.pose.cpu().detach().numpy(), smpl.betas.cpu().detach().numpy(), \
           smpl.trans.cpu().detach().numpy(), smpl.offsets.cpu().detach().numpy()
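
The `scale_file` loop above reads `dat[0]` as a scale factor and `dat[1]` as a translation offset; a sketch of writing a compatible file, with that layout inferred from the loader rather than documented anywhere:

import numpy as np

scale = 0.001                       # hypothetical unit conversion, e.g. mm -> m
offset = np.zeros(3, dtype='float32')
np.save('person_0001_scale.npy', np.array([scale, offset], dtype=object))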
Example No. 24
    def train(
        self,
        batch,
        pretrain=False,
    ):
        inp = batch.get('inp').to(self.device)
        gt_verts = batch.get('gt_verts').to(self.device)
        betas = batch.get('betas').to(self.device)
        pose = batch.get('pose').to(self.device)
        trans = batch.get('trans').to(self.device)

        self.optimizer.zero_grad()
        weights_from_net = self.model(inp).view(self.bs, self.layer_size,
                                                self.num_neigh)
        weights_from_net = self.out_layer(weights_from_net)

        if pretrain:
            loss = (weights_from_net - self.init_weight).abs().sum(-1).mean()
        else:
            input_copy = inp[:, self.idx2, :3]
            pred_x = weights_from_net * input_copy[:, :, :, 0]
            pred_y = weights_from_net * input_copy[:, :, :, 1]
            pred_z = weights_from_net * input_copy[:, :, :, 2]

            pred_verts = torch.sum(torch.stack((pred_x, pred_y, pred_z),
                                               axis=3),
                                   axis=2)

            # local neighbourhood regulariser
            current_argmax = torch.argmax(weights_from_net, axis=2)
            idx = torch.stack([
                torch.index_select(self.layer_neigh, 1, current_argmax[i])[0]
                for i in range(self.bs)
            ])
            current_argmax_verts = torch.stack([
                torch.index_select(inp[i, :, :3], 0, idx[i])
                for i in range(self.bs)
            ])
            current_argmax_verts = torch.stack(
                [current_argmax_verts for i in range(self.num_neigh)], dim=2)
            dist_from_max = current_argmax_verts - input_copy  # todo: should it be input copy??

            dist_from_max = torch.sqrt(
                torch.sum(dist_from_max * dist_from_max, dim=3))
            local_regu = torch.sum(dist_from_max * weights_from_net) / (
                self.bs * self.num_neigh * self.layer_size)

            body_tmp = self.smpl.forward(beta=betas, theta=pose, trans=trans)
            body_mesh = [
                tm.from_tensors(vertices=v, faces=self.smpl_faces)
                for v in body_tmp
            ]

            if self.garment_layer == 'Body':
                # update body verts with prediction
                body_tmp[:, self.vert_indices, :] = pred_verts
                # get skin cutout
                loss_data = data_loss(self.garment_layer, pred_verts,
                                      inp[:, self.vert_indices, :],
                                      self.geo_weights)
            else:
                loss_data = data_loss(self.garment_layer, pred_verts, gt_verts)

            # create mesh for predicted and smpl mesh
            pred_mesh = [
                tm.from_tensors(vertices=v, faces=self.garment_f_torch)
                for v in pred_verts
            ]
            gt_mesh = [
                tm.from_tensors(vertices=v, faces=self.garment_f_torch)
                for v in gt_verts
            ]

            loss_lap = lap_loss(pred_mesh, gt_mesh)

            # calculate normal for gt, pred and body
            loss_norm, body_normals, pred_normals = normal_loss(
                self.bs, pred_mesh, gt_mesh, body_mesh, self.num_faces)

            # interpenetration loss
            loss_interp = interp_loss(self.sideddistance,
                                      self.relu,
                                      pred_verts,
                                      gt_verts,
                                      body_tmp,
                                      body_normals,
                                      self.layer_size,
                                      d_tol=self.d_tol)

            loss = loss_data + 100. * loss_lap + local_regu + loss_interp  # loss_norm is computed above but not added to the total

        loss.backward()
        self.optimizer.step()
        return loss
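
The per-axis weighted sums in the step above can be collapsed into one einsum; a standalone sketch, assuming weights of shape (batch, layer_size, num_neigh) and neighbour coordinates of shape (batch, layer_size, num_neigh, 3):

import torch

B, L, K = 2, 100, 20                               # hypothetical sizes
weights = torch.softmax(torch.randn(B, L, K), -1)  # stand-in for the out_layer output
neighbours = torch.randn(B, L, K, 3)               # stand-in for input_copy
pred_verts = torch.einsum('blk,blkc->blc', weights, neighbours)
# Matches stacking the weighted x/y/z channels and summing over neighbours.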
Example No. 25
def get_kaolinmesh_from_trimesh(tri_mesh: trimesh.Trimesh):
    vertices = torch.tensor(tri_mesh.vertices, dtype=torch.float)
    faces = torch.tensor(tri_mesh.faces, dtype=torch.long)
    return TriangleMesh.from_tensors(vertices, faces)
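
The inverse helper is used alongside this one in Example No. 21; a sketch of it, assuming the kaolin mesh exposes `vertices`/`faces` tensors:

import trimesh

def get_trimesh_from_kaolinmesh(k_mesh) -> trimesh.Trimesh:
    return trimesh.Trimesh(
        vertices=k_mesh.vertices.detach().cpu().numpy(),
        faces=k_mesh.faces.detach().cpu().numpy(),
        process=False)  # keep vertex order/indexing intact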
Example No. 26
def fit_SMPLXD(scans,
               smpl_pkl=None,
               gender='male',
               save_path=None,
               display=False):
    # Get SMPL faces
    sp = SmplPaths(gender=gender)
    smpl_faces = sp.get_faces()
    th_faces = torch.tensor(smpl_faces.astype('float32'),
                            dtype=torch.long).cuda()

    # Batch size
    batch_sz = len(scans)

    # Init SMPL
    if smpl_pkl is None or smpl_pkl[0] is None:
        print('SMPL not specified, fitting SMPL now')
        pose, betas, trans = fit_SMPLX(scans, None, gender, save_path, display)
    else:
        pose, betas, trans = [], [], []
        for spkl in smpl_pkl:
            smpl_dict = pkl.load(open(spkl, 'rb'), encoding='latin-1')
            p, b, t = smpl_dict['pose'], smpl_dict['betas'], smpl_dict['trans']
            pose.append(p)
            if len(b) == 10:
                temp = np.zeros((300, ))
                temp[:10] = b
                b = temp.astype('float32')
            betas.append(b)
            trans.append(t)
        pose, betas, trans = np.array(pose), np.array(betas), np.array(trans)

    betas, pose, trans = torch.tensor(betas), torch.tensor(pose), torch.tensor(
        trans)
    smplx = th_batch_SMPLX(batch_sz, betas, pose, trans, faces=th_faces).cuda()

    verts, _, _, _ = smplx()
    init_smplx_meshes = [
        tm.from_tensors(vertices=v.clone().detach(), faces=smplx.faces)
        for v in verts
    ]

    # Load scans
    th_scan_meshes = []
    for scan in scans:
        th_scan = tm.from_obj(scan)
        if save_path is not None:
            th_scan.save_mesh(join(save_path, split(scan)[1]))
        th_scan.vertices = th_scan.vertices.cuda()
        th_scan.faces = th_scan.faces.cuda()
        th_scan.vertices.requires_grad = False
        th_scan_meshes.append(th_scan)

    # Optimize
    optimize_offsets(th_scan_meshes, smplx, init_smplx_meshes, 5, 10)
    print('Done')

    verts, _, _, _ = smplx()
    th_SMPLX_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]

    if save_path is not None:
        if not exists(save_path):
            os.makedirs(save_path)

        names = [split(s)[1] for s in scans]

        # Save meshes
        save_meshes(
            th_SMPLX_meshes,
            [join(save_path, n.replace('.obj', '_smpld.obj')) for n in names])
        save_meshes(th_scan_meshes, [join(save_path, n) for n in names])
        # Save params
        for p, b, t, d, n in zip(smplx.pose.cpu().detach().numpy(),
                                 smplx.betas.cpu().detach().numpy(),
                                 smplx.trans.cpu().detach().numpy(),
                                 smplx.offsets.cpu().detach().numpy(), names):
            smpl_dict = {'pose': p, 'betas': b, 'trans': t, 'offsets': d}
            pkl.dump(
                smpl_dict,
                open(join(save_path, n.replace('.obj', '_smpld.pkl')), 'wb'))

    return smplx.pose.cpu().detach().numpy(), smplx.betas.cpu().detach().numpy(), \
           smplx.trans.cpu().detach().numpy(), smplx.offsets.cpu().detach().numpy()
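
The 10-to-300 beta zero-padding above also appears in fit_SMPLD; a small helper sketch that factors it out:

import numpy as np

def pad_betas(b, target_dim=300):
    # Zero-pad a short betas vector to the model's full shape space.
    if len(b) >= target_dim:
        return np.asarray(b, dtype='float32')
    temp = np.zeros((target_dim,), dtype='float32')
    temp[:len(b)] = b
    return temp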
Example No. 27
def fit_SMPLX(scans,
              scan_labels,
              gender='male',
              save_path=None,
              scale_file=None,
              display=None,
              interpenetration=True):
    """
    :param save_path:
    :param scans: list of scan paths
    :param pose_files:
    :return:
    """
    search_tree = None
    pen_distance = None
    tri_filtering_module = None
    max_collisions = 128
    df_cone_height = 0.0001
    point2plane = False
    penalize_outside = True
    part_segm_fn = '/home/chen/IPNet_SMPLX/assets/smplx_parts_segm.pkl'
    ign_part_pairs = ["9,16", "9,17", "6,16", "6,17", "1,2", "12,22"]
    if interpenetration:
        from mesh_intersection.bvh_search_tree import BVH
        import mesh_intersection.loss as collisions_loss
        from mesh_intersection.filter_faces import FilterFaces
        search_tree = BVH(max_collisions=max_collisions)

        pen_distance = collisions_loss.DistanceFieldPenetrationLoss(
            sigma=df_cone_height,
            point2plane=point2plane,
            vectorized=True,
            penalize_outside=penalize_outside)
        if part_segm_fn:
            part_segm_fn = os.path.expandvars(part_segm_fn)
            with open(part_segm_fn, 'rb') as faces_parents_file:
                face_segm_data = pkl.load(faces_parents_file,
                                          encoding='latin1')
            faces_segm = face_segm_data['segm']
            faces_parents = face_segm_data['parents']
            tri_filtering_module = FilterFaces(
                faces_segm=faces_segm,
                faces_parents=faces_parents,
                ign_part_pairs=ign_part_pairs).cuda()

    # Get SMPLX faces
    # spx = SmplPaths(gender=gender)
    spx = SMPLX(model_path="/home/chen/SMPLX/models/smplx",
                batch_size=1,
                gender=gender)
    smplx_faces = spx.faces
    th_faces = torch.tensor(smplx_faces.astype('float32'),
                            dtype=torch.long).to(DEVICE)

    # Load SMPLX parts
    part_labels = pkl.load(
        open('/home/chen/IPNet_SMPLX/assets/smplx_parts_dense.pkl', 'rb'))
    labels = np.zeros((10475, ), dtype='int32')
    for n, k in enumerate(part_labels):
        labels[part_labels[k]] = n
    labels = torch.tensor(labels).unsqueeze(0).to(DEVICE)

    # Load scan parts
    scan_part_labels = []
    for sc_l in scan_labels:
        temp = torch.tensor(np.load(sc_l).astype('int32')).to(DEVICE)
        scan_part_labels.append(temp)

    # Batch size
    batch_sz = len(scans)

    # Set optimization hyper parameters
    iterations, pose_iterations, steps_per_iter, pose_steps_per_iter = 3, 2, 30, 30

    # prior = get_prior(gender=gender, precomputed=True)
    if gender == 'male':
        temp_model = pkl.load(open(
            '/home/chen/SMPLX/models/smplx/SMPLX_MALE.pkl', 'rb'),
                              encoding='latin1')
    elif gender == 'female':
        temp_model = pkl.load(open(
            '/home/chen/SMPLX/models/smplx/SMPLX_FEMALE.pkl', 'rb'),
                              encoding='latin1')
    else:
        raise ValueError("Unsupported gender '%s'; expected 'male' or 'female'" % gender)
    left_hand_mean = torch.tensor(temp_model['hands_meanl']).unsqueeze(0)
    right_hand_mean = torch.tensor(temp_model['hands_meanr']).unsqueeze(0)
    # pose_init = torch.zeros((batch_sz, 69))
    # TODO consider to add the prior for smplx
    # pose_init[:, 3:] = prior.mean
    # betas, pose, trans = torch.zeros((batch_sz, 300)), pose_init, torch.zeros((batch_sz, 3))
    betas, global_pose, body_pose, trans = torch.zeros(
        (batch_sz, 10)), torch.zeros((batch_sz, 3)), torch.zeros(
            (batch_sz, 63)), torch.zeros((batch_sz, 3))
    left_hand_pose, right_hand_pose, expression, jaw_pose = left_hand_mean, right_hand_mean, torch.zeros(
        (batch_sz, 10)), torch.zeros((batch_sz, 3))
    leye_pose, reye_pose = torch.zeros((batch_sz, 3)), torch.zeros(
        (batch_sz, 3))
    # Init SMPLX, pose with mean smplx pose, as in ch.registration
    smplx = th_batch_SMPLX(batch_sz,
                           betas,
                           global_pose,
                           body_pose,
                           left_hand_pose,
                           right_hand_pose,
                           trans,
                           expression,
                           jaw_pose,
                           leye_pose,
                           reye_pose,
                           faces=th_faces,
                           gender=gender).to(DEVICE)
    smplx_part_labels = torch.cat([labels] * batch_sz, axis=0)

    th_scan_meshes, centers = [], []
    for scan in scans:
        print('scan path ...', scan)
        temp = Mesh(filename=scan)
        th_scan = tm.from_tensors(
            torch.tensor(temp.v.astype('float32'),
                         requires_grad=False,
                         device=DEVICE),
            torch.tensor(temp.f.astype('int32'),
                         requires_grad=False,
                         device=DEVICE).long())
        th_scan_meshes.append(th_scan)

    if scale_file is not None:
        for n, sc in enumerate(scale_file):
            dat = np.load(sc, allow_pickle=True)
            th_scan_meshes[n].vertices += torch.tensor(dat[1]).to(DEVICE)
            th_scan_meshes[n].vertices *= torch.tensor(dat[0]).to(DEVICE)

    # Optimize pose first
    optimize_pose_only(th_scan_meshes,
                       smplx,
                       pose_iterations,
                       pose_steps_per_iter,
                       scan_part_labels,
                       smplx_part_labels,
                       search_tree,
                       pen_distance,
                       tri_filtering_module,
                       display=None if display is None else 0)

    # Optimize pose and shape
    optimize_pose_shape(th_scan_meshes,
                        smplx,
                        iterations,
                        steps_per_iter,
                        scan_part_labels,
                        smplx_part_labels,
                        search_tree,
                        pen_distance,
                        tri_filtering_module,
                        display=None if display is None else 0)

    # verts, _, _, _ = smplx()
    verts = smplx()
    th_smplx_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]

    if save_path is not None:
        if not exists(save_path):
            os.makedirs(save_path)

        names = [split(s)[1] for s in scans]

        # Save meshes
        save_meshes(
            th_smplx_meshes,
            [join(save_path, n.replace('.ply', '_smplx.obj')) for n in names])
        save_meshes(th_scan_meshes, [join(save_path, n) for n in names])

        # Save params
        for g, bp, lh, rh, e, j, le, re, b, t, n in zip(
                smplx.global_pose.cpu().detach().numpy(),
                smplx.body_pose.cpu().detach().numpy(),
                smplx.left_hand_pose.cpu().detach().numpy(),
                smplx.right_hand_pose.cpu().detach().numpy(),
                smplx.expression.cpu().detach().numpy(),
                smplx.jaw_pose.cpu().detach().numpy(),
                smplx.leye_pose.cpu().detach().numpy(),
                smplx.reye_pose.cpu().detach().numpy(),
                smplx.betas.cpu().detach().numpy(),
                smplx.trans.cpu().detach().numpy(), names):
            smplx_dict = {
                'global_pose': g,
                'body_pose': bp,
                'left_hand_pose': lh,
                'right_hand_pose': rh,
                'expression': e,
                'jaw_pose': j,
                'leye_pose': le,
                'reye_pose': re,
                'betas': b,
                'trans': t
            }
            pkl.dump(
                smplx_dict,
                open(join(save_path, n.replace('.ply', '_smplx.pkl')), 'wb'))

    # Return outside the save block so callers always receive the fitted params
    return (smplx.global_pose.cpu().detach().numpy(),
            smplx.body_pose.cpu().detach().numpy(),
            smplx.left_hand_pose.cpu().detach().numpy(),
            smplx.right_hand_pose.cpu().detach().numpy(),
            smplx.expression.cpu().detach().numpy(),
            smplx.jaw_pose.cpu().detach().numpy(),
            smplx.leye_pose.cpu().detach().numpy(),
            smplx.reye_pose.cpu().detach().numpy(),
            smplx.betas.cpu().detach().numpy(),
            smplx.trans.cpu().detach().numpy())
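
The dense part-label construction above, shown on a toy dict (the real mapping comes from smplx_parts_dense.pkl):

import numpy as np

part_labels = {'head': [0, 1], 'torso': [2, 3, 4]}  # toy stand-in for the pickle contents
labels = np.zeros((5,), dtype='int32')
for n, k in enumerate(part_labels):
    labels[part_labels[k]] = n
# labels -> [0, 0, 1, 1, 1]: one part index per vertex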
Example No. 28
def fit_SMPL(scans, scan_labels, gender='male', save_path=None, scale_file=None, display=None):
    """
    :param save_path:
    :param scans: list of scan paths
    :param pose_files:
    :return:
    """
    # Get SMPL faces
    sp = SmplPaths(gender=gender)
    smpl_faces = sp.get_faces()
    th_faces = torch.tensor(smpl_faces.astype('float32'), dtype=torch.long).to(DEVICE)

    # Load SMPL parts
    part_labels = pkl.load(open('/BS/bharat-3/work/IPNet/assets/smpl_parts_dense.pkl', 'rb'))
    labels = np.zeros((6890,), dtype='int32')
    for n, k in enumerate(part_labels):
        labels[part_labels[k]] = n
    labels = torch.tensor(labels).unsqueeze(0).to(DEVICE)

    # Load scan parts
    scan_part_labels = []
    for sc_l in scan_labels:
        temp = torch.tensor(np.load(sc_l).astype('int32')).to(DEVICE)
        scan_part_labels.append(temp)

    # Batch size
    batch_sz = len(scans)

    # Set optimization hyper parameters
    iterations, pose_iterations, steps_per_iter, pose_steps_per_iter = 3, 2, 30, 30

    prior = get_prior(gender=gender, precomputed=True)
    pose_init = torch.zeros((batch_sz, 72))
    pose_init[:, 3:] = prior.mean
    betas, pose, trans = torch.zeros((batch_sz, 300)), pose_init, torch.zeros((batch_sz, 3))

    # Init SMPL, pose with mean smpl pose, as in ch.registration
    smpl = th_batch_SMPL(batch_sz, betas, pose, trans, faces=th_faces).to(DEVICE)
    smpl_part_labels = torch.cat([labels] * batch_sz, axis=0)

    th_scan_meshes, centers = [], []
    for scan in scans:
        print('scan path ...', scan)
        temp = Mesh(filename=scan)
        th_scan = tm.from_tensors(torch.tensor(temp.v.astype('float32'), requires_grad=False, device=DEVICE),
                                  torch.tensor(temp.f.astype('int32'), requires_grad=False, device=DEVICE).long())
        th_scan_meshes.append(th_scan)

    if scale_file is not None:
        for n, sc in enumerate(scale_file):
            dat = np.load(sc, allow_pickle=True)
            th_scan_meshes[n].vertices += torch.tensor(dat[1]).to(DEVICE)
            th_scan_meshes[n].vertices *= torch.tensor(dat[0]).to(DEVICE)

    # Optimize pose first
    optimize_pose_only(th_scan_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels, smpl_part_labels,
                       display=None if display is None else 0)

    # Optimize pose and shape
    optimize_pose_shape(th_scan_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                        display=None if display is None else 0)

    verts, _, _, _ = smpl()
    th_smpl_meshes = [tm.from_tensors(vertices=v, faces=smpl.faces) for v in verts]

    if save_path is not None:
        if not exists(save_path):
            os.makedirs(save_path)

        names = [split(s)[1] for s in scans]

        # Save meshes
        save_meshes(th_smpl_meshes, [join(save_path, n.replace('.ply', '_smpl.obj')) for n in names])
        save_meshes(th_scan_meshes, [join(save_path, n) for n in names])

        # Save params
        for p, b, t, n in zip(smpl.pose.cpu().detach().numpy(), smpl.betas.cpu().detach().numpy(),
                              smpl.trans.cpu().detach().numpy(), names):
            smpl_dict = {'pose': p, 'betas': b, 'trans': t}
            pkl.dump(smpl_dict, open(join(save_path, n.replace('.ply', '_smpl.pkl')), 'wb'))

    # Return outside the save block so callers always receive the fitted params
    return smpl.pose.cpu().detach().numpy(), smpl.betas.cpu().detach().numpy(), smpl.trans.cpu().detach().numpy()
Example No. 29
def one(tri_mesh):
    vertices = torch.tensor(tri_mesh.vertices, dtype=torch.float)
    faces = torch.tensor(tri_mesh.faces, dtype=torch.long)
    return TriangleMesh.from_tensors(vertices, faces)
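
Usage sketch: round-trip a trimesh primitive through the converter above.

import trimesh

box = trimesh.creation.box()   # unit cube with 8 vertices, 12 faces
k_mesh = one(box)
print(k_mesh.vertices.shape)   # torch.Size([8, 3])
print(k_mesh.faces.dtype)      # torch.int64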