def test_load_obj(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    assert mesh.vertices.shape[0] > 0
    assert mesh.vertices.shape[1] == 3
    assert mesh.faces.shape[0] > 0
    assert mesh.faces.shape[1] == 3

    mesh = TriangleMesh.from_obj('tests/model.obj',
                                 with_vt=True,
                                 texture_res=4)
    if device == 'cuda':
        mesh.cuda()
    assert mesh.textures.shape[0] > 0

    mesh = TriangleMesh.from_obj('tests/model.obj',
                                 with_vt=True,
                                 texture_res=4,
                                 enable_adjacency=True)
    assert mesh.vv.shape[0] > 0
    assert mesh.edges.shape[0] > 0
    assert mesh.vv_count.shape[0] > 0
    assert mesh.ve.shape[0] > 0
    assert mesh.ve_count.shape[0] > 0
    assert mesh.ff.shape[0] > 0
    assert mesh.ff_count.shape[0] > 0
    assert mesh.ef.shape[0] > 0
    assert mesh.ef_count.shape[0] > 0
    assert mesh.ee.shape[0] > 0
    assert mesh.ee_count.shape[0] > 0
    if device == 'cuda':
        mesh.cuda()
Example #2
def test_laplacian_loss(device='cpu'):
    mesh1 = TriangleMesh.from_obj('tests/model.obj')
    mesh2 = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh1.cuda()
        mesh2.cuda()
    mesh2.vertices = mesh2.vertices * 1.5
    assert kal.metrics.mesh.laplacian_loss(mesh1, mesh2) > 0
    assert kal.metrics.mesh.laplacian_loss(mesh1, mesh1) == 0
Example #3
def test_chamfer_distance(device='cpu'):
    mesh1 = TriangleMesh.from_obj('tests/model.obj')
    mesh2 = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh1.cuda()
        mesh2.cuda()

    mesh2.vertices = mesh2.vertices * 1.5
    distance = kal.metrics.mesh.chamfer_distance(mesh1, mesh2, num_points=100)
    distance = kal.metrics.mesh.chamfer_distance(mesh1, mesh2, num_points=200)
    assert kal.metrics.mesh.chamfer_distance(mesh1, mesh1,
                                             num_points=500) <= 0.1
def test_load_and_save_Tensors(device='cpu'):
    mesh1 = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh1.cuda()
    mesh1.save_tensors('copy.npz')
    assert os.path.isfile('copy.npz')
    mesh2 = TriangleMesh.load_tensors('copy.npz')
    if device == 'cuda':
        mesh2.cuda()
    assert (torch.abs(mesh1.vertices - mesh2.vertices)).sum() == 0
    assert (torch.abs(mesh1.faces - mesh2.faces)).sum() == 0
    os.remove("copy.npz")
def test_compute_laplacian(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    lap = mesh.compute_laplacian()
    # assert the Laplacian of the (roughly spherical) test mesh is small everywhere
    assert ((lap**2).sum(dim=1) > .1).sum() == 0
def ball_pivot_surface_reconstruction(points: torch.Tensor) -> TriangleMesh:
    points = points.detach().cpu() if points.requires_grad else points.cpu()

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points.numpy())
    pcd.estimate_normals()

    distances = pcd.compute_nearest_neighbor_distance()
    avg_dist = np.mean(distances)
    radius = 1 * avg_dist

    recon_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        pcd,
        o3d.utility.DoubleVector([radius, radius * 2, radius * 4, radius * 8]))

    vertices = torch.tensor(np.asarray(recon_mesh.vertices), dtype=torch.float)
    faces = torch.tensor(np.asarray(recon_mesh.triangles), dtype=torch.long)

    # Append reversed-winding copies of every face so the reconstruction is double-sided.
    faces_ex = faces.clone()
    faces_ex[..., 1] = faces[..., 2]
    faces_ex[..., 2] = faces[..., 1]
    faces = torch.cat([faces, faces_ex], 0)

    recon_mesh = TriangleMesh.from_tensors(vertices, faces)

    return recon_mesh
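A minimal usage sketch (not part of the original listing): sample a point cloud from the same test mesh used in the other examples and feed it to the helper above; the 2048-point sample size and the `_demo_ball_pivot` name are illustrative assumptions.

def _demo_ball_pivot():
    # Hypothetical demo; assumes 'tests/model.obj' and the imports used above
    # (torch, numpy as np, open3d as o3d, kaolin's TriangleMesh).
    src_mesh = TriangleMesh.from_obj('tests/model.obj')
    points, _ = src_mesh.sample(2048)  # (2048, 3) points on the mesh surface
    recon = ball_pivot_surface_reconstruction(points)
    assert recon.vertices.shape[1] == 3 and recon.faces.shape[1] == 3
    return recon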
Example #7
def generate_trimesh(npoints, device='cpu'):
    verts = torch.ones([npoints, 3], dtype=torch.float32)
    verts = verts.cumsum(dim=0) / npoints
    faces = torch.arange(npoints * 3, dtype=torch.int32).view(npoints, 3)
    m = M.from_tensors(verts, faces)
    m.to(device)
    return m
Example #8
def test_check_sign_fast(device='cuda'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    mesh.to(device)
    points = torch.rand(1000, 3).to(device) - .5
    signs = kal.rep.SDF.check_sign_fast(mesh, points)
    assert (signs == True).float().sum() > 0
    assert (signs == False).sum() > 0
Example #9
def generate_trimesh(npoints, device='cpu'):
    verts = torch.ones([npoints, 3], dtype=torch.float32, requires_grad=True)
    verts = verts.cumsum(dim=0) / npoints
    faces = torch.arange(npoints, dtype=torch.int64).repeat(3).view(npoints, 3)
    m = M.from_tensors(verts, faces)
    m.to(device)
    return m
Example #10
def forward_step(th_scan_meshes, smpl, th_pose_3d=None):
    """
    Performs a forward step, given smpl and scan meshes.
    Then computes the losses.
    """
    # Get pose prior
    prior = get_prior(smpl.gender)

    # forward
    verts, _, _, _ = smpl()
    th_smpl_meshes = [
        tm.from_tensors(vertices=v, faces=smpl.faces) for v in verts
    ]

    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(
        [sm.vertices for sm in th_scan_meshes], th_smpl_meshes)
    loss['m2s'] = batch_point_to_surface(
        [sm.vertices for sm in th_smpl_meshes], th_scan_meshes)
    loss['betas'] = torch.mean(smpl.betas**2, axis=1)
    loss['pose_pr'] = prior(smpl.pose)
    if th_pose_3d is not None:
        loss['pose_obj'] = batch_get_pose_obj(th_pose_3d, smpl)
    return loss
Example #11
def forward_step(th_scan_meshes, smplx, init_smplx_meshes, search_tree,
                 pen_distance, tri_filtering_module):
    """
    Performs a forward step, given smplx and scan meshes.
    Then computes the losses.
    """

    # forward
    # verts, _, _, _ = smplx()
    verts = smplx()
    th_SMPLX_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]
    p3d_meshes = Meshes(verts=verts, faces=smplx.faces.expand(1, -1, -1))
    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(
        [sm.vertices for sm in th_scan_meshes], th_SMPLX_meshes)
    loss['m2s'] = batch_point_to_surface(
        [sm.vertices for sm in th_SMPLX_meshes], th_scan_meshes)
    loss['lap'] = torch.stack([
        laplacian_loss(sc, sm)
        for sc, sm in zip(init_smplx_meshes, th_SMPLX_meshes)
    ])
    loss['offsets'] = torch.mean(torch.mean(smplx.offsets**2, axis=1), axis=1)
    # loss['normal'] = mesh_normal_consistency(p3d_meshes).unsqueeze(0)
    # loss['interpenetration'] = interpenetration_loss(verts, smplx.faces, search_tree, pen_distance, tri_filtering_module, 1.0)
    return loss
Example #12
def forward_step(th_scan_meshes, smpl, init_smpl_meshes):
    """
    Performs a forward step, given smpl and scan meshes.
    Then computes the losses.
    """

    # forward

    verts, _, _, _ = smpl()
    th_smpl_meshes = [
        tm.from_tensors(vertices=v, faces=smpl.faces) for v in verts
    ]
    # p3d_meshes = Meshes(verts=verts, faces=smpl.faces.expand(1,-1,-1))
    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(
        [sm.vertices for sm in th_scan_meshes], th_smpl_meshes)
    loss['m2s'] = batch_point_to_surface(
        [sm.vertices for sm in th_smpl_meshes], th_scan_meshes)
    loss['lap'] = torch.stack([
        laplacian_loss(sc, sm)
        for sc, sm in zip(init_smpl_meshes, th_smpl_meshes)
    ])
    # lap1 = init_smpl_meshes[0].compute_laplacian()
    # lap2 = th_smpl_meshes[0].compute_laplacian()
    # loss['lap'] = (torch.sum((lap1 - lap2) ** 2, 1) * lap_weight).sum().unsqueeze(0)
    loss['offsets'] = torch.mean(torch.mean(smpl.offsets**2, axis=1), axis=1)
    # loss['edge'] = (edge_length(th_smpl_meshes[0]) - edge_length(init_smpl_meshes[0])).unsqueeze(0)
    # loss['normal'] = mesh_normal_consistency(p3d_meshes).unsqueeze(0)
    # loss['pen'] = interpenetration_loss(verts, smpl.faces.expand(1,-1,-1), search_tree, pen_distance, tri_filtering_module)
    return loss
Example #13
    def __init__(self, mesh_path, image_path, args):
        super(Model, self).__init__()

        self.args = args

        ###########################
        # Load mesh
        ###########################

        mesh = TriangleMesh.from_obj(mesh_path)
        mesh.cuda()
        # Normalize into unit cube, and expand such that batch size = 1
        vertices = normalize_vertices(mesh.vertices).unsqueeze(0)
        faces = mesh.faces.unsqueeze(0)

        self.register_buffer('vertices', vertices)
        self.register_buffer('faces', faces)

        ###########################
        # Initialize texture (NMR format)
        ###########################

        textures = torch.ones(1,
                              self.faces.shape[1],
                              self.args.texture_size,
                              self.args.texture_size,
                              self.args.texture_size,
                              3,
                              dtype=torch.float32,
                              device='cuda')
        self.register_buffer('textures', textures)

        ###########################
        # Load target image
        ###########################

        image_ref = torch.from_numpy(
            (imread(image_path)[:, :, :3].max(-1) > 0.1).astype(
                np.float32))[None, ::]
        self.register_buffer('image_ref', image_ref)

        from skimage.io import imsave
        imsave(image_path + '.test.png',
               image_ref.numpy().transpose((1, 2, 0)))

        ###########################
        # Initialize camera position
        ###########################

        self.camera_position = nn.Parameter(
            torch.from_numpy(np.array(INITIAL_CAMERA_POS, dtype=np.float32)))

        ###########################
        # Setup renderer
        ###########################

        renderer = Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer
Example #14
def _load_points(obj_path):
    mesh = TriangleMesh.from_obj(obj_path)
    mesh.vertices -= torch.mean(mesh.vertices, 0)
    mesh.vertices /= 128
    mesh.vertices = mesh.vertices[:, [0, 2, 1]]
    mesh.vertices[:, 0] *= -1
    mesh.vertices /= 1.7
    return mesh.sample(2048)[0]
Example #15
def test_pointcloud_to_voxelgrid(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    pts, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 1000)

    voxels = kal.conversions.pointcloud_to_voxelgrid(pts, 32, 0.1)
    assert (voxels.shape == (32, 32, 32))
def load_cuboid():
    obj_path = os.path.dirname(__file__) + '/objects/sphere.obj'
    mesh = TriangleMesh.from_obj(obj_path)
    mesh.vertices -= torch.mean(mesh.vertices, 0)  # zero center
    mesh.vertices /= torch.mean(torch.norm(mesh.vertices,
                                           dim=1))  # normalize length
    mesh.to(DEVICE)
    return mesh
def test_laplacian_smoothing(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    v1 = mesh.vertices.clone()
    mesh.laplacian_smoothing(iterations=3)
    v2 = mesh.vertices.clone()
    assert (torch.abs(v1 - v2)).sum() > 0
def test_sample_mesh(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    points, choices = mesh.sample(100)
    assert (set(points.shape) == set([100, 3]))
    points, choices = mesh.sample(10000)
    assert (set(points.shape) == set([10000, 3]))
Example #19
def test_to_trianglemesh_to_pointcloud(device):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    points, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 10)
    assert (set(points.shape) == set([10, 3]))
    points, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 10000)
    assert (set(points.shape) == set([10000, 3]))
def test_from_tensors(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj',
                                 with_vt=True,
                                 texture_res=4)
    if device == 'cuda':
        mesh.cuda()

    verts = mesh.vertices.clone()
    faces = mesh.faces.clone()
    uvs = mesh.uvs.clone()
    face_textures = mesh.face_textures.clone()
    textures = mesh.textures.clone()

    mesh = TriangleMesh.from_tensors(verts,
                                     faces,
                                     uvs=uvs,
                                     face_textures=face_textures,
                                     textures=textures)
Example #21
def test_trianglemesh_to_sdf(device):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    print(mesh.device)
    sdf = kal.conversions.trianglemesh_to_sdf(mesh)
    distances = sdf(torch.rand(100, 3).to(device) - .5)
    assert (set(distances.shape) == set([100]))
    assert ((distances > 0).sum()) > 0
    assert ((distances < 0).sum()) > 0
Example #22
def test_edge_length(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    length1 = kal.metrics.mesh.edge_length(mesh)
    mesh.vertices = mesh.vertices * 2
    length2 = kal.metrics.mesh.edge_length(mesh)
    assert (length1 < length2)
    mesh.vertices = mesh.vertices * 0
    assert kal.metrics.mesh.edge_length(mesh) == 0
def test_adj_computations(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    adj_full = mesh.compute_adjacency_matrix_full()
    adj_sparse = mesh.compute_adjacency_matrix_sparse().coalesce()

    assert adj_full.shape[0] == mesh.vertices.shape[0]
    assert ((adj_full - adj_sparse.to_dense()) != 0).sum() == 0
Example #24
def main():
    args = parse_arguments()

    ###########################
    # Load mesh
    ###########################

    mesh = TriangleMesh.from_obj(args.mesh)
    vertices = mesh.vertices
    faces = mesh.faces.int()

    # Expand such that batch size = 1

    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()

    ###########################
    # Normalize mesh position
    ###########################

    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = (vertices - vertices_middle) * MESH_SIZE

    ###########################
    # Generate vertex color
    ###########################

    vert_min = torch.min(vertices, dim=1, keepdim=True)[0]
    vert_max = torch.max(vertices, dim=1, keepdim=True)[0]
    colors = (vertices - vert_min) / (vert_max - vert_min)

    ###########################
    # Render
    ###########################

    renderer = Renderer(HEIGHT, WIDTH, mode='VertexColor')

    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing')

    os.makedirs(args.output_path, exist_ok=True)
    writer = imageio.get_writer(os.path.join(args.output_path, 'example.gif'), mode='I')
    for azimuth in loop:
        renderer.set_look_at_parameters([90 - azimuth],
                                        [CAMERA_ELEVATION],
                                        [CAMERA_DISTANCE])

        predictions, _, _ = renderer(points=[vertices, faces[0].long()], colors=[colors])
        image = predictions.detach().cpu().numpy()[0]
        writer.append_data((image * 255).astype(np.uint8))

    writer.close()
Example #25
def test_trianglemesh_to_voxelgrid(device):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    voxels = kal.conversions.trianglemesh_to_voxelgrid(mesh,
                                                       32,
                                                       normalize='unit')
    assert (set(voxels.shape) == set([32, 32, 32]))
    voxels = kal.conversions.trianglemesh_to_voxelgrid(mesh,
                                                       64,
                                                       normalize='unit')
    assert (set(voxels.shape) == set([64, 64, 64]))
Example #26
def main():
    filename_input = os.path.join(data_dir, 'banana.obj')
    filename_output = os.path.join(output_directory, 'example1.gif')

    ###########################
    # camera settings
    ###########################
    camera_distance = 2
    elevation = 30

    ###########################
    # load object
    ###########################
    mesh = TriangleMesh.from_obj(filename_input)
    vertices = mesh.vertices
    faces = mesh.faces.int()
    face_textures = faces.clone()

    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    face_textures = face_textures[None, :, :].cuda()

    ###########################
    # normalize verts
    ###########################
    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = vertices - vertices_middle

    coef = 5
    vertices = vertices * coef

    ###########################
    # Soft Rasterizer
    ###########################
    textures = torch.ones(1, faces.shape[1], 2, 3, dtype=torch.float32).cuda()
    mesh = sr.Mesh(vertices, faces, textures)
    renderer = sr.SoftRenderer(camera_mode='look_at')
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing SR')
    writer = imageio.get_writer(os.path.join(output_directory_sr,
                                             'rotation.gif'),
                                mode='I')
    for azimuth in loop:
        mesh.reset_()
        renderer.transform.set_eyes_from_angles(camera_distance, elevation,
                                                azimuth)
        images = renderer.render_mesh(mesh)
        image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))
        writer.append_data((255 * image).astype(np.uint8))
    writer.close()
Example #27
def split_meshes(meshes, features, index, angle=70):
    # compute faces to split
    faces_to_split = compute_splitting_faces(meshes,
                                             index,
                                             angle,
                                             show=(index == 1))
    # split mesh with selected faces
    new_verts, new_faces, new_face_archive, new_face_list, new_features = split_info(
        meshes, faces_to_split, features, index)
    new_mesh = TriangleMesh.from_tensors(new_verts, new_faces)
    new_mesh_i = TriangleMesh.from_tensors(new_verts, new_faces)
    # compute new adj matrix
    new_adj = new_mesh.compute_adjacency_matrix_full().clone()
    new_adj = normalize_adj(new_adj)
    # update the meshes dictionary
    meshes['init'].append(new_mesh)
    meshes['update'].append(new_mesh_i)
    meshes['adjs'].append(new_adj)
    meshes['face_lists'].append(new_face_list)
    meshes['face_archive'].append(new_face_archive)

    return new_features
Example #28
def main():
    filename_input = os.path.join(data_dir, 'banana.obj')
    filename_output = os.path.join(output_directory, 'example1.gif')

    ###########################
    # camera settings
    ###########################
    camera_distance = 2
    elevation = 30

    ###########################
    # load object
    ###########################
    mesh = TriangleMesh.from_obj(filename_input)
    vertices = mesh.vertices
    faces = mesh.faces.int()
    face_textures = faces.clone()

    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    face_textures = face_textures[None, :, :].cuda()

    ###########################
    # normalize verts
    ###########################
    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = vertices - vertices_middle

    coef = 5
    vertices = vertices * coef

    ###########################
    # DIB-Renderer
    ###########################
    renderer = Dib_Renderer(256, 256, mode='VertexColor')
    textures = torch.ones(1, vertices.shape[1], 3).cuda()
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing Dib_Renderer VertexColor')
    writer = imageio.get_writer(os.path.join(output_directory_dib,
                                             'rotation_VertexColor.gif'),
                                mode='I')
    for azimuth in loop:
        renderer.set_look_at_parameters([90 - azimuth], [elevation],
                                        [camera_distance])
        predictions, _, _ = renderer.forward(
            points=[vertices, faces[0].long()], colors=[textures])
        image = predictions.detach().cpu().numpy()[0]
        writer.append_data((image * 255).astype(np.uint8))
    writer.close()
Example #29
def forward_step(th_scan_meshes,
                 smplx,
                 scan_part_labels,
                 smplx_part_labels,
                 search_tree=None,
                 pen_distance=None,
                 tri_filtering_module=None):
    """
    Performs a forward step, given smplx and scan meshes.
    Then computes the losses.
    """
    # Get pose prior
    prior = get_prior(smplx.gender, precomputed=True)

    # forward
    # verts, _, _, _ = smplx()
    verts = smplx()
    th_smplx_meshes = [
        tm.from_tensors(vertices=v, faces=smplx.faces) for v in verts
    ]

    scan_verts = [sm.vertices for sm in th_scan_meshes]
    smplx_verts = [sm.vertices for sm in th_smplx_meshes]

    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface(scan_verts, th_smplx_meshes)
    loss['m2s'] = batch_point_to_surface(smplx_verts, th_scan_meshes)
    loss['betas'] = torch.mean(smplx.betas**2, axis=1)
    # loss['pose_pr'] = prior(smplx.pose)
    loss['interpenetration'] = interpenetration_loss(verts, smplx.faces,
                                                     search_tree, pen_distance,
                                                     tri_filtering_module, 1.0)
    loss['part'] = []
    for n, (sc_v, sc_l) in enumerate(zip(scan_verts, scan_part_labels)):
        tot = 0
        for i in range(NUM_PARTS):  # we currently use 14 parts
            if i not in sc_l:
                continue
            ind = torch.where(sc_l == i)[0]
            sc_part_points = sc_v[ind].unsqueeze(0)
            sm_part_points = smplx_verts[n][torch.where(
                smplx_part_labels[n] == i)[0]].unsqueeze(0)
            dist = chamfer_distance(sc_part_points,
                                    sm_part_points,
                                    w1=1.,
                                    w2=1.)
            tot += dist
        loss['part'].append(tot / NUM_PARTS)
    loss['part'] = torch.stack(loss['part'])
    return loss
Example #30
def test_point_to_surface(device='cpu'):
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    mesh = TriangleMesh.from_obj('tests/model.obj')
    points = torch.rand(500, 3) - .5
    if device == 'cuda':
        mesh.cuda()
        points = points.cuda()

    distance = kal.metrics.mesh.point_to_surface(points, mesh)

    assert (distance > 1).sum() == 0
    assert (distance <= 0).sum() == 0
    assert (distance.sum() <= .2)