import numpy as np
import torch

import point_cloud_utils as pcu

import utils


def plot_reconstruction(x, patch_uvs, patch_tx, patch_models, scale=1.0):
    """
    Plot a dense, upsampled point cloud.

    :param x: A [n, 3] tensor containing the input point cloud
    :param patch_uvs: A list of tensors, each of shape [n_i, 2], of UV positions for the given patch
    :param patch_tx: A list of tuples (t_i, s_i, r_i) of transformations (t_i is a translation, s_i is a scaling,
                     and r_i is a rotation matrix) which map the points in a neighborhood to a centered and
                     whitened point set
    :param patch_models: A list of neural networks representing the lifting function for each chart in the atlas
    :param scale: Scale parameter to sample uv values from a smaller or larger subset of [0, 1]^2
                  (i.e. scale*[0, 1]^2)
    :return: None. The reconstructed charts and the input point cloud are displayed in a mayavi window.
    """
    from mayavi import mlab

    with torch.no_grad():
        for i in range(len(patch_models)):
            n = 128
            translate_i, scale_i, rotate_i = patch_tx[i]

            # Sample a dense n x n grid of UV values over this chart and lift it to 3D
            uv_i = utils.meshgrid_from_lloyd_ts(patch_uvs[i].cpu().numpy(), n, scale=scale).astype(np.float32)
            uv_i = torch.from_numpy(uv_i).to(patch_uvs[0])
            y_i = patch_models[i](uv_i)

            # Undo the per-patch whitening to get vertices in world coordinates
            mesh_v = ((y_i.squeeze() @ rotate_i.transpose(0, 1)) / scale_i - translate_i).cpu().numpy()
            mesh_f = utils.meshgrid_face_indices(n)
            mlab.triangular_mesh(mesh_v[:, 0], mesh_v[:, 1], mesh_v[:, 2], mesh_f, color=(0.2, 0.2, 0.8))

        # Overlay the input point cloud (converted to NumPy in case x lives on the GPU)
        x = x.squeeze().cpu().numpy()
        mlab.points3d(x[:, 0], x[:, 1], x[:, 2], scale_factor=0.001)
        mlab.show()
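# The expression ((y @ r^T) / s - t) above (and in the functions below) undoes the per-patch
# whitening described in the docstring. Assuming the forward whitening of a neighborhood has the
# form p -> ((p + t) * s) @ r (an assumption; only the inverse appears in this file), a small
# self-contained round-trip check could look like the sketch below. The helper name and tolerance
# are hypothetical, not part of the original code.
def _check_unwhitening_roundtrip():
    p = torch.randn(100, 3)                    # points in world coordinates
    t = torch.randn(3)                         # translation
    s = torch.rand(1) + 0.5                    # positive scale
    r, _ = torch.linalg.qr(torch.randn(3, 3))  # orthonormal matrix standing in for a rotation
    y = ((p + t) * s) @ r                      # assumed forward whitening
    p_rec = (y @ r.transpose(0, 1)) / s - t    # inverse transform, as written in the code above
    assert torch.allclose(p, p_rec, atol=1e-4)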
def upsample_surface(patch_uvs, patch_tx, patch_models, devices, scale=1.0, num_samples=8, normal_samples=64,
                     compute_normals=True):
    vertices = []
    normals = []
    with torch.no_grad():
        for i in range(len(patch_models)):
            if (i + 1) % 10 == 0:
                print("Upsampling %d/%d" % (i + 1, len(patch_models)))

            device = devices[i % len(devices)]
            n = num_samples
            translate_i, scale_i, rotate_i = (patch_tx[i][j].to(device) for j in range(len(patch_tx[i])))

            # Sample an n x n grid of UV values over this chart and lift it to 3D
            uv_i = utils.meshgrid_from_lloyd_ts(patch_uvs[i].cpu().numpy(), n, scale=scale).astype(np.float32)
            uv_i = torch.from_numpy(uv_i).to(patch_uvs[i])
            y_i = patch_models[i](uv_i)

            # Undo the per-patch whitening to get vertices in world coordinates
            mesh_v = ((y_i.squeeze() @ rotate_i.transpose(0, 1)) / scale_i - translate_i).cpu().numpy()

            if compute_normals:
                mesh_f = utils.meshgrid_face_indices(n)
                mesh_n = pcu.per_vertex_normals(mesh_v, mesh_f)
                normals.append(mesh_n)
            vertices.append(mesh_v)

    vertices = np.concatenate(vertices, axis=0).astype(np.float32)

    if compute_normals:
        normals = np.concatenate(normals, axis=0).astype(np.float32)
    else:
        print("Estimating normals...")
        normals = pcu.estimate_normals(vertices, k=normal_samples)

    return vertices, normals
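# A minimal usage sketch for upsample_surface, assuming `patch_uvs`, `patch_tx` and `patch_models`
# come from a fitted atlas and that a single CUDA device is available. The device list and output
# file name below are placeholders, not part of the original code:
#
#     v, n = upsample_surface(patch_uvs, patch_tx, patch_models,
#                             devices=[torch.device("cuda:0")],
#                             scale=1.0, num_samples=8, compute_normals=True)
#     np.savez("upsampled_point_cloud.npz", v=v, n=n)  # dense points and per-point normals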
def plot_reconstruction(patch_uvs, patch_tx, patch_models, scale=1.0):
    from mayavi import mlab

    with torch.no_grad():
        for i in range(len(patch_models)):
            n = 128
            translate_i, scale_i, rotate_i = patch_tx[i]
            uv_i = utils.meshgrid_from_lloyd_ts(patch_uvs[i].cpu().numpy(), n, scale=scale).astype(np.float32)
            uv_i = torch.from_numpy(uv_i).to(patch_uvs[0])
            y_i = patch_models[i](uv_i)
            mesh_v = ((y_i.squeeze() @ rotate_i.transpose(0, 1)) / scale_i - translate_i).cpu().numpy()
            mesh_f = utils.meshgrid_face_indices(n)
            mlab.triangular_mesh(mesh_v[:, 0], mesh_v[:, 1], mesh_v[:, 2], mesh_f, color=(0.2, 0.2, 0.8))

        mlab.show()
def export_reconstruction(patch_uvs, patch_tx, patch_models, scale=1.0):
    with torch.no_grad():
        for i in range(len(patch_models)):
            n = 128
            translate_i, scale_i, rotate_i = patch_tx[i]
            uv_i = utils.meshgrid_from_lloyd_ts(patch_uvs[i].cpu().numpy(), n, scale=scale).astype(np.float32)
            uv_i = torch.from_numpy(uv_i).to(patch_uvs[0])
            y_i = patch_models[i](uv_i)
            mesh_v = ((y_i.squeeze() @ rotate_i.transpose(0, 1)) / scale_i - translate_i).cpu().numpy()
            mesh_f = utils.meshgrid_face_indices(n)

            print("Mesh vertices shape:", mesh_v.shape)
            print("Mesh faces shape:", mesh_f.shape)
            output_mesh(mesh_v, mesh_f, 'output%d.obj' % i)
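# `output_mesh` is not defined or imported here. A minimal sketch of the assumed helper, a plain
# Wavefront OBJ writer with 1-based face indices mirroring the inline writer in the last
# plot_reconstruction below, could look like this (the signature is an assumption):
def output_mesh(mesh_v, mesh_f, filename):
    with open(filename, 'w') as out:
        for iv in range(mesh_v.shape[0]):
            out.write('v %f %f %f\n' % (mesh_v[iv, 0], mesh_v[iv, 1], mesh_v[iv, 2]))
        for fi in range(mesh_f.shape[0]):
            # OBJ faces index vertices starting at 1
            out.write('f %d %d %d\n' % (mesh_f[fi, 0] + 1, mesh_f[fi, 1] + 1, mesh_f[fi, 2] + 1))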
def plot_reconstruction(uv, x, transform, model, pad=1.0):
    from mayavi import mlab

    with torch.no_grad():
        n = 128
        translate, scale, rotate = transform
        uv_dense = utils.meshgrid_from_lloyd_ts(uv.cpu().numpy(), n, scale=pad).astype(np.float32)
        uv_dense = torch.from_numpy(uv_dense).to(uv)
        y_dense = model(uv_dense)

        # x = ((x.squeeze() @ rotate.transpose(0, 1)) / scale - translate).cpu().numpy()
        # mesh_v = ((y_dense.squeeze() @ rotate.transpose(0, 1)) / scale - translate).cpu().numpy()
        x = x.squeeze().cpu().numpy()
        mesh_v = y_dense.squeeze().cpu().numpy()
        mesh_f = utils.meshgrid_face_indices(n)
        mlab.points3d(x[:, 0], x[:, 1], x[:, 2], scale_factor=0.01)
        mlab.triangular_mesh(mesh_v[:, 0], mesh_v[:, 1], mesh_v[:, 2], mesh_f, color=(0.2, 0.2, 0.8))
        mlab.show()
def plot_reconstruction(patch_uvs, patch_tx, patch_models, scale=1.0):
    from mayavi import mlab

    with open("output.obj", 'w') as out:
        with torch.no_grad():
            start_ind = 1
            for i in range(len(patch_models)):
                n = 128
                translate_i, scale_i, rotate_i = patch_tx[i]
                uv_i = utils.meshgrid_from_lloyd_ts(patch_uvs[i].cpu().numpy(), n, scale=scale).astype(np.float32)
                uv_i = torch.from_numpy(uv_i).to(patch_uvs[0])
                y_i = patch_models[i](uv_i)
                mesh_v = ((y_i.squeeze() @ rotate_i.transpose(0, 1)) / scale_i - translate_i).cpu().numpy()
                mesh_f = utils.meshgrid_face_indices(n)
                mlab.triangular_mesh(mesh_v[:, 0], mesh_v[:, 1], mesh_v[:, 2], mesh_f, color=(0.2, 0.2, 0.8))

                # Append this patch to the combined OBJ file (OBJ vertex indices are 1-based,
                # hence the running start_ind offset)
                for iv in range(mesh_v.shape[0]):
                    out.write('v %f %f %f\n' % (mesh_v[iv, 0], mesh_v[iv, 1], mesh_v[iv, 2]))
                for fi in range(mesh_f.shape[0]):
                    out.write('f %d %d %d\n' % (mesh_f[fi, 0] + start_ind, mesh_f[fi, 1] + start_ind,
                                                mesh_f[fi, 2] + start_ind))
                start_ind += mesh_v.shape[0]

    mlab.show()