Example #1
    def test_ball_query_output_simple(self):
        device = get_random_cuda_device()
        N, P1, P2, K = 5, 8, 16, 4
        sphere = ico_sphere(level=2, device=device).extend(N)
        points_1 = sample_points_from_meshes(sphere, P1)
        points_2 = sample_points_from_meshes(sphere, P2) * 5.0
        radius = 6.0

        naive_out = self._ball_query_naive(
            points_1, points_2, lengths1=None, lengths2=None, K=K, radius=radius
        )
        cuda_out = ball_query(points_1, points_2, K=K, radius=radius)

        # All points should have K sampled neighbors as the radius is large
        # Zero is a valid index but can only be present once (i.e. no zero padding)
        naive_out_zeros = (naive_out.idx == 0).sum(dim=-1).max()
        cuda_out_zeros = (cuda_out.idx == 0).sum(dim=-1).max()
        self.assertTrue(naive_out_zeros == 0 or naive_out_zeros == 1)
        self.assertTrue(cuda_out_zeros == 0 or cuda_out_zeros == 1)

        # All points should now have zero sample neighbors as radius is small
        radius = 0.5
        naive_out = self._ball_query_naive(
            points_1, points_2, lengths1=None, lengths2=None, K=K, radius=radius
        )
        cuda_out = ball_query(points_1, points_2, K=K, radius=radius)
        naive_out_all_padded = (naive_out.idx == -1).all()
        cuda_out_all_padded = (cuda_out.idx == -1).all()
        self.assertTrue(naive_out_all_padded)
        self.assertTrue(cuda_out_all_padded)
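For context, a minimal dense sketch of the padding convention this test checks. It is a hypothetical stand-in for the `_ball_query_naive` helper (not pytorch3d's actual implementation), assuming unused neighbor slots are padded with -1:

import torch

def naive_ball_query_idx(p1, p2, K, radius):
    """Return (N, P1, K) indices into p2 of neighbors within radius, padded with -1."""
    dists = torch.cdist(p1, p2)  # (N, P1, P2) pairwise distances
    idx = torch.full(p1.shape[:2] + (K,), -1, dtype=torch.int64, device=p1.device)
    for n in range(p1.shape[0]):
        for i in range(p1.shape[1]):
            hits = torch.nonzero(dists[n, i] < radius, as_tuple=True)[0][:K]
            idx[n, i, : hits.numel()] = hits
    return idx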
Example #2
def get_loss(mesh,
             trg_mesh,
             w_chamfer,
             w_edge,
             w_normal,
             w_laplacian,
             n_points=5000):
    # We sample n_points points (default 5000) from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, n_points)
    sample_src = sample_points_from_meshes(mesh, n_points)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss

    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    return loss
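A hedged usage sketch for the loss above; `new_src_mesh`, `trg_mesh`, and `optimizer` are assumed to come from a surrounding deformation loop like the ones in Examples #17 and #19:

# One optimization step using get_loss (hypothetical surrounding loop)
optimizer.zero_grad()
loss = get_loss(new_src_mesh, trg_mesh, w_chamfer=1.0, w_edge=1.0,
                w_normal=0.01, w_laplacian=0.1)
loss.backward()
optimizer.step()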
Example #3
def save_predictions(model, loader, output_dir):
    """
    This function is used save predicted and gt meshes
    """
    # Note that all eval runs on main process
    assert comm.is_main_process()
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module

    device = torch.device("cuda:0")
    os.makedirs(output_dir, exist_ok=True)

    for batch in tqdm.tqdm(loader):
        batch = loader.postprocess(batch, device)
        model_kwargs = {}
        module = model.module if hasattr(model, "module") else model
        if isinstance(module, VoxMeshMultiViewHead):
            model_kwargs["intrinsics"] = batch["intrinsics"]
            model_kwargs["extrinsics"] = batch["extrinsics"]
        if isinstance(module, VoxDepthHead):
            model_kwargs["masks"] = batch["masks"]
            if module.mvsnet is None:
                model_kwargs["depths"] = batch["depths"]
        model_outputs = model(batch["imgs"], **model_kwargs)

        # TODO: debug only
        # save_debug_predictions(batch, model_outputs)
        # continue

        pred_mesh = model_outputs["meshes_pred"][-1]
        gt_mesh = batch["meshes"]
        pred_mesh = pred_mesh.scale_verts(P2M_SCALE)
        gt_mesh = gt_mesh.scale_verts(P2M_SCALE)

        pred_points = sample_points_from_meshes(
            pred_mesh, NUM_PRED_SURFACE_SAMPLES, return_normals=False
        )
        gt_points = sample_points_from_meshes(
            gt_mesh, NUM_GT_SURFACE_SAMPLES, return_normals=False
        )

        pred_points = pred_points.cpu().detach().numpy()
        gt_points = gt_points.cpu().detach().numpy()

        batch_size = pred_points.shape[0]
        for batch_idx in range(batch_size):
            label, label_appendix = batch["id_strs"][batch_idx].split("-")[:2]
            pred_filename = os.path.join(
                output_dir, "{}_{}_predict.xyz".format(label, label_appendix)
            )
            gt_filename = os.path.join(
                output_dir, "{}_{}_ground.xyz".format(label, label_appendix)
            )

            np.savetxt(pred_filename, pred_points[batch_idx])
            np.savetxt(gt_filename, gt_points[batch_idx])
Example #4
    def test_all_empty_meshes(self):
        """
        Check sample_points_from_meshes raises an exception if all meshes are
        invalid.
        """
        device = torch.device("cuda:0")
        verts1 = torch.tensor([], dtype=torch.float32, device=device)
        faces1 = torch.tensor([], dtype=torch.int64, device=device)
        meshes = Meshes(verts=[verts1, verts1, verts1], faces=[faces1, faces1, faces1])
        with self.assertRaises(ValueError) as err:
            sample_points_from_meshes(meshes, num_samples=100, return_normals=True)
        self.assertTrue("Meshes are empty." in str(err.exception))
Example #5
def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    """
    fig = go.Figure()
    fig = px.scatter_3d(
        x = x,
        y = y,
        z = z,
        labels={'x':'x', 'y':'y', 'z':'z'}
    )
    fig.update_layout(
        title_text = f"PointCloud: {title}"
    )
    fig.show()
    """

    # Matplot 3D scatter plot
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel("x")
    ax.set_ylabel("z")
    ax.set_zlabel("y")
    ax.set_title(title)
    ax.view_init(190, 30)
    plt.show()
Example #6
    def postprocess(self, batch, device=None):
        if device is None:
            device = torch.device("cuda")
        imgs, meshes, points, normals, voxels, Ps, id_strs = batch
        imgs = imgs.to(device)
        if meshes is not None:
            meshes = meshes.to(device)
        if points is not None and normals is not None:
            points = points.to(device)
            normals = normals.to(device)
        else:
            points, normals = sample_points_from_meshes(
                meshes, num_samples=self.num_samples, return_normals=True
            )
        if voxels is not None:
            if torch.is_tensor(voxels):
                # We used cached voxels on disk, just cast and return
                voxels = voxels.to(device)
            else:
                # We got a list of voxel_coords, and need to compute voxels on-the-fly
                voxel_coords = voxels
                Ps = Ps.to(device)
                voxels = []
                for i, cur_voxel_coords in enumerate(voxel_coords):
                    cur_voxel_coords = cur_voxel_coords.to(device)
                    cur_voxels = self._voxelize(cur_voxel_coords, Ps[i])
                    voxels.append(cur_voxels)
                voxels = torch.stack(voxels, dim=0)

        if self.return_id_str:
            return imgs, meshes, points, normals, voxels, id_strs
        else:
            return imgs, meshes, points, normals, voxels
Example #7
def _sample_meshes(meshes, num_samples):
    """
    Helper to either sample points uniformly from the surface of a mesh
    (with normals), or take the verts of the mesh as samples.

    Inputs:
        - meshes: A MeshList
        - num_samples: An integer, or the string 'verts'

    Outputs:
        - verts: Either a Tensor of shape (N, S, 3) if we take the same number of
          samples from each mesh; otherwise a list of length N, whose ith element
          is a Tensor of shape (S_i, 3)
        - normals: Either a Tensor of shape (N, S, 3) or None if we take verts
          as samples.
    """
    if num_samples == "verts":
        normals = None
        if meshes.equisized:
            verts = meshes.verts_batch
        else:
            verts = meshes.verts_list
    else:
        verts, normals = sample_points_from_meshes(meshes,
                                                   num_samples,
                                                   return_normals=True)
    return verts, normals
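Two hedged usage sketches for the helper above; `meshes` is assumed to be the caller's batched-mesh type (the MeshList mentioned in the docstring):

verts, normals = _sample_meshes(meshes, 5000)     # uniform surface samples with normals
verts, normals = _sample_meshes(meshes, "verts")  # raw mesh vertices; normals is None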
Example #8
def load_ShapeNet_pointclouds(data_path, split_path, codes_path, this_device):
    codes = torch.load(codes_path)
    split = json.load(open(split_path))
    object_list = get_objectnames_from_split(split)
    object_paths = []
    for object_name in object_list:
        object_paths.append(
            os.path.join(data_path, object_name,
                         "models/model_normalized.obj"))
    start1 = time.perf_counter()
    print(start1)
    print("Start loading of objects.")
    input_meshes = load_objs_as_meshes(object_paths,
                                       device=this_device,
                                       load_textures=False)
    loading_time = time.perf_counter() - start1
    print("Loading of objects finished. Time necessary to load " +
          str(len(object_paths)) + " objects is: " + str(loading_time) +
          " seconds.")
    print("Start sampling of points.")
    start2 = time.perf_counter()
    number_samples = 4096
    input_pointclouds = sample_points_from_meshes(input_meshes, number_samples)
    sampling_time = time.perf_counter() - start2
    print("Sampling of points finished. Time necessary to sample " +
          str(number_samples) + " points from " + str(len(object_paths)) +
          " objects each is: " + str(sampling_time) + " seconds.")

    return input_pointclouds, codes['latent_codes']['weight']
Example #9
    def forward(self, src_mesh):
        loss = 0

        # Sample from target meshes
        target_verts = sample_points_from_meshes(self.target_meshes, 3000)

        if self.consider_loss("chamfer"):
            loss_chamfer, _ = chamfer_distance(target_verts,
                                               src_mesh.verts_padded())
            loss += self.loss_weights["w_chamfer"] * loss_chamfer

        if self.consider_loss("edge"):
            loss_edge = mesh_edge_loss(
                src_mesh)  # and (b) the edge length of the predicted mesh
            loss += self.loss_weights["w_edge"] * loss_edge

        if self.consider_loss("normal"):
            loss_normal = mesh_normal_consistency(
                src_mesh)  # mesh normal consistency
            loss += self.loss_weights["w_normal"] * loss_normal

        if self.consider_loss("laplacian"):
            loss_laplacian = mesh_laplacian_smoothing(
                src_mesh, method="uniform")  # mesh laplacian smoothing
            loss += self.loss_weights["w_laplacian"] * loss_laplacian

        return loss
Example #10
def animate_pointcloud(mesh,
                       anim_file,
                       points_to_sample,
                       restore_anim=True,
                       is_mesh=True):
    if os.path.isfile(anim_file) and restore_anim:
        return anim_file
    frames = []
    for plot_i in range(24):
        if is_mesh:
            points = sample_points_from_meshes(mesh, points_to_sample)
            x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
        else:
            x, y, z = mesh.unbind(1)
        fig = plt.figure(figsize=(5, 5))
        canvas = FigureCanvas(fig)
        ax = Axes3D(fig)
        ax.scatter3D(x, z, -y)
        ax.view_init(elev=190, azim=360 * (plot_i / 24))
        plt.axis('off')
        plt.close()
        canvas.draw()
        s, (width, height) = canvas.print_to_buffer()
        frames.append(np.frombuffer(s, np.uint8).reshape((height, width, 4)))
    imageio.mimsave(anim_file, frames, 'GIF', fps=8)
    return anim_file
Example #11
def get_plot3d_mesh_img(mesh,
                        title="plot mesh",
                        n_sample=500,
                        fig_size=(5, 5),
                        view_points=(190, 30)):
    points = sample_points_from_meshes(mesh, n_sample)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=fig_size)
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(view_points[0], view_points[1])

    buffer = io.BytesIO()  # prepare an in-memory buffer
    plt.savefig(buffer, format='png')  # write the figure into the buffer
    buffer_np = np.frombuffer(buffer.getvalue(),
                              dtype=np.uint8)  # read the buffer back as bytes
    buffer_cv = cv2.imdecode(buffer_np, 1)  # decode the PNG bytes
    buffer_cv = buffer_cv[:, :, ::-1]  # BGR -> RGB
    img = Image.fromarray(buffer_cv)
    return img
Example #12
    def loss(self, data, epoch):
        pred = self.forward(data)

        # Per-voxel cross-entropy, weighted by the base plane
        weight = data['base_plane'].float().cuda()
        CE_Loss = nn.CrossEntropyLoss(reduction='none')
        ce_loss = CE_Loss(pred[0][-1][3], data['y_voxels'].cuda()) * weight
        ce_loss = ce_loss.mean()

        chamfer_loss = torch.tensor(0).float().cuda()
        edge_loss = torch.tensor(0).float().cuda()
        laplacian_loss = torch.tensor(0).float().cuda()
        normal_consistency_loss = torch.tensor(0).float().cuda()

        # Accumulate mesh losses over classes and deformation steps
        for c in range(self.config.num_classes - 1):
            target = data['surface_points'][c].cuda()
            for vertices, faces, _, _, _ in pred[c][1:]:
                pred_mesh = Meshes(verts=list(vertices), faces=list(faces))
                pred_points = sample_points_from_meshes(pred_mesh, 3000)

                chamfer_loss += chamfer_distance(pred_points, target)[0]
                laplacian_loss += mesh_laplacian_smoothing(pred_mesh, method="uniform")
                normal_consistency_loss += mesh_normal_consistency(pred_mesh)
                edge_loss += mesh_edge_loss(pred_mesh)

        loss = 1 * chamfer_loss + 1 * ce_loss + 0.1 * laplacian_loss + 1 * edge_loss + 0.1 * normal_consistency_loss

        log = {"loss": loss.detach(),
               "chamfer_loss": chamfer_loss.detach(),
               "ce_loss": ce_loss.detach(),
               "normal_consistency_loss": normal_consistency_loss.detach(),
               "edge_loss": edge_loss.detach(),
               "laplacian_loss": laplacian_loss.detach()}
        return loss, log
Example #13
    def test_verts_nan(self):
        num_verts = 30
        num_faces = 50
        for device in ["cpu", "cuda:0"]:
            for invalid in ["nan", "inf"]:
                verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
                # randomly assign invalid values (nan or inf)
                verts[torch.randperm(num_verts)[:10]] = float(invalid)
                faces = torch.randint(
                    num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
                )
                meshes = Meshes(verts=[verts], faces=[faces])

                with self.assertRaisesRegex(ValueError, "Meshes contain nan or inf."):
                    sample_points_from_meshes(
                        meshes, num_samples=100, return_normals=True
                    )
Example #14
    def test_texture_sampling_cow(self):
        # test texture sampling for the cow example by converting
        # the cow mesh and its texture uv to a pointcloud with texture

        device = torch.device("cuda:0")
        obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        for text_type in ("uv", "atlas"):
            # Load mesh + texture
            if text_type == "uv":
                mesh = load_objs_as_meshes(
                    [obj_filename], device=device, load_textures=True, texture_wrap=None
                )
            elif text_type == "atlas":
                mesh = load_objs_as_meshes(
                    [obj_filename],
                    device=device,
                    load_textures=True,
                    create_texture_atlas=True,
                    texture_atlas_size=8,
                    texture_wrap=None,
                )

            points, normals, textures = sample_points_from_meshes(
                mesh, num_samples=50000, return_normals=True, return_textures=True
            )
            pointclouds = Pointclouds(points, normals=normals, features=textures)

            for pos in ("front", "back"):
                # Init rasterizer settings
                if pos == "back":
                    azim = 0.0
                elif pos == "front":
                    azim = 180
                R, T = look_at_view_transform(2.7, 0, azim)
                cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

                raster_settings = PointsRasterizationSettings(
                    image_size=512, radius=1e-2, points_per_pixel=1
                )

                rasterizer = PointsRasterizer(
                    cameras=cameras, raster_settings=raster_settings
                )
                compositor = NormWeightedCompositor()
                renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)
                images = renderer(pointclouds)

                rgb = images[0, ..., :3].squeeze().cpu()
                if DEBUG:
                    filename = "DEBUG_cow_mesh_to_pointcloud_%s_%s.png" % (
                        text_type,
                        pos,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
Example #15
    def __init__(self,
                 n_it: int,
                 param_group: str,
                 SMBLD: SMBLDMesh,
                 target_meshes: Meshes,
                 mesh_names=[],
                 name="optimise",
                 loss_weights=None,
                 lr=1e-3,
                 lr_decay=1.0,
                 out_dir="static_fits_output",
                 custom_lrs=None):
        """
		n_its = integer, number of iterations in stage
		parameters = list of items over which to be optimised
		get_mesh = function that returns Mesh object for identifying losses
		name = name of stage

		lr_decay = factor by which lr decreases at each it"""

        self.n_it = n_it
        self.name = name
        self.out_dir = out_dir
        self.target_meshes = target_meshes
        self.mesh_names = mesh_names
        self.SMBLD = SMBLD

        self.loss_weights = default_weights.copy()
        if loss_weights is not None:
            for k, v in loss_weights.items():
                self.loss_weights[k] = v

        self.losses_to_plot = []  # Store losses for review later

        if custom_lrs is not None:
            for attr in custom_lrs:
                assert hasattr(SMBLD, attr), f"attr '{attr}' not in SMBLD."

        self.param_group = SMBLDMeshParamGroup(SMBLD, param_group, custom_lrs)

        self.optimizer = torch.optim.Adam(self.param_group, lr=lr)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer, lambda epoch: lr *
            (lr_decay)**epoch)  # Decay of learning rate

        with torch.no_grad():
            self.prev_mesh = self.SMBLD.get_meshes()
            self.prev_verts, _ = SMBLD.get_verts(
            )  # Get template verts to use for ARAP method

        self.n_verts = self.prev_verts.shape[1]

        # Sample from target meshes - an equal number to the SMBLD mesh
        self.target_verts = sample_points_from_meshes(self.target_meshes, 3000)

        # Check whether a given loss has a non-zero weight
        self.consider_loss = lambda loss_name: self.loss_weights[f"w_{loss_name}"] > 0
Example #16
def compute_geometric_metrics(pred_mesh_path, gdth_mesh_path):

    with torch.no_grad():
        pred_mesh = trimesh.load(pred_mesh_path)
        gdth_mesh = trimesh.load(gdth_mesh_path)

        pred_vertices = pred_mesh.vertices
        gdth_vertices = gdth_mesh.vertices

        pred_pts = pred_mesh.sample(10000)
        gdth_pts = gdth_mesh.sample(10000)

        pred_vertices -= np.mean(pred_pts, axis=0)
        gdth_vertices -= np.mean(gdth_pts, axis=0)

        pred_vertices /= np.max(np.linalg.norm(pred_vertices, axis=1))
        gdth_vertices /= np.max(np.linalg.norm(gdth_vertices, axis=1))

        pred_vertices = torch.from_numpy(pred_vertices).float()
        gdth_vertices = torch.from_numpy(gdth_vertices).float()

        pred_faces = torch.from_numpy(pred_mesh.faces)
        gdth_faces = torch.from_numpy(gdth_mesh.faces)

        pred_mesh = Meshes(verts=[pred_vertices], faces=[pred_faces])
        gdth_mesh = Meshes(verts=[gdth_vertices], faces=[gdth_faces])

        pred_points, pred_normals = sample_points_from_meshes(
            pred_mesh, num_samples=10000, return_normals=True)
        gt_points, gt_normals = sample_points_from_meshes(gdth_mesh,
                                                          num_samples=10000,
                                                          return_normals=True)

        metrics = _compute_sampling_metrics(pred_points,
                                            pred_normals,
                                            gt_points,
                                            gt_normals,
                                            eps=1e-8)

    return metrics
Example #17
def get_deform_verts(target_mesh, points_to_sample=5000, sphere_level=4):
    device = torch.device("cuda:0")

    src_mesh = ico_sphere(sphere_level, device)

    deform_verts = torch.full(src_mesh.verts_packed().shape,
                              0.0,
                              device=device,
                              requires_grad=True)

    learning_rate = 0.01
    num_iter = 500
    w_chamfer = 1.0
    w_edge = 0.05
    w_normal = 0.0005
    w_laplacian = 0.005

    optimizer = torch.optim.Adam([deform_verts],
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))

    for _ in range(num_iter):
        optimizer.zero_grad()

        new_src_mesh = src_mesh.offset_verts(deform_verts)

        sample_trg = sample_points_from_meshes(target_mesh, points_to_sample)
        sample_src = sample_points_from_meshes(new_src_mesh, points_to_sample)

        loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)
        loss_edge = mesh_edge_loss(new_src_mesh)
        loss_normal = mesh_normal_consistency(new_src_mesh)
        loss_laplacian = mesh_laplacian_smoothing(new_src_mesh,
                                                  method="uniform")
        loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

        loss.backward()
        optimizer.step()
    print(
        f"{datetime.now()} Loss Chamfer:{loss_chamfer * w_chamfer}, Loss Edge:{loss_edge * w_edge}, Loss Normal:{loss_normal * w_normal}, Loss Laplacian:{loss_laplacian * w_laplacian}"
    )

    return deform_verts
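A hedged usage sketch for the function above; the file name "target.obj" and the use of load_objs_as_meshes here are illustrative assumptions:

import torch
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.utils import ico_sphere

target_mesh = load_objs_as_meshes(["target.obj"], device=torch.device("cuda:0"))
deform_verts = get_deform_verts(target_mesh, points_to_sample=5000, sphere_level=4)
# Apply the learned offsets to a fresh ico-sphere (as Example #28 does)
deformed_mesh = ico_sphere(4, deform_verts.device).offset_verts(deform_verts)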
Example #18
def create_model(cfg, device, mode="train", camera_model=None, **kwargs):
    ''' Returns model

    Args:
        cfg (edict): imported yaml config
        device (device): pytorch device
    '''
    if cfg.model.type == 'point':
        decoder = None

    texture = None
    use_lighting = (cfg.renderer is not None
                    and not cfg.renderer.get('is_neural_texture', True))
    if use_lighting:
        texture = LightingTexture()
    else:
        if 'rgb' not in cfg.model.decoder_kwargs.out_dims:
            Texture = get_class_from_string(cfg.model.texture_type)
            cfg.model.texture_kwargs[
                'c_dim'] = cfg.model.decoder_kwargs.out_dims.get('latent', 0)
            texture_decoder = Texture(**cfg.model.texture_kwargs)
        else:
            texture_decoder = decoder
            logger_py.info("Decoder used as NeuralTexture")

        texture = NeuralTexture(
            view_dependent=cfg.model.texture_kwargs.view_dependent,
            decoder=texture_decoder).to(device=device)
        logger_py.info("Created NeuralTexture {}".format(texture.__class__))
        logger_py.info(texture)

    Model = get_class_from_string("DSS.models.{}_modeling.Model".format(
        cfg.model.type))

    # if not using decoder, then use non-parameterized point renderer
    # create icosphere as initial point cloud
    sphere_mesh = ico_sphere(level=4)
    sphere_mesh.scale_verts_(0.5)
    points, normals = sample_points_from_meshes(
        sphere_mesh,
        num_samples=int(cfg['model']['model_kwargs']['n_points_per_cloud']),
        return_normals=True)
    colors = torch.ones_like(points)
    renderer = create_renderer(cfg.renderer).to(device)
    model = Model(
        points,
        normals,
        colors,
        renderer,
        device=device,
        texture=texture,
        **cfg.model.model_kwargs,
    ).to(device=device)

    return model
Example #19
    def run(self):
        deform_verts = torch.full(self.src.verts_packed().shape, 0.0, device=device, requires_grad=True)
        optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

        Niter = 2000
        w_chamfer = 1.0
        w_edge = 1.0
        w_normal = 0.01
        w_laplacian = 0.1

        for i in range(Niter):
            optimizer.zero_grad()

            new_src_mesh = self.src.offset_verts(deform_verts)

            sample_trg = sample_points_from_meshes(self.target, 5000)
            sample_src = sample_points_from_meshes(new_src_mesh, 5000)

            loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)
            loss_edge = mesh_edge_loss(new_src_mesh)
            loss_normal = mesh_normal_consistency(new_src_mesh)
            loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

            # Weighted sum of the losses
            loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

            loss.backward()
            optimizer.step()
            print('total_loss = %.6f' % loss)
            self.backwarded.emit(new_src_mesh.verts_packed())
Example #20
def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(190, 30)
    plt.show()
Example #21
def save_plot3d_mesh_img(mesh, file_path, title="plot mesh", n_sample=500,
                         fig_size=(5, 5), view_points=(190, 30)):
    points = sample_points_from_meshes(mesh, n_sample)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)    
    fig = plt.figure(figsize=fig_size)
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(view_points[0], view_points[1])
    plt.savefig(file_path, dpi=200, bbox_inches='tight')
Example #22
    def test_texture_sampling(self):
        device = torch.device("cuda:0")
        batch_size = 6
        # verts
        verts = torch.rand((batch_size, 6, 3),
                           device=device,
                           dtype=torch.float32)
        verts[:, :3, 2] = 1.0
        verts[:, 3:, 2] = -1.0
        # textures
        texts = torch.rand((batch_size, 6, 3),
                           device=device,
                           dtype=torch.float32)
        # faces
        faces = torch.tensor([[0, 1, 2], [3, 4, 5]],
                             device=device,
                             dtype=torch.int64)
        faces = faces.view(1, 2, 3).expand(batch_size, -1, -1)

        meshes = Meshes(verts=verts,
                        faces=faces,
                        textures=TexturesVertex(texts))

        num_samples = 24
        samples, normals, textures = sample_points_from_meshes(
            meshes,
            num_samples=num_samples,
            return_normals=True,
            return_textures=True)

        textures_naive = torch.zeros((batch_size, num_samples, 3),
                                     dtype=torch.float32,
                                     device=device)
        for n in range(batch_size):
            for i in range(num_samples):
                p = samples[n, i]
                if p[2] > 0.0:  # sampled from 1st face
                    v0, v1, v2 = verts[n, 0, :2], verts[n, 1, :2], verts[n, 2, :2]
                    w0, w1, w2 = barycentric_coordinates(p[:2], v0, v1, v2)
                    t0, t1, t2 = texts[n, 0], texts[n, 1], texts[n, 2]
                else:  # sampled from 2nd face
                    v0, v1, v2 = verts[n, 3, :2], verts[n, 4, :2], verts[n, 5, :2]
                    w0, w1, w2 = barycentric_coordinates(p[:2], v0, v1, v2)
                    t0, t1, t2 = texts[n, 3], texts[n, 4], texts[n, 5]

                tt = w0 * t0 + w1 * t1 + w2 * t2
                textures_naive[n, i] = tt

        self.assertClose(textures, textures_naive)
Example #23
    def postprocess(self, batch, device=None):
        if device is None:
            device = torch.device("cuda")
        non_standard_keys = ["points", "normals", "voxels", "id_strs"]

        # process standard items
        processed_batch = {
            key: (value.to(device) if value is not None else None)
            for key, value in batch.items()
            if key not in non_standard_keys
        }

        # process non-standard items
        if batch["points"] is not None and batch["normals"] is not None:
            processed_batch["points"] = batch["points"].to(device)
            processed_batch["normals"] = batch["normals"].to(device)
        else:
            processed_batch["points"], processed_batch["normals"] = \
                sample_points_from_meshes(
                    batch["meshes"], num_samples=self.num_samples,
                    return_normals=True
                )
        if batch["voxels"] is not None:
            if torch.is_tensor(batch["voxels"]):
                # We used cached voxels on disk, just cast and return
                processed_batch["voxels"] = batch["voxels"].to(device)
                # TODO: need to transform voxel grid to all views
                raise NotImplementedError(
                    "need to transform voxel grid to all views"
                )
            else:
                # We got a list of voxel_coords, and need to compute voxels on-the-fly
                voxel_coords = batch["voxels"]
                voxels = []
                for batch_idx, cur_voxel_coords in enumerate(voxel_coords):
                    cur_voxel_coords = cur_voxel_coords.to(device)
                    voxels_views = []
                    K = batch["intrinsics"][batch_idx]
                    # find voxel grid in all views coordinate frames
                    for view_idx, transform in \
                            enumerate(batch["extrinsics"][batch_idx].unbind(0)):
                        P = K.matmul(transform)
                        cur_voxels = self._voxelize(cur_voxel_coords, P)
                        voxels_views.append(cur_voxels)
                    voxels.append(torch.stack(voxels_views, dim=0))
                processed_batch["voxels"] = torch.stack(voxels, dim=0)

        if self.return_id_str:
            processed_batch["id_strs"] = batch["id_strs"]

        return processed_batch
Example #24
    def forward(self, voxel_scores, meshes_pred, voxels_gt, meshes_gt):
        """
        Args:
          meshes_pred: Meshes
          meshes_gt: Either Meshes, or a tuple (points_gt, normals_gt)

        Returns:
          loss (float): Torch scalar giving the total loss, or None if an error occurred and
                we should skip this loss. TODO use an exception instead?
          losses (dict): A dictionary mapping loss names to Torch scalars giving their
                        (unweighted) values.
        """
        # Sample from meshes_gt if we haven't already
        if isinstance(meshes_gt, tuple):
            points_gt, normals_gt = meshes_gt
        else:
            points_gt, normals_gt = sample_points_from_meshes(
                meshes_gt,
                num_samples=self.gt_num_samples,
                return_normals=True)

        total_loss = torch.tensor(0.0).to(points_gt)
        losses = {}

        if voxel_scores is not None and voxels_gt is not None and self.voxel_weight > 0:
            voxels_gt = voxels_gt.float()
            voxel_loss = F.binary_cross_entropy_with_logits(
                voxel_scores, voxels_gt)
            total_loss = total_loss + self.voxel_weight * voxel_loss
            losses["voxel"] = voxel_loss

        if isinstance(meshes_pred, Meshes):
            meshes_pred = [meshes_pred]
        elif meshes_pred is None:
            meshes_pred = []

        # Now assume meshes_pred is a list
        if not self.skip_mesh_loss:
            for i, cur_meshes_pred in enumerate(meshes_pred):
                cur_out = self._mesh_loss(cur_meshes_pred, points_gt,
                                          normals_gt)
                cur_loss, cur_losses = cur_out
                if total_loss is None or cur_loss is None:
                    total_loss = None
                else:
                    total_loss = total_loss + cur_loss / len(meshes_pred)
                for k, v in cur_losses.items():
                    losses["%s_%d" % (k, i)] = v

        return total_loss, losses
Example #25
def compute_geometric_metrics_points(pred_mesh_path, gdth_mesh_path, rot=None):

    with torch.no_grad():
        pred_mesh = trimesh.load(pred_mesh_path)
        gt_points = np.load(gdth_mesh_path + '/points.npy')
        gt_normals = np.load(gdth_mesh_path + '/normals.npy')
        mask = np.random.randint(0, gt_points.shape[0], 10000)
        gt_points = gt_points[mask, :]
        gt_normals = gt_normals[mask, :]

        if rot is not None:
            r = R.from_euler(rot[0], rot[1], degrees=True)
            gt_points = r.apply(gt_points)
            gt_normals = r.apply(gt_normals)

        # make to unit scale and shift to origin
        gt_points -= np.mean(gt_points, axis=0)
        gt_points /= np.max(np.linalg.norm(gt_points, axis=1))

        pred_vertices = pred_mesh.vertices
        pred_pts = pred_mesh.sample(10000)
        pred_vertices -= np.mean(pred_pts, axis=0)
        pred_vertices /= np.max(np.linalg.norm(pred_vertices, axis=1))

        pred_vertices = torch.from_numpy(pred_vertices).float()
        pred_faces = torch.from_numpy(pred_mesh.faces)
        pred_mesh = Meshes(verts=[pred_vertices], faces=[pred_faces])

        pred_points, pred_normals = sample_points_from_meshes(
            pred_mesh, num_samples=10000, return_normals=True)

        gt_points = torch.from_numpy(gt_points).type_as(pred_points).unsqueeze(0)
        gt_normals = torch.from_numpy(gt_normals).type_as(pred_points).unsqueeze(0)

        metrics = _compute_sampling_metrics(pred_points,
                                            pred_normals,
                                            gt_points,
                                            gt_normals,
                                            eps=1e-8)

    return metrics
Example #26
    def validation_epoch_end(self, outputs):
        log_mean = {"log": {}}
        for k in outputs[0]["log"].keys():
            log_mean["log"][k] = torch.stack([x["log"][k]
                                              for x in outputs]).mean()

        log_mean['val_loss'] = torch.stack([x["val_loss"]
                                            for x in outputs]).mean()

        # Compute chamfer Loss
        if self.cfg.experiment.chamfer_loss and isinstance(
                self.val_dataset, SynthesizableDataset):
            assert self.val_dataset.target_mesh is not None, "To compute the " \
                "chamfer loss, a target mesh .obj must be provided in the dataset folder"

            # Target model to query based on the grid
            model = self.get_model()

            # Read the input 3D model
            vertices, faces, _, _, _, _ = extract_geometry(
                model, self.device, None)

            # We construct a Meshes structure for the target mesh
            input_mesh = create_mesh(vertices, faces)

            # Sparse sampling
            target_samples = sample_points_from_meshes(
                self.val_dataset.target_mesh,
                self.cfg.experiment.chamfer_sampling_size)
            input_samples = sample_points_from_meshes(
                input_mesh, self.cfg.experiment.chamfer_sampling_size)

            chamfer_loss, _ = chamfer_distance(target_samples, input_samples)
            log_mean["log"]["validation/chamfer_loss"] = chamfer_loss

        return log_mean
Example #27
    def _mesh_loss(self, meshes_pred, points_gt, normals_gt):
        """
        Args:
          meshes_pred: Meshes containing N meshes
          points_gt: Tensor of shape NxPx3
          normals_gt: Tensor of shape NxPx3

        Returns:
          total_loss (float): The sum of all losses specific to meshes
          losses (dict): All (unweighted) mesh losses in a dictionary
        """
        device = meshes_pred.verts_list()[0].device
        zero = torch.tensor(0.0).to(device)
        losses = {"chamfer": zero, "normal": zero, "edge": zero}
        if self.upsample_pred_mesh:
            points_pred, normals_pred = sample_points_from_meshes(
                meshes_pred,
                num_samples=self.pred_num_samples,
                return_normals=True)
        else:
            points_pred = meshes_pred.verts_list()
            normals_pred = meshes_pred.verts_normals_list()

        total_loss = torch.tensor(0.0).to(device)
        if points_pred is None or points_gt is None:
            # Sampling failed, so return None
            total_loss = None
            which = "predictions" if points_pred is None else "GT"
            logger.info("WARNING: Sampling %s failed" % (which))
            return total_loss, losses

        losses = {}
        cham_loss, normal_loss = chamfer_distance(points_pred,
                                                  points_gt,
                                                  x_normals=normals_pred,
                                                  y_normals=normals_gt)

        total_loss = total_loss + self.chamfer_weight * cham_loss
        total_loss = total_loss + self.normal_weight * normal_loss
        losses["chamfer"] = cham_loss
        losses["normal"] = normal_loss

        edge_loss = mesh_edge_loss(meshes_pred)
        total_loss = total_loss + self.edge_weight * edge_loss
        losses["edge"] = edge_loss

        return total_loss, losses
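For reference, a minimal self-contained sketch of the chamfer-plus-normals call used above, assuming pytorch3d's public API and padded point tensors:

import torch
from pytorch3d.loss import chamfer_distance
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.utils import ico_sphere

mesh = ico_sphere(level=2)
points_a, normals_a = sample_points_from_meshes(mesh, 1000, return_normals=True)
points_b, normals_b = sample_points_from_meshes(mesh, 1000, return_normals=True)
# With normals passed, chamfer_distance returns (point_loss, normal_loss)
cham_loss, normal_loss = chamfer_distance(
    points_a, points_b, x_normals=normals_a, y_normals=normals_b
)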
Example #28
def create_data(folder_path='meshes/',
                nb_of_pointclouds=50,
                nb_of_points=5000,
                sphere_level=4,
                normalize_data=True):
    device = torch.device("cuda:0")

    data_path = os.path.join(os.getcwd(), folder_path)
    src_mesh = ico_sphere(sphere_level, device)

    for filename in os.listdir(data_path):
        print(f"{datetime.now()} Starting:{filename}")
        file_path = os.path.join(data_path, filename)
        cur_mesh = utils.load_mesh(file_path)
        cur_deform_verts = deformation.get_deform_verts(
            cur_mesh, nb_of_points, sphere_level)
        data_verts = np.expand_dims(cur_deform_verts.detach().cpu().numpy(),
                                    axis=0)
        data_input = None
        data_output = None
        for _ in range(nb_of_pointclouds):
            data_a = sample_points_from_meshes(
                cur_mesh, nb_of_points).squeeze().cpu().numpy()
            if normalize_data:
                data_a = data_a - np.mean(data_a, axis=0)
                data_a = data_a / np.max(data_a, axis=0)
                data_a_sort_indices = np.argsort(np.linalg.norm(data_a,
                                                                axis=1))
                data_a = data_a[data_a_sort_indices]
            data_a = np.expand_dims(data_a, axis=0)
            data_input = data_a if data_input is None else np.concatenate(
                (data_input, data_a))
            data_output = data_verts if data_output is None else np.concatenate(
                (data_output, data_verts))
        np.save(f'data/{os.path.splitext(filename)[0]}_input.npy', data_input)
        np.save(f'data/{os.path.splitext(filename)[0]}_output.npy',
                data_output)
        deformed_mesh = src_mesh.offset_verts(cur_deform_verts)
        final_verts, final_faces = deformed_mesh.get_mesh_verts_faces(0)
        final_obj = os.path.join(
            'deformed_meshes/',
            f'{os.path.splitext(filename)[0]}_deformed.obj')
        save_obj(final_obj, final_verts, final_faces)
        print(
            f"{datetime.now()} Finished:{filename}, Point Cloud Shape:{data_input.shape} Deform Verts Shape:{data_output.shape}"
        )
Example #29
def validate_training_AE(validation_generator, model):
    '''
    This function is used to calculate validation loss during training NMF AE
    '''
    print("Validating model......")
    with torch.no_grad():
        total_loss = 0
        items = 0
        for input, _, _ in validation_generator:
            input = input.cuda()
            # Point prediction after each deform block and face information (see figure 4 in the paper)
            _, _, pred2, face = model(input)
            mesh_p2 = Meshes(verts=pred2, faces=face)  # Construct differentiable mesh M_p2
            pts2 = sample_points_from_meshes(mesh_p2, num_samples=2562)  # Differentiably sample points from mesh surfaces
            loss, _ = chamfer_distance(pts2, input)
            total_loss += loss.item()
            items += 1

    return total_loss / items  # Return average validation loss
Example #30
    def test_outputs(self):

        for add_texture in (True, False):
            meshes = TestSamplePoints.init_meshes(
                device=torch.device("cuda:0"), add_texture=add_texture
            )
            out1 = sample_points_from_meshes(meshes, num_samples=100)
            self.assertTrue(torch.is_tensor(out1))

            out2 = sample_points_from_meshes(
                meshes, num_samples=100, return_normals=True
            )
            self.assertTrue(isinstance(out2, tuple) and len(out2) == 2)

            if add_texture:
                out3 = sample_points_from_meshes(
                    meshes, num_samples=100, return_textures=True
                )
                self.assertTrue(isinstance(out3, tuple) and len(out3) == 2)

                out4 = sample_points_from_meshes(
                    meshes, num_samples=100, return_normals=True, return_textures=True
                )
                self.assertTrue(isinstance(out4, tuple) and len(out4) == 3)
            else:
                with self.assertRaisesRegex(
                    ValueError, "Meshes do not contain textures."
                ):
                    sample_points_from_meshes(
                        meshes, num_samples=100, return_textures=True
                    )

                with self.assertRaisesRegex(
                    ValueError, "Meshes do not contain textures."
                ):
                    sample_points_from_meshes(
                        meshes,
                        num_samples=100,
                        return_normals=True,
                        return_textures=True,
                    )
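Summarizing the return arity this test exercises (a sketch, not exhaustive documentation; `meshes` is assumed to be a textured Meshes batch of size N):

pts = sample_points_from_meshes(meshes, num_samples=100)  # Tensor of shape (N, 100, 3)
pts, normals = sample_points_from_meshes(meshes, num_samples=100, return_normals=True)
pts, textures = sample_points_from_meshes(meshes, num_samples=100, return_textures=True)
pts, normals, textures = sample_points_from_meshes(
    meshes, num_samples=100, return_normals=True, return_textures=True
)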