Ejemplo n.º 1
0
    def close(self):
        """Finalize this render session.

        Closes every still-open GIF writer (replacing the writer object in
        ``self.gif_writers`` with the path of the finished GIF), then writes
        the render metadata JSON, optional reconstruction results, any
        predicted meshes (as .obj) and any pose plots (as .png) into the
        session's output folders.
        """
        # close writers
        for key, gw in self.gif_writers.items():
            # Entries that are already strings were converted to paths by an
            # earlier call; skipping them makes close() safe to call twice.
            if isinstance(gw, str):
                continue

            gw.close()
            self.gif_writers[key] = join(self.folder_locs[key],
                                         f"camera_simulation_{key}.gif")
        # generate json file for the render
        json_dict = self._dict()
        json_file = join(self.folder_locs["base"], "info.json")
        with open(json_file, mode="w") as f:
            json.dump(json_dict, f)

        # Reconstruction metrics are optional; only dump them when present.
        if self.pred_results:
            results_file = join(self.folder_locs["base"],
                                "reconstruction_results.json")
            with open(results_file, mode="w") as f:
                json.dump(self.pred_results, f)

        # "predicted_mesh" / "pose_plot" are only set by some pipelines, so
        # probe the instance dict instead of assuming the attributes exist.
        if "predicted_mesh" in self.__dict__.keys():
            for mesh, name in self.predicted_mesh:
                mesh_path = join(self.folder_locs["base"],
                                 f"predicted_mesh{name}.obj")
                verts, faces = mesh.get_mesh_verts_faces(0)
                save_obj(mesh_path, verts, faces)

        if "pose_plot" in self.__dict__.keys():
            for plot, name in self.pose_plot:
                plot_path = join(self.folder_locs["base"],
                                 f"pose_plot_{name}.png")
                plot.savefig(plot_path, dpi=plot.dpi)
Ejemplo n.º 2
0
def postprocess_data(cached_pred_poses, output_dir_mesh, cfg, device,
                     recompute_meshes):
    """Refine each coarse mesh against its input image at the cached pose.

    Args:
        cached_pred_poses: mapping of instance name -> dict with 'dist',
            'elev' and 'azim' entries (previously predicted camera pose).
        output_dir_mesh: directory where refined ``<instance>.obj`` files
            are written.
        cfg: config dict; reads ``dataset.input_dir_img`` and
            ``dataset.input_dir_mesh``.
        device: torch device used for mesh loading and refinement.
        recompute_meshes: if False, instances whose refined .obj already
            exists on disk are skipped (cheap resume behavior).

    Returns:
        dict: instance name -> loss info, only for instances refined in
        this call (skipped instances are absent).
    """
    input_dir_img = cfg['dataset']['input_dir_img']
    input_dir_mesh = cfg['dataset']['input_dir_mesh']

    # postprocessing each mesh/img in dataset
    refiner = MeshRefiner(cfg, device)
    loss_info = {}
    tqdm_out = utils.TqdmPrintEvery()
    for instance_name in tqdm(cached_pred_poses, file=tqdm_out):
        curr_obj_path = os.path.join(output_dir_mesh, instance_name + ".obj")
        if recompute_meshes or not os.path.exists(curr_obj_path):
            input_image = np.asarray(
                Image.open(os.path.join(input_dir_img,
                                        instance_name + ".png")))
            # Loading the coarse mesh needs no gradients; the refiner
            # manages its own autograd state.
            with torch.no_grad():
                mesh = utils.load_untextured_mesh(
                    os.path.join(input_dir_mesh, instance_name + ".obj"),
                    device)
            pred_dist = cached_pred_poses[instance_name]['dist']
            pred_elev = cached_pred_poses[instance_name]['elev']
            pred_azim = cached_pred_poses[instance_name]['azim']

            curr_refined_mesh, curr_loss_info = refiner.refine_mesh(
                mesh, input_image, pred_dist, pred_elev, pred_azim)
            save_obj(curr_obj_path, curr_refined_mesh.verts_packed(),
                     curr_refined_mesh.faces_packed())
            loss_info[instance_name] = curr_loss_info

    return loss_info
Ejemplo n.º 3
0
 def _bm_load_obj(verts: torch.Tensor, faces: torch.Tensor,
                  decimal_places: int):
     """Benchmark helper: serialize ``verts``/``faces`` once, then return a
     zero-arg callable that parses the OBJ text from a fresh stream."""
     buffer = StringIO()
     save_obj(buffer, verts, faces, decimal_places)
     serialized = buffer.getvalue()

     # A brand-new StringIO per invocation keeps the benchmark unaffected
     # by how the original stream was created or consumed.
     def run():
         return load_obj(StringIO(serialized))

     return run
Ejemplo n.º 4
0
 def test_save_obj(self):
     """save_obj writes vertices rounded to the requested 2 decimal places
     and faces as 1-based indices."""
     verts = torch.tensor(
         [
             [0.01, 0.2, 0.301],
             [0.2, 0.03, 0.408],
             [0.3, 0.4, 0.05],
             [0.6, 0.7, 0.8],
         ],
         dtype=torch.float32,
     )
     faces = torch.tensor([[0, 2, 1], [0, 1, 2], [3, 2, 1], [3, 1, 0]],
                          dtype=torch.int64)
     stream = StringIO()
     save_obj(stream, verts, faces, decimal_places=2)
     expected_lines = [
         "v 0.01 0.20 0.30",
         "v 0.20 0.03 0.41",
         "v 0.30 0.40 0.05",
         "v 0.60 0.70 0.80",
         "f 1 3 2",
         "f 1 2 3",
         "f 4 3 2",
         "f 4 2 1",
     ]
     self.assertEqual(stream.getvalue(), "\n".join(expected_lines))
Ejemplo n.º 5
0
def translate_mesh_on_axis(
    mesh, t: list, renderer, dist: float = 3.5, save: str = "", device: str = ""
):
    """Translate ``mesh`` by the vector ``t``, render a preview, and
    optionally save the translated mesh as an .obj file.

    Args:
        mesh: pytorch3d Meshes object; only mesh 0 of the batch is used.
        t: ``[x, y, z]`` translation offsets.
        renderer: pytorch3d renderer used to produce the preview image.
        dist: camera distance for the preview render.
        save: if non-empty, path where the translated mesh is written.
        device: device forwarded to the pytorch3d transform and camera.

    Returns:
        The translated mesh.
    """
    translation = Transform3d(device=device).translate(t[0], t[1], t[2])

    # Apply the translation to the verts of the first mesh in the batch;
    # the faces are unchanged so they are not unpacked here.
    verts, _ = mesh.get_mesh_verts_faces(0)
    verts = translation.transform_points(verts)
    mesh = mesh.update_padded(verts.unsqueeze(0))

    # Fixed frontal viewpoint (elev=0, azim=0); only distance is tunable.
    # (Removed the original's no-op `dist = dist` and the elev/azim locals.)
    R, T = look_at_view_transform(dist=dist, elev=0, azim=0, device=device)

    image_ref = renderer(meshes_world=mesh, R=R, T=T, device=device)
    image_ref = image_ref.cpu().numpy()

    plt.imshow(image_ref.squeeze())
    plt.show()

    if save:
        verts, faces = mesh.get_mesh_verts_faces(0)
        save_obj(save, verts, faces)
    return mesh
Ejemplo n.º 6
0
    def run_on_images(self, imgs, sid, mid, iid, sampled_idx):
        """Run the encoder/decoder/merger/refiner pipeline on ``imgs`` and
        dump the cubified mesh, two deprocessed input views, and volume
        renderings under ``output_dir/<sid>/<mid>/``.

        NOTE(review): ``output_dir`` is read from an enclosing/global scope,
        not from ``self`` — confirm it is defined before calling.
        """
        dir1 = os.path.join(output_dir, str(sid), str(mid))
        if not os.path.exists(dir1):
            os.makedirs(dir1)

        deprocess = imagenet_deprocess(rescale_image=False)
        image_features = self.encoder(imgs)
        raw_features, generated_volume = self.decoder(image_features)
        generated_volume = self.merger(raw_features, generated_volume)
        generated_volume = self.refiner(generated_volume)

        # Threshold the voxel occupancy at 0.3 and convert to a triangle mesh.
        mesh = cubify(generated_volume, 0.3)
        #         mesh = voxel_to_world(meshes)
        save_mesh = os.path.join(dir1, "%s_%s.obj" % (iid, sampled_idx))
        verts, faces = mesh.get_mesh_verts_faces(0)
        save_obj(save_mesh, verts, faces)

        generated_volume = generated_volume.squeeze()
        img = image_to_numpy(deprocess(imgs[0][0]))
        save_img = os.path.join(dir1, "%02d.png" % (iid))
        #         cv2.imwrite(save_img, img[:, :, ::-1])
        cv2.imwrite(save_img, img)
        img1 = image_to_numpy(deprocess(imgs[0][1]))
        # NOTE(review): if iid == sampled_idx this filename collides with and
        # overwrites the first image written above.
        save_img1 = os.path.join(dir1, "%02d.png" % (sampled_idx))
        cv2.imwrite(save_img1, img1)
        #         cv2.imwrite(save_img1, img1[:, :, ::-1])
        get_volume_views(generated_volume, dir1, iid, sampled_idx)
Ejemplo n.º 7
0
def rotate_mesh_around_axis(
    mesh, rot: list, renderer, dist: float = 3.5, save: str = "", device: str = ""
):
    """Rotate ``mesh`` by the Euler angles ``rot`` (applied in X, Y, Z
    order), render a preview, and optionally save the rotated mesh.

    Args:
        mesh: pytorch3d Meshes object; only mesh 0 of the batch is used.
        rot: ``[x_angle, y_angle, z_angle]`` rotation angles.
        renderer: pytorch3d renderer used to produce the preview image.
        dist: camera distance for the preview render.
        save: if non-empty, path where the rotated mesh is written as .obj.
        device: device for the transforms; defaults to the current CUDA
            device when falsy.

    Returns:
        The rotated mesh.
    """
    if not device:
        device = torch.cuda.current_device()

    rot_x = RotateAxisAngle(rot[0], "X", device=device)
    rot_y = RotateAxisAngle(rot[1], "Y", device=device)
    rot_z = RotateAxisAngle(rot[2], "Z", device=device)
    # (Removed the original's dead `rot = Transform3d(...).stack(...)` —
    # the stacked transform was never used and it shadowed the `rot` param.)

    # Apply the three axis rotations sequentially to the verts of mesh 0.
    verts, _ = mesh.get_mesh_verts_faces(0)
    verts = rot_x.transform_points(verts)
    verts = rot_y.transform_points(verts)
    verts = rot_z.transform_points(verts)
    mesh = mesh.update_padded(verts.unsqueeze(0))

    # Fixed frontal viewpoint (elev=0, azim=0); only distance is tunable.
    R, T = look_at_view_transform(dist=dist, elev=0, azim=0, device=device)

    image_ref = renderer(meshes_world=mesh, R=R, T=T, device=device)
    image_ref = image_ref.cpu().numpy()[..., :3]

    plt.imshow(image_ref.squeeze())
    plt.show()

    if save:
        verts, faces = mesh.get_mesh_verts_faces(0)
        save_obj(save, verts, faces)
    return mesh
Ejemplo n.º 8
0
def visualize_predictions(
    image_id,
    image_file,
    scores,
    labels,
    boxes,
    mask_rles,
    meshes,
    metadata,
    output_dir,
    alpha=0.6,
    dpi=200,
):
    """Save per-prediction visualizations for one image.

    For each of the N predictions, overlays its instance mask (with class
    color and blending factor ``alpha``) and bounding box on the input
    image, then writes ``<image_id>_<i>_<class>_<score>.png`` and the i-th
    predicted mesh as a matching ``.obj`` under ``<output_dir>/results``.

    NOTE(review): the ``dpi`` parameter is accepted but never used in this
    body — confirm whether it was meant for a matplotlib export.
    """
    # create vis_dir
    output_dir = os.path.join(output_dir, "results")
    os.makedirs(output_dir, exist_ok=True)

    cat_colors = metadata.thing_colors
    cat_names = metadata.thing_classes

    # read image
    image_file = os.path.join(metadata.image_root, image_file)
    image = detection_utils.read_image(image_file, format="RGB")

    num_preds = len(scores)

    for i in range(num_preds):
        # box
        box = boxes[i].view(1, 4)
        # RLE to 2D mask
        mask = mask_util.decode(mask_rles[i])

        label = labels[i]
        mask_color = np.array(cat_colors[label], dtype=np.float32)
        cat_name = cat_names[label]
        score = scores[i]

        # plot mask overlayed
        composite = image.copy()
        composite = draw_mask(composite,
                              mask,
                              mask_color,
                              alpha=alpha,
                              draw_contours=False)
        # Box line width scales with image height (min 1 px via ceil).
        thickness = int(np.ceil(0.001 * image.shape[0]))
        composite = draw_boxes(composite, box, thickness)

        save_file = os.path.join(
            output_dir, "%d_%d_%s_%.3f.png" % (image_id, i, cat_name, score))
        # cv2 expects BGR ordering, hence the channel flip.
        cv2.imwrite(save_file, composite[:, :, ::-1])

        save_file = os.path.join(
            output_dir, "%d_%d_%s_%.3f.obj" % (image_id, i, cat_name, score))
        verts, faces = meshes.get_mesh_verts_faces(i)
        save_obj(save_file, verts, faces)
Ejemplo n.º 9
0
    def test_save_obj_invalid_indices(self):
        """save_obj warns (rather than raises) on out-of-range face indices."""
        message_regex = "Faces have invalid indices"
        verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
        # One face indexing past the only vertex, one with a negative index:
        # both must trigger the same UserWarning.
        for bad_face in ([[0, 1, 2]], [[-1, 0, 1]]):
            faces = torch.LongTensor(bad_face)
            with self.assertWarnsRegex(UserWarning, message_regex):
                save_obj(StringIO(), verts, faces)
Ejemplo n.º 10
0
def visualize_prediction(image_id, img, mesh, output_dir):
    """Write the input image (PNG) and predicted mesh (OBJ) for one
    ShapeNet example into ``<output_dir>/results_shapenet``."""
    vis_dir = os.path.join(output_dir, "results_shapenet")
    os.makedirs(vis_dir, exist_ok=True)

    # cv2 expects BGR, so flip the channel axis before writing.
    cv2.imwrite(os.path.join(vis_dir, "%s.png" % (image_id)), img[:, :, ::-1])

    mesh_verts, mesh_faces = mesh.get_mesh_verts_faces(0)
    save_obj(os.path.join(vis_dir, "%s.obj" % (image_id)), mesh_verts,
             mesh_faces)
Ejemplo n.º 11
0
 def _test_save_load(self, verts, faces):
     """Round-trip ``verts``/``faces`` through an in-memory OBJ stream and
     check the parsed tensors match."""
     stream = StringIO()
     save_obj(stream, verts, faces)
     stream.seek(0)
     # Empty inputs load back as canonical empty (0, 3) tensors, so compare
     # against those instead of whatever empty shape the caller passed.
     expected_verts = verts if len(verts) else torch.zeros(
         size=(0, 3), dtype=torch.float32)
     expected_faces = faces if len(faces) else torch.zeros(
         size=(0, 3), dtype=torch.int64)
     actual_verts, actual_faces, _ = load_obj(stream)
     self.assertClose(expected_verts, actual_verts)
     self.assertClose(expected_faces, actual_faces.verts_idx)
Ejemplo n.º 12
0
    def test_save_obj_invalid_indices(self):
        """save_obj warns on invalid face indices when writing to a path."""
        message_regex = "Faces have invalid indices"
        verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
        # Out-of-range and negative indices both warn rather than raise,
        # exactly as in the StringIO variant of this test.
        for bad_face in ([[0, 1, 2]], [[-1, 0, 1]]):
            faces = torch.LongTensor(bad_face)
            with self.assertWarnsRegex(UserWarning, message_regex):
                with NamedTemporaryFile(mode="w", suffix=".obj") as f:
                    save_obj(Path(f.name), verts, faces)
Ejemplo n.º 13
0
def save_merged_voxel_grids(
    file_prefix, voxel_scores, voxel_size, cubify_threshold
):
    """
    Save merged voxel grids for debugging purposes.

    Cubifies ``voxel_scores`` into a batch of meshes and writes each one to
    ``/tmp/cube_mesh_<file_prefix>_<batch_idx>_merged.obj``.
    """
    # Imported lazily so this debug-only helper adds no import cost to
    # normal runs.  (Removed the original's unused `import open3d as o3d`.)
    from pytorch3d.io import save_obj
    meshes = cubify(voxel_scores, voxel_size, cubify_threshold)
    for batch_idx, mesh in enumerate(meshes):
        filename = "/tmp/cube_mesh_{}_{}_merged.obj" \
                        .format(file_prefix, batch_idx)
        save_obj(filename, mesh.verts_packed(), mesh.faces_packed())
Ejemplo n.º 14
0
    def _test_save_load(self, verts, faces):
        """Round-trip ``verts``/``faces`` through an OBJ file on disk and
        check the parsed tensors match."""
        with NamedTemporaryFile(mode="w", suffix=".obj") as f:
            file_path = Path(f.name)
            save_obj(file_path, verts, faces)
            f.flush()

            # Empty inputs load back as canonical empty (0, 3) tensors, so
            # compare against those rather than the caller's empty shapes.
            expected_verts = verts if len(verts) else torch.zeros(
                size=(0, 3), dtype=torch.float32)
            expected_faces = faces if len(faces) else torch.zeros(
                size=(0, 3), dtype=torch.int64)
            actual_verts, actual_faces, _ = load_obj(file_path)
            self.assertClose(expected_verts, actual_verts)
            self.assertClose(expected_faces, actual_faces.verts_idx)
Ejemplo n.º 15
0
    def test_save_obj_invalid_shapes(self):
        """save_obj raises ValueError on malformed verts/faces tensors."""
        # Invalid vertices shape
        with self.assertRaises(ValueError) as error:
            verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]])  # (V, 4)
            faces = torch.LongTensor([[0, 1, 2]])
            save_obj(StringIO(), verts, faces)
        expected_message = "Argument 'verts' should either be empty or of shape (num_verts, 3)."
        # The original `assertTrue(expected_message, error.exception)` always
        # passed (the message is truthy); actually check the exception text.
        self.assertIn(expected_message, str(error.exception))

        # Invalid faces shape
        with self.assertRaises(ValueError) as error:
            verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
            faces = torch.LongTensor([[0, 1, 2, 3]])  # (F, 4)
            save_obj(StringIO(), verts, faces)
        expected_message = "Argument 'faces' should either be empty or of shape (num_faces, 3)."
        self.assertIn(expected_message, str(error.exception))
Ejemplo n.º 16
0
def create_data(folder_path='meshes/',
                nb_of_pointclouds=50,
                nb_of_points=5000,
                sphere_level=4,
                normalize_data=True):
    """Build a (sampled point cloud -> deformation verts) training set.

    For every mesh file in ``folder_path``: computes the vertex offsets that
    deform a level-``sphere_level`` ico-sphere into the mesh, samples
    ``nb_of_pointclouds`` point clouds of ``nb_of_points`` points each, and
    saves the stacked inputs/outputs as ``data/<name>_{input,output}.npy``
    plus the deformed mesh as ``deformed_meshes/<name>_deformed.obj``.

    NOTE(review): the literal "(unknown)" inside the print strings looks
    like a redacted f-string field (probably ``{filename}``) — confirm
    against the original repository before relying on the log output.
    """
    device = torch.device("cuda:0")

    data_path = os.path.join(os.getcwd(), folder_path)
    src_mesh = ico_sphere(sphere_level, device)

    for filename in os.listdir(data_path):
        print(f"{datetime.now()} Starting:(unknown)")
        file_path = os.path.join(data_path, filename)
        cur_mesh = utils.load_mesh(file_path)
        # Vertex offsets that morph the ico-sphere into the current mesh.
        cur_deform_verts = deformation.get_deform_verts(
            cur_mesh, nb_of_points, sphere_level)
        data_verts = np.expand_dims(cur_deform_verts.detach().cpu().numpy(),
                                    axis=0)
        data_input = None
        data_output = None
        for _ in range(nb_of_pointclouds):
            data_a = sample_points_from_meshes(
                cur_mesh, nb_of_points).squeeze().cpu().numpy()
            if normalize_data:
                # Center, scale per axis, then sort points by distance from
                # the origin so each cloud has a canonical ordering.
                data_a = data_a - np.mean(data_a, axis=0)
                data_a = data_a / np.max(data_a, axis=0)
                data_a_sort_indices = np.argsort(np.linalg.norm(data_a,
                                                                axis=1))
                data_a = data_a[data_a_sort_indices]
            data_a = np.expand_dims(data_a, axis=0)
            # Every sampled cloud maps to the same deformation target.
            data_input = data_a if data_input is None else np.concatenate(
                (data_input, data_a))
            data_output = data_verts if data_output is None else np.concatenate(
                (data_output, data_verts))
        np.save(f'data/{os.path.splitext(filename)[0]}_input.npy', data_input)
        np.save(f'data/{os.path.splitext(filename)[0]}_output.npy',
                data_output)
        deformed_mesh = src_mesh.offset_verts(cur_deform_verts)
        final_verts, final_faces = deformed_mesh.get_mesh_verts_faces(0)
        final_obj = os.path.join(
            'deformed_meshes/',
            f'{os.path.splitext(filename)[0]}_deformed.obj')
        save_obj(final_obj, final_verts, final_faces)
        print(
            f"{datetime.now()} Finished:(unknown), Point Cloud Shape:{data_input.shape} Deform Verts Shape:{data_output.shape}"
        )
Ejemplo n.º 17
0
def main():
    """Export ModelNet geometry depending on ``cfg.is_save_from_mat``.

    - True: load point clouds from a ModelNet40 .mat file and write each
      instance as an ``.xyz`` text file under ``../Data/All_class_ori_mesh``.
    - False: iterate the ModelNet test split and save the normalized meshes
      of the classes listed in ``label_indexes`` as ``.obj`` files under
      ``../Data/Ten_class_ori_mesh``.
    """
    if cfg.is_save_from_mat:
        mat_path = os.path.join('../Data', 'modelnet40_2111instances10000_PointNet.mat')
        dataset = sio.loadmat(mat_path)
        trg_pc = torch.FloatTensor(dataset["data"]).cuda()

        if not os.path.exists(os.path.join('../Data', 'All_class_ori_mesh')):
            os.makedirs(os.path.join('../Data', 'All_class_ori_mesh'))

        for i in range(trg_pc.size(0)):
            curr_trg_pc = trg_pc[i].unsqueeze(0)
            # One "x y z" line per point; a context manager replaces the
            # original's manual open()/close() so the handle is released
            # even if a write fails.
            with open(os.path.join('../Data', 'All_class_ori_mesh',
                                   str(i) + '.xyz'), 'w') as fout:
                for m in range(curr_trg_pc.shape[2]):
                    fout.write('%f %f %f \n' % (curr_trg_pc[0, 0, m],
                                                curr_trg_pc[0, 1, m],
                                                curr_trg_pc[0, 2, m]))
    else:
        #from modelnet40_with_vert import ModelNet40_vert
        from modelnet_trn_test import ModelNetDataset

        test_dataset = ModelNetDataset(root='/data/modelnet40_normal_resampled/', batch_size=1, npoints=cfg.npoint, split='test', normal_channel=True)
        #test_dataset = ModelNet40_vert('/data/ModelNet40/', 40, phase='test', regenerate_dataset=False)
        test_loader = torch.utils.data.DataLoader(test_dataset, 1, shuffle=True, drop_last=True, num_workers=8, pin_memory=True)
        test_size = len(test_dataset)

        if not os.path.exists(os.path.join('../Data', 'Ten_class_ori_mesh')):
            os.makedirs(os.path.join('../Data', 'Ten_class_ori_mesh'))

        for i, (vert, faces, label) in enumerate(test_loader):
            # Keep only the classes whitelisted in label_indexes.
            if convert_from_modelnet40_1024_processed[label[0]] in label_indexes:
                vert = vert.squeeze(0)
                faces = faces.squeeze(0)
                vert, _, _ = pc_normalize_torch(vert)
                trg_mesh = Meshes(verts=[vert], faces=[faces]).cuda()

                file_name = os.path.join('../Data', 'Ten_class_ori_mesh', str(i)+'_'+str(convert_from_modelnet40_1024_processed[label[0]])+'.obj')
                final_verts, final_faces = trg_mesh.get_mesh_verts_faces(0)
                print('Processing ['+str(i)+'/'+str(test_size)+' ] instance')
                save_obj(file_name, final_verts, final_faces)
Ejemplo n.º 18
0
    def test_save_obj_invalid_shapes(self):
        """save_obj raises ValueError on malformed verts/faces (path API)."""
        # Invalid vertices shape
        with self.assertRaises(ValueError) as error:
            verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]])  # (V, 4)
            faces = torch.LongTensor([[0, 1, 2]])
            with NamedTemporaryFile(mode="w", suffix=".obj") as f:
                save_obj(Path(f.name), verts, faces)
        expected_message = (
            "Argument 'verts' should either be empty or of shape (num_verts, 3)."
        )
        # The original `assertTrue(expected_message, error.exception)` always
        # passed (the message is truthy); actually check the exception text.
        self.assertIn(expected_message, str(error.exception))

        # Invalid faces shape
        with self.assertRaises(ValueError) as error:
            verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
            faces = torch.LongTensor([[0, 1, 2, 3]])  # (F, 4)
            with NamedTemporaryFile(mode="w", suffix=".obj") as f:
                save_obj(Path(f.name), verts, faces)
        expected_message = (
            "Argument 'faces' should either be empty or of shape (num_faces, 3)."
        )
        self.assertIn(expected_message, str(error.exception))
Ejemplo n.º 19
0
    def test_save_obj(self):
        """save_obj writes verts at 2 decimal places and 1-based faces."""
        verts = torch.tensor(
            [[0.01, 0.2, 0.301], [0.2, 0.03, 0.408], [0.3, 0.4, 0.05],
             [0.6, 0.7, 0.8]],
            dtype=torch.float32,
        )
        faces = torch.tensor([[0, 2, 1], [0, 1, 2], [3, 2, 1], [3, 1, 0]],
                             dtype=torch.int64)
        with NamedTemporaryFile(mode="w", suffix=".obj") as f:
            save_obj(Path(f.name), verts, faces, decimal_places=2)

            expected_file = "\n".join([
                "v 0.01 0.20 0.30",
                "v 0.20 0.03 0.41",
                "v 0.30 0.40 0.05",
                "v 0.60 0.70 0.80",
                "f 1 3 2",
                "f 1 2 3",
                "f 4 3 2",
                "f 4 2 1",
            ])
            # Read back through a context manager — the original leaked the
            # open file handle (it was never closed).
            with open(Path(f.name), "r") as actual_file:
                self.assertEqual(actual_file.read(), expected_file)
Ejemplo n.º 20
0
    def test_save_obj_with_texture(self):
        """Load the textured cow mesh and export it with UVs + texture map."""
        device = torch.device('cuda:0')
        torch.cuda.set_device(device)
        # Set paths
        data_dir = Path(__file__).parent / 'data'
        data_dir.mkdir(exist_ok=True)
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"
        final_obj = data_dir / "cow_exported.obj"
        # Load obj file
        mesh = load_objs_as_meshes([obj_filename], device=device)

        try:
            texture_image = mesh.textures.maps_padded()
            save_obj(final_obj,
                     mesh.verts_packed(),
                     mesh.faces_packed(),
                     verts_uvs=mesh.textures.verts_uvs_packed(),
                     texture_map=texture_image,
                     faces_uvs=mesh.textures.faces_uvs_packed())

        except Exception:
            # Export is treated as best-effort here, but the original bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit — narrow
            # to Exception. NOTE(review): a silently passing try/except in a
            # test asserts nothing; consider self.fail() here instead.
            pass
Ejemplo n.º 21
0
        # NOTE(review): fragment of a training-loop body — `t` (tqdm bar),
        # `loss`, `i`, `WANDB`, the new_block*/gcn* models and the
        # plot_period/save_period constants are defined in the enclosing
        # (unseen) scope.
        t.set_description("loss = {}".format(loss))
        if WANDB:
            wandb.log({"Train Loss": loss})
        losses.append(loss.detach())


        # Plot mesh
        # Periodically visualize the three intermediate blocks and dump them
        # as .obj snapshots (skipping iteration 0).
        if i % plot_period == 0 and i!=0:
            plot_pointcloud(new_block1, title="iter: %d" % i)
            plot_pointcloud(new_block2, title="iter: %d" % i)
            plot_pointcloud(new_block3, title="iter: %d" % i)
            plt.imshow(image.squeeze().permute(1, 2, 0).detach().cpu().numpy())
            plt.show()
            plt.plot(range(len(losses)), np.log10(losses))
            plt.show()
            save_obj("model1.obj", new_block1.verts_packed(), new_block1.faces_packed())
            save_obj("model2.obj", new_block2.verts_packed(), new_block2.faces_packed())
            save_obj("model3.obj", new_block3.verts_packed(), new_block3.faces_packed())
        # Periodic checkpoint of all model/optimizer state into the W&B run
        # directory (only when W&B logging is enabled).
        if i % save_period == 0 and i !=0:
            print("saving ...")
            if WANDB:
                torch.save({
                    'epoch': epoch,
                    'iteration':i,
                    'gcn_state_dict': [gcn1.state_dict(), gcn2.state_dict(), gcn3.state_dict()],
                    'fe_state_dic': fe.state_dict(),
                    'cnn_state_dic': image_model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss}, os.path.join(wandb.run.dir, 'dic_checkpoint'))
Ejemplo n.º 22
0
 def bm_save_simple_obj_with_init(V: int, F: int):
     """Benchmark helper: build a trivial mesh of ``V`` identical verts and
     ``F`` identical faces, returning a zero-arg callable that serializes
     it to an in-memory OBJ at 2 decimal places."""
     verts_list = torch.tensor(V * [[0.11, 0.22, 0.33]]).view(-1, 3)
     faces_list = torch.tensor(F * [[1, 2, 3]]).view(-1, 3)

     def run():
         return save_obj(StringIO(), verts_list, faces_list,
                         decimal_places=2)

     return run
Ejemplo n.º 23
0
 def _bm_save_obj(verts: torch.Tensor, faces: torch.Tensor,
                  decimal_places: int):
     """Benchmark helper: zero-arg callable that writes ``verts``/``faces``
     to a fresh in-memory OBJ stream at the given precision."""
     def run():
         return save_obj(StringIO(), verts, faces, decimal_places)

     return run
Ejemplo n.º 24
0
                # NOTE(review): fragment truncated by extraction — the second
                # save_obj call is cut off mid-argument below. `opt`,
                # `visualizer`, `model`, `total_iters` and `save_result`
                # come from the enclosing (unseen) training loop. Exports the
                # estimated and ground-truth FLAME meshes, with UVs and
                # texture maps, for the batch item selected by
                # model.verbose_batch_ind.
                try:
                    if save_result or (opt.isTrain and not opt.no_html):
                        estimated_obj_path = os.path.join(
                            visualizer.img_dir, 'epoch%.3d_%s.obj' %
                            (total_iters // opt.batch_size, 'estimated_mesh'))
                        true_obj_path = os.path.join(
                            visualizer.img_dir, 'epoch%.3d_%s.obj' %
                            (total_iters // opt.batch_size, 'true_mesh'))
                        save_obj(estimated_obj_path,
                                 model.estimated_mesh[
                                     model.verbose_batch_ind].verts_packed(),
                                 torch.from_numpy(
                                     model.flamelayer.faces.astype(np.int32)),
                                 verts_uvs=model.estimated_mesh[
                                     model.verbose_batch_ind].textures.
                                 verts_uvs_packed(),
                                 texture_map=model.estimated_texture_map[
                                     None, model.verbose_batch_ind],
                                 faces_uvs=model.estimated_mesh[
                                     model.verbose_batch_ind].textures.
                                 faces_uvs_packed())
                        save_obj(
                            true_obj_path,
                            model.true_mesh[
                                model.verbose_batch_ind].verts_packed(),
                            torch.from_numpy(
                                model.flamelayer.faces.astype(np.int32)),
                            verts_uvs=model.true_mesh[model.verbose_batch_ind].
                            textures.verts_uvs_packed(),
                            texture_map=model.true_mesh.textures.maps_padded()[
Ejemplo n.º 25
0
    def test_cube_mesh_render(self):
        """
        End-End test of rendering a cube mesh with texture
        from decreasing camera distances. The camera starts
        outside the cube and enters the inside of the cube.
        """
        device = torch.device("cuda:0")
        mesh = self.load_cube_mesh_with_texture(device)
        # z_clip_value keeps faces that cross the near plane renderable once
        # the camera is inside the cube.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=1e-8,
            faces_per_pixel=5,
            z_clip_value=1e-2,
            perspective_correct=True,
            bin_size=0,
        )

        # Only ambient, no diffuse or specular
        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
            location=[[0.0, 0.0, -3.0]],
        )

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=SoftPhongShader(device=device, lights=lights),
        )

        # Render the cube by decreasing the distance from the camera until
        # the camera enters the cube. Check the output looks correct.
        images_list = []
        dists = np.linspace(0.1, 2.5, 20)[::-1]
        for d in dists:
            R, T = look_at_view_transform(d, 0, 0)
            T[0, 1] -= 0.1  # move down in the y axis
            cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
            images = renderer(mesh, cameras=cameras)
            rgb = images[0, ..., :3].cpu().detach()
            # NOTE(review): this first `filename` assignment is unused — it
            # is overwritten below before any use.
            filename = "DEBUG_cube_dist=%.1f.jpg" % d
            im = (rgb.numpy() * 255).astype(np.uint8)
            images_list.append(im)

            # Check one of the images where the camera is inside the mesh
            if d == 0.5:
                filename = "test_render_mesh_clipped_cam_dist=0.5.jpg"
                image_ref = load_rgb_image(filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

        # Save a gif of the output - this should show
        # the camera moving inside the cube.
        if DEBUG:
            gif_filename = ("room_original.gif"
                            if raster_settings.z_clip_value is None else
                            "room_clipped.gif")
            imageio.mimsave(DATA_DIR / gif_filename, images_list, fps=2)
            save_obj(
                f=DATA_DIR / "cube.obj",
                verts=mesh.verts_packed().cpu(),
                faces=mesh.faces_packed().cpu(),
            )
Ejemplo n.º 26
0
 def save_mesh():
     """Serialize the enclosing scope's ``verts_list``/``faces_list`` to
     ``obj_file`` at 2 decimal places (benchmark target; all three names
     come from the surrounding, unseen scope)."""
     save_obj(obj_file, verts_list, faces_list, decimal_places=2)
Ejemplo n.º 27
0
    # NOTE(review): tail of a training-loop body fused with the top-level
    # epilogue below — names (`loss`, `i`, `gcn*`, `new_block*`, `optimizer`,
    # `plot_period`, `losses`) come from the enclosing, unseen scope.
    #laplacian_losses.append(loss_laplacian)
    losses.append(loss)

    # Plot mesh
    if i % plot_period == 0 and i!=0:
        plot_pointcloud(new_block1, title="iter: %d" % i)
        plot_pointcloud(new_block2, title="iter: %d" % i)
        plot_pointcloud(new_block3, title="iter: %d" % i)

    # Optimization step
    loss.backward()

    # Clip gradients of each GCN to unit norm to stabilize training.
    torch.nn.utils.clip_grad_norm_(gcn1.parameters(), 1)
    torch.nn.utils.clip_grad_norm_(gcn2.parameters(), 1)
    torch.nn.utils.clip_grad_norm_(gcn3.parameters(), 1)

    optimizer.step()
    # for name, param in gcn1.named_parameters():
    #     print(name, torch.isfinite(param.grad).all())
    # for name, param in gcn2.named_parameters():
    #     print(name, torch.isfinite(param.grad).all())
    # for name, param in gcn3.named_parameters():
    #     print(name, torch.isfinite(param.grad).all())

# After training: export the three refined blocks and plot the log-loss curve.
save_obj("model_3d_to_3d_1.obj", new_block1.verts_packed(), new_block1.faces_packed())
save_obj("model_3d_to_3d_2.obj", new_block2.verts_packed(), new_block2.faces_packed())
save_obj("model_3d_to_3d_3.obj", new_block3.verts_packed(), new_block3.faces_packed())

plt.plot(range(len(losses)), np.log(losses))

plt.show()
    # NOTE(review): another training-loop tail fused into this scrape —
    # `t`, `loss*`, `i`, `new_src_mesh`, `optimizer`, `scale`, `center` are
    # all defined in the enclosing, unseen scope.
    # Print the losses
    t.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting
    chamfer_losses.append(loss_chamfer)
    edge_losses.append(loss_edge)
    normal_losses.append(loss_normal)
    laplacian_losses.append(loss_laplacian)
    losses.append(loss)
    # Plot mesh
    if i % plot_period == 0:
        plot_pointcloud(new_src_mesh, title="iter: %d" % i)
        plt.show()

    # Optimization step
    loss.backward()
    optimizer.step()

plt.plot(range(len(losses)), losses)
plt.show()
# Fetch the verts and faces of the final predicted mesh
final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)

# Scale normalize back to the original target size
final_verts = final_verts * scale + center

# Store the predicted mesh using save_obj
final_obj = os.path.join('../', '3dTo3d/final_model.obj')
save_obj(final_obj, final_verts, final_faces)
    # NOTE(review): the first two lines are the orphaned body of an `except`
    # clause whose `try` was lost in extraction; `opt`, `net` and the
    # helpers come from the enclosing, unseen script.
    print('Error! No existing model!')
    exit(-1)

net.eval()

# Load source/target point clouds, the 50 key points, and the faces of the
# source manifold mesh.
src_pc, _, _ = load_obj("%s/pc_4096.obj" % opt.src_shape)
tar_pc, _, _ = load_obj("%s/pc_4096.obj" % opt.tar_shape)
key_pts, _, _ = load_obj("%s/key_point_50.obj" % opt.src_shape)
_, src_faces, _ = load_obj("%s/manifold.obj" % opt.src_shape)
src_faces = src_faces.verts_idx
# Skinning-style weights mapping the key points onto the dense point cloud
# and mesh vertices.
w_pc = torch.from_numpy(np.load("%s/w_pc_4096.npy" % opt.src_shape))
w_mesh = torch.from_numpy(np.load("%s/w_mesh.npy" % opt.src_shape))
src_pc = src_pc.unsqueeze(0).cuda()
tar_pc = tar_pc.unsqueeze(0).cuda()
key_pts = key_pts.unsqueeze(0).cuda()
w_pc = w_pc.unsqueeze(0).cuda()

_, _, _, basis, _, _, _, coef_range = net(src_pc, tar_pc, key_pts, w_pc)

# Sweep each deformation basis over 4 coefficients spanning twice its
# predicted range and save the resulting deformed mesh for each sample.
os.makedirs(opt.save_dir, exist_ok=True)
for i in range(opt.num_basis):
    l = coef_range[0][i][0] * 2
    r = coef_range[0][i][1] * 2
    for j in range(4):
        scale = (r - l) / 3 * j + l
        off = basis[0][i].reshape(50, 3) * scale
        def_key_pts = key_pts[0] + off
        # Mesh vertices = weights @ deformed key points.
        def_ver = torch.matmul(w_mesh.cuda(), def_key_pts)
        save_obj(("%s/%d-%d.obj") % (opt.save_dir, i, j), def_ver.cpu(),
                 src_faces)
Ejemplo n.º 30
0
# Inference entry point: load a pretrained deformation network, deform the
# source mesh toward the target point cloud, and save the result as .obj.
opt = parse_args()
net = network.model(opt.num_basis).cuda()
net = torch.nn.DataParallel(net)
try:
    checkpoint = torch.load(opt.checkpoint)
    net.load_state_dict(checkpoint['model_state_dict'])
    print('Use pretrain model')
except Exception:
    # The original bare `except:` also swallowed KeyboardInterrupt /
    # SystemExit; narrow to Exception and abort with the same exit code.
    print('Error! No existing model!')
    raise SystemExit(-1)

net.eval()

# Load source/target point clouds, the 50 key points, and the faces of the
# source manifold mesh.
src_pc, _, _ = load_obj("%s/pc_4096.obj" % opt.src_shape)
tar_pc, _, _ = load_obj("%s/pc_4096.obj" % opt.tar_shape)
key_pts, _, _ = load_obj("%s/key_point_50.obj" % opt.src_shape)
_, src_faces, _ = load_obj("%s/manifold.obj" % opt.src_shape)
src_faces = src_faces.verts_idx
# Skinning-style weights mapping key points onto point-cloud/mesh vertices.
w_pc = torch.from_numpy(np.load("%s/w_pc_4096.npy" % opt.src_shape))
w_mesh = torch.from_numpy(np.load("%s/w_mesh.npy" % opt.src_shape))
src_pc = src_pc.unsqueeze(0).cuda()
tar_pc = tar_pc.unsqueeze(0).cuda()
key_pts = key_pts.unsqueeze(0).cuda()
w_pc = w_pc.unsqueeze(0).cuda()

def_key_pts, _, _, _, _, _, _, _ = net(src_pc, tar_pc, key_pts, w_pc)

# Deformed mesh vertices = weights @ deformed key points.
def_ver = torch.matmul(w_mesh.cuda(), def_key_pts[0])
save_obj(opt.save_name, def_ver.cpu(), src_faces)