def load(self, obj_filename):
        # Load a mesh from an .obj or .ply file.
        extension = obj_filename[-3:]
        
        if extension == 'obj':
            verts, faces, aux = load_obj(obj_filename)
            verts_idx = faces.verts_idx        
        elif extension == 'ply':
            verts, faces = load_ply(obj_filename)
            verts_idx = faces
        else:
            raise ValueError('Unsupported mesh extension: %r' % extension)

        if os.path.exists(obj_filename[:-3] + 'npy'):
            # Per-vertex colors are stored as BGR; reorder channels to RGB.
            colors = np.load(obj_filename[:-3] + 'npy')
            verts_rgb = torch.FloatTensor(colors[..., [2, 1, 0]])
            verts_rgb = verts_rgb.unsqueeze(0)
            verts_rgb = verts_rgb.to(self.device)
        else:
            # Initialize each vertex to white (channel order is BGR).
            verts_rgb = torch.ones_like(verts)[None]
            verts_rgb = verts_rgb.to(self.device)
            #textures = Textures(faces_uvs=faces.textures_idx[None,...], verts_uvs=aux.verts_uvs[None,...], verts_rgb=verts_rgb.to(self.device))

        # Create a Meshes object for the face.
        self.face_mesh = Meshes(
            verts = [verts.to(self.device)],
            faces = [verts_idx.to(self.device)],
            textures= Textures(verts_rgb=verts_rgb)
        )
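The bare `Textures(verts_rgb=...)` constructor above comes from early PyTorch3D releases; later versions expose the same idea as `TexturesVertex`. A minimal, self-contained sketch of the same load-and-texture flow against the newer API (the function name and the `device` default are illustrative, not part of the original):

import os
import torch
from pytorch3d.io import load_obj, load_ply
from pytorch3d.structures import Meshes
from pytorch3d.renderer import TexturesVertex

def load_mesh(path, device="cpu"):
    # Dispatch on the file extension, mirroring the method above.
    ext = os.path.splitext(path)[1].lower()
    if ext == ".obj":
        verts, faces, _aux = load_obj(path)
        faces_idx = faces.verts_idx
    elif ext == ".ply":
        verts, faces_idx = load_ply(path)
    else:
        raise ValueError("unsupported mesh format: %s" % ext)
    # Default every vertex to white; TexturesVertex expects (N, V, 3).
    verts_rgb = torch.ones_like(verts)[None]
    textures = TexturesVertex(verts_features=verts_rgb.to(device))
    return Meshes(verts=[verts.to(device)],
                  faces=[faces_idx.to(device)],
                  textures=textures)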
Example #2
def deepdream(FLAGS):
    '''
    General deep-dreaming wrapped in a cache to prevent recomputation.

    :param FLAGS: configuration flags forwarded to model.deep_dream
    :return: a matplotlib figure tiling the rendered meshes
    '''

    global model, renderer_instance

    model.deep_dream(FLAGS)

    deep_dream_dir = model.result_dir

    files = os.listdir(deep_dream_dir)
    verts = []
    faces = []
    verts_rgb = []
    titles = []
    for file in files:
        if file.endswith('.ply'):
            titles.append(file)  # os.listdir already yields bare filenames
            vert, face = load_ply(os.path.join(deep_dream_dir, file))
            verts.append(vert.to(device))
            faces.append(face.to(device))
            verts_rgb.append(torch.ones_like(vert).to(device))

    textures = Textures(verts_rgb=verts_rgb)
    interpol_mesh = Meshes(verts, faces, textures)

    print('rendering images')
    images = renderer_instance(interpol_mesh).cpu().numpy()

    print('processing images')
    num_images = images.shape[0]
    cols = 2
    rows = -(-num_images // cols)  # ceiling division

    fig, axs = plt.subplots(nrows=rows,
                            ncols=cols,
                            sharex='all',
                            sharey='all',
                            figsize=(20, 20),
                            gridspec_kw={
                                'wspace': 0,
                                'hspace': 0
                            })

    for ax in axs.flatten():
        ax.axis('off')  # hide every frame, including unused trailing axes
    for ax, im in zip(axs.flatten(), range(num_images)):
        ax.imshow(images[im, :, :, :3])

    return fig
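The plotting code above sizes the grid with ceiling division (`-(-n // cols)`). A small hedged helper isolating that step, assuming `images` is the usual (N, H, W, C) array a PyTorch3D renderer returns (the helper name and defaults are illustrative):

import numpy as np
import matplotlib.pyplot as plt

def plot_image_grid(images, cols=2, figsize=(20, 20)):
    n = images.shape[0]
    rows = -(-n // cols)  # ceiling division: enough rows for every image
    fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=figsize,
                            gridspec_kw={'wspace': 0, 'hspace': 0})
    axs = np.atleast_1d(axs).flatten()
    for ax in axs:
        ax.axis('off')  # also hides any unused trailing axes
    for ax, im in zip(axs, images):
        ax.imshow(im[..., :3])  # drop the alpha channel if present
    return fig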
Example #3
    def __init__(self, dir):
        print("Loading scene ", dir)
        self.ReadConfig(dir)
        a = numpy.loadtxt(os.path.join(dir, "poses_view_matrix.txt"))
        self.num_images = a.shape[0] // 4
        self.view_matrices = []
        for i in range(self.num_images):
            V = a[i * 4:(i + 1) * 4, :]
            self.view_matrices.append(V)

        # convert view matrix to R,t
        self.camera_rs = []
        self.camera_ts = []

        for V in self.view_matrices:
            R = V[0:3, 0:3]
            t = V[0:3, 3:4]
            self.camera_rs.append(R)
            self.camera_ts.append(t.transpose())

        self.R = torch.tensor(numpy.stack(self.camera_rs)).float().to(device)
        self.T = torch.tensor(numpy.stack(self.camera_ts)).squeeze(1).float().to(device)

        self.image_size = (self.h, self.w)
        self.image_size_tensor = torch.Tensor(
            self.image_size).unsqueeze(0).to(device).int()

        # NOTE: hardcoded path to this scene's point cloud.
        pc = load_ply(
            "/home/dari/Projects/pointrendering2/Code/scenes/tt_train_colmap/point_cloud.ply"
        )
        verts = torch.Tensor(pc[0]).to(device).unsqueeze(0)
        rgb = torch.ones_like(verts)  # constant white features for every point
        # PrintTensorInfo(verts)
        self.point_cloud = Pointclouds(points=verts, features=rgb)

        print("Scene loaded: R, T , K , size")
        PrintTensorInfo(self.R)
        PrintTensorInfo(self.T)
        PrintTensorInfo(self.K)
        PrintTensorInfo(self.image_size_tensor)
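A hedged standalone sketch of the matrix-parsing step above, assuming the same file layout (N view matrices stacked vertically into a (4*N, 4) text file; the function name is illustrative):

import numpy
import torch

def load_view_matrices(path, device="cpu"):
    a = numpy.loadtxt(path)   # shape (4*N, 4)
    V = a.reshape(-1, 4, 4)   # one 4x4 view matrix per camera
    # Split each view matrix into a 3x3 rotation and a translation vector.
    R = torch.tensor(V[:, :3, :3], dtype=torch.float32, device=device)
    T = torch.tensor(V[:, :3, 3], dtype=torch.float32, device=device)
    return R, T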
Example #4
    torch.cuda.set_device(device)

    names_and_path = get_names_and_paths(opt)
    for mesh_name, mesh_path in names_and_path:
        output_dir = os.path.join(opt.output, mesh_name + '_variational_light')
        rgb_dir = os.path.join(output_dir, "image")
        mask_dir = os.path.join(output_dir, "mask")
        depth_dir = os.path.join(output_dir, "depth")
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(rgb_dir, exist_ok=True)
        os.makedirs(mask_dir, exist_ok=True)
        os.makedirs(depth_dir, exist_ok=True)

        # load and normalize mesh
        if os.path.splitext(mesh_path)[1].lower() == ".ply":
            verts, faces = load_ply(mesh_path)
            verts_idx = faces
        elif os.path.splitext(mesh_path)[1].lower() == ".obj":
            verts, faces, aux = load_obj(mesh_path)
            verts_idx = faces.verts_idx
        else:
            raise NotImplementedError

        # # normalize to unit box
        # vert_range = (verts.max(dim=0)[0] - verts.min(dim=0)[0]).max()
        # vert_center = (verts.max(dim=0)[0] + verts.min(dim=0)[0]) / 2
        # verts -= vert_center
        # verts /= vert_range

        # normalize to unit sphere
        vert_center = torch.mean(verts, dim=0)
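The example is cut off after computing the centroid; a hedged guess at how the unit-sphere normalization typically continues (center on the centroid, then divide by the radius of the farthest vertex):

        verts = verts - vert_center
        vert_scale = torch.norm(verts, dim=1).max()  # farthest-vertex radius
        verts = verts / vert_scale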