# NOTE(review): this chunk arrived whitespace-mangled (collapsed onto one
# physical line); the line breaks and indentation below are reconstructed.
# The `if` header matching the `else:` below lies OUTSIDE this view, so the
# first two statements belong to its then-branch — confirm against the file.
points_dir = os.path.commonpath(points_paths)
points_relpaths = [os.path.relpath(p, points_dir) for p in points_paths]
else:
    # Fallback branch: keep only the file names (presumably the single-path
    # case, where there is no common directory to strip — TODO confirm).
    points_relpaths = [os.path.basename(p) for p in points_paths]

# Reproducibility: fix both torch and numpy RNG seeds and force cuDNN into
# deterministic mode (benchmark autotuning disabled, as it is nondeterministic).
torch.manual_seed(24)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(24)

# Sample 300 view positions on a spiral around the sphere and persist them
# as a point cloud; the positions themselves are reused as the normals.
view_sampler = SphericalSampler(300, 'SPIRAL')
points = view_sampler.points
save_ply(points,'example_data/pointclouds/spiral_300.ply',normals=points)

# Load the source scene on CPU and generate one camera per sampled view,
# reading the camera positions back from the spiral point cloud saved above.
scene = readScene(opt.source, device="cpu")
opt.genCamera = 300
if opt.genCamera > 0:
    camSampler = CameraSampler(opt.genCamera, opt.camOffset, opt.camFocalLength,
                               points=scene.cloud.localPoints,
                               camWidth=opt.width, camHeight=opt.height,
                               filename="example_data/pointclouds/spiral_300.ply")
    # NOTE(review): reconstructed as part of the if-body (camSampler is only
    # bound inside this branch) — verify indentation against the original file.
    camSampler.closer = False

# Build the splatter without tracking gradients (setup only, no optimization).
with torch.no_grad():
    splatter = createSplatter(opt, scene=scene)
    #splatter.shading = 'depth'
    if opt.genCamera > 0:
        # Materialize all generated cameras and register them with the splatter.
        cameras = []
        for i in range(opt.genCamera):
            cam = next(camSampler)
            cameras.append(cam)
        splatter.initCameras(cameras=cameras)
# NOTE(review): this chunk arrived whitespace-mangled (collapsed onto one
# physical line); the line breaks and indentation below are reconstructed.
# This first call is the tail of a function whose `def` lies outside this
# view (it persists the optimized scene and cloud to the experiment dir).
writeScene(scene, os.path.join(expr_dir, 'final_scene.json'), os.path.join(expr_dir, 'final_cloud.ply'))

if __name__ == "__main__":
    # Parse deformation options from the command line.
    opt = DeformationOptions().parse()

    # Reproducibility: fix torch/numpy RNG seeds and force deterministic
    # cuDNN kernels (benchmark autotuning disabled).
    torch.manual_seed(24)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(24)

    # Load reference (target) and source scenes on CPU.
    refScene = readScene(opt.ref, device=torch.device("cpu"))
    scene = readScene(opt.source, device=torch.device("cpu"))

    if opt.baseline:
        # Baseline mode: render both scenes with depth-only shading.
        refScene.cloud.shading = scene.cloud.shading = "depth"

    # Copy the reference scene's shading mode and all lighting parameters onto
    # the source scene so both render under identical illumination.
    scene.cloud.shading = refScene.cloud.shading
    scene.pointlightPositions = refScene.pointlightPositions
    scene.pointlightColors = refScene.pointlightColors
    scene.sunDirections = refScene.sunDirections
    scene.sunColors = refScene.sunColors
    scene.ambientLight = refScene.ambientLight

    # NOTE(review): this call is truncated at the end of the visible chunk —
    # the remaining keyword arguments lie outside this view.
    trainShapeOnImage(scene, refScene, opt, baseline=opt.baseline,