def convert_png_to_jpg_rgb(self, tex_path):
    """Convert a PNG texture into a JPG file holding the RGB data, plus a
    companion single-channel '<name>_a.rgb' file holding the alpha plane
    when the source image has one.

    :tex_path: OS-specific path of the source PNG texture.
    """
    tex_basename = os.path.splitext(tex_path)[0]
    img = PNMImage()
    self.load_img_with_retry(img, tex_path)

    x_size = img.get_x_size()
    y_size = img.get_y_size()

    # Copy the color data into a 3-channel image and write it as JPG.
    # (BUGFIX: jpg_path was previously computed twice.)
    output_img = PNMImage(x_size, y_size, 3)
    output_img.copy_sub_image(img, 0, 0, 0, 0, x_size, y_size)
    jpg_path = tex_basename + '.jpg'
    print(f'Writing JPG {jpg_path}...')
    output_img.write(Filename.from_os_specific(jpg_path))

    if img.num_channels == 4:
        # JPG cannot carry alpha: store the alpha plane in a separate
        # grayscale SGI (.rgb) file.
        alpha_image = PNMImage(x_size, y_size, 1)
        alpha_image.set_type(RGB_TYPE)
        # Copy alpha channel from source image
        for i in range(x_size):
            for j in range(y_size):
                alpha_image.set_gray(i, j, img.get_alpha(i, j))
        rgb_path = tex_basename + '_a.rgb'
        print(f'Writing RGB {rgb_path}...')
        alpha_image.write(Filename.from_os_specific(rgb_path))
def create_instance(self): self.create_buffer() # Water surface maker = CardMaker('water') maker.setFrame(self.x1, self.x2, self.y1, self.y2) self.waterNP = self.parent.instance.attachNewNode(maker.generate()) self.waterNP.hide(BaseObject.AllCamerasMask) self.waterNP.show(BaseObject.DefaultCameraMask) self.waterNP.setHpr(0, -90, 0) self.waterNP.setPos(0, 0, self.z) self.waterNP.setTransparency(TransparencyAttrib.MAlpha) vertex_shader = defaultDirContext.find_shader('water-vertex.glsl') fragment_shader = defaultDirContext.find_shader('water-fragment.glsl') self.waterNP.setShader( Shader.load( Shader.SL_GLSL, vertex=Filename.from_os_specific(vertex_shader).get_fullpath(), fragment=Filename.from_os_specific( fragment_shader).get_fullpath())) self.waterNP.setShaderInput('wateranim', Vec4(0.03, -0.015, self.scale, 0)) # vx, vy, scale, skip # offset, strength, refraction factor (0=perfect mirror, 1=total refraction), refractivity self.waterNP.setShaderInput('waterdistort', Vec4(0.4, 1.0, 0.25, 0.45)) self.waterNP.setShaderInput('time', 0) self.waterNP.setShaderInput('reflection_tex', self.texture) # distortion texture tex1 = loader.loadTexture('textures/water.png') self.waterNP.setShaderInput('distortion_tex', tex1)
def do_load_texture(self, filename, alpha_filename):
    """Synchronously read a texture file, optionally merging in a
    separate alpha image, and return the resulting Texture."""
    if alpha_filename is not None:
        alpha_path = Filename.from_os_specific(alpha_filename)
    else:
        # An empty Filename tells Texture.read to skip the alpha merge.
        alpha_path = Filename('')
    texture = Texture()
    texture.read(fullpath=Filename.from_os_specific(filename),
                 alpha_fullpath=alpha_path,
                 primary_file_num_channels=0,
                 alpha_file_channel=0)
    return texture
def load_texture(self, filename, alpha_filename=None):
    """Load a texture through the Panda3D loader.

    :filename: OS-specific path of the color texture.
    :alpha_filename: optional OS-specific path of a separate alpha image.
    Returns the loaded Texture, or None when loading fails.
    """
    texture = None
    try:
        panda_filename = Filename.from_os_specific(filename).get_fullpath()
        if alpha_filename is not None:
            # BUGFIX: previously converted `filename` again here, so the
            # alpha image path was silently ignored.
            panda_alpha_filename = Filename.from_os_specific(
                alpha_filename).get_fullpath()
        else:
            panda_alpha_filename = None
        texture = loader.loadTexture(panda_filename,
                                     alphaPath=panda_alpha_filename)
    except IOError:
        print("Could not load texture", filename)
    return texture
def panda_config(self):
    """Assemble the Panda3D prc configuration from the application
    settings, load it, then load the user's optional prc file."""
    data = []
    request_opengl_config(data)
    self.app_panda_config(data)
    data.append("text-encoding utf8")
    data.append("paste-emit-keystrokes #f")
    #TODO: Still needed ?
    data.append("bounds-type box")
    data.append("screenshot-extension png")
    data.append("screenshot-filename %~p-%Y-%m-%d-%H-%M-%S-%~f.%~e")
    data.append("fullscreen %d" % settings.win_fullscreen)
    if settings.win_fullscreen:
        data.append("win-size %d %d" % (settings.win_fs_width, settings.win_fs_height))
    else:
        data.append("win-size %d %d" % (settings.win_width, settings.win_height))
    data.append("lens-far-limit %g" % settings.lens_far_limit)
    loadPrcFileData("", '\n'.join(data))
    if settings.prc_file is not None:
        config_file = settings.prc_file
        if not os.path.isabs(config_file):
            config_file = os.path.join(settings.config_dir, config_file)
        filename = Filename.from_os_specific(config_file)
        if filename.exists():
            print("Loading panda config", filename)
            loadPrcFile(filename)
        else:
            # BUGFIX: message previously stopped mid-sentence, giving no
            # hint that the file was missing.
            print("Panda config file", filename, "does not exist")
def main():
    """Entry point: parse CLI args, optionally mount a prefix directory
    into the VFS, then generate the output card model."""
    args = parse_args()
    if args.frames:
        # BUGFIX: materialize the map() iterator -- a lazy iterator can
        # only be consumed once and has no len(), unlike the list used
        # in the else branch.
        animations_frames = list(map(int, args.frames.split(',')))
    else:
        animations_frames = [len(args.input)]
    # Only forward options the user actually supplied.
    kwargs = {}
    if args.fps:
        kwargs['fps'] = args.fps
    if args.scale:
        kwargs['scale'] = args.scale
    if args.type:
        kwargs['type'] = args.type
    if args.empty:
        kwargs['empty'] = args.empty
    if args.prefix:
        kwargs['prefix'] = args.prefix
    if args.prefix:
        # Expose the current directory under the prefix mount point so
        # relative texture paths resolve.
        vfs = VirtualFileSystem.get_global_ptr()
        vfs.mount(
            Filename.from_os_specific('.').get_fullpath(),
            args.prefix.rstrip('/'), 0)
        mp = get_model_path()
        mp.prepend_directory(args.prefix.rstrip('/'))
    cm = CardMaker(animations_frames, args.input, **kwargs)
    cm.make(args.output)
def read_texture(self, filename):
    """ Reads a texture from the Pandora resources repository.
    Returns a PNMImage object representing the image data.
    Throws a PalettizerException if file could not be found.

        :filename: Relative filename pointing to a texture file in the Pandora repository.
    """
    full_filename = self.find_texture(filename)
    if self.debug:
        print(f'Reading {full_filename}...')

    # We've found the source file! Let's load it using Panda3D.
    image = PNMImage()
    image.read(Filename.from_os_specific(full_filename))

    # Remember whether the source already carried an alpha channel
    # before we promote it to RGBA.
    had_alpha = image.num_channels in (2, 4)
    image.set_color_type(4)
    if not had_alpha:
        # The freshly added alpha channel starts out transparent; we
        # always want an opaque default.
        image.alpha_fill(1)
    return image
def append_subdirectories_to_search(root_dir: str, mount_root: bool = True) -> None:
    """
    Walks through a root directory and mounts all subdirectorys to the search path.
    Generally used for development source version of an application or servers.
    """
    model_path = get_model_path()
    candidates = []
    for entry in os.listdir(root_dir):
        full_path = os.path.join(root_dir, entry)
        if os.path.isdir(full_path):
            candidates.append(full_path)
    if mount_root:
        if not root_dir.endswith('/'):
            root_dir += '/'
        candidates.append(root_dir)
    for candidate in candidates:
        # Skip hidden folders such as '.git'
        if os.path.basename(candidate).startswith('.'):
            continue
        panda_dir = Filename.from_os_specific(os.path.abspath(candidate))
        __file_notify.debug('Mounting (%s) to search path' % (panda_dir))
        model_path.append_directory(panda_dir)
def vfs_mount_subdirectories(mount_point: str, root_dir: str, mount_root: bool = True) -> bool:
    """
    Walks through a root directory and mounts all subdirectorys to the requested mount point.
    Generally used for development source version of an application or servers.
    """
    entries = [
        os.path.join(root_dir, name)
        for name in os.listdir(root_dir)
        if os.path.isdir(os.path.join(root_dir, name))
    ]
    if mount_root:
        root = root_dir if root_dir.endswith('/') else root_dir + '/'
        entries.append(root)
    for entry in entries:
        # Skip hidden folders such as '.git'
        if os.path.basename(entry).startswith('.'):
            continue
        mount_dir = Filename.from_os_specific(os.path.abspath(entry))
        __file_notify.debug('Mounting (%s) to (%s)' % (mount_dir, mount_point))
        vfs_mount_directory(mount_point, mount_dir)
def __init__(self, droneList): ShowBase.__init__(self) # set resolution wp = WindowProperties() wp.setSize(2000, 1500) # wp.setSize(1200, 900) # wp.setSize(800, 600) self.win.requestProperties(wp) self.setFrameRateMeter(True) self.render.setAntialias(AntialiasAttrib.MAuto) CameraController(self) # setup model directory self.modelDir = os.path.abspath( sys.path[0]) # Get the location of the 'py' file I'm running: self.modelDir = Filename.from_os_specific(self.modelDir).getFullpath( ) + "/models" # Convert that to panda's unix-style notation. self.initScene() self.initBullet() self.droneManager = DroneManager(self, droneList) DroneRecorder(self.droneManager) self.stopwatchOn = False self.now = 0 self.accept('t', self.toggleStopwatch)
def load(self, filename):
    """ Loads a profile from a given filename and returns the index of
    the cached profile, or -1 when the profile could not be loaded. """
    # Make filename unique
    fname = Filename.from_os_specific(filename)
    if not VirtualFileSystem.get_global_ptr().resolve_filename(fname,
            get_model_path().get_value(), "ies"):
        self.error("Could not resolve", filename)
        return -1
    fname = fname.get_fullpath()

    # Check for cache entries
    if fname in self._entries:
        return self._entries.index(fname)

    # Check for out of bounds
    if len(self._entries) >= self._max_entries:
        # TODO: Could remove unused profiles here or regenerate texture
        self.warn("Cannot load IES Profile, too many loaded! (Maximum: 32)")
        # BUGFIX: previously execution fell through here and wrote past
        # the end of the storage texture; fail explicitly instead.
        return -1

    # Try loading the dataset, and see what happens
    try:
        dataset = self._loader.load(fname)
    except IESLoaderException as msg:
        self.warn("Failed to load profile from", filename, ":", msg)
        return -1

    if not dataset:
        return -1

    # Dataset was loaded successfully, now copy it into the next free
    # slice of the storage texture and remember its name.
    dataset.generate_dataset_texture_into(self._storage_tex, len(self._entries))
    self._entries.append(fname)
    return len(self._entries) - 1
def load_img_with_retry(self, img, tex_path):
    """Read `tex_path` into `img`, retrying a few times on failure.

    Returns True on success, False when all attempts failed (the
    original silently returned None either way, so the new return
    value is backward-compatible). The Filename conversion is hoisted
    out of the loop since it never changes between attempts.
    """
    filename = Filename.from_os_specific(tex_path)
    for _ in range(6):  # one initial attempt + up to 5 retries
        if img.read(filename):
            return True
    return False
def load_texture(self, filename):
    """Load a texture through the Panda3D loader; returns None when the
    file cannot be read."""
    try:
        panda_path = Filename.from_os_specific(filename).get_fullpath()
        return loader.loadTexture(panda_path)
    except IOError:
        print("Could not load texture", filename)
        return None
def mount_multifile(self):
    """Open the encrypted 'phase_1.ef' multifile and mount it at the
    virtual filesystem root; raises Exception if mounting fails."""
    multifile = Multifile()
    multifile.open_read(Filename.from_os_specific('phase_1.ef'))
    # Subfiles are encrypted; supply the password for extraction.
    multifile.set_encryption_flag(True)
    multifile.set_encryption_password(self.PW)
    vfs = VirtualFileSystem.get_global_ptr()
    if not vfs.mount(multifile, Filename('/'), 0):
        raise Exception('Multifile could not be mounted.')
def _find_basepath(self):
    """ Attempts to find the pipeline base path by looking at the
    location of this file """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    base_dir = os.path.abspath(join(module_dir, ".."))
    filename = Filename.from_os_specific(base_dir)
    # convert lib/site-packages to Lib/site-packages on windows
    filename.make_true_case()
    return filename.get_fullpath()
def write_model(self, filename):
    """Build the scene graph under the ModelRoot object and write the
    result to `filename` as a .bam file."""
    model_root = next(self.get_objects_of_type('ModelRoot'))
    node_path = NodePath(model_root.create_node())
    self.add_node(model_root, node_path)
    model_root.finalize_node(node_path)
    node_path.ls()  # dump the finished graph for inspection
    node_path.write_bam_file(Filename.from_os_specific(filename))
def test_filename():
    """Fixture returning a filename to an existent .test file."""
    tmp = tempfile.NamedTemporaryFile(suffix='.test', delete=False)
    tmp.write(b"test")
    tmp.close()
    result = Filename.from_os_specific(tmp.name)
    result.make_true_case()
    yield result
    # Remove the temporary file once the test using the fixture is done.
    os.unlink(tmp.name)
def load_model(pattern, callback=None, context=defaultDirContext):
    """Resolve `pattern` against the directory context and load the
    model through the Panda loader; returns None when not found."""
    filename = context.find_model(pattern)
    if filename is None:
        print("Model not found", pattern)
        return None
    print("Loading model", filename)
    panda_path = Filename.from_os_specific(filename).get_fullpath()
    return loader.loadModel(panda_path, callback=callback)
def test_loader_extensions(test_filename):
    """Tests multi-extension loaders."""

    class MultiExtensionLoader:
        # Mixed-case extension list: matching should not be
        # case-sensitive.
        extensions = ["test1", "teSt2"]

        @staticmethod
        def load_file(path, options, record=None):
            return ModelRoot("loaded")

    # Create one temp file per extension, deliberately varying the case
    # of the suffix.
    fp1 = tempfile.NamedTemporaryFile(suffix='.test1', delete=False)
    fp1.write(b"test1")
    fp1.close()
    fn1 = Filename.from_os_specific(fp1.name)
    fn1.make_true_case()

    fp2 = tempfile.NamedTemporaryFile(suffix='.TEST2', delete=False)
    fp2.write(b"test2")
    fp2.close()
    fn2 = Filename.from_os_specific(fp2.name)
    fn2.make_true_case()

    try:
        with registered_type(MultiExtensionLoader):
            # LF_no_cache ensures the custom loader, not the model
            # cache, serves the request.
            model1 = Loader.get_global_ptr().load_sync(
                fn1, LoaderOptions(LoaderOptions.LF_no_cache))
            assert model1 is not None
            assert model1.name == "loaded"

            model2 = Loader.get_global_ptr().load_sync(
                fn2, LoaderOptions(LoaderOptions.LF_no_cache))
            assert model2 is not None
            assert model2.name == "loaded"
    finally:
        os.unlink(fp1.name)
        os.unlink(fp2.name)

    # Ensure that both were unregistered.
    registry = LoaderFileTypeRegistry.get_global_ptr()
    assert not registry.get_type_from_extension("test1")
    assert not registry.get_type_from_extension("test2")
def get_panda_sdk_path():
    """ Returns the path of the panda3d sdk, under windows """
    # Import the base panda3d module
    import panda3d

    # The SDK root sits one level above the panda3d module directory.
    sdk_dir = join(dirname(panda3d.__file__), "..")
    fname = Filename.from_os_specific(sdk_dir)
    fname.make_absolute()
    return fname.to_os_specific()
def _node_from_bam_name(entity, bam_name):
    """Load '<bam_name>.bam' from the resource manager's meshes folder
    and return its root node, which must be a BulletBodyNode."""
    meshes_dir = path.join(entity.scene.resource_manager.root_path, "meshes")
    model_path = path.join(meshes_dir, "{}.bam".format(bam_name))
    nodepath = loader.loadModel(Filename.from_os_specific(model_path))
    node = nodepath.node()
    if not isinstance(node, BulletBodyNode):
        raise ValueError("Invalid node type {}".format(node.get_class_type()))
    return node
def write_jpg(self, new_image, alpha_image, folder, phase_dir, basename, rgb_only=False): """ Saves a previously palettized image as a PNG file. :new_image: The palettized image containing RGB data. :alpha_image: The SGI variant of the palettized image containing alpha data. :folder: The folder to save the image in. :phase_dir: The name of the phase folder containing the texture, for example: "phase_3" :basename: The filename of the image, for example: "avatar_palette_1mla_1" :rgb_only: True if we only want to save the RGB variant of this image. """ # Create the folder if necessary. folder = os.path.join(folder, phase_dir, 'maps') if not os.path.exists(folder): os.makedirs(folder) palette_path = os.path.join(folder, basename.strip('.')) # We have an RGB only file! if rgb_only: new_image.write(Filename.from_os_specific(f'{palette_path}.rgb')) return # JPG files do not require alpha channels, so remove it. new_image.remove_alpha() new_image.write(Filename.from_os_specific(f'{palette_path}.jpg')) # Write our alpha file if it exists. if alpha_image is not None: alpha_image.write( Filename.from_os_specific(f'{palette_path}_a.rgb'))
def write_path(self, pth):
    """ Set a writable directory for generated files. This can be a string
    path name or a multifile with openReadWrite(). If no pathname is set
    then the root directory is used.

    This feature is usually only used for debugging, the pipeline will
    dump all generated shaders and other temporary files to that
    directory. If you don't need this, you can use
    set_virtual_write_path(), which will create the temporary path in the
    VirtualFileSystem, thus not writing any files to disk. """
    if pth is None:
        # No path given: fall back to a lock file in the root directory.
        self._write_path = None
        self._lock_file = "instance.pid"
        return
    self._write_path = Filename.from_os_specific(pth).get_fullpath()
    self._lock_file = join(self._write_path, "instance.pid")
def _set_write_path(self, pth):
    """ Set a writable directory for generated files. This can be a string
    path name or a multifile with openReadWrite(). If no pathname is set
    then the root directory is used.

    This feature is usually only used for debugging, the pipeline will
    dump all generated shaders and other temporary files to that
    directory. If you don't need this, you can use
    set_virtual_write_path(), which will create the temporary path in the
    VirtualFileSystem, thus not writing any files to disk. """
    if pth is not None:
        converted = Filename.from_os_specific(pth).get_fullpath()
        self._write_path = converted
        self._lock_file = join(converted, "instance.pid")
    else:
        # No path given: fall back to a lock file in the root directory.
        self._write_path = None
        self._lock_file = "instance.pid"
def createElement(self, name, type, start, end=None):
    """Create a named rigid body with an attached model and triangle-mesh
    collision shape.

    :name: node name for the new element.
    :type: one of "cell", "bit", "segment" or "synapse".
    :start: world position (and segment start point for elongated types).
    :end: segment/synapse end point; required for those types.
    :raises ValueError: for an unknown `type`.
    Returns the NodePath of the rigid body.
    """
    if type == "cell":
        model_file = "sphere.dae"
    elif type == "bit":
        model_file = "box.dae"
    elif type == "segment" or type == "synapse":
        model_file = "cylinder.dae"
    else:
        # BUGFIX: an unrecognized type previously fell through and
        # raised a confusing NameError on model_file below.
        raise ValueError("Unknown element type: {}".format(type))

    # Create the rigid body
    body_node = BulletRigidBodyNode(name)
    body_node.setDeactivationEnabled(False)
    body_np = self.render.attachNewNode(body_node)
    body_np.setName(name)

    if type == "segment" or type == "synapse":
        # Calculate the linear distance between the start and the end position of the segment.
        length = (Point3(start) - Point3(end)).length()
        body_np.setPos(start)
        body_np.lookAt(end)
        body_np.setScale(1, length / 2, 1)
    else:
        body_np.setPos(start)

    # Load the 3d model file using the asset folder relative path and attach the geom node to rigid body
    object_np = self.loader.loadModel(
        Filename.from_os_specific(
            os.path.join(REPO_DIR, "models", model_file)))
    object_np.reparentTo(body_np)
    object_np.setPosHpr((0, 0, 0), (0, 0, 0))
    object_np.setName(name + "_geom")
    object_np.setTexGen(TextureStage.getDefault(), TexGenAttrib.MWorldPosition)
    # Apply any transforms from modelling software to gain performance
    object_np.flattenStrong()

    # Create the shape used for collisions
    geom_nodes = object_np.findAllMatches("**/+GeomNode")
    mesh = BulletTriangleMesh()
    for geom in geom_nodes[0].node().getGeoms():
        mesh.addGeom(geom)
    shape = BulletTriangleMeshShape(mesh, dynamic=True)
    body_node.addShape(shape)
    self.physics_manager.attachRigidBody(body_node)
    return body_np
def do_load_texture_array(self, textures):
    """Load a list of texture sources into one 2D texture array, one
    page per source; missing files get the source's default image."""
    array_tex = Texture()
    array_tex.setup_2d_texture_array(len(textures))
    for page, texture in enumerate(textures):
        filename = texture.source.texture_filename(None)
        if filename is None:
            print("Could not find", texture.source.texture_name(None))
            array_tex.load(texture.create_default_image(), z=page, n=0)
            continue
        array_tex.read(fullpath=Filename.from_os_specific(filename),
                       z=page, n=0,
                       read_pages=False, read_mipmaps=False)
    return array_tex
def _shape_from_mesh_component(entity, component):
    """Load triangle mesh from class MeshComponent"""
    model_path = path.join(entity.scene.resource_manager.root_path,
                           "meshes",
                           "{}.egg".format(component.mesh_name))
    model_np = loader.loadModel(Filename.from_os_specific(model_path))
    # Use the first GeomNode in the loaded model as the collision source.
    geom_node = model_np.find('**/+GeomNode').node()
    mesh = BulletTriangleMesh()
    mesh.addGeom(geom_node.get_geom(0), True, geom_node.getTransform())
    return BulletTriangleMeshShape(mesh, dynamic=False)
def _set_config_dir(self, pth):
    """ Sets the path to the config directory. Usually this is the Config/
    directory located in the pipeline root directory. However, if you want
    to load your own configuration files, you can specify a custom config
    directory here. Your configuration directory should contain the
    pipeline.yaml, plugins.yaml, daytime.yaml and configuration.prc.

    It is highly recommended you use the pipeline provided config files,
    modify them to your needs, and as soon as you think they are in a
    final version, copy them over. Please also notice that you should
    keep your config files up-to-date, e.g. when new configuration
    variables are added.

    Also, specifying a custom configuration dir disables the
    functionality of the PluginConfigurator and DayTime editor, since
    they operate on the pipelines config files.

    Set the directory to None to use the default directory. """
    converted = Filename.from_os_specific(pth)
    self._config_dir = converted.get_fullpath()
def config_dir(self, pth):
    """ Sets the path to the config directory. Usually this is the config/
    directory located in the pipeline root directory. However, if you want
    to load your own configuration files, you can specify a custom config
    directory here. Your configuration directory should contain the
    pipeline.yaml, plugins.yaml, daytime.yaml and configuration.prc.

    It is highly recommended you use the pipeline provided config files,
    modify them to your needs, and as soon as you think they are in a
    final version, copy them over. Please also notice that you should
    keep your config files up-to-date, e.g. when new configuration
    variables are added.

    Also, specifying a custom configuration_dir disables the
    functionality of the PluginConfigurator and DayTime editor, since
    they operate on the pipelines default config files.

    Set the directory to None to use the default directory. """
    panda_path = Filename.from_os_specific(pth)
    self._config_dir = panda_path.get_fullpath()
def load(self, filename):
    """ Loads a profile from a given filename and returns the internal
    used index which can be assigned to a light, or -1 on failure. """
    # Make sure the user can load profiles directly from the ies profile folder
    data_path = join("/$$rp/rpcore/data/ies_profiles/", filename)
    if isfile(data_path):
        filename = data_path

    # Make filename unique
    fname = Filename.from_os_specific(filename)
    if not VirtualFileSystem.get_global_ptr().resolve_filename(
            fname, get_model_path().get_value(), "ies"):
        self.error("Could not resolve", filename)
        return -1
    fname = fname.get_fullpath()

    # Check for cache entries
    if fname in self._entries:
        return self._entries.index(fname)

    # Check for out of bounds
    if len(self._entries) >= self._max_entries:
        # TODO: Could remove unused profiles here or regenerate texture
        self.warn(
            "Cannot load IES Profile, too many loaded! (Maximum: 32)")
        # BUGFIX: previously execution fell through here and wrote past
        # the end of the storage texture; fail explicitly instead.
        return -1

    # Try loading the dataset, and see what happens
    try:
        dataset = self._load_and_parse_file(fname)
    except InvalidIESProfileException as msg:
        self.warn("Failed to load profile from", filename, ":", msg)
        return -1

    if not dataset:
        return -1

    # Dataset was loaded successfully, now copy it into the next free
    # slice of the storage texture and remember its name.
    dataset.generate_dataset_texture_into(self._storage_tex, len(self._entries))
    self._entries.append(fname)
    return len(self._entries) - 1
def load(self, filename):
    """ Loads a profile from a given filename and returns the internal
    used index which can be assigned to a light, or -1 on failure. """
    # Make sure the user can load profiles directly from the ies profile folder
    data_path = join("/$$rp/data/ies_profiles/", filename)
    if isfile(data_path):
        filename = data_path

    # Make filename unique
    fname = Filename.from_os_specific(filename)
    if not VirtualFileSystem.get_global_ptr().resolve_filename(
            fname, get_model_path().get_value(), "ies"):
        self.error("Could not resolve", filename)
        return -1
    fname = fname.get_fullpath()

    # Check for cache entries
    if fname in self._entries:
        return self._entries.index(fname)

    # Check for out of bounds
    if len(self._entries) >= self._max_entries:
        # TODO: Could remove unused profiles here or regenerate texture
        self.warn("Cannot load IES Profile, too many loaded! (Maximum: 32)")
        # BUGFIX: previously execution fell through here and wrote past
        # the end of the storage texture; fail explicitly instead.
        return -1

    # Try loading the dataset, and see what happens
    try:
        dataset = self._load_and_parse_file(fname)
    except InvalidIESProfileException as msg:
        self.warn("Failed to load profile from", filename, ":", msg)
        return -1

    if not dataset:
        return -1

    # Dataset was loaded successfully, now copy it into the next free
    # slice of the storage texture and remember its name.
    dataset.generate_dataset_texture_into(self._storage_tex, len(self._entries))
    self._entries.append(fname)
    return len(self._entries) - 1
def write_png(self, new_image, has_alpha, folder, phase_dir, basename):
    """
    Saves a previously palettized image as a PNG file.
        :new_image: The palettized image containing RGB data.
        :has_alpha: Does this image contain alpha data?
        :folder: The folder to save the image in.
        :phase_dir: The name of the phase folder containing the texture, for example: "phase_3"
        :basename: The filename of the image, for example: "avatar_palette_1mla_1"
    """
    # Make sure the maps folder exists before writing into it.
    maps_dir = os.path.join(folder, phase_dir, 'maps')
    if not os.path.exists(maps_dir):
        os.makedirs(maps_dir)

    output_path = os.path.join(maps_dir, basename.strip('.'))

    # PNG supports alpha, but drop the channel when no pixel uses it.
    if not has_alpha:
        new_image.remove_alpha()

    new_image.write(Filename.from_os_specific(f'{output_path}.png'))
def read_texture(self, filename, alpha=False):
    """ Reads a texture from the model path.
    Throws a PalettizerException if file could not be found.

        :filename: Relative filename pointing to a texture file in the model path.
        :alpha: Do we need an alpha channel?
    """
    img = PNMImage()
    img.read(Filename.from_os_specific(filename))

    if not alpha:
        # RGB only: force three channels and return early.
        img.set_color_type(3)
        return img

    # Remember whether the source already carried alpha before
    # promoting it to RGBA.
    had_alpha = img.num_channels in (2, 4)
    img.set_color_type(4)
    if not had_alpha:
        # A freshly added alpha channel starts out transparent; default
        # to fully opaque pixels instead.
        img.alpha_fill(1)
    return img
def panda_config(self):
    """Assemble the Panda3D prc configuration from the application
    settings, load it, then load the user's optional prc file."""
    data = []
    request_opengl_config(data)
    self.app_panda_config(data)
    data.append("fullscreen %d" % settings.win_fullscreen)
    if settings.win_fullscreen:
        data.append("win-size %d %d" % (settings.win_fs_width, settings.win_fs_height))
    else:
        data.append("win-size %d %d" % (settings.win_width, settings.win_height))
    data.append("lens-far-limit %g" % settings.lens_far_limit)
    loadPrcFileData("", '\n'.join(data))
    if settings.prc_file is not None:
        config_file = settings.prc_file
        if not os.path.isabs(config_file):
            config_file = os.path.join(settings.config_dir, config_file)
        filename = Filename.from_os_specific(config_file)
        if filename.exists():
            print("Loading panda config", filename)
            loadPrcFile(filename)
        else:
            # BUGFIX: message previously stopped mid-sentence, giving no
            # hint that the file was missing.
            print("Panda config file", filename, "does not exist")
def get_resource(self, pth):
    """ Turns a relative path into an absolute one, using the skin_location """
    resource_path = join(self.skin_location, pth)
    return Filename.from_os_specific(resource_path).get_fullpath()
def mount(self):
    """ Inits the VFS Mounts. This creates the following virtual directory
    structure, from which all files can be located:

    /$$rp/  (Mounted from the render pipeline base directory)
       + rpcore/
       + shader/
       + ...

    /$rpconfig/ (Mounted from config/, may be set by user)
       + pipeline.yaml
       + ...

    /$$rptemp/ (Either ramdisk or user specified)
        + day_time_config
        + shader_auto_config
        + ...

    /$$rpshader/ (Link to /$$rp/rpcore/shader)
    """
    self.debug("Setting up virtual filesystem")
    self._mounted = True

    # Panda's mount API wants forward-slash "fullpath" names.
    convert_path = lambda pth: Filename.from_os_specific(pth).get_fullpath()
    vfs = VirtualFileSystem.get_global_ptr()

    # Mount config dir as $$rpconf
    if self._config_dir is None:
        config_dir = convert_path(join(self._base_path, "config/"))
        self.debug("Mounting auto-detected config dir:", config_dir)
        vfs.mount(config_dir, "/$$rpconfig", 0)
    else:
        self.debug("Mounting custom config dir:", self._config_dir)
        vfs.mount(convert_path(self._config_dir), "/$$rpconfig", 0)

    # Mount directory structure
    # NOTE(review): the docstring above mentions /$$rpshader, but the
    # shader dir is mounted at /$$rp/shader -- confirm which is intended.
    vfs.mount(convert_path(self._base_path), "/$$rp", 0)
    vfs.mount(convert_path(join(self._base_path, "rpcore/shader")), "/$$rp/shader", 0)
    vfs.mount(convert_path(join(self._base_path, "effects")), "effects", 0)

    # Mount the pipeline temp path:
    # If no write path is specified, use a virtual ramdisk
    if self._write_path is None:
        self.debug("Mounting ramdisk as /$$rptemp")
        vfs.mount(VirtualFileMountRamdisk(), "/$$rptemp", 0)
    else:
        # In case an actual write path is specified:
        # Ensure the pipeline write path exists, and if not, create it
        if not isdir(self._write_path):
            self.debug("Creating temporary path, since it does not exist yet")
            try:
                os.makedirs(self._write_path)
            except IOError as msg:
                self.fatal("Failed to create temporary path:", msg)
        self.debug("Mounting", self._write_path, "as /$$rptemp")
        vfs.mount(convert_path(self._write_path), '/$$rptemp', 0)

    # Search these mounts when resolving relative model/shader paths.
    get_model_path().prepend_directory("/$$rp")
    get_model_path().prepend_directory("/$$rp/shader")
    get_model_path().prepend_directory("/$$rptemp")
def _set_base_path(self, pth):
    """ Sets the path where the base shaders and models on are contained.
    This is usually the root of the rendering pipeline folder """
    self.debug("Set base path to '" + pth + "'")
    converted = Filename.from_os_specific(pth)
    self._base_path = converted.get_fullpath()
def join_abs(*args):
    """ Behaves like os.path.join, but replaces stuff like '/../' """
    fname = Filename.from_os_specific(join(*args))
    fname.make_absolute()
    return fname.to_os_generic()
from __future__ import print_function

import os

from panda3d.core import Filename, Texture, load_prc_file_data

# Run headless: no window, quiet pnmimage warnings, keep original
# texture sizes.
load_prc_file_data("", "window-type none")
load_prc_file_data("", "notify-level-pnmimage error")
load_prc_file_data("", "textures-power-2 none")

# Textures to pre-convert into compressed Panda texture objects (.txo.pz).
files_to_convert = [
    "data/gui/loading_screen_bg.png",
    "rpplugins/bloom/resources/lens_dirt.png",
    "data/builtin_models/skybox/skybox.jpg"
]

# Work relative to this script's directory so the pipeline root resolves.
this_dir = os.path.realpath(os.path.dirname(__file__))
os.chdir(this_dir)
pipeline_dir = "../"

# Importing DirectStart starts the engine and provides the global loader.
import direct.directbase.DirectStart

for filename in files_to_convert:
    src_path = os.path.abspath(os.path.join(pipeline_dir, filename))
    fullpath = Filename.from_os_specific(src_path).get_fullpath()
    # Replace the image extension with .txo.pz for the output name.
    dest_path = fullpath.replace(".png", ".txo.pz")
    dest_path = dest_path.replace(".jpg", ".txo.pz")
    print(src_path, "->", dest_path)
    loader.load_texture(fullpath).write(dest_path)
def _find_basepath(self):
    """ Attempts to find the pipeline base path by looking at the
    location of this file """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    base_dir = os.path.abspath(join(module_dir, "../.."))
    return Filename.from_os_specific(base_dir).get_fullpath()
def __init__(self): load_prc_file_data("", "win-size 512 512") # load_prc_file_data("", "window-type offscreen") load_prc_file_data("", "model-cache-dir") load_prc_file_data("", "model-cache-textures #f") load_prc_file_data("", "textures-power-2 none") load_prc_file_data("", "alpha-bits 0") load_prc_file_data("", "print-pipe-types #f") # Construct render pipeline self.render_pipeline = RenderPipeline() self.render_pipeline.mount_mgr.config_dir = "config/" self.render_pipeline.set_empty_loading_screen() self.render_pipeline.create(self) self.setup_scene() # Disable model caching BamCache.get_global_ptr().cache_models = False self.update_queue = [] self.start_listen() # Render initial frames for i in range(10): self.taskMgr.step() last_update = 0.0 self.scene_node = None current_lights = [] current_envprobes = [] # Wait for updates while True: # Update once in a while curr_time = time.time() if curr_time > last_update + 1.0: last_update = curr_time self.taskMgr.step() if self.update_queue: if self.scene_node: self.scene_node.remove_node() # Only take the latest packet payload = self.update_queue.pop(0) print("RENDERING:", payload) scene = loader.loadModel(Filename.from_os_specific(payload["scene"])) for light in scene.find_all_matches("**/+PointLight"): light.remove_node() for light in scene.find_all_matches("**/+Spotlight"): light.remove_node() # Find camera main_cam = scene.find("**/Camera") if main_cam: transform_mat = main_cam.get_transform(render).get_mat() transform_mat = Mat4.convert_mat(CS_zup_right, CS_yup_right) * transform_mat base.camera.set_mat(transform_mat) else: print("WARNING: No camera found") base.camera.set_pos(0, -3.5, 0) base.camera.look_at(0, -2.5, 0) base.camLens.set_fov(64.0) self.scene_node = scene scene.reparent_to(render) # Render scene for i in range(8): self.taskMgr.step() dest_path = Filename.from_os_specific(payload["dest"]) print("Saving screenshot to", dest_path) self.win.save_screenshot(dest_path) 
self.notify_about_finish(int(payload["pingback_port"]))
def convert_path(pth):
    """Return the Panda (forward-slash) fullpath for an OS-specific path."""
    panda_name = Filename.from_os_specific(pth)
    return panda_name.get_fullpath()