示例#1
0
文件: Phong.py 项目: theres1/blendigo
    def get_format(self):
        element_name = 'phong'

        fmt = {
            'name': [self.material_name],
            'backface_emit': [
                str(self.material_group.indigo_material_emission.backface_emit
                    ).lower()
            ],
            'emission_sampling_factor':
            [self.material_group.indigo_material_emission.em_sampling_mult],
            element_name: {
                'ior': [self.property_group.ior],
                'fresnel_scale': {
                    'constant': [self.property_group.fresnel_scale]
                },
            }
        }

        if self.property_group.use_roughness:
            fmt[element_name]['roughness'] = {
                'constant': [self.property_group.roughness]
            }
        else:
            fmt[element_name]['exponent'] = {
                'constant': [self.property_group.exponent]
            }

        fmt[element_name].update(self.get_channels())

        if self.property_group.specular_reflectivity:
            fmt[element_name]['specular_reflectivity'] = fmt[element_name][
                'diffuse_albedo']
            del fmt[element_name]['diffuse_albedo']
            del fmt[element_name]['ior']

        if self.property_group.nk_data_type == 'file' and self.property_group.nk_data_file != '':
            fmt[element_name]['nk_data'] = [
                efutil.path_relative_to_export(
                    self.property_group.nk_data_file)
            ]
            try:
                # doesn't matter if these keys don't exist, but remove them if they do
                del fmt[element_name]['ior']
                del fmt[element_name]['diffuse_albedo']
                del fmt[element_name]['specular_reflectivity']
            except:
                pass

        if self.property_group.nk_data_type == 'preset':
            fmt[element_name]['nk_data'] = [self.property_group.nk_data_preset]
            try:
                # doesn't matter if these keys don't exist, but remove them if they do
                del fmt[element_name]['ior']
                del fmt[element_name]['diffuse_albedo']
                del fmt[element_name]['specular_reflectivity']
            except:
                pass

        return fmt
示例#2
0
	def worldBegin(self, *args):
		"""Write the WorldBegin token, then Include directives for any
		auxiliary files (materials/geometry/volumes) already on disk."""
		self.wf(Files.MAIN, '\nWorldBegin')
		if self.files[Files.MAIN] is None:
			return
		for aux in (Files.MATS, Files.GEOM, Files.VOLM):
			aux_name = self.file_names[aux]
			if os.path.exists(aux_name):
				self.wf(Files.MAIN, '\nInclude "%s"' % efutil.path_relative_to_export(aux_name))
示例#3
0
 def build_xml_element(self, obj, filename, use_shading_normals, exported_name=""):
     """Construct the <mesh> element description for an exported mesh.
     
     Falls back to the mesh data's name when exported_name is empty.
     """
     
     if exported_name == "":
         exported_name = obj.data.name
     
     xml = self.Element('mesh')
     
     smoothing = use_shading_normals and (not self.disable_smoothing)
     
     xml_format = {
         'name': [exported_name],
         'normal_smoothing': [str(smoothing).lower()],
         'scale': [1.0],
         'external': {
             'path': [filename]
         }
     }
     
     # A valid proxy overrides the default external path.
     if self.valid_proxy():
         xml_format['external']['path'] = [path_relative_to_export(self.mesh_path)]
     
     # Subdivision settings are only emitted when subdivision is enabled.
     if self.max_num_subdivisions > 0:
         xml_format['subdivision_smoothing'] = [str(self.subdivision_smoothing).lower()]
         xml_format['max_num_subdivisions'] = [self.max_num_subdivisions]
         xml_format['subdivide_pixel_threshold'] = [self.subdivide_pixel_threshold]
         xml_format['subdivide_curvature_threshold'] = [self.subdivide_curvature_threshold]
         xml_format['displacement_error_threshold'] = [self.displacement_error_threshold]
         xml_format['view_dependent_subdivision'] = [str(self.view_dependent_subdivision).lower()]
         xml_format['merge_vertices_with_same_pos_and_normal'] = [str(self.merge_verts).lower()]
     
     self.build_subelements(obj, xml_format, xml)
     
     return xml
示例#4
0
	def export_scene(self, scene):
		"""Export *scene* and return the main '<name>.lxs' filename,
		or False when the export was cancelled."""
		api_type, write_files = self.set_export_path(scene)
		
		# Pre-allocate the LuxManager so that we can set up the network
		# servers before export starts
		manager = LuxManager(
			scene.name,
			api_type = api_type,
		)
		LuxManager.SetActive(manager)
		
		if scene.luxrender_engine.export_type == 'INT':
			# Set up networking before export so that we get better server usage
			networking = scene.luxrender_networking
			if networking.use_network_servers and networking.servers != '':
				manager.lux_context.setNetworkServerUpdateInterval( networking.serverinterval )
				for server in networking.servers.split(','):
					manager.lux_context.addServer(server.strip())
		
		output_filename = get_output_filename(scene)
		
		scene_exporter = SceneExporter()
		scene_exporter.properties.directory = self.output_dir
		scene_exporter.properties.filename = output_filename
		scene_exporter.properties.api_type = api_type			# Set export target
		scene_exporter.properties.write_files = write_files		# Use file write decision from above
		scene_exporter.properties.write_all_files = False		# Use UI file write settings
		scene_exporter.set_scene(scene)
		
		if 'CANCELLED' in scene_exporter.export():
			return False
		
		# Look for an output image to load; first enabled format wins
		# (png preferred over tga, tga over exr).
		film = scene.camera.data.luxrender_camera.luxrender_film
		for flag, ext in (('write_png', 'png'), ('write_tga', 'tga'), ('write_exr', 'exr')):
			if getattr(film, flag):
				self.output_file = efutil.path_relative_to_export(
					'%s/%s.%s' % (self.output_dir, output_filename, ext)
				)
				break
		
		return "%s.lxs" % output_filename
示例#5
0
	def export_scene(self, scene):
		"""Export *scene* via a freshly activated LuxManager.

		Returns the main scene filename ('<name>.lxs') on success, or
		False when the export was cancelled.
		"""
		api_type, write_files = self.set_export_path(scene)
		
		# Pre-allocate the LuxManager so that we can set up the network servers before export
		LM = LuxManager(
			scene.name,
			api_type = api_type,
		)
		LuxManager.SetActive(LM)
		
		if scene.luxrender_engine.export_type == 'INT':
			# Set up networking before export so that we get better server usage
			if scene.luxrender_networking.use_network_servers and scene.luxrender_networking.servers != '':
				LM.lux_context.setNetworkServerUpdateInterval( scene.luxrender_networking.serverinterval )
				for server in scene.luxrender_networking.servers.split(','):
					LM.lux_context.addServer(server.strip())
		
		output_filename = get_output_filename(scene)
		
		scene_exporter = SceneExporter()
		scene_exporter.properties.directory = self.output_dir
		scene_exporter.properties.filename = output_filename
		scene_exporter.properties.api_type = api_type			# Set export target
		scene_exporter.properties.write_files = write_files		# Use file write decision from above
		scene_exporter.properties.write_all_files = False		# Use UI file write settings
		scene_exporter.set_scene(scene)
		
		export_result = scene_exporter.export()
		
		if 'CANCELLED' in export_result:
			return False
		
		# Look for an output image to load; the first enabled format wins
		# (png is checked before tga, tga before exr)
		if scene.camera.data.luxrender_camera.luxrender_film.write_png:
			self.output_file = efutil.path_relative_to_export(
				'%s/%s.png' % (self.output_dir, output_filename)
			)
		elif scene.camera.data.luxrender_camera.luxrender_film.write_tga:
			self.output_file = efutil.path_relative_to_export(
				'%s/%s.tga' % (self.output_dir, output_filename)
			)
		elif scene.camera.data.luxrender_camera.luxrender_film.write_exr:
			self.output_file = efutil.path_relative_to_export(
				'%s/%s.exr' % (self.output_dir, output_filename)
			)
		
		return "%s.lxs" % output_filename
示例#6
0
文件: lamp.py 项目: theres1/blendigo
    def get_env_map_format(self, obj, scene):
        """Return the exporter dict describing this environment-map light.

        *obj* is the lamp object (its world matrix orients the map);
        *scene* supplies the light-layer configuration.
        """

        # TODO; re-implement spherical/angular and spherical width ?

        # Axis-swap matrix applied on top of the lamp's world rotation.
        basis_swap = mathutils.Matrix.Identity(3)
        basis_swap[0][0:3] = 0.0, 1.0, 0.0
        basis_swap[1][0:3] = -1.0, 0.0, 0.0
        basis_swap[2][0:3] = 0.0, 0.0, 1.0

        rot = obj.matrix_world.to_3x3()
        rot = rot * basis_swap

        # Combined rotation expressed as an (axis, angle) pair.
        axis, angle = rot.to_quaternion().to_axis_angle()

        tex_coord_generation = {
            self.env_map_type: {
                'rotation': {
                    'axis_rotation': {
                        'axis': list(axis),
                        'angle': [-angle]
                    }
                }
            }
        }

        fmt = {
            'texture': {
                'path': [efutil.path_relative_to_export(self.env_map_path)],
                'exponent': [1.0],  # TODO; make configurable?
                'tex_coord_generation': tex_coord_generation,
            },
            'base_emission': {
                'constant': {
                    'uniform': {
                        'value':
                        [self.env_map_gain_val * 10**self.env_map_gain_exp]
                    }
                }
            },
            'emission': {
                'texture': {
                    'texture_index': [0]
                }
            }
        }

        # Attach the light-layer index when layers are in use; an unknown
        # layer name falls back to layer 0.
        if self.layer != '' and not scene.indigo_lightlayers.ignore:
            layer_map = scene.indigo_lightlayers.enumerate()
            if self.layer in layer_map.keys():
                fmt['layer'] = [layer_map[self.layer]]
            else:
                fmt['layer'] = [0]

        return fmt
示例#7
0
 def worldBegin(self, *args):
     self.wf(Files.MAIN, '\nWorldBegin')
     if self.files[Files.MAIN] is not None:
         # Include the other files if they exist
         for idx in [Files.MATS, Files.GEOM, Files.VOLM]:
             if os.path.exists(self.file_names[idx]):
                 self.wf(
                     Files.MAIN, '\nInclude "%s"' %
                     efutil.path_relative_to_export(self.file_names[idx]))
示例#8
0
 def get_env_map_format(self, obj, scene):
     """Return the exporter dict describing this environment-map light.

     obj is the lamp object (its world matrix orients the map); scene
     supplies the light-layer configuration.
     """
     
     # TODO; re-implement spherical/angular and spherical width ?
     
     # Axis-swap matrix applied on top of the lamp's world rotation.
     trans = mathutils.Matrix.Identity(3)
     
     trans[0][0:3] = 0.0, 1.0, 0.0
     trans[1][0:3] = -1.0, 0.0, 0.0
     trans[2][0:3] = 0.0, 0.0, 1.0
     
     mat = obj.matrix_world.to_3x3()
     mat = mat * trans
     
     # Combined rotation as an (axis, angle) pair: rq[0] is the axis
     # vector, rq[1] the angle.
     rq = mat.to_quaternion().to_axis_angle()
     
     #rmr = []
     #for row in tbt.col:
     #    rmr.extend(row)
     
     fmt = {
         'texture': {
             'path': [efutil.path_relative_to_export(self.env_map_path)],
             'exponent': [1.0],    # TODO; make configurable?
             'tex_coord_generation': {
                 self.env_map_type: {
                     'rotation': {
                         #'matrix': rmr
                         'axis_rotation': {
                             'axis': list(rq[0]),
                             'angle': [-rq[1]]
                         }
                     }
                 }
             },
         },
         'base_emission': {
             'constant': {
                 'uniform': {
                     'value': [self.env_map_gain_val * 10**self.env_map_gain_exp]
                 }
             }
         },
         'emission': {
             'texture': {
                 'texture_index': [0]
             }
         }
     }
     
     # Attach the light-layer index when layers are in use; an unknown
     # layer name falls back to layer 0.
     if self.layer != '' and not scene.indigo_lightlayers.ignore:
         
         lls = scene.indigo_lightlayers.enumerate()
         valid_layers = lls.keys()
         
         fmt['layer'] = [lls[self.layer]] if self.layer in valid_layers else [0]
     
     return fmt
示例#9
0
def getServices():
    """Debug helper: print the filesystem, absolute and export-relative
    forms of the Cube material's first texture image path."""
    cube = bpy.context.scene.objects['Cube']
    path = cube.material_slots[0].material.texture_slots[0].texture.image.filepath
    fs_path = efutil.filesystem_path(path)
    print("path now>> %s" % fs_path)
    # BUGFIX: was named 'abs', which shadowed the builtin abs()
    abs_path = os.path.abspath(fs_path)
    print("path abs>> %s" % abs_path)
    rel_path = efutil.path_relative_to_export(path)
    print("path rel>> %s" % rel_path)
示例#10
0
	def buildMesh(self, obj):
		"""
		Decide which mesh format to output, if any, since the given object
		may be an external PLY proxy.

		Returns a list of mesh_definition tuples
		(name, material_name, shape_type, ParamSet); results are cached per
		(scene, object) so repeated dupli/instance visits are cheap.
		"""
		
		# Using a cache on object massively speeds up dupli instance export
		obj_cache_key = (self.geometry_scene, obj)
		if self.ExportedObjects.have(obj_cache_key): return self.ExportedObjects.get(obj_cache_key)
		
		mesh_definitions = []
		
		export_original = True
		# mesh data name first for portal reasons
		ext_mesh_name = '%s_%s_ext' % (obj.data.name, self.geometry_scene.name)
		if obj.luxrender_object.append_proxy:
			# The proxy may fully replace the original mesh
			if obj.luxrender_object.hide_proxy_mesh:
				export_original = False
			
			if self.allow_instancing(obj) and self.ExportedMeshes.have(ext_mesh_name):
				mesh_definitions.append( self.ExportedMeshes.get(ext_mesh_name) )
			else:
				# Which parameters apply depends on the proxy primitive type
				ext_params = ParamSet()
				if obj.luxrender_object.proxy_type in {'plymesh', 'stlmesh'}:
					ext_params.add_string('filename', efutil.path_relative_to_export(obj.luxrender_object.external_mesh))
					ext_params.add_bool('smooth', obj.luxrender_object.use_smoothing)
				if obj.luxrender_object.proxy_type in {'sphere', 'cylinder', 'cone', 'disk', 'paraboloid'}:
					ext_params.add_float('radius', obj.luxrender_object.radius)
					# phimax is stored in radians; the exporter wants degrees
					ext_params.add_float('phimax', obj.luxrender_object.phimax*(180/math.pi))
				if obj.luxrender_object.proxy_type in {'cylinder', 'paraboloid'}:
					ext_params.add_float('zmax', obj.luxrender_object.zmax)
				if obj.luxrender_object.proxy_type == 'cylinder':
					ext_params.add_float('zmin', obj.luxrender_object.zmin)
					
				# Objects with no active material export an empty material name
				proxy_material = obj.active_material.name if obj.active_material else ""
				mesh_definition = (ext_mesh_name, proxy_material, obj.luxrender_object.proxy_type, ext_params)
				mesh_definitions.append( mesh_definition )
				
				# Only export objectBegin..objectEnd and cache this mesh_definition if we plan to use instancing
				if self.allow_instancing(obj):
					self.exportShapeDefinition(obj, mesh_definition)
					self.ExportedMeshes.add(ext_mesh_name, mesh_definition)
		
		if export_original:
			# Choose the mesh export type, if set, or use the default
			mesh_type = obj.data.luxrender_mesh.mesh_type
			# If the rendering is INT and not writing to disk, we must use native mesh format
			internal_nofiles = self.visibility_scene.luxrender_engine.export_type=='INT' and not self.visibility_scene.luxrender_engine.write_files
			global_type = 'native' if internal_nofiles else self.visibility_scene.luxrender_engine.mesh_type
			if mesh_type == 'native' or (mesh_type == 'global' and global_type == 'native'):
				mesh_definitions.extend( self.buildNativeMesh(obj) )
			if mesh_type == 'binary_ply' or (mesh_type == 'global' and global_type == 'binary_ply'):
				mesh_definitions.extend( self.buildBinaryPLYMesh(obj) )
		
		self.ExportedObjects.add(obj_cache_key, mesh_definitions)
		return mesh_definitions
示例#11
0
	def get_params(self):
		"""Collect this image texture's export settings into a ParamSet."""
		params = ParamSet()
		params.add_string('filename', efutil.path_relative_to_export(self.filename))
		params.add_string('filterType', self.filterType)
		params.add_float('maxAnisotropy', self.maxAnisotropy)
		params.add_string('wrapMode', self.wrapMode)
		# gamma -1 is written when the sRGB flag is set, instead of the
		# explicit gamma value
		params.add_float('gamma', -1 if self.srgb else self.gamma)
		return params
示例#12
0
	def get_params(self):
		"""Collect this image texture's export settings into a ParamSet.

		The filename is stored relative to the export directory; a gamma
		of -1 is written when self.srgb is set, in place of the explicit
		gamma value.
		"""
		params = ParamSet()

		params.add_string('filename', efutil.path_relative_to_export(self.filename)) \
			  .add_string('filterType', self.filterType) \
			  .add_float('maxAnisotropy', self.maxAnisotropy) \
			  .add_string('wrapMode', self.wrapMode) \
			  .add_float('gamma', -1 if self.srgb else self.gamma)

		return params
示例#13
0
def getServices():
    """Debug helper: print the filesystem, absolute and export-relative
    forms of the Cube material's first texture image path."""
    cube = bpy.context.scene.objects['Cube']
    path = cube.material_slots[0].material.texture_slots[
        0].texture.image.filepath
    fs_path = efutil.filesystem_path(path)
    print("path now>> %s" % fs_path)
    # BUGFIX: was named 'abs', which shadowed the builtin abs()
    abs_path = os.path.abspath(fs_path)
    print("path abs>> %s" % abs_path)
    rel_path = efutil.path_relative_to_export(path)
    print("path rel>> %s" % rel_path)
示例#14
0
	def buildMesh(self, obj):
		"""
		Decide which mesh format to output, if any, since the given object
		may be an external PLY proxy.

		Returns a list of mesh_definition tuples
		(name, material_name, shape_type, ParamSet); results are cached per
		(scene, object) so repeated dupli/instance visits are cheap.
		"""
		
		# Using a cache on object massively speeds up dupli instance export
		obj_cache_key = (self.geometry_scene, obj)
		if self.ExportedObjects.have(obj_cache_key): return self.ExportedObjects.get(obj_cache_key)
		
		mesh_definitions = []
		
		export_original = True
		# mesh data name first for portal reasons
		ext_mesh_name = '%s_%s_ext' % (obj.data.name, self.geometry_scene.name)
		if obj.luxrender_object.append_proxy:
			if obj.luxrender_object.hide_proxy_mesh:
				export_original = False
			
			if self.allow_instancing(obj) and self.ExportedMeshes.have(ext_mesh_name):
				mesh_definitions.append( self.ExportedMeshes.get(ext_mesh_name) )
			else:
				ext_params = ParamSet()
				if obj.luxrender_object.proxy_type in {'plymesh', 'stlmesh'}:
					ext_params.add_string('filename', efutil.path_relative_to_export(obj.luxrender_object.external_mesh))
					ext_params.add_bool('smooth', obj.luxrender_object.use_smoothing)
				if obj.luxrender_object.proxy_type in {'sphere', 'cylinder', 'cone', 'disk', 'paraboloid'}:
					ext_params.add_float('radius', obj.luxrender_object.radius)
					# phimax is stored in radians; the exporter wants degrees
					ext_params.add_float('phimax', obj.luxrender_object.phimax*(180/math.pi))
				if obj.luxrender_object.proxy_type in {'cylinder', 'paraboloid'}:
					ext_params.add_float('zmax', obj.luxrender_object.zmax)
				if obj.luxrender_object.proxy_type == 'cylinder':
					ext_params.add_float('zmin', obj.luxrender_object.zmin)
				
				# BUGFIX: obj.active_material is None when the object has no
				# material assigned; dereferencing .name unguarded raised
				# AttributeError. Fall back to an empty material name.
				proxy_material = obj.active_material.name if obj.active_material else ""
				mesh_definition = (ext_mesh_name, proxy_material, obj.luxrender_object.proxy_type, ext_params)
				mesh_definitions.append( mesh_definition )
				
				# Only export objectBegin..objectEnd and cache this mesh_definition if we plan to use instancing
				if self.allow_instancing(obj):
					self.exportShapeDefinition(obj, mesh_definition)
					self.ExportedMeshes.add(ext_mesh_name, mesh_definition)
		
		if export_original:
			# Choose the mesh export type, if set, or use the default
			mesh_type = obj.data.luxrender_mesh.mesh_type
			# If the rendering is INT and not writing to disk, we must use native mesh format
			internal_nofiles = self.visibility_scene.luxrender_engine.export_type=='INT' and not self.visibility_scene.luxrender_engine.write_files
			global_type = 'native' if internal_nofiles else self.visibility_scene.luxrender_engine.mesh_type
			if mesh_type == 'native' or (mesh_type == 'global' and global_type == 'native'):
				mesh_definitions.extend( self.buildNativeMesh(obj) )
			if mesh_type == 'binary_ply' or (mesh_type == 'global' and global_type == 'binary_ply'):
				mesh_definitions.extend( self.buildBinaryPLYMesh(obj) )
		
		self.ExportedObjects.add(obj_cache_key, mesh_definitions)
		return mesh_definitions
示例#15
0
	def get_params(self):
		"""Collect the woven-cloth material's settings into a ParamSet.

		Scalars are registered as floats, the kd/ks variants as colours;
		the texture filename is made relative to the export directory.
		"""
		params = ParamSet()
		params.add_string('filename', efutil.path_relative_to_export(self.filename))
		for float_name in ('ksMultiplier', 'kdMultiplier', 'repeatU', 'repeatV'):
			params.add_float(float_name, getattr(self, float_name))
		for color_name in ('kd', 'ks', 'warp_kd', 'warp_ks', 'weft_kd', 'weft_ks'):
			params.add_color(color_name, getattr(self, color_name))
		return params
示例#16
0
 def get_params(self):
     """Collect the woven-cloth material's settings into a ParamSet.

     Scalars are registered as floats, the kd/ks variants as colours;
     the texture filename is made relative to the export directory.
     """
     params = ParamSet()
     #file_relative		= efutil.path_relative_to_export(file_library_path) if obj.library else efutil.path_relative_to_export(file_path)
     params.add_string('filename',
                       efutil.path_relative_to_export(self.filename))
     params.add_float('ksMultiplier', self.ksMultiplier)
     params.add_float('kdMultiplier', self.kdMultiplier)
     params.add_float('repeatU', self.repeatU)
     params.add_float('repeatV', self.repeatV)
     params.add_color('kd', self.kd)
     params.add_color('ks', self.ks)
     params.add_color('warp_kd', self.warp_kd)
     params.add_color('warp_ks', self.warp_ks)
     params.add_color('weft_kd', self.weft_kd)
     params.add_color('weft_ks', self.weft_ks)
     return params
示例#17
0
    def get_paramset(self, lamp_object):
        """Build the infinite-light ParamSet for *lamp_object*."""
        params = ParamSet()
        has_map = self.infinite_map != ''

        if has_map:
            # A lamp linked from a library resolves its HDRI path against
            # the library file's location.
            if lamp_object.library is None:
                hdri_path = self.infinite_map
            else:
                hdri_path = bpy.path.abspath(self.infinite_map,
                                             lamp_object.library.filepath)
            params.add_string('mapname',
                              efutil.path_relative_to_export(hdri_path))
            params.add_string('mapping', self.mapping_type)
            params.add_float('gamma', self.gamma)
            params.add_integer('nsamples', self.nsamples)

        # L is the sole emission colour without a map, or a multiplier
        # when hdri_multiply is enabled.
        if self.hdri_multiply or not has_map:
            params.add_color('L', self.L_color)

        return params
示例#18
0
文件: lamp.py 项目: Badcreature/sagcg
	def get_paramset(self, lamp_object):
		"""Build the light's ParamSet; only infinite lights may carry an
		HDRI environment map."""
		params = ParamSet()
		
		# Non-infinite lights only export their emission colour
		if self.type != 'infinite':
			params.add_color('L', self.L_color)
			return params
		
		if self.infinite_map != '':
			# Library-linked lamps resolve the map path against the
			# library file's location
			if lamp_object.library is None:
				hdri_path = self.infinite_map
			else:
				hdri_path = bpy.path.abspath(self.infinite_map, lamp_object.library.filepath)
			params.add_string('mapname', efutil.path_relative_to_export(hdri_path) )
			params.add_string('mapping', self.mapping_type)
			params.add_float('gamma', self.gamma)
			params.add_integer('nsamples', self.nsamples)
		
		# L is the sole emission colour without a map, or a multiplier
		# when hdri_multiply is enabled
		if self.infinite_map == '' or self.hdri_multiply:
			params.add_color('L', self.L_color)
		
		return params
示例#19
0
    def buildBinaryPLYMesh(self, obj):
        """Convert supported blender objects into a MESH, split the mesh
        into parts according to face material assignment, and write one
        binary PLY file per part.

        Returns a list of mesh_definition tuples (name, material_index,
        shape_type, ParamSet). PLY paths and mesh/material combinations
        are cached so instanced objects are only written once.
        """
        try:
            mesh_definitions = []
            mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
            if mesh is None:
                raise UnexportableObjectException(
                    'Cannot create render/export mesh')

            # collate faces by mat index
            ffaces_mats = {}
            mesh_faces = mesh.tessfaces
            for f in mesh_faces:
                mi = f.material_index
                if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
                ffaces_mats[mi].append(f)
            material_indices = ffaces_mats.keys()

            # Fall back to a single dummy material slot when none exist
            if len(mesh.materials) > 0 and mesh.materials[0] != None:
                mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
            else:
                mats = [(0, None)]

            for i, mat in mats:
                try:
                    # Skip material slots with no faces assigned
                    if i not in material_indices: continue

                    # If this mesh/mat combo has already been processed, get it from the cache
                    mesh_cache_key = (self.geometry_scene, obj.data, i)
                    if self.allow_instancing(obj) and self.ExportedMeshes.have(
                            mesh_cache_key):
                        mesh_definitions.append(
                            self.ExportedMeshes.get(mesh_cache_key))
                        continue

                    # Put PLY files in frame-numbered subfolders to avoid
                    # clobbering when rendering animations
                    #sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
                    sc_fr = '%s/%s/%s/%05d' % (
                        self.mts_context.meshes_dir, efutil.scene_filename(),
                        bpy.path.clean_name(self.geometry_scene.name),
                        self.visibility_scene.frame_current)
                    if not os.path.exists(sc_fr):
                        os.makedirs(sc_fr)

                    def make_plyfilename():
                        # Serial number makes repeated exports of the same
                        # mesh/material combination unique
                        ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
                        mesh_name = '%s_%04d_m%03d' % (obj.data.name,
                                                       ply_serial, i)
                        ply_filename = '%s.ply' % bpy.path.clean_name(
                            mesh_name)
                        ply_path = '/'.join([sc_fr, ply_filename])
                        return mesh_name, ply_path

                    mesh_name, ply_path = make_plyfilename()

                    # Ensure that all PLY files have unique names
                    while self.ExportedPLYs.have(ply_path):
                        mesh_name, ply_path = make_plyfilename()

                    self.ExportedPLYs.add(ply_path, None)

                    # skip writing the PLY file if the box is checked
                    skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
                    if not os.path.exists(ply_path) or not (
                            self.visibility_scene.mitsuba_engine.partial_export
                            and skip_exporting):

                        GeometryExporter.NewExportedObjects.add(obj)

                        uv_textures = mesh.tessface_uv_textures
                        # NOTE(review): if uv_textures is non-empty but has
                        # no active layer data, uv_layer is never assigned
                        # and the references below would raise NameError —
                        # confirm whether that state is reachable.
                        if len(uv_textures) > 0:
                            if uv_textures.active and uv_textures.active.data:
                                uv_layer = uv_textures.active.data
                        else:
                            uv_layer = None

                        # Here we work out exactly which vert+normal combinations
                        # we need to export. This is done first, and the export
                        # combinations cached before writing to file because the
                        # number of verts needed needs to be written in the header
                        # and that number is not known before this is done.

                        # Export data
                        ntris = 0
                        co_no_uv_cache = []
                        face_vert_indices = [
                        ]  # mapping of face index to list of exported vert indices for that face

                        # Caches
                        vert_vno_indices = {
                        }  # mapping of vert index to exported vert index for verts with vert normals
                        vert_use_vno = set(
                        )  # Set of vert indices that use vert normals

                        vert_index = 0  # exported vert index
                        for face in ffaces_mats[i]:
                            fvi = []
                            for j, vertex in enumerate(face.vertices):
                                v = mesh.vertices[vertex]

                                if uv_layer:
                                    # Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
                                    uv_coord = (uv_layer[face.index].uv[j][0],
                                                1.0 -
                                                uv_layer[face.index].uv[j][1])

                                if face.use_smooth:
                                    # Smooth faces share verts via the
                                    # vert-normal cache

                                    if uv_layer:
                                        vert_data = (v.co[:], v.normal[:],
                                                     uv_coord)
                                    else:
                                        vert_data = (v.co[:], v.normal[:])

                                    if vert_data not in vert_use_vno:
                                        vert_use_vno.add(vert_data)

                                        co_no_uv_cache.append(vert_data)

                                        vert_vno_indices[
                                            vert_data] = vert_index
                                        fvi.append(vert_index)

                                        vert_index += 1
                                    else:
                                        fvi.append(vert_vno_indices[vert_data])

                                else:
                                    # Flat faces use the face normal, so each
                                    # face-vert is unique

                                    if uv_layer:
                                        vert_data = (
                                            v.co[:], face.normal[:],
                                            uv_layer[face.index].uv[j][:])
                                    else:
                                        vert_data = (v.co[:], face.normal[:])

                                    # All face-vert-co-no are unique, we cannot
                                    # cache them
                                    co_no_uv_cache.append(vert_data)

                                    fvi.append(vert_index)

                                    vert_index += 1

                            # For Mitsuba, we need to triangulate quad faces
                            face_vert_indices.append(fvi[0:3])
                            ntris += 3
                            if len(fvi) == 4:
                                face_vert_indices.append(
                                    (fvi[0], fvi[2], fvi[3]))
                                ntris += 3

                        del vert_vno_indices
                        del vert_use_vno

                        # Write the binary little-endian PLY file
                        with open(ply_path, 'wb') as ply:
                            ply.write(b'ply\n')
                            ply.write(b'format binary_little_endian 1.0\n')
                            ply.write(
                                b'comment Created by MtsBlend 2.5 exporter for Mitsuba - www.mitsuba.net\n'
                            )

                            # vert_index == the number of actual verts needed
                            ply.write(
                                ('element vertex %d\n' % vert_index).encode())
                            ply.write(b'property float x\n')
                            ply.write(b'property float y\n')
                            ply.write(b'property float z\n')

                            ply.write(b'property float nx\n')
                            ply.write(b'property float ny\n')
                            ply.write(b'property float nz\n')

                            if uv_layer:
                                ply.write(b'property float s\n')
                                ply.write(b'property float t\n')

                            ply.write(('element face %d\n' %
                                       int(ntris / 3)).encode())
                            ply.write(
                                b'property list uchar uint vertex_indices\n')

                            ply.write(b'end_header\n')

                            # dump cached co/no/uv
                            if uv_layer:
                                for co, no, uv in co_no_uv_cache:
                                    ply.write(struct.pack('<3f', *co))
                                    ply.write(struct.pack('<3f', *no))
                                    ply.write(struct.pack('<2f', *uv))
                            else:
                                for co, no in co_no_uv_cache:
                                    ply.write(struct.pack('<3f', *co))
                                    ply.write(struct.pack('<3f', *no))

                            # dump face vert indices
                            for face in face_vert_indices:
                                ply.write(struct.pack('<B', 3))
                                ply.write(struct.pack('<3I', *face))

                            del co_no_uv_cache
                            del face_vert_indices

                        MtsLog('Binary PLY file written: %s' % (ply_path))
                    else:
                        MtsLog('Skipping already exported PLY: %s' % mesh_name)

                    shape_params = ParamSet().add_string(
                        'filename', efutil.path_relative_to_export(ply_path))
                    if obj.data.mitsuba_mesh.normals == 'facenormals':
                        shape_params.add_boolean('faceNormals',
                                                 {'value': 'true'})

                    mesh_definition = (mesh_name, i, 'ply', shape_params)
                    # Only export Shapegroup and cache this mesh_definition if we plan to use instancing
                    if self.allow_instancing(
                            obj) and self.exportShapeDefinition(
                                obj, mesh_definition):
                        shape_params = ParamSet().add_reference(
                            'id', '', mesh_name + '-shapegroup_%i' % (i))

                        mesh_definition = (mesh_name, i, 'instance',
                                           shape_params)
                        self.ExportedMeshes.add(mesh_cache_key,
                                                mesh_definition)

                    mesh_definitions.append(mesh_definition)

                except InvalidGeometryException as err:
                    MtsLog('Mesh export failed, skipping this mesh: %s' % err)

            del ffaces_mats
            bpy.data.meshes.remove(mesh)

        except UnexportableObjectException as err:
            MtsLog('Object export failed, skipping this object: %s' % err)

        return mesh_definitions
示例#20
0
    def execute(self, master_scene):
        """Export ``master_scene`` (and its background set) to an Indigo .igs scene file.

        Builds the root <scene> XML from the render settings, then appends
        tonemapping, default and used materials, media, light layers, the
        camera (with per-frame motion-blur keyframes when enabled), meshes,
        and object instances.  Object instances are written to a separate
        ``objects.igs`` file in the frame directory and pulled into the main
        scene via an <include> element.

        Returns a Blender operator result set: ``{'FINISHED'}`` on success,
        ``{'CANCELLED'}`` on any error (the error is logged; it is re-raised
        only when the B25_OBJECT_ANALYSIS environment variable is set).
        """
        try:
            if master_scene is None:
                #indigo_log('Scene context is invalid')
                raise Exception('Scene context is invalid')
            
            #------------------------------------------------------------------------------
            # Init stats
            if self.verbose: indigo_log('Indigo export started ...')
            export_start_time = time.time()
            
            igs_filename = self.check_output_path(self.properties.directory)
            # Background set is exported alongside the master scene; entries may be None.
            export_scenes = [master_scene.background_set, master_scene]
            
            if self.verbose: indigo_log('Export render settings')
            
            #------------------------------------------------------------------------------
            # Start with render settings, this also creates the root <scene>
            self.scene_xml = master_scene.indigo_engine.build_xml_element(master_scene)
            
            # Export background light if no light exists.
            self.export_default_background_light(export_scenes)
            
            #------------------------------------------------------------------------------
            # Tonemapping
            self.export_tonemapping(master_scene)
            
            #------------------------------------------------------------------------------
            # Materials - always export the default clay material and a null material
            self.export_default_materials(master_scene)
            
            # Initialise values used for motion blur export.
            fps = master_scene.render.fps / master_scene.render.fps_base
            start_frame = master_scene.frame_current
            # NOTE(review): assumes a non-zero camera exposure — a 0 value would
            # raise ZeroDivisionError here; confirm the property enforces a minimum.
            exposure = 1 / master_scene.camera.data.indigo_camera.exposure
            # camera = (camera object, list of (normalised_time, world_matrix) keyframes)
            camera = (master_scene.camera, [])
            
            # Make a relative igs and mesh dir path like "TheAnimation/00002"
            rel_mesh_dir = efutil.scene_filename()
            rel_frame_dir = '%s/%05i' % (rel_mesh_dir, start_frame) #bpy.path.clean_name(master_scene.name), 
            mesh_dir = '/'.join([efutil.export_path, rel_mesh_dir])
            frame_dir = '/'.join([efutil.export_path, rel_frame_dir])
            
            # Initialise GeometryExporter.
            geometry_exporter = geometry.GeometryExporter()
            geometry_exporter.mesh_dir = mesh_dir
            geometry_exporter.rel_mesh_dir = rel_mesh_dir
            geometry_exporter.skip_existing_meshes = master_scene.indigo_engine.skip_existing_meshes
            geometry_exporter.verbose = self.verbose
            
            # Make frame_dir directory if it does not exist yet.
            if not os.path.exists(frame_dir):
                os.makedirs(frame_dir)
            
            if master_scene.indigo_engine.motionblur:
                # When motion blur is on, calculate the number of frames covered by the exposure time
                start_time = start_frame / fps
                end_time = start_time + exposure
                end_frame = math.ceil(end_time * fps)
                
                # end_frame + 1 because range is max excl
                frame_list = [x for x in range(start_frame, end_frame+1)]
            else:
                frame_list = [start_frame]
                
            #indigo_log('frame_list: %s'%frame_list)
            
            #------------------------------------------------------------------------------
            # Process all objects in all frames in all scenes.
            for cur_frame in frame_list:
                # Calculate normalised time for keyframes.
                # 0.0 at start_frame, 1.0 at the end of the exposure window.
                normalised_time = (cur_frame - start_frame) / fps / exposure
                if self.verbose: indigo_log('Processing frame: %i time: %f'%(cur_frame, normalised_time))
                
                geometry_exporter.normalised_time = normalised_time
                bpy.context.scene.frame_set(cur_frame, 0.0)

                # Add Camera matrix.
                camera[1].append((normalised_time, camera[0].matrix_world.copy()))
            
                for ex_scene in export_scenes:
                    if ex_scene is None: continue
                    
                    if self.verbose: indigo_log('Processing objects for scene %s' % ex_scene.name)
                    geometry_exporter.iterateScene(ex_scene)
            
            #------------------------------------------------------------------------------
            # Export camera
            if self.verbose: indigo_log('Exporting camera')
            self.scene_xml.append(
                camera[0].data.indigo_camera.build_xml_element(master_scene, camera[1])
            )
            
            #------------------------------------------------------------------------------
            # Export light layers
            from indigo.export.light_layer import light_layer_xml
            # TODO:
            # light_layer_count was supposed to export correct indices when there
            # is a background_set with emitters on light layers -
            # however, the re-indexing at material export time is non-trivial for
            # now and probably not worth it.
            #light_layer_count = 0
            for ex_scene in export_scenes:
                if ex_scene is None: continue
                
                # Light layer names
                for layer_name, idx in ex_scene.indigo_lightlayers.enumerate().items():
                    if self.verbose: indigo_log('Light layer %i: %s' % (idx, layer_name))
                    self.scene_xml.append(
                        light_layer_xml().build_xml_element(ex_scene, idx, layer_name)
                    )
                    # light_layer_count += 1
            
            if self.verbose: indigo_log('Exporting lamps')
            
            # use special n==1 case due to bug in indigo <sum> material
            num_lamps = len(geometry_exporter.ExportedLamps)
            
            if num_lamps == 1:
                # Single lamp: its material becomes the background material directly.
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_mat = ET.Element('background_material')
                scene_background_settings.append(scene_background_settings_mat)
                
                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        scene_background_settings_mat.append(xml)
                
                self.scene_xml.append(scene_background_settings)
            
            if num_lamps > 1:
                
                # Multiple lamps: combine their materials with a <sum> material,
                # each weighted by a constant 1.
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_fmt = {
                    'background_material': {
                        'material': {
                            'name': ['background_material'],
                            'sum': { 'mat': xml_multichild() }
                        }
                    }
                }
                
                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        self.scene_xml.append(xml)
                    
                    scene_background_settings_fmt['background_material']['material']['sum']['mat'].append({
                        'mat_name': [ck],
                        'weight': {'constant': [1]}
                    })
                scene_background_settings_obj = xml_builder()
                scene_background_settings_obj.build_subelements(None, scene_background_settings_fmt, scene_background_settings)
                self.scene_xml.append(scene_background_settings)
            
            #------------------------------------------------------------------------------
            # Export Medium
            from indigo.export.materials.medium import medium_xml
            # TODO:
            # check if medium is currently used by any material and add 
            # basic medium for SpecularMaterial default

            for ex_scene in export_scenes:
                if ex_scene is None: continue
                
                indigo_material_medium = ex_scene.indigo_material_medium
                medium = indigo_material_medium.medium
                
                if len(indigo_material_medium.medium.items()) == 0 : continue
                
                for medium_name, medium_data in medium.items():
                    
                    medium_index = ex_scene.indigo_material_medium.medium.find(medium_name) # more precise if same name
                    
                    indigo_log('Exporting medium: %s ' % (medium_name))
                    self.scene_xml.append(
                        medium_xml(ex_scene, medium_name, medium_index, medium_data).build_xml_element(ex_scene, medium_name, medium_data)
                    )
                # NOTE(review): duplicate of the per-medium log above — it only
                # re-logs the last medium_name of this scene; looks like a
                # leftover debug line, confirm before removing.
                indigo_log('Exporting Medium: %s ' % (medium_name))         
                # TODO: 
                # check for unused medium	
            # A hard-coded default 'basic' medium is always appended to the scene.
            basic_medium = ET.fromstring("""
                                <medium>
                                   <uid>10200137</uid>
		                             <name>basic</name>
			                           <precedence>10</precedence>
			                             <basic>
				                           <ior>1.5</ior>
				                           <cauchy_b_coeff>0</cauchy_b_coeff>
				                           <max_extinction_coeff>1</max_extinction_coeff>
				                           <absorption_coefficient>
					                         <constant>
						                      <uniform>
							                   <value>0</value>
						                      </uniform>
					                         </constant>
				                           </absorption_coefficient>
			                             </basic>
	                            </medium>   
                         """)
            
            self.scene_xml.append(basic_medium)
            
            #------------------------------------------------------------------------------
            # Export used materials.
            if self.verbose: indigo_log('Exporting used materials')
            material_count = 0
            for ck, ci in geometry_exporter.ExportedMaterials.items():
                for xml in ci:
                    self.scene_xml.append(xml)
                material_count += 1
            if self.verbose: indigo_log('Exported %i materials' % material_count)
            
            # Export used meshes.
            if self.verbose: indigo_log('Exporting meshes')
            mesh_count = 0
            for ck, ci in geometry_exporter.MeshesOnDisk.items():
                mesh_name, xml = ci
                self.scene_xml.append(xml)
                mesh_count += 1
            if self.verbose: indigo_log('Exported %i meshes' % mesh_count)
            
            #------------------------------------------------------------------------------
            # We write object instances to a separate file
            oc = 0
            scene_data_xml = ET.Element('scenedata')
            for ck, ci in geometry_exporter.ExportedObjects.items():
                obj_type = ci[0]
                
                if obj_type == 'OBJECT':
                    obj = ci[1]
                    mesh_name = ci[2]
                    obj_matrices = ci[3]
                    scene = ci[4]
                    
                    xml = geometry.model_object(scene).build_xml_element(obj, mesh_name, obj_matrices)
                else:
                    # Non-OBJECT entries carry a ready-made XML element.
                    xml = ci[1]
                    
                scene_data_xml.append(xml)
                oc += 1
            
            objects_file_name = '%s/objects.igs' % (
                frame_dir
            )
            
            # NOTE(review): file handles below are not closed if the write
            # raises; the surrounding except only logs — consider `with`.
            objects_file = open(objects_file_name, 'wb')
            ET.ElementTree(element=scene_data_xml).write(objects_file, encoding='utf-8')
            objects_file.close()
            # indigo_log('Exported %i object instances to %s' % (oc,objects_file_name))
            scene_data_include = include.xml_include( efutil.path_relative_to_export(objects_file_name) )
            self.scene_xml.append( scene_data_include.build_xml_element(master_scene) )
            
            #------------------------------------------------------------------------------
            # Write formatted XML for settings, materials and meshes
            out_file = open(igs_filename, 'w')
            xml_str = ET.tostring(self.scene_xml, encoding='utf-8').decode()
            
            # substitute back characters protected from entity encoding in CDATA nodes
            xml_str = xml_str.replace('{_LESSTHAN_}', '<')
            xml_str = xml_str.replace('{_GREATERTHAN_}', '>')
            
            # Round-trip through minidom purely for pretty-printing.
            xml_dom = MD.parseString(xml_str)
            xml_dom.writexml(out_file, addindent='\t', newl='\n', encoding='utf-8')
            out_file.close()
            
            #------------------------------------------------------------------------------
            # Print stats
            export_end_time = time.time()
            if self.verbose: indigo_log('Total mesh export time: %f seconds' % (geometry_exporter.total_mesh_export_time))
            indigo_log('Export finished; took %f seconds' % (export_end_time-export_start_time))
            
            # Reset to start_frame.
            if len(frame_list) > 1:
                bpy.context.scene.frame_set(start_frame)
            
            return {'FINISHED'}
        
        except Exception as err:
            indigo_log('%s' % err, message_type='ERROR')
            if os.getenv('B25_OBJECT_ANALYSIS', False):
                raise err
            return {'CANCELLED'}
示例#21
0
    def handler_Duplis_PATH(self, obj, *args, **kwargs):
        """Export a 'HAIR' particle system on ``obj`` as a Mitsuba hair shape.

        Expects the particle system in ``kwargs['particle_system']``.  Writes
        the strand control points (in object space) to a frame-numbered
        ``.hair`` text file and registers a 'hair' shape definition that
        references it via ``exportShapeInstances()``.

        Returns ``None`` in all cases; non-hair systems and systems whose
        particle modifier is disabled for rendering are skipped with a log
        message.
        """
        if 'particle_system' not in kwargs:
            MtsLog('ERROR: handler_Duplis_PATH called without particle_system')
            return

        psys = kwargs['particle_system']

        if not psys.settings.type == 'HAIR':
            MtsLog(
                'ERROR: handler_Duplis_PATH can only handle Hair particle systems ("%s")'
                % psys.name)
            return

        # Skip the whole export if any particle-system modifier is hidden for render.
        for mod in obj.modifiers:
            if mod.type == 'PARTICLE_SYSTEM' and not mod.show_render:
                return
        # NOTE(review): `mod` leaks out of the loop above and is passed to
        # psys.co_hair() below; this assumes obj has at least one modifier and
        # that the last one is the relevant particle-system modifier — confirm.

        MtsLog('Exporting Hair system "%s"...' % psys.name)

        # Strand radius: particle_size halved (diameter -> radius) and scaled
        # by 1/1000 — presumably a mm -> m unit conversion; TODO confirm units.
        size = psys.settings.particle_size / 2.0 / 1000.0
        psys.set_resolution(self.geometry_scene, obj, 'RENDER')
        steps = 2**psys.settings.render_step
        num_parents = len(psys.particles)
        num_children = len(psys.child_particles)

        partsys_name = '%s_%s' % (obj.name, psys.name)
        det = DupliExportProgressThread()
        det.start(num_parents + num_children)

        # Put Hair files in frame-numbered subfolders to avoid
        # clobbering when rendering animations
        sc_fr = '%s/%s/%s/%05d' % (
            self.mts_context.meshes_dir, efutil.scene_filename(),
            bpy.path.clean_name(
                self.geometry_scene.name), self.visibility_scene.frame_current)
        if not os.path.exists(sc_fr):
            os.makedirs(sc_fr)

        hair_filename = '%s.hair' % bpy.path.clean_name(partsys_name)
        hair_file_path = '/'.join([sc_fr, hair_filename])

        shape_params = ParamSet().add_string(
            'filename',
            efutil.path_relative_to_export(hair_file_path)).add_float(
                'radius', size)
        mesh_definitions = []
        mesh_definition = (psys.name, psys.settings.material - 1, 'hair',
                           shape_params)
        mesh_definitions.append(mesh_definition)
        self.exportShapeInstances(obj, mesh_definitions)

        # Context manager ensures the file is closed even if export raises
        # mid-way (the previous version leaked the handle on an exception).
        with open(hair_file_path, 'w') as hair_file:
            transform = obj.matrix_world.inverted()
            for pindex in range(num_parents + num_children):
                det.exported_objects += 1
                points = []

                # Sample each strand at steps+1 positions, converted to object space.
                for step in range(0, steps + 1):
                    co = psys.co_hair(obj, mod, pindex, step)
                    if not co.length_squared == 0:
                        points.append(transform * co)

                if psys.settings.use_hair_bspline:
                    # Resample the control points through the B-spline evaluator
                    # so the exported strand follows Blender's smoothed curve.
                    # The tiny epsilon keeps the final parameter just inside the
                    # valid domain of the spline.
                    temp = []
                    degree = 2
                    dimension = 3
                    for i in range(
                            math.trunc(math.pow(2, psys.settings.render_step))):
                        if i > 0:
                            u = i * (len(points) - degree) / math.trunc(
                                math.pow(2, psys.settings.render_step) -
                                1) - 0.0000000000001
                        else:
                            u = i * (len(points) - degree) / math.trunc(
                                math.pow(2, psys.settings.render_step) - 1)
                        temp.append(self.BSpline(points, dimension, degree, u))
                    points = temp

                for p in points:
                    hair_file.write('%f %f %f\n' % (p[0], p[1], p[2]))

                # A blank line terminates a strand in the .hair text format.
                hair_file.write('\n')

        psys.set_resolution(self.geometry_scene, obj, 'PREVIEW')
        det.stop()
        det.join()

        MtsLog('... done, exported %s hairs' % det.exported_objects)
示例#22
0
文件: Base.py 项目: theres1/blendigo
    def get_channel(self, property_group, channel_name, channel_prop_name):
        """Build the XML-format dict fragment for one material channel.

        Reads ``<channel_prop_name>_type`` from ``property_group`` to decide
        whether the channel is driven by a constant spectrum ('spectrum'),
        an image texture ('texture') or an ISL shader text block ('shader'),
        and returns ``{channel_name: {...}}`` accordingly.  Returns an empty
        dict when the type matches none of these, the texture name is empty,
        or the shader lookup fails.

        Side effects: registers new textures in ``self.found_textures`` /
        ``self.found_texture_indices`` and may save packed/generated images
        to disk.
        """
        d = {}

        channel_type = getattr(property_group, channel_prop_name + '_type')

        if channel_type == 'spectrum':
            spectrum_type = getattr(property_group,
                                    channel_prop_name + '_SP_type')
            if spectrum_type == 'rgb':
                # RGB colour vector scaled by an optional gain property
                # (defaults to 1.0 when the gain attribute is absent).
                d[channel_name] = {
                    'constant':
                    rgb([
                        i for i in getattr(property_group, channel_prop_name +
                                           '_SP_rgb') *
                        getattr(property_group, channel_prop_name +
                                '_SP_rgb_gain', 1.0)
                    ])
                }
            elif spectrum_type == 'uniform':
                # Uniform spectrum: value scaled by a power-of-ten exponent.
                d[channel_name] = {
                    'constant': uniform([
                        getattr(property_group, channel_prop_name + '_SP_uniform_val') * \
                        10**getattr(property_group, channel_prop_name + '_SP_uniform_exp')
                    ])
                }
            elif spectrum_type == 'blackbody':
                d[channel_name] = {
                    'constant':
                    blackbody([
                        getattr(property_group,
                                channel_prop_name + '_SP_blackbody_temp')
                    ], [
                        getattr(property_group,
                                channel_prop_name + '_SP_blackbody_gain')
                    ])
                }

        elif channel_type == 'texture':
            tex_name = getattr(property_group,
                               channel_prop_name + '_TX_texture')

            if tex_name:  # string is not empty
                # Register the texture only once per material; the channel
                # then refers to it by its index in self.found_textures.
                if channel_prop_name not in self.found_texture_indices:
                    self.found_texture_indices.append(channel_prop_name)

                    if not tex_name in bpy.data.textures:
                        raise Exception(
                            "Texture \"%s\" assigned to material \"%s\" doesn't exist!"
                            % (tex_name, self.material_name))

                    tex_property_group = bpy.data.textures[
                        tex_name].indigo_texture

                    if tex_property_group.image_ref == 'file':
                        relative_texture_path = efutil.path_relative_to_export(
                            getattr(tex_property_group, 'path'))
                    elif tex_property_group.image_ref == 'blender':
                        if not tex_property_group.image in bpy.data.images:
                            raise Exception(
                                "Error with image reference on texture \"%s\""
                                % tex_name)

                        img = bpy.data.images[tex_property_group.image]

                        if img.filepath == '':
                            bl_img_path = 'blendigo_extracted_image_%s.png' % bpy.path.clean_name(
                                tex_name)
                        else:
                            bl_img_path = img.filepath

                        # Packed or generated images have no usable file on
                        # disk: save them into the export directory first.
                        if img.source != 'FILE' or img.packed_file:
                            bl_file_formatted = os.path.splitext(
                                os.path.basename(bl_img_path))[0]
                            bl_file_formatted = '%s.%s' % (
                                bl_file_formatted,
                                self.scene.render.image_settings.file_format)
                            bl_img_path = os.path.join(
                                efutil.export_path, efutil.scene_filename(),
                                bpy.path.clean_name(self.scene.name),
                                '%05d' % self.scene.frame_current,
                                bl_file_formatted)
                            img.save_render(bl_img_path, self.scene)

                        relative_texture_path = efutil.path_relative_to_export(
                            bl_img_path)
                    # NOTE(review): if image_ref is neither 'file' nor
                    # 'blender', relative_texture_path is never assigned and
                    # the append below raises NameError — confirm those are
                    # the only possible values.

                    # A/B/C mapping params come either from the material's own
                    # channel properties or from the texture itself.
                    if not getattr(property_group,
                                   channel_prop_name + '_TX_abc_from_tex'):
                        abc_property_group = property_group
                        abc_prefix = channel_prop_name + '_TX_'
                    else:
                        abc_property_group = tex_property_group
                        abc_prefix = ''

                    uv_set_name = getattr(property_group,
                                          channel_prop_name + '_TX_uvset')
                    # NOTE(review): bare except — any failure (unknown uv set
                    # name, object without uv data) silently falls back to
                    # uv set 0.
                    try:
                        uv_set_index = self.obj.data.uv_textures.keys().index(
                            uv_set_name)
                    except:
                        uv_set_index = 0

                    self.found_textures.append({
                        'uv_set_index': [
                            uv_set_index
                        ],  #getattr(property_group, channel_prop_name + '_TX_uv_index')],
                        'path': [relative_texture_path],
                        'exponent': [getattr(tex_property_group, 'gamma')],
                        'a': [getattr(abc_property_group, abc_prefix + 'A')],
                        'b': [getattr(abc_property_group, abc_prefix + 'B')],
                        'c': [getattr(abc_property_group, abc_prefix + 'C')],
                        'smooth': [
                            str(
                                getattr(property_group, channel_prop_name +
                                        '_TX_smooth')).lower()
                        ]
                    })

                d[channel_name] = {
                    'texture': {
                        'texture_index':
                        [self.found_texture_indices.index(channel_prop_name)],
                    }
                }

        elif channel_type == 'shader':
            # NOTE(review): the bare except below swallows the very Exception
            # raised inside this block for a missing text block, so a bad
            # shader reference silently produces no channel — confirm this
            # best-effort behaviour is intended.
            try:
                shader_name = getattr(property_group,
                                      channel_prop_name + '_SH_text')
                if not shader_name in bpy.data.texts:
                    raise Exception(
                        'Referenced Text "%s" for shader on material "%s" not found'
                        % (shader_name, self.material_name))

                # Leading newline keeps the CDATA shader text readable in the XML.
                shader_text = '\n' + bpy.data.texts[shader_name].as_string()
                d[channel_name] = {
                    'shader': {
                        'shader': xml_cdata(shader_text)
                    }
                }
            except:
                pass

        return d
示例#23
0
def exportLight(scene, lux_context, ob, matrix, portals=None):
    """Export a single Blender lamp object to the LuxRender context.

    Dispatches on the lamp type (SUN, HEMI, SPOT, POINT, AREA) and emits the
    matching LuxRender light source, handling light groups, gain, IES
    profiles, world scale and portal instancing.

    :param scene:       Blender scene being exported.
    :param lux_context: LuxRender API context that receives the export calls.
    :param ob:          Blender object wrapping the lamp datablock.
    :param matrix:      World matrix of the lamp object.
    :param portals:     Optional list of portal shape identifiers to instance
                        inside the light's attribute block.
    :return: True if a light definition was emitted, False if the lamp is
             disabled or of an unsupported type.
    """
    # Fix: the original signature used a mutable default argument
    # (portals=[]); use a None sentinel so the list is not shared across calls.
    if portals is None:
        portals = []

    light = ob.data

    lg_gain = 1.0
    light_group = light.luxrender_lamp.lightgroup

    if not checkLightEnabled(scene, light):
        return False

    if light_group in scene.luxrender_lightgroups.lightgroups:
        lg_gain = scene.luxrender_lightgroups.lightgroups[light_group].gain

    # Light groups don't work with exphotonmap, and empty/ignored groups
    # collapse to the default group.
    if scene.luxrender_lightgroups.ignore or light.luxrender_lamp.lightgroup == "" or scene.luxrender_integrator.surfaceintegrator == 'exphotonmap':
        light_group = 'default'

    # Params common to all light types
    light_params = ParamSet() \
     .add_float('gain', light.energy*lg_gain) \
     .add_float('importance', light.luxrender_lamp.importance)

    # Optional IES photometric profile; resolve library-relative paths first.
    ies_data = ParamSet()
    if light.luxrender_lamp.iesname != '':
        if light.library is not None:
            iespath = bpy.path.abspath(light.luxrender_lamp.iesname,
                                       light.library.filepath)
        else:
            iespath = light.luxrender_lamp.iesname
        ies_data = ParamSet().add_string(
            'iesname', efutil.path_relative_to_export(iespath))

    # Params from light sub-types
    light_params.update(
        getattr(light.luxrender_lamp,
                'luxrender_lamp_%s' % light.type.lower()).get_paramset(ob))

    # Other lamp params from lamp object
    if light.type == 'SUN':
        invmatrix = matrix.inverted()
        invmatrix = fix_matrix_order(invmatrix)  # matrix indexing hack
        sunsky_type = light.luxrender_lamp.luxrender_lamp_sun.sunsky_type
        legacy_sky = light.luxrender_lamp.luxrender_lamp_sun.legacy_sky
        if sunsky_type in ['sun', 'sunsky']:
            light_params.add_vector(
                'sundir', (invmatrix[2][0], invmatrix[2][1], invmatrix[2][2]))
        if sunsky_type == 'distant':
            # This from/to combo will produce the same result as sundir
            light_params.add_point(
                'from', (invmatrix[2][0], invmatrix[2][1], invmatrix[2][2]))
            light_params.add_point('to', (0, 0, 0))
        if not legacy_sky and sunsky_type not in ['sun', 'distant']:  # new skymodel
            if sunsky_type == 'sky':
                attr_light(scene, lux_context, light, ob.name, light_group,
                           'sky2', light_params, portals=portals)
            elif sunsky_type == 'sunsky':
                attr_light(scene, lux_context, light, ob.name, light_group,
                           'sunsky2', light_params, portals=portals)
        else:
            # Use light definition itself: old sky model, sun only, or
            # distant, as needed.
            attr_light(scene, lux_context, light, ob.name, light_group,
                       sunsky_type, light_params, portals=portals)
        return True

    if light.type == 'HEMI':
        infinite_type = 'infinitesample' if light.luxrender_lamp.luxrender_lamp_hemi.hdri_infinitesample else 'infinite'
        attr_light(scene, lux_context, light, ob.name, light_group,
                   infinite_type, light_params,
                   transform=matrix_to_list(matrix, apply_worldscale=True),
                   portals=portals)
        return True

    if light.type == 'SPOT':
        light_params.update(ies_data)
        # Blender stores the full cone angle; Lux wants the half-angle.
        coneangle = degrees(light.spot_size) * 0.5
        conedeltaangle = degrees(light.spot_size * 0.5 * light.spot_blend)

        if light.luxrender_lamp.luxrender_lamp_spot.projector:
            light_type = 'projection'
            light_params.add_float('fov', coneangle * 2)
        else:
            light_type = 'spot'
            light_params.add_point('from', (0, 0, 0))
            light_params.add_point('to', (0, 0, -1))
            light_params.add_float('coneangle', coneangle)
            light_params.add_float('conedeltaangle', conedeltaangle)

        attr_light(scene, lux_context, light, ob.name, light_group,
                   light_type, light_params,
                   transform=matrix_to_list(matrix, apply_worldscale=True),
                   portals=portals)
        return True

    if light.type == 'POINT':
        light_params.update(ies_data)
        # Here the "use sphere" option kicks in. If true, export a spherical
        # area light (using Lux's geometric sphere primitive) rather than a
        # true point light. No sphere primitives with hybrid!
        if light.luxrender_lamp.luxrender_lamp_point.usesphere and scene.luxrender_rendermode.renderer != 'hybrid':
            # Overwrite gain with a gain scaled by ws^2 to account for the
            # change in emitter area under world scaling.
            light_params.add_float(
                'gain',
                light.energy * lg_gain *
                (get_worldscale(as_scalematrix=False)**2))
            # Add this in manually, it is not used for the true point light
            # and thus is not in the normal parameter set.
            light_params.add_integer(
                'nsamples',
                [light.luxrender_lamp.luxrender_lamp_point.nsamples])
            lux_context.attributeBegin(ob.name, file=Files.MAIN)
            lux_context.transform(matrix_to_list(matrix,
                                                 apply_worldscale=True))
            lux_context.lightGroup(light_group, [])

            if light.luxrender_lamp.Exterior_volume != '':
                lux_context.exterior(light.luxrender_lamp.Exterior_volume)
            elif scene.luxrender_world.default_exterior_volume != '':
                lux_context.exterior(
                    scene.luxrender_world.default_exterior_volume)

            if light.luxrender_lamp.luxrender_lamp_point.null_lamp:
                mat_params = ParamSet()
                mat_params.add_string('type', 'null')
                lux_context.makeNamedMaterial(ob.name, mat_params)
                lux_context.namedMaterial(ob.name)

            lux_context.areaLightSource('area', light_params)

            # always remove blender object_scale to avoid corona-effect
            # NOTE(review): indexes bpy.data.objects by the lamp datablock
            # name, not ob.name — assumes the two names match; verify.
            x, y, z = bpy.data.objects[light.name].scale.copy()
            lux_context.scale(1 / x, 1 / y, 1 / z)

            shape_params = ParamSet()

            # Fetch point light size and use it for the sphere primitive's
            # radius param.
            shape_params.add_float(
                'radius',
                [light.luxrender_lamp.luxrender_lamp_point.pointsize])
            shape_params.add_string('name', light.name)
            lux_context.shape('sphere', shape_params)

            for portal in portals:
                lux_context.portalInstance(portal)

            lux_context.attributeEnd()

        else:  # export an actual point light
            # (0,0,0) is correct since there is an active Transform
            light_params.add_point('from', (0, 0, 0))
            attr_light(scene, lux_context, light, ob.name, light_group,
                       'point', light_params,
                       transform=matrix_to_list(matrix, apply_worldscale=True),
                       portals=portals)
        return True

    if light.type == 'AREA':
        light_params.update(ies_data)
        # overwrite gain with a gain scaled by ws^2 to account for change in lamp area
        light_params.add_float(
            'gain',
            light.energy * lg_gain * (get_worldscale(as_scalematrix=False)**2))
        lux_context.attributeBegin(ob.name, file=Files.MAIN)
        lux_context.transform(matrix_to_list(matrix, apply_worldscale=True))
        lux_context.lightGroup(light_group, [])

        if light.luxrender_lamp.Exterior_volume != '':
            lux_context.exterior(light.luxrender_lamp.Exterior_volume)
        elif scene.luxrender_world.default_exterior_volume != '':
            lux_context.exterior(scene.luxrender_world.default_exterior_volume)

        if light.luxrender_lamp.luxrender_lamp_area.null_lamp:
            mat_params = ParamSet()

            if scene.luxrender_rendermode.renderer == 'slg':  # Workaround: SLGRenderer supports only area lights with constant ConstantRGBColorTexture
                mat_params.add_string('type', 'matte')
            else:
                mat_params.add_string('type', 'null')

            lux_context.makeNamedMaterial(ob.name, mat_params)
            lux_context.namedMaterial(ob.name)

        lux_context.areaLightSource('area', light_params)

        areax = light.size

        if light.shape == 'SQUARE':
            areay = areax
        elif light.shape == 'RECTANGLE':
            areay = light.size_y
        else:
            areay = areax  # not supported yet

        # Quad in the lamp's local XY plane, centred on the origin.
        points = [
            -areax / 2.0, areay / 2.0, 0.0, areax / 2.0, areay / 2.0, 0.0,
            areax / 2.0, -areay / 2.0, 0.0, -areax / 2.0, -areay / 2.0, 0.0
        ]

        shape_params = ParamSet()

        if lux_context.API_TYPE == 'PURE':
            # ntris isn't really the number of tris!!
            shape_params.add_integer('ntris', 6)
            shape_params.add_integer('nvertices', 4)

        shape_params.add_integer('indices', [0, 1, 2, 0, 2, 3])
        shape_params.add_point('P', points)
        shape_params.add_string('name', light.name)

        lux_context.shape('trianglemesh', shape_params)

        for portal in portals:
            lux_context.portalInstance(portal)

        lux_context.attributeEnd()

        return True

    return False
示例#24
0
    def execute(self, master_scene):
        """Export the master scene (plus its background set) to an Indigo .igs file.

        Builds the scene XML (render settings, tonemapping, materials,
        camera, light layers, media, meshes), samples object/camera motion
        over the exposure when motion blur is enabled, writes per-frame
        object instances to a separate objects.igs include file, optionally
        writes the selected OpenCL compute devices into the Indigo settings
        file, and returns a Blender operator status set:
        {'FINISHED'} on success, {'CANCELLED'} on any error.
        """
        try:
            if master_scene is None:
                #indigo_log('Scene context is invalid')
                raise Exception('Scene context is invalid')

            #------------------------------------------------------------------------------
            # Init stats
            if self.verbose: indigo_log('Indigo export started ...')
            export_start_time = time.time()

            igs_filename = self.check_output_path(self.properties.directory)
            # Background set is exported first so the master scene's content wins.
            export_scenes = [master_scene.background_set, master_scene]

            if self.verbose: indigo_log('Export render settings')

            #------------------------------------------------------------------------------
            # Start with render settings, this also creates the root <scene>
            self.scene_xml = master_scene.indigo_engine.build_xml_element(
                master_scene)

            #------------------------------------------------------------------------------
            # Tonemapping
            self.export_tonemapping(master_scene)

            #------------------------------------------------------------------------------
            # Materials - always export the default clay material and a null material
            self.export_default_materials(master_scene)

            # Initialise values used for motion blur export.
            fps = master_scene.render.fps / master_scene.render.fps_base
            start_frame = master_scene.frame_current
            # Exposure in seconds; used to decide how many frames the blur spans.
            exposure = 1 / master_scene.camera.data.indigo_camera.exposure
            # (camera object, list of (normalised_time, matrix) keyframes).
            camera = (master_scene.camera, [])

            # Make a relative igs and mesh dir path like "TheAnimation/00002"
            rel_mesh_dir = efutil.scene_filename()
            rel_frame_dir = '%s/%05i' % (
                rel_mesh_dir, start_frame
            )  #bpy.path.clean_name(master_scene.name),
            mesh_dir = '/'.join([efutil.export_path, rel_mesh_dir])
            frame_dir = '/'.join([efutil.export_path, rel_frame_dir])

            # Initialise GeometryExporter.
            geometry_exporter = geometry.GeometryExporter()
            geometry_exporter.mesh_dir = mesh_dir
            geometry_exporter.rel_mesh_dir = rel_mesh_dir
            geometry_exporter.skip_existing_meshes = master_scene.indigo_engine.skip_existing_meshes
            geometry_exporter.verbose = self.verbose

            # Make frame_dir directory if it does not exist yet.
            if not os.path.exists(frame_dir):
                os.makedirs(frame_dir)

            if master_scene.indigo_engine.motionblur:
                # When motion blur is on, calculate the number of frames covered by the exposure time
                start_time = start_frame / fps
                end_time = start_time + exposure
                end_frame = math.ceil(end_time * fps)

                # end_frame + 1 because range is max excl
                frame_list = [x for x in range(start_frame, end_frame + 1)]
            else:
                frame_list = [start_frame]

            #indigo_log('frame_list: %s'%frame_list)

            #------------------------------------------------------------------------------
            # Process all objects in all frames in all scenes.
            for cur_frame in frame_list:
                # Calculate normalised time for keyframes.
                normalised_time = (cur_frame - start_frame) / fps / exposure
                if self.verbose:
                    indigo_log('Processing frame: %i time: %f' %
                               (cur_frame, normalised_time))

                geometry_exporter.normalised_time = normalised_time

                if master_scene.indigo_engine.motionblur:
                    bpy.context.scene.frame_set(
                        cur_frame, 0.0
                    )  # waaay too slow for many objects (probably dupli_list gets recreated). Obligatory for motion blur.
                else:
                    bpy.context.scene.frame_current = cur_frame  # is it enough?

                # Add Camera matrix.
                camera[1].append(
                    (normalised_time, camera[0].matrix_world.copy()))

                for ex_scene in export_scenes:
                    if ex_scene is None: continue

                    if self.verbose:
                        indigo_log('Processing objects for scene %s' %
                                   ex_scene.name)
                    geometry_exporter.iterateScene(ex_scene)

            # Export background light if no light exists.
            self.export_default_background_light(
                geometry_exporter.isLightingValid())

            #------------------------------------------------------------------------------
            # Export camera
            if self.verbose: indigo_log('Exporting camera')
            self.scene_xml.append(
                camera[0].data.indigo_camera.build_xml_element(
                    master_scene, camera[1]))
            #------------------------------------------------------------------------------
            # Export light layers
            from ..export.light_layer import light_layer_xml
            # TODO:
            # light_layer_count was supposed to export correct indices when there
            # is a background_set with emitters on light layers -
            # however, the re-indexing at material export time is non-trivial for
            # now and probably not worth it.
            #light_layer_count = 0
            xml_render_settings = self.scene_xml.find('renderer_settings')
            for ex_scene in export_scenes:
                if ex_scene is None: continue

                # Light layer names
                lls = ex_scene.indigo_lightlayers.enumerate()

                # Emit layers in ascending index order so indices stay stable.
                for layer_name, idx in sorted(lls.items(), key=lambda x: x[1]):
                    if self.verbose:
                        indigo_log('Light layer %i: %s' % (idx, layer_name))
                    xml_render_settings.append(
                        light_layer_xml().build_xml_element(
                            ex_scene, idx, layer_name))

            if self.verbose: indigo_log('Exporting lamps')

            # use special n==1 case due to bug in indigo <sum> material
            num_lamps = len(geometry_exporter.ExportedLamps)

            if num_lamps == 1:
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_mat = ET.Element(
                    'background_material')
                scene_background_settings.append(scene_background_settings_mat)

                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        scene_background_settings_mat.append(xml)

                self.scene_xml.append(scene_background_settings)

            if num_lamps > 1:

                # Multiple lamps are combined with an equal-weight <sum> material.
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_fmt = {
                    'background_material': {
                        'material': {
                            'name': ['background_material'],
                            'sum': {
                                'mat': xml_multichild()
                            }
                        }
                    }
                }

                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        self.scene_xml.append(xml)

                    scene_background_settings_fmt['background_material'][
                        'material']['sum']['mat'].append({
                            'mat_name': [ck],
                            'weight': {
                                'constant': [1]
                            }
                        })
                scene_background_settings_obj = xml_builder()
                scene_background_settings_obj.build_subelements(
                    None, scene_background_settings_fmt,
                    scene_background_settings)
                self.scene_xml.append(scene_background_settings)

            #------------------------------------------------------------------------------
            # Export Medium
            from ..export.materials.medium import medium_xml
            # TODO:
            # check if medium is currently used by any material and add
            # basic medium for SpecularMaterial default

            for ex_scene in export_scenes:
                if ex_scene is None: continue

                indigo_material_medium = ex_scene.indigo_material_medium
                medium = indigo_material_medium.medium

                if len(indigo_material_medium.medium.items()) == 0: continue

                for medium_name, medium_data in medium.items():

                    medium_index = ex_scene.indigo_material_medium.medium.find(
                        medium_name)  # more precise if same name

                    indigo_log('Exporting medium: %s ' % (medium_name))
                    self.scene_xml.append(
                        medium_xml(ex_scene, medium_name, medium_index,
                                   medium_data).build_xml_element(
                                       ex_scene, medium_name, medium_data))
                # NOTE(review): uses the loop variable medium_name after the
                # loop, so this logs only the last medium — looks like a
                # leftover duplicate of the per-medium log above; confirm.
                indigo_log('Exporting Medium: %s ' % (medium_name))
                # TODO:
                # check for unused medium
            # Always append a fallback "basic" medium (fixed uid) so specular
            # materials have a valid default.
            basic_medium = ET.fromstring("""
                                <medium>
                                   <uid>4294967294</uid>
		                             <name>basic</name>
			                           <precedence>10</precedence>
			                             <basic>
				                           <ior>1.5</ior>
				                           <cauchy_b_coeff>0</cauchy_b_coeff>
				                           <max_extinction_coeff>1</max_extinction_coeff>
				                           <absorption_coefficient>
					                         <constant>
						                      <uniform>
							                   <value>0</value>
						                      </uniform>
					                         </constant>
				                           </absorption_coefficient>
			                             </basic>
	                            </medium>   
                         """)

            self.scene_xml.append(basic_medium)

            #------------------------------------------------------------------------------
            # Export used materials.
            if self.verbose: indigo_log('Exporting used materials')
            material_count = 0
            for ck, ci in geometry_exporter.ExportedMaterials.items():
                for xml in ci:
                    self.scene_xml.append(xml)
                material_count += 1
            if self.verbose:
                indigo_log('Exported %i materials' % material_count)

            # Export used meshes.
            if self.verbose: indigo_log('Exporting meshes')
            mesh_count = 0
            for ck, ci in geometry_exporter.MeshesOnDisk.items():
                mesh_name, xml = ci
                self.scene_xml.append(xml)
                mesh_count += 1
            if self.verbose: indigo_log('Exported %i meshes' % mesh_count)

            #------------------------------------------------------------------------------
            # We write object instances to a separate file
            oc = 0
            scene_data_xml = ET.Element('scenedata')
            for ck, ci in geometry_exporter.ExportedObjects.items():
                obj_type = ci[0]

                if obj_type == 'OBJECT':
                    obj = ci[1]
                    mesh_name = ci[2]
                    obj_matrices = ci[3]
                    scene = ci[4]

                    xml = geometry.model_object(scene).build_xml_element(
                        obj, mesh_name, obj_matrices)
                else:
                    xml = ci[1]
                scene_data_xml.append(xml)
                oc += 1

            objects_file_name = '%s/objects.igs' % (frame_dir)
            objects_file = open(objects_file_name, 'wb')
            ET.ElementTree(element=scene_data_xml).write(objects_file,
                                                         encoding='utf-8')
            objects_file.close()
            # indigo_log('Exported %i object instances to %s' % (oc,objects_file_name))
            # Reference the objects file from the main scene via <include>.
            scene_data_include = include.xml_include(
                efutil.path_relative_to_export(objects_file_name))
            self.scene_xml.append(
                scene_data_include.build_xml_element(master_scene))

            #------------------------------------------------------------------------------
            # Write formatted XML for settings, materials and meshes
            out_file = open(igs_filename, 'w')
            xml_str = ET.tostring(self.scene_xml, encoding='utf-8').decode()

            # substitute back characters protected from entity encoding in CDATA nodes
            xml_str = xml_str.replace('{_LESSTHAN_}', '<')
            xml_str = xml_str.replace('{_GREATERTHAN_}', '>')

            # Round-trip through minidom purely for pretty-printing.
            xml_dom = MD.parseString(xml_str)
            xml_dom.writexml(out_file,
                             addindent='\t',
                             newl='\n',
                             encoding='utf-8')
            out_file.close()

            #------------------------------------------------------------------------------
            # Computing devices
            if len(master_scene.indigo_engine.render_devices):
                from ..core.util import getSettingsPath
                settings_file = getSettingsPath()

                outermark = \
                """<selected_opencl_devices>
                {}
                </selected_opencl_devices>"""

                devicemark = \
                """<device>
                        <device_name><![CDATA[{}]]></device_name>
                        <vendor_name><![CDATA[{}]]></vendor_name>
                        <id>{}</id>
                    </device>"""
                devices = ''
                for d in bpy.context.scene.indigo_engine.render_devices:
                    if d.use:
                        devices += devicemark.format(d.device, d.vendor, d.id)
                selected_devices_xml = outermark.format(devices)

                if os.path.exists(settings_file):
                    # settings file exists
                    with open(settings_file, 'r') as f:
                        xml_string = f.read()

                    # The settings file is patched with regex rather than an
                    # XML parser so unrelated content is left untouched.
                    import re
                    pattern = r'<settings>.*</settings>'
                    if re.search(pattern, xml_string,
                                 re.DOTALL | re.IGNORECASE):
                        # <settings> tag exists (file seems to be correct)
                        pattern = r'<selected_opencl_devices>.*</selected_opencl_devices>'
                        if re.search(pattern, xml_string,
                                     re.DOTALL | re.IGNORECASE):
                            # computing devices already exists
                            xml_string = re.sub(pattern,
                                                selected_devices_xml,
                                                xml_string,
                                                flags=re.DOTALL
                                                | re.IGNORECASE)
                        else:
                            # computing devices does not exists yet
                            xml_string = re.sub(
                                r'</settings>',
                                selected_devices_xml + "</settings>",
                                xml_string,
                                flags=re.DOTALL | re.IGNORECASE)
                    else:
                        # settings tag does not exist. create new body
                        xml_string =\
                """<?xml version="1.0" encoding="utf-8"?>
                <settings>
                    {}
                </settings>""".format(selected_devices_xml)

                else:
                    # create new file
                    xml_string =\
                """<?xml version="1.0" encoding="utf-8"?>
                <settings>
                    {}
                </settings>""".format(selected_devices_xml)

                with open(settings_file, 'w') as f:
                    f.write(xml_string)

            #------------------------------------------------------------------------------
            # Print stats
            export_end_time = time.time()
            if self.verbose:
                indigo_log('Total mesh export time: %f seconds' %
                           (geometry_exporter.total_mesh_export_time))
            indigo_log('Export finished; took %f seconds' %
                       (export_end_time - export_start_time))

            # Reset to start_frame.
            if len(frame_list) > 1:
                bpy.context.scene.frame_set(start_frame)

            return {'FINISHED'}

        except Exception as err:
            # Top-level operator boundary: log the failure and cancel; re-raise
            # only when object analysis debugging is requested via env var.
            indigo_log('%s' % err, message_type='ERROR')
            if os.getenv('B25_OBJECT_ANALYSIS', False):
                raise err
            return {'CANCELLED'}
示例#25
0
 def build_xml_element(self, scene, matrix_list):
     """Build and return the <camera> XML element for Indigo export.

     Collects aperture, sensor, white balance, transform, focus and
     aperture-diffraction settings into an xml_format dict and renders it
     with build_subelements.

     :param scene:       Blender scene providing camera data and settings.
     :param matrix_list: list of (time, world_matrix) camera keyframes; the
                         first entry supplies the static pose.
     :return: the populated 'camera' XML element.
     """
     xml = self.Element('camera')

     # NOTE(review): list values appear to be literal element content, while
     # bare-string values (like 'exposure' below) appear to name a property
     # to be looked up by the xml builder — confirm against xml_builder.
     xml_format = {
         'aperture_radius': [aperture_radius(scene, self)],
         'sensor_width': [scene.camera.data.sensor_width / 1000.0],
         'lens_sensor_dist': [lens_sensor_dist(scene, self)],
         'aspect_ratio': [aspect_ratio(scene, self)],
         'exposure_duration': 'exposure',
     }

     if self.whitebalance == 'Custom':
         xml_format['white_point'] = {
             'chromaticity_coordinates': {
                 'x': [self.whitebalanceX],
                 'y': [self.whitebalanceY],
             }
         }
     else:
         # NOTE(review): the trailing comma makes this a 1-tuple
         # ('whitebalance',) rather than the bare string used by
         # 'exposure_duration' above — confirm xml_builder accepts tuples
         # here, otherwise the comma is a bug.
         xml_format['white_balance'] = 'whitebalance',

     ws = get_worldscale(scene)

     if(scene.camera.data.type == 'ORTHO'):
         xml_format['camera_type'] = ['orthographic']
         xml_format['sensor_width'] = [scene.camera.data.ortho_scale * ws] # Blender seems to use 'ortho_scale' for the sensor width.

     # Static pose from the first keyframe; rows of the transposed matrix
     # give the basis vectors (forwards is negated Z).
     mat = matrix_list[0][1].transposed()

     xml_format['pos']        = [ i*ws for i in mat[3][0:3]]
     xml_format['forwards']    = [-i*ws for i in mat[2][0:3]]
     xml_format['up']        = [ i*ws for i in mat[1][0:3]]

     if len(matrix_list) > 1:
         # Remove pos, conflicts with keyframes.
         del(xml_format['pos'])

         keyframes = exportutil.matrixListToKeyframes(scene, scene.camera, matrix_list)

         xml_format['keyframe'] = tuple(keyframes)

     if self.autofocus:
         xml_format['autofocus'] = '' # is empty element
         xml_format['focus_distance'] = [10.0]  # any non-zero value will do
     else:
         if scene.camera.data.dof_object is not None:
             # Focus on the distance to the chosen DOF target object.
             xml_format['focus_distance'] = [((scene.camera.location - scene.camera.data.dof_object.location).length*ws)]
         elif scene.camera.data.dof_distance > 0:
             xml_format['focus_distance'] = [scene.camera.data.dof_distance*ws]
         else: #autofocus
             xml_format['autofocus'] = '' # is empty element
             xml_format['focus_distance'] = [10.0]  # any non-zero value will do

     # Aperture diffraction settings (optional obstacle map plus one of
     # image / generated / circular aperture shapes).
     if self.ad:
         xml_format.update({
             'aperture_shape': {}
         })
         if self.ad_obstacle != '':
             ad_obstacle = efutil.filesystem_path(self.ad_obstacle)
             if os.path.exists(ad_obstacle):
                 xml_format.update({
                     'obstacle_map': {
                         'path': [efutil.path_relative_to_export(ad_obstacle)]
                     }
                 })
             else:
                 indigo_log('WARNING: Camera Obstacle Map specified, but image path is not valid')

         if self.ad_type == 'image':
             ad_image = efutil.filesystem_path(self.ad_image)
             if os.path.exists(ad_image):
                 xml_format['aperture_shape'].update({
                     'image': {
                         'path': [efutil.path_relative_to_export(ad_image)]
                     }
                 })
             else:
                 indigo_log('WARNING: Camera Aperture Diffraction type "Image" selected, but image path is not valid')

         elif self.ad_type == 'generated':
             xml_format['aperture_shape'].update({
                 'generated': {
                     'num_blades': [self.ad_blades],
                     'start_angle': [self.ad_angle],
                     'blade_offset': [self.ad_offset],
                     'blade_curvature_radius': [self.ad_curvature]
                 }
             })
         elif self.ad_type == 'circular':
             xml_format['aperture_shape'][self.ad_type] = {}

     # Lens shift: Blender's shift is a fraction of sensor size; convert to
     # metres and correct for portrait aspect ratios.
     aspect = aspect_ratio(scene, self)
     if scene.camera.data.shift_x != 0:
         sx = scene.camera.data.shift_x * 0.001*scene.camera.data.sensor_width
         if aspect < 1.0:
             sx /= aspect
         xml_format['lens_shift_right_distance'] = [sx]

     if scene.camera.data.shift_y != 0:
         sy = scene.camera.data.shift_y * 0.001*scene.camera.data.sensor_width
         if aspect < 1.0:
             sy /= aspect
         xml_format['lens_shift_up_distance'] = [sy]

     self.build_subelements(scene, xml_format, xml)

     return xml
示例#26
0
    def buildNativeMesh(self, obj):
        """Convert a supported blender object into a MESH and export it.

        The mesh is split into parts according to per-face material
        assignment, and each part is written as a Mitsuba binary
        '.serialized' mesh file.

        Parameters:
            obj: the blender object to export.

        Returns:
            A list of mesh_definition tuples of the form
            (mesh_name, material_index, shape_type, ParamSet), where
            shape_type is 'serialized' or 'instance'.  The list is empty
            if the object could not be exported.
        """
        mesh_definitions = []
        try:
            mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
            if mesh is None:
                raise UnexportableObjectException(
                    'Cannot create render/export mesh')

            # Collate faces by material index.
            ffaces_mats = {}
            for f in mesh.tessfaces:
                ffaces_mats.setdefault(f.material_index, []).append(f)
            material_indices = ffaces_mats.keys()

            if len(mesh.materials) > 0 and mesh.materials[0] is not None:
                mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
            else:
                mats = [(0, None)]

            for i, mat in mats:
                try:
                    # Skip material slots that no face actually uses.
                    if i not in material_indices: continue

                    # If this mesh/mat-index combo has already been processed,
                    # get it from the cache.
                    mesh_cache_key = (self.geometry_scene, obj.data, i)
                    if self.allow_instancing(obj) and self.ExportedMeshes.have(
                            mesh_cache_key):
                        mesh_definitions.append(
                            self.ExportedMeshes.get(mesh_cache_key))
                        continue

                    # Put Serialized files in frame-numbered subfolders to avoid
                    # clobbering when rendering animations.
                    sc_fr = '%s/%s/%s/%05d' % (
                        self.mts_context.meshes_dir, efutil.scene_filename(),
                        bpy.path.clean_name(self.geometry_scene.name),
                        self.visibility_scene.frame_current)
                    if not os.path.exists(sc_fr):
                        os.makedirs(sc_fr)

                    def make_serfilename():
                        # The serial number keeps file names unique per cache key.
                        ser_serial = self.ExportedSERs.serial(mesh_cache_key)
                        mesh_name = '%s_%04d_m%03d' % (obj.data.name,
                                                       ser_serial, i)
                        ser_filename = '%s.serialized' % bpy.path.clean_name(
                            mesh_name)
                        ser_path = '/'.join([sc_fr, ser_filename])
                        return mesh_name, ser_path

                    mesh_name, ser_path = make_serfilename()

                    # Ensure that all Serialized files have unique names.
                    while self.ExportedSERs.have(ser_path):
                        mesh_name, ser_path = make_serfilename()

                    self.ExportedSERs.add(ser_path, None)

                    # Skip writing the Serialized file if partial export is
                    # enabled and the object is known and unmodified.
                    skip_exporting = obj in self.KnownExportedObjects and obj not in self.KnownModifiedObjects
                    if not os.path.exists(ser_path) or not (
                            self.visibility_scene.mitsuba_engine.partial_export
                            and skip_exporting):

                        GeometryExporter.NewExportedObjects.add(obj)

                        # FIX: uv_layer must always be bound.  The original code
                        # left it undefined when uv_textures existed but no
                        # active layer with data did, causing a NameError below.
                        uv_layer = None
                        uv_textures = mesh.tessface_uv_textures
                        if len(uv_textures) > 0 and uv_textures.active and uv_textures.active.data:
                            uv_layer = uv_textures.active.data

                        # Export data buffers ('d' = double precision, matching
                        # the 0x2000 flag written to the file header below).
                        points = array.array('d', [])
                        normals = array.array('d', [])
                        uvs = array.array('d', [])
                        ntris = 0
                        face_vert_indices = array.array('I', [])  # flat list of face vert indices

                        # Cache: maps (co, normal, uv) tuple -> exported vert
                        # index, so smooth-shaded verts are shared between faces.
                        vert_vno_indices = {}

                        vert_index = 0  # next exported vert index
                        for face in ffaces_mats[i]:
                            fvi = []
                            for j, vertex in enumerate(face.vertices):
                                v = mesh.vertices[vertex]

                                if uv_layer:
                                    # Flip UV Y axis. Blender UV coord is
                                    # bottom-left, Mitsuba is top-left.
                                    uv_coord = (uv_layer[face.index].uv[j][0],
                                                1.0 -
                                                uv_layer[face.index].uv[j][1])

                                if face.use_smooth:
                                    # Smooth faces use the vertex normal, which
                                    # is shareable -> deduplicate via the cache.
                                    if uv_layer:
                                        vert_data = (v.co[:], v.normal[:],
                                                     uv_coord)
                                    else:
                                        vert_data = (v.co[:], v.normal[:],
                                                     tuple())

                                    if vert_data not in vert_vno_indices:
                                        points.extend(vert_data[0])
                                        normals.extend(vert_data[1])
                                        uvs.extend(vert_data[2])

                                        vert_vno_indices[vert_data] = vert_index
                                        fvi.append(vert_index)

                                        vert_index += 1
                                    else:
                                        fvi.append(vert_vno_indices[vert_data])

                                else:
                                    # Flat faces use the face normal, so every
                                    # face-vert is unique; no caching possible.
                                    points.extend(v.co[:])
                                    normals.extend(face.normal[:])
                                    if uv_layer: uvs.extend(uv_coord)

                                    fvi.append(vert_index)

                                    vert_index += 1

                            # For Mitsuba, we need to triangulate quad faces:
                            # first triangle always, second for quads.
                            face_vert_indices.extend(fvi[0:3])
                            ntris += 3
                            if len(fvi) == 4:
                                face_vert_indices.extend(
                                    (fvi[0], fvi[2], fvi[3]))
                                ntris += 3

                        del vert_vno_indices

                        with open(ser_path, 'wb') as ser:
                            # Mesh flags: 0x2000 double precision,
                            # 0x0001 vertex normals, 0x0002 uv layer present.
                            flags = 0x2000 | 0x0001
                            if uv_layer:
                                flags |= 0x0002

                            # Serialized file header: magic number + version.
                            ser.write(struct.pack('<HH', 0x041C, 0x0004))

                            # The mesh payload is zlib-compressed.
                            encoder = zlib.compressobj()
                            ser.write(
                                encoder.compress(struct.pack('<I', flags)))
                            ser.write(
                                encoder.compress(
                                    bytes(mesh_name + "_serialized\0",
                                          'latin-1')))
                            ser.write(
                                encoder.compress(
                                    struct.pack('<QQ', vert_index,
                                                int(ntris / 3))))
                            # FIX: array.tobytes() replaces tostring(), which
                            # was removed in Python 3.9.
                            ser.write(encoder.compress(points.tobytes()))
                            ser.write(encoder.compress(normals.tobytes()))
                            if uv_layer:
                                ser.write(encoder.compress(uvs.tobytes()))
                            ser.write(
                                encoder.compress(face_vert_indices.tobytes()))
                            ser.write(encoder.flush())

                            # Uncompressed trailer: mesh dictionary offset and
                            # mesh count (this file holds a single mesh).
                            ser.write(struct.pack('<Q', 0))
                            ser.write(struct.pack('<I', 1))
                            # FIX: removed redundant ser.close(); the 'with'
                            # statement closes the file.

                        MtsLog('Binary Serialized file written: %s' %
                               (ser_path))
                    else:
                        MtsLog(
                            'Skipping already exported Serialized mesh: %s' %
                            mesh_name)

                    shape_params = ParamSet().add_string(
                        'filename', efutil.path_relative_to_export(ser_path))
                    if obj.data.mitsuba_mesh.normals == 'facenormals':
                        shape_params.add_boolean('faceNormals',
                                                 {'value': 'true'})

                    mesh_definition = (mesh_name, i, 'serialized',
                                       shape_params)
                    # Only export Shapegroup and cache this mesh_definition if
                    # we plan to use instancing.
                    if self.allow_instancing(
                            obj) and self.exportShapeDefinition(
                                obj, mesh_definition):
                        shape_params = ParamSet().add_reference(
                            'id', '', mesh_name + '-shapegroup_%i' % (i))

                        mesh_definition = (mesh_name, i, 'instance',
                                           shape_params)
                        self.ExportedMeshes.add(mesh_cache_key,
                                                mesh_definition)

                    mesh_definitions.append(mesh_definition)

                except InvalidGeometryException as err:
                    MtsLog('Mesh export failed, skipping this mesh: %s' % err)

            del ffaces_mats
            bpy.data.meshes.remove(mesh)

        except UnexportableObjectException as err:
            MtsLog('Object export failed, skipping this object: %s' % err)

        return mesh_definitions
示例#27
0
 def get_channel(self, property_group, channel_name, channel_prop_name):
     """Build the XML-format dict fragment for one material channel.

     Reads the channel's settings from property_group via attributes
     named '<channel_prop_name>_*' and returns a dict of the form
     {channel_name: {...}} describing either a constant spectrum, a
     texture reference, or an inline shader.  Returns an empty dict when
     the channel type is unrecognised, the texture name is empty, or
     (for shaders) when any error occurs.

     Side effects: may append to self.found_texture_indices and
     self.found_textures, and may write an extracted image file into the
     export path for packed/generated Blender images.
     """
     d = {}
     
     # Channel type discriminator: 'spectrum', 'texture' or 'shader'.
     channel_type = getattr(property_group, channel_prop_name + '_type')
     
     if channel_type == 'spectrum':
         spectrum_type = getattr(property_group, channel_prop_name + '_SP_type')
         if spectrum_type == 'rgb':
             # RGB colour scaled by the optional gain property (default 1.0).
             d[channel_name] = {
                 'constant': rgb([i for i in getattr(property_group, channel_prop_name + '_SP_rgb') * getattr(property_group, channel_prop_name + '_SP_rgb_gain', 1.0)])
             }
         elif spectrum_type == 'uniform':
             # Uniform spectrum: value * 10^exponent.
             d[channel_name] = {
                 'constant': uniform([
                     getattr(property_group, channel_prop_name + '_SP_uniform_val') * \
                     10**getattr(property_group, channel_prop_name + '_SP_uniform_exp')
                 ])
             }
         elif spectrum_type == 'blackbody':
             d[channel_name] = {
                 'constant': blackbody(
                     [getattr(property_group, channel_prop_name + '_SP_blackbody_temp')],
                     [getattr(property_group, channel_prop_name + '_SP_blackbody_gain')]
                 )
             }
     
     elif channel_type == 'texture':
         tex_name = getattr(property_group, channel_prop_name + '_TX_texture')
         
         if tex_name: # string is not empty
             # Register the texture only once; later channels referencing
             # the same channel_prop_name reuse the recorded index.
             if channel_prop_name not in self.found_texture_indices:
                 self.found_texture_indices.append(channel_prop_name)
                 
                 if not tex_name in bpy.data.textures:
                     raise Exception("Texture \"%s\" assigned to material \"%s\" doesn't exist!" %(tex_name, self.material_name))
                 
                 tex_property_group = bpy.data.textures[tex_name].indigo_texture
                 
                 if tex_property_group.image_ref == 'file':
                     # Texture points at a file on disk.
                     relative_texture_path = efutil.path_relative_to_export(
                         getattr(tex_property_group, 'path')
                     )
                 elif tex_property_group.image_ref == 'blender':
                     # Texture references a Blender image datablock.
                     if not tex_property_group.image in bpy.data.images:
                         raise Exception("Error with image reference on texture \"%s\"" % tex_name)
                     
                     img = bpy.data.images[tex_property_group.image]
                     
                     if img.filepath == '':
                         bl_img_path = 'blendigo_extracted_image_%s.png' % bpy.path.clean_name(tex_name)
                     else:
                         bl_img_path = img.filepath
                     
                     if img.source != 'FILE' or img.packed_file:
                         # Packed/generated images have no usable file on
                         # disk: render-save a copy into the export folder.
                         bl_file_formatted = os.path.splitext(os.path.basename(bl_img_path))[0]
                         bl_file_formatted = '%s.%s' % (bl_file_formatted, self.scene.render.image_settings.file_format)
                         bl_img_path = os.path.join(
                             efutil.export_path,
                             efutil.scene_filename(),
                             bpy.path.clean_name(self.scene.name),
                             '%05d' % self.scene.frame_current,
                             bl_file_formatted
                         )
                         img.save_render(bl_img_path, self.scene)
                     
                     relative_texture_path = efutil.path_relative_to_export(bl_img_path)
                 
                 # NOTE(review): if image_ref is neither 'file' nor 'blender',
                 # relative_texture_path is unbound and the append below would
                 # raise NameError — confirm the property is a closed enum.
                 
                 # A/B/C mapping values come either from the channel itself
                 # or from the texture's own settings.
                 if not getattr(property_group, channel_prop_name + '_TX_abc_from_tex'):
                     abc_property_group = property_group
                     abc_prefix = channel_prop_name + '_TX_'
                 else:
                     abc_property_group = tex_property_group
                     abc_prefix = ''
                 
                 # Resolve the UV set name to an index; fall back to 0 when
                 # the name is not found (or obj has no uv_textures).
                 uv_set_name  = getattr(property_group, channel_prop_name + '_TX_uvset')
                 try:
                     uv_set_index = self.obj.data.uv_textures.keys().index(uv_set_name)
                 except:
                     uv_set_index = 0
                 
                 self.found_textures.append({
                     'uv_set_index':    [uv_set_index], #getattr(property_group, channel_prop_name + '_TX_uv_index')],
                     'path':            [relative_texture_path],
                     'exponent':        [getattr(tex_property_group, 'gamma')],
                     'a':            [getattr(abc_property_group, abc_prefix + 'A')],
                     'b':            [getattr(abc_property_group, abc_prefix + 'B')],
                     'c':            [getattr(abc_property_group, abc_prefix + 'C')],
                     'smooth':        [str(getattr(property_group, channel_prop_name + '_TX_smooth')).lower()]
                 })
             
             # Refer to the registered texture by its list position.
             d[channel_name] = {
                 'texture': {
                     'texture_index': [ self.found_texture_indices.index(channel_prop_name) ],
                 }
             }
     
     elif channel_type == 'shader':
         # NOTE(review): the bare except silently swallows all errors here,
         # including the missing-text Exception raised just below, leaving
         # the channel absent from the output — confirm this is intended
         # best-effort behavior.
         try:
             shader_name = getattr(property_group, channel_prop_name + '_SH_text')
             if not shader_name in bpy.data.texts:
                 raise Exception('Referenced Text "%s" for shader on material "%s" not found' % (shader_name, self.material_name))
             
             # Leading newline keeps the shader source readable inside CDATA.
             shader_text = '\n' + bpy.data.texts[shader_name].as_string()
             d[channel_name] = {
                 'shader': {
                     'shader': xml_cdata(shader_text)
                 }
             }
         except:
             pass
     
     return d
示例#28
0
	def handler_Duplis_PATH(self, obj, *args, **kwargs):
		"""Export a Hair particle system attached to obj.

		Expects kwargs['particle_system'] to be a HAIR particle system on
		obj.  Depending on psys.settings.luxrender_hair.use_binary_output,
		either writes a binary .hair file and emits a 'hairfile' shape, or
		falls back to instancing cylinder/sphere primitives per segment.
		Returns None; all output goes through self.lux_context and files
		on disk.
		"""
		# Guard: this handler is only meaningful for particle duplis.
		if not 'particle_system' in kwargs.keys():
			LuxLog('ERROR: handler_Duplis_PATH called without particle_system')
			return
		
		psys = kwargs['particle_system']
		
		if not psys.settings.type == 'HAIR':
			LuxLog('ERROR: handler_Duplis_PATH can only handle Hair particle systems ("%s")' % psys.name)
			return
	
		if bpy.context.scene.luxrender_engine.export_hair == False:
			return
			
		# Find the modifier that owns this particle system.
		# NOTE(review): if obj has no modifiers at all, 'mod' is never bound
		# and the check below raises NameError — confirm callers guarantee
		# at least one modifier on particle-dupli objects.
		for mod in obj.modifiers:
			if mod.type == 'PARTICLE_SYSTEM':
				if mod.particle_system.name == psys.name:
					break;

		# If the loop ended without finding a matching, render-enabled
		# particle-system modifier, skip this system.
		if not (mod.type == 'PARTICLE_SYSTEM'):
			return
		elif not mod.particle_system.name == psys.name or mod.show_render == False:
			return
				
		LuxLog('Exporting Hair system "%s"...' % psys.name)

		# hair_size is a diameter; the shapes below use a radius.
		size = psys.settings.luxrender_hair.hair_size / 2.0
		psys.set_resolution(self.geometry_scene, obj, 'RENDER')
		steps = 2**psys.settings.render_step
		num_parents = len(psys.particles)
		num_children = len(psys.child_particles)
		if num_children == 0:
			start = 0
		else:
			# Number of virtual parents reduces the number of exported children
			num_virtual_parents = math.trunc(0.3 * psys.settings.virtual_parents * psys.settings.child_nbr * num_parents)
			start = num_parents + num_virtual_parents
		
		partsys_name = '%s_%s'%(obj.name, psys.name)
		det = DupliExportProgressThread()
		det.start(num_parents + num_children)

		if psys.settings.luxrender_hair.use_binary_output:
			# Put HAIR_FILES files in frame-numbered subfolders to avoid
			# clobbering when rendering animations
			sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
			if not os.path.exists( sc_fr ):
				os.makedirs(sc_fr)
					
			hair_filename = '%s.hair' % bpy.path.clean_name(partsys_name)
			hair_file_path = '/'.join([sc_fr, hair_filename])

			# Accumulators for the binary hair file.
			segments = []              # per-strand segment counts
			points = []                # all strand points (object space)
			thickness = []             # unused here; format supports per-point thickness
			colors = []                # per-point colors (if colorflag)
			uv_coords = []             # per-point uvs (if uvflag)
			total_segments_count = 0
			vertex_color_layer = None
			uv_tex = None
			colorflag = 0
			uvflag = 0
			image_width = 0
			image_height = 0
			image_pixels = []
			
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			uv_textures = mesh.tessface_uv_textures
			vertex_color =  mesh.tessface_vertex_colors

			# Color source: emitter vertex colors ...
			if psys.settings.luxrender_hair.export_color == 'vertex_color':
				if vertex_color.active and vertex_color.active.data:
					vertex_color_layer = vertex_color.active.data
					colorflag = 1

			# ... or sampled from the active UV texture image.
			if uv_textures.active and uv_textures.active.data:
				uv_tex = uv_textures.active.data
				if psys.settings.luxrender_hair.export_color == 'uv_texture_map':
					if uv_tex[0].image:
						image_width = uv_tex[0].image.size[0]
						image_height = uv_tex[0].image.size[1]
						image_pixels = uv_tex[0].image.pixels[:]
						colorflag = 1
				uvflag = 1

			info = 'Created by LuxBlend 2.6 exporter for LuxRender - www.luxrender.net'

			# Strand points are stored in object space; world transform is
			# applied by the attribute's transform below.
			transform = obj.matrix_world.inverted()
			total_strand_count = 0
				
			for pindex in range(start, num_parents + num_children):
				det.exported_objects += 1
				point_count = 0
				i = 0

				# With no children, pindex addresses the parent particle.
				if num_children == 0:
					i = pindex
		
				# A small optimization in order to speedup the export
				# process: cache the uv_co and color value
				uv_co = None
				col = None
				seg_length = 1.0
				for step in range(0, steps):
					co = psys.co_hair(obj, mod, pindex, step)
					# Skip degenerate (zero-length) segments and zero points.
					if (step > 0): seg_length = (co-obj.matrix_world*points[len(points)-1]).length_squared
					if not (co.length_squared == 0 or seg_length == 0):
						points.append(transform*co)
						point_count = point_count + 1

						if uvflag:
							if not uv_co:
								uv_co = psys.uv_on_emitter(mod, psys.particles[i], pindex, uv_textures.active_index)
							uv_coords.append(uv_co)

						if psys.settings.luxrender_hair.export_color == 'uv_texture_map' and not len(image_pixels) == 0:
							# Sample the emitter texture at the strand's uv.
							if not col:
								x_co = round(uv_co[0] * (image_width - 1))
								y_co = round(uv_co[1] * (image_height - 1))
							
								pixelnumber = (image_width * y_co) + x_co
							
								# pixels is a flat RGBA array, 4 floats/pixel.
								r = image_pixels[pixelnumber*4]
								g = image_pixels[pixelnumber*4+1]
								b = image_pixels[pixelnumber*4+2]
								col = (r,g,b)
							colors.append(col)
						elif psys.settings.luxrender_hair.export_color == 'vertex_color':
							if not col:
								col = psys.mcol_on_emitter(mod, psys.particles[i], pindex, vertex_color.active_index)
							colors.append(col)

				# A single-point strand has no segments: drop it.
				if point_count == 1:
					points.pop()
					point_count = point_count - 1
				elif point_count > 1:
					segments.append(point_count - 1)
					total_strand_count = total_strand_count + 1
					total_segments_count = total_segments_count + point_count - 1
			# NOTE(review): the path is converted to an export-relative path
			# *before* open(); this writes relative to the current working
			# directory — confirm the exporter chdirs to the export path first.
			hair_file_path = efutil.path_relative_to_export(hair_file_path)
			with open(hair_file_path, 'wb') as hair_file:
				## Binary hair file format from
				## http://www.cemyuksel.com/research/hairmodels/
				##
				##File header
				hair_file.write(b'HAIR')        #magic number
				hair_file.write(struct.pack('<I', total_strand_count)) #total strand count
				hair_file.write(struct.pack('<I', len(points))) #total point count
				# Bit array: 1=segments array, 2=points array,
				# 16=colors array (colorflag), 32=uvs array (uvflag).
				hair_file.write(struct.pack('<I', 1+2+16*colorflag+32*uvflag)) #bit array for configuration
				hair_file.write(struct.pack('<I', steps))       #default segments count
				hair_file.write(struct.pack('<f', size*2))      #default thickness
				hair_file.write(struct.pack('<f', 0.0))         #default transparency
				color = (0.65, 0.65, 0.65)
				hair_file.write(struct.pack('<3f', *color))     #default color
				hair_file.write(struct.pack('<88s', info.encode())) #information
				
				##hair data
				hair_file.write(struct.pack('<%dH'%(len(segments)), *segments))
				for point in points:
					hair_file.write(struct.pack('<3f', *point))
				if colorflag:
					for col in colors:
						hair_file.write(struct.pack('<3f', *col))
				if uvflag:
					for uv in uv_coords:
						hair_file.write(struct.pack('<2f', *uv))
					
			LuxLog('Binary hair file written: %s' % (hair_file_path))
			
			# Material slot indices are 1-based in particle settings.
			hair_mat = obj.material_slots[psys.settings.material - 1].material

			#Shape parameters
			hair_shape_params = ParamSet()
			
			hair_shape_params.add_string('filename', hair_file_path)
			hair_shape_params.add_string('name', bpy.path.clean_name(partsys_name))
			hair_shape_params.add_point('camerapos', bpy.context.scene.camera.location)
			hair_shape_params.add_string('tesseltype', psys.settings.luxrender_hair.tesseltype)
			hair_shape_params.add_string('acceltype', psys.settings.luxrender_hair.acceltype)
		
			if psys.settings.luxrender_hair.tesseltype in ['ribbonadaptive', 'solidadaptive']:
				hair_shape_params.add_integer('adaptive_maxdepth', psys.settings.luxrender_hair.adaptive_maxdepth)
				hair_shape_params.add_float('adaptive_error', psys.settings.luxrender_hair.adaptive_error)
	
			if psys.settings.luxrender_hair.tesseltype in ['solid', 'solidadaptive']:
				hair_shape_params.add_integer('solid_sidecount', psys.settings.luxrender_hair.solid_sidecount)
				hair_shape_params.add_bool('solid_capbottom', psys.settings.luxrender_hair.solid_capbottom)
				hair_shape_params.add_bool('solid_captop', psys.settings.luxrender_hair.solid_captop)
			
			# Export shape definition to .LXO file
			self.lux_context.attributeBegin('hairfile_%s'%partsys_name)
			self.lux_context.transform( matrix_to_list(obj.matrix_world, apply_worldscale=True) )
			self.lux_context.namedMaterial(hair_mat.name)
			self.lux_context.shape('hairfile', hair_shape_params)
			self.lux_context.attributeEnd()
			# Emit the hair material into the materials file, then switch back.
			self.lux_context.set_output_file(Files.MATS)
			mat_export_result = hair_mat.luxrender_material.export(self.visibility_scene, self.lux_context, hair_mat, mode='indirect')
			self.lux_context.set_output_file(Files.GEOM)
	
		else:
			#Old export with cylinder and sphere primitives
			# This should force the strand/junction objects to be instanced
			self.objects_used_as_duplis.add(obj)
			hair_Junction = (
				(
					'HAIR_Junction_%s'%partsys_name,
					psys.settings.material - 1,
					'sphere',
					ParamSet().add_float('radius', size)
				),
			)
			hair_Strand = (
				(
					'HAIR_Strand_%s'%partsys_name,
					psys.settings.material - 1,
					'cylinder',
					ParamSet() \
						.add_float('radius', size) \
						.add_float('zmin', 0.0) \
						.add_float('zmax', 1.0)
				),
			)
		
			# Define the reusable primitive objects once.
			for sn, si, st, sp in hair_Junction:
				self.lux_context.objectBegin(sn)
				self.lux_context.shape(st, sp)
				self.lux_context.objectEnd()
		
			for sn, si, st, sp in hair_Strand:
				self.lux_context.objectBegin(sn)
				self.lux_context.shape(st, sp)
				self.lux_context.objectEnd()
				
			for pindex in range(num_parents + num_children):
				det.exported_objects += 1
				points = []

				for step in range(0,steps):
					co = psys.co_hair(obj, mod, pindex, step)
					if not co.length_squared == 0:
						points.append(co)
						
				# Resample along a quadratic B-spline when enabled.
				if psys.settings.use_hair_bspline:
					temp = []
					degree = 2
					dimension = 3
					for i in range(math.trunc(math.pow(2,psys.settings.render_step))):
						if i > 0:
							# Tiny epsilon keeps u strictly inside the knot range.
							u = i*(len(points)- degree)/math.trunc(math.pow(2,psys.settings.render_step)-1)-0.0000000000001
						else:
							u = i*(len(points)- degree)/math.trunc(math.pow(2,psys.settings.render_step)-1)
						temp.append(self.BSpline(points, dimension, degree, u))
					points = temp
			
				# Build a per-segment frame aligning the cylinder's z axis
				# with the segment direction.
				for j in range(len(points)-1):
					# transpose SB so we can extract columns
					# TODO - change when matrix.col is available
					SB = obj.matrix_basis.transposed().to_3x3()
					SB = fix_matrix_order(SB) # matrix indexing hack
					v1 = points[j+1] - points[j]
					v2 = SB[2].cross(v1)
					v3 = v1.cross(v2)
					v2.normalize()
					v3.normalize()
					if any(v.length_squared == 0 for v in (v1, v2, v3)):
						#Use standard basis and scale according to segment length
						M = SB
						v = v1+v2+v3
						scale = v.length
						v.normalize()
						M = mathutils.Matrix.Scale(abs(scale),3,v)*M
					else:
						# v1, v2, v3 are the new columns
						# set as rows, transpose later
						M = mathutils.Matrix( (v3,v2,v1) )
						M = fix_matrix_order(M) # matrix indexing hack
					M = M.transposed().to_4x4()
					
					Mtrans = mathutils.Matrix.Translation(points[j])
					matrix = Mtrans * M
				
					self.exportShapeInstances(
						obj,
						hair_Strand,
						matrix=[matrix,None]
					)
					
					self.exportShapeInstances(
						obj,
						hair_Junction,
						matrix=[Mtrans,None]
					)

				# Cap the final point with a junction sphere.
				matrix = mathutils.Matrix.Translation(points[len(points)-1])
				self.exportShapeInstances(
					obj,
					hair_Junction,
					matrix=[matrix,None]
				)
		
		# Restore preview resolution and finish the progress thread.
		psys.set_resolution(self.geometry_scene, obj, 'PREVIEW')
		det.stop()
		det.join()
		
		LuxLog('... done, exported %s hairs' % det.exported_objects)
示例#29
0
	def buildBinaryPLYMesh(self, obj):
		"""
		Convert supported blender objects into a MESH, and then split into parts
		according to vertex material assignment, and construct a mesh_name and
		ParamSet for each part which will become a LuxRender PLYShape statement
		wrapped within objectBegin..objectEnd or placed in an
		attributeBegin..attributeEnd scope, depending if instancing is allowed.
		The actual geometry will be dumped to a binary ply file.
		"""
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate faces by mat index
			ffaces_mats = {}
			for f in mesh.faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			number_of_mats = len(mesh.materials)
			if number_of_mats > 0:
				iterator_range = range(number_of_mats)
			else:
				iterator_range = [0]
			
			for i in iterator_range:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put PLY files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_plyfilename():
						ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ply_serial, i)
						ply_filename = '%s.ply' % bpy.path.clean_name(mesh_name)
						ply_path = '/'.join([sc_fr, ply_filename])
						return mesh_name, ply_path
					
					mesh_name, ply_path = make_plyfilename()
					
					# Ensure that all PLY files have unique names
					while self.ExportedPLYs.have(ply_path):
						mesh_name, ply_path = make_plyfilename()
					
					self.ExportedPLYs.add(ply_path, None)
					
					# skip writing the PLY file if the box is checked
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ply_path) or not (self.visibility_scene.luxrender_engine.partial_ply and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						uv_textures = get_uv_textures(mesh)
						if len(uv_textures) > 0:
							if mesh.uv_textures.active and uv_textures.active.data:
								uv_layer = uv_textures.active.data
						else:
							uv_layer = None
						
						# Here we work out exactly which vert+normal combinations
						# we need to export. This is done first, and the export
						# combinations cached before writing to file because the
						# number of verts needed needs to be written in the header
						# and that number is not known before this is done.
						
						# Export data
						co_no_uv_cache = []
						face_vert_indices = {}		# mapping of face index to list of exported vert indices for that face
						
						# Caches
						vert_vno_indices = {}		# mapping of vert index to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert indices that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if face.use_smooth:
									
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], v.normal[:])
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add( vert_data )
										
										co_no_uv_cache.append( vert_data )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									
									if uv_layer:
										vert_data = (v.co[:], face.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], face.normal[:])
									
									# All face-vert-co-no are unique, we cannot
									# cache them
									co_no_uv_cache.append( vert_data )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							face_vert_indices[face.index] = fvi
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ply_path, 'wb') as ply:
							ply.write(b'ply\n')
							ply.write(b'format binary_little_endian 1.0\n')
							ply.write(b'comment Created by LuxBlend 2.5 exporter for LuxRender - www.luxrender.net\n')
							
							# vert_index == the number of actual verts needed
							ply.write( ('element vertex %d\n' % vert_index).encode() )
							ply.write(b'property float x\n')
							ply.write(b'property float y\n')
							ply.write(b'property float z\n')
							
							ply.write(b'property float nx\n')
							ply.write(b'property float ny\n')
							ply.write(b'property float nz\n')
							
							if uv_layer:
								ply.write(b'property float s\n')
								ply.write(b'property float t\n')
							
							ply.write( ('element face %d\n' % len(ffaces_mats[i])).encode() )
							ply.write(b'property list uchar uint vertex_indices\n')
							
							ply.write(b'end_header\n')
							
							# dump cached co/no/uv
							if uv_layer:
								for co,no,uv in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
									ply.write( struct.pack('<2f', *uv) )
							else:
								for co,no in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
							
							# dump face vert indices
							for face in ffaces_mats[i]:
								lfvi = len(face_vert_indices[face.index])
								ply.write( struct.pack('<B', lfvi) )
								ply.write( struct.pack('<%dI'%lfvi, *face_vert_indices[face.index]) )
							
							del co_no_uv_cache
							del face_vert_indices
						
						LuxLog('Binary PLY file written: %s' % (ply_path))
					else:
						LuxLog('Skipping already exported PLY: %s' % mesh_name)
					
					# Export the shape definition to LXO
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ply_path)
					)
					
					# Add subdiv etc options
					shape_params.update( obj.data.luxrender_mesh.get_paramset() )
					
					mesh_definition = (
						mesh_name,
						i,
						'plymesh',
						shape_params
					)
					mesh_definitions.append( mesh_definition )
					
					# Only export objectBegin..objectEnd and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj):
						self.exportShapeDefinition(obj, mesh_definition)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
				
				except InvalidGeometryException as err:
					LuxLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			LuxLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
示例#30
0
	def buildBinaryPLYMesh(self, obj):
		"""
		Convert supported blender objects into a MESH, and then split into parts
		according to vertex material assignment, and create a binary PLY file.
		
		Returns a list of mesh_definition tuples:
		(mesh_name, material_index, 'ply' or 'instance', shape_params)
		one per exported material part. Unexportable objects yield an empty list.
		"""
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate faces by mat index
			ffaces_mats = {}
			mesh_faces = mesh.tessfaces
			for f in mesh_faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			if len(mesh.materials) > 0 and mesh.materials[0] != None:
				mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
			else:
				mats = [(0, None)]
			
			for i, mat in mats:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put PLY files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (self.mts_context.meshes_dir, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_plyfilename():
						# The serial number distinguishes repeated exports of the same mesh/mat combo
						ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ply_serial, i)
						ply_filename = '%s.ply' % bpy.path.clean_name(mesh_name)
						ply_path = '/'.join([sc_fr, ply_filename])
						return mesh_name, ply_path
					
					mesh_name, ply_path = make_plyfilename()
					
					# Ensure that all PLY files have unique names
					while self.ExportedPLYs.have(ply_path):
						mesh_name, ply_path = make_plyfilename()
					
					self.ExportedPLYs.add(ply_path, None)
					
					# skip writing the PLY file if the box is checked
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ply_path) or not (self.visibility_scene.mitsuba_engine.partial_export and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# BUGFIX: initialise uv_layer unconditionally. Previously it
						# was left unbound (NameError) when UV textures existed but
						# there was no active data layer, and could carry a stale
						# value over from an earlier material iteration.
						uv_layer = None
						uv_textures = mesh.tessface_uv_textures
						if len(uv_textures) > 0:
							if uv_textures.active and uv_textures.active.data:
								uv_layer = uv_textures.active.data
						
						# Here we work out exactly which vert+normal combinations
						# we need to export. This is done first, and the export
						# combinations cached before writing to file because the
						# number of verts needed needs to be written in the header
						# and that number is not known before this is done.
						
						# Export data
						ntris = 0
						co_no_uv_cache = []
						face_vert_indices = []		# list of triangulated faces as exported vert index tuples
						
						# Caches
						vert_vno_indices = {}		# mapping of vert data to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert data that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if uv_layer:
									# Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
									uv_coord = (uv_layer[face.index].uv[j][0], 1.0 - uv_layer[face.index].uv[j][1])
								
								if face.use_smooth:
									
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_coord )
									else:
										vert_data = (v.co[:], v.normal[:] )
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add(vert_data)
										
										co_no_uv_cache.append( vert_data )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									
									# BUGFIX: use the Y-flipped uv_coord here as well;
									# flat-shaded faces were previously written with
									# un-flipped UVs, inconsistent with the smooth branch.
									if uv_layer:
										vert_data = (v.co[:], face.normal[:], uv_coord)
									else:
										vert_data = (v.co[:], face.normal[:])
									
									# All face-vert-co-no are unique, we cannot
									# cache them
									co_no_uv_cache.append( vert_data )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							# For Mitsuba, we need to triangulate quad faces
							face_vert_indices.append( fvi[0:3] )
							ntris += 3
							if len(fvi) == 4:
								face_vert_indices.append(( fvi[0], fvi[2], fvi[3] ))
								ntris += 3
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ply_path, 'wb') as ply:
							ply.write(b'ply\n')
							ply.write(b'format binary_little_endian 1.0\n')
							ply.write(b'comment Created by MtsBlend 2.5 exporter for Mitsuba - www.mitsuba.net\n')
							
							# vert_index == the number of actual verts needed
							ply.write( ('element vertex %d\n' % vert_index).encode() )
							ply.write(b'property float x\n')
							ply.write(b'property float y\n')
							ply.write(b'property float z\n')
							
							ply.write(b'property float nx\n')
							ply.write(b'property float ny\n')
							ply.write(b'property float nz\n')
							
							if uv_layer:
								ply.write(b'property float s\n')
								ply.write(b'property float t\n')
							
							ply.write( ('element face %d\n' % int(ntris / 3)).encode() )
							ply.write(b'property list uchar uint vertex_indices\n')
							
							ply.write(b'end_header\n')
							
							# dump cached co/no/uv
							if uv_layer:
								for co,no,uv in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
									ply.write( struct.pack('<2f', *uv) )
							else:
								for co,no in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
							
							# dump face vert indices; every face is a triangle after triangulation
							for face in face_vert_indices:
								ply.write( struct.pack('<B', 3) )
								ply.write( struct.pack('<3I', *face) )
							
							del co_no_uv_cache
							del face_vert_indices
						
						MtsLog('Binary PLY file written: %s' % (ply_path))
					else:
						MtsLog('Skipping already exported PLY: %s' % mesh_name)
					
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ply_path)
					)
					if obj.data.mitsuba_mesh.normals == 'facenormals':
						shape_params.add_boolean('faceNormals', {'value' : 'true'})
					
					mesh_definition = (
						mesh_name,
						i,
						'ply',
						shape_params
					)
					# Only export Shapegroup and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj) and self.exportShapeDefinition(obj, mesh_definition):
						shape_params = ParamSet().add_reference(
							'id',
							'',
							mesh_name + '-shapegroup_%i' % (i)
						)
						
						mesh_definition = (
							mesh_name,
							i,
							'instance',
							shape_params
						)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
					
					mesh_definitions.append( mesh_definition )
					
				except InvalidGeometryException as err:
					MtsLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			MtsLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
示例#31
0
	def buildBinaryPLYMesh(self, obj):
		"""
		Convert supported blender objects into a MESH, and then split into parts
		according to vertex material assignment, and construct a mesh_name and
		ParamSet for each part which will become a LuxRender PLYShape statement
		wrapped within objectBegin..objectEnd or placed in an
		attributeBegin..attributeEnd scope, depending if instancing is allowed.
		The actual geometry will be dumped to a binary ply file.
		
		Returns a list of (mesh_name, material_index, 'plymesh', shape_params)
		tuples, one per exported material part.
		"""
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate faces by mat index
			ffaces_mats = {}
			mesh_faces = mesh.tessfaces if bpy.app.version > (2, 62, 1 ) else mesh.faces # bmesh
			for f in mesh_faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			number_of_mats = len(mesh.materials)
			if number_of_mats > 0:
				iterator_range = range(number_of_mats)
			else:
				iterator_range = [0]
			
			for i in iterator_range:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put PLY files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_plyfilename():
						# The serial number distinguishes repeated exports of the same mesh/mat combo
						ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ply_serial, i)
						ply_filename = '%s.ply' % bpy.path.clean_name(mesh_name)
						ply_path = '/'.join([sc_fr, ply_filename])
						return mesh_name, ply_path
					
					mesh_name, ply_path = make_plyfilename()
					
					# Ensure that all PLY files have unique names
					while self.ExportedPLYs.have(ply_path):
						mesh_name, ply_path = make_plyfilename()
					
					self.ExportedPLYs.add(ply_path, None)
					
					# skip writing the PLY file if the box is checked
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ply_path) or not (self.visibility_scene.luxrender_engine.partial_ply and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# BUGFIX: initialise uv_layer unconditionally — previously it
						# was left unbound (NameError) when UV textures existed but no
						# active data layer did. Also test the bmesh-aware uv_textures
						# collection selected above, not mesh.uv_textures.
						uv_layer = None
						uv_textures = mesh.tessface_uv_textures if bpy.app.version > (2, 62, 0 ) else mesh.uv_textures # bmesh
						if len(uv_textures) > 0:
							if uv_textures.active and uv_textures.active.data:
								uv_layer = uv_textures.active.data
						
						# Here we work out exactly which vert+normal combinations
						# we need to export. This is done first, and the export
						# combinations cached before writing to file because the
						# number of verts needed needs to be written in the header
						# and that number is not known before this is done.
						
						# Export data
						co_no_uv_cache = []
						face_vert_indices = {}		# mapping of face index to list of exported vert indices for that face
						
						# Caches
						vert_vno_indices = {}		# mapping of vert data to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert data that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if face.use_smooth:
									
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], v.normal[:])
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add( vert_data )
										
										co_no_uv_cache.append( vert_data )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									
									if uv_layer:
										vert_data = (v.co[:], face.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], face.normal[:])
									
									# All face-vert-co-no are unique, we cannot
									# cache them
									co_no_uv_cache.append( vert_data )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							face_vert_indices[face.index] = fvi
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ply_path, 'wb') as ply:
							ply.write(b'ply\n')
							ply.write(b'format binary_little_endian 1.0\n')
							ply.write(b'comment Created by LuxBlend 2.6 exporter for LuxRender - www.luxrender.net\n')
							
							# vert_index == the number of actual verts needed
							ply.write( ('element vertex %d\n' % vert_index).encode() )
							ply.write(b'property float x\n')
							ply.write(b'property float y\n')
							ply.write(b'property float z\n')
							
							ply.write(b'property float nx\n')
							ply.write(b'property float ny\n')
							ply.write(b'property float nz\n')
							
							if uv_layer:
								ply.write(b'property float s\n')
								ply.write(b'property float t\n')
							
							ply.write( ('element face %d\n' % len(ffaces_mats[i])).encode() )
							ply.write(b'property list uchar uint vertex_indices\n')
							
							ply.write(b'end_header\n')
							
							# dump cached co/no/uv
							if uv_layer:
								for co,no,uv in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
									ply.write( struct.pack('<2f', *uv) )
							else:
								for co,no in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
							
							# dump face vert indices (variable length; tris and quads)
							for face in ffaces_mats[i]:
								lfvi = len(face_vert_indices[face.index])
								ply.write( struct.pack('<B', lfvi) )
								ply.write( struct.pack('<%dI'%lfvi, *face_vert_indices[face.index]) )
							
							del co_no_uv_cache
							del face_vert_indices
						
						LuxLog('Binary PLY file written: %s' % (ply_path))
					else:
						LuxLog('Skipping already exported PLY: %s' % mesh_name)
					
					# Export the shape definition to LXO
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ply_path)
					)
					
					# Add subdiv etc options
					shape_params.update( obj.data.luxrender_mesh.get_paramset() )
					
					mesh_definition = (
						mesh_name,
						i,
						'plymesh',
						shape_params
					)
					mesh_definitions.append( mesh_definition )
					
					# Only export objectBegin..objectEnd and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj):
						self.exportShapeDefinition(obj, mesh_definition)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
				
				except InvalidGeometryException as err:
					LuxLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			LuxLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
示例#32
0
	def buildNativeMesh(self, obj):
		"""
		Convert supported blender objects into a MESH, and then split into parts
		according to vertex material assignment, and construct a serialized mesh
		file for Mitsuba.
		
		Returns a list of (mesh_name, material_index, 'serialized'|'instance',
		shape_params) tuples, one per exported material part.
		"""
		
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate faces by mat index
			ffaces_mats = {}
			mesh_faces = mesh.tessfaces
			for f in mesh_faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			if len(mesh.materials) > 0 and mesh.materials[0] != None:
				mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
			else:
				mats = [(0, None)]
			
			for i, mat in mats:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat-index combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put Serialized files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (self.mts_context.meshes_dir, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_serfilename():
						# The serial number distinguishes repeated exports of the same mesh/mat combo
						ser_serial = self.ExportedSERs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ser_serial, i)
						ser_filename = '%s.serialized' % bpy.path.clean_name(mesh_name)
						ser_path = '/'.join([sc_fr, ser_filename])
						return mesh_name, ser_path
					
					mesh_name, ser_path = make_serfilename()
					
					# Ensure that all Serialized files have unique names
					while self.ExportedSERs.have(ser_path):
						mesh_name, ser_path = make_serfilename()
					
					self.ExportedSERs.add(ser_path, None)
					
					# skip writing the Serialized file if the box is checked
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ser_path) or not (self.visibility_scene.mitsuba_engine.partial_export and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# BUGFIX: initialise uv_layer unconditionally. Previously it
						# was left unbound (NameError) when UV textures existed but
						# no active data layer did, and could carry a stale value
						# over from an earlier material iteration.
						uv_layer = None
						uv_textures = mesh.tessface_uv_textures
						if len(uv_textures) > 0:
							if uv_textures.active and uv_textures.active.data:
								uv_layer = uv_textures.active.data
						
						# Export data; doubles ('d') to match the double-precision
						# flag set in the serialized header below
						points = array.array('d',[])
						normals = array.array('d',[])
						uvs = array.array('d',[])
						ntris = 0
						face_vert_indices = array.array('I',[])		# list of face vert indices
						
						# Caches
						vert_vno_indices = {}		# mapping of vert data to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert data that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if uv_layer:
									# Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
									uv_coord = (uv_layer[face.index].uv[j][0], 1.0 - uv_layer[face.index].uv[j][1])
								
								if face.use_smooth:
									
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_coord )
									else:
										# empty uv slot keeps the tuple shape uniform
										vert_data = (v.co[:], v.normal[:], tuple() )
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add(vert_data)
										
										points.extend( vert_data[0] )
										normals.extend( vert_data[1] )
										uvs.extend( vert_data[2] )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									# all face-vert-co-no are unique, we cannot
									# cache them
									points.extend( v.co[:] )
									normals.extend( face.normal[:] )
									if uv_layer: uvs.extend( uv_coord )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							# For Mitsuba, we need to triangulate quad faces
							face_vert_indices.extend( fvi[0:3] )
							ntris += 3
							if len(fvi) == 4:
								face_vert_indices.extend(( fvi[0], fvi[2], fvi[3] ))
								ntris += 3
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ser_path, 'wb') as ser:
							# create mesh flags
							flags = 0
							# turn on double precision
							flags = flags | 0x2000
							# turn on vertex normals
							flags = flags | 0x0001
							# turn on uv layer
							if uv_layer:
								flags = flags | 0x0002
							
							# begin serialized mesh data (file format magic + version)
							ser.write(struct.pack('<HH', 0x041C, 0x0004))
							
							# encode serialized mesh; everything after the magic is zlib-compressed
							encoder = zlib.compressobj()
							ser.write(encoder.compress(struct.pack('<I', flags)))
							ser.write(encoder.compress(bytes(mesh_name + "_serialized\0",'latin-1')))
							ser.write(encoder.compress(struct.pack('<QQ', vert_index, int(ntris/3))))
							ser.write(encoder.compress(points.tostring()))
							ser.write(encoder.compress(normals.tostring()))
							if uv_layer:
								ser.write(encoder.compress(uvs.tostring()))
							ser.write(encoder.compress(face_vert_indices.tostring()))
							ser.write(encoder.flush())
							
							# trailing mesh-offset dictionary: offset of mesh 0, mesh count
							ser.write(struct.pack('<Q', 0))
							ser.write(struct.pack('<I', 1))
							# BUGFIX: removed redundant ser.close(); the with-statement
							# already closes the file on exit.
						
						MtsLog('Binary Serialized file written: %s' % (ser_path))
					else:
						MtsLog('Skipping already exported Serialized mesh: %s' % mesh_name)
					
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ser_path)
					)
					if obj.data.mitsuba_mesh.normals == 'facenormals':
						shape_params.add_boolean('faceNormals', {'value' : 'true'})
					
					mesh_definition = (
						mesh_name,
						i,
						'serialized',
						shape_params
					)
					# Only export Shapegroup and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj) and self.exportShapeDefinition(obj, mesh_definition):
						shape_params = ParamSet().add_reference(
							'id',
							'',
							mesh_name + '-shapegroup_%i' % (i)
						)
						
						mesh_definition = (
							mesh_name,
							i,
							'instance',
							shape_params
						)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
					
					mesh_definitions.append( mesh_definition )
					
				except InvalidGeometryException as err:
					MtsLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			MtsLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
示例#33
0
def exportLight(scene, lux_context, ob, matrix, portals=None):
	"""Export a single Blender lamp object to the LuxRender context.
	
	scene       -- the Blender scene being exported (light groups, integrator
	               and world settings are read from it)
	lux_context -- LuxRender export context; receives the light statements
	ob          -- the lamp object; ob.data is the Lamp datablock
	matrix      -- world transform matrix to use for the lamp
	portals     -- optional list of portal shapes to instance with the light
	
	Returns True if a light was exported, False if the lamp is disabled or
	its type is not handled (only SUN, HEMI, SPOT, POINT and AREA are).
	"""
	# Fix for mutable default argument: a shared [] default would be the
	# same list object across all calls. None sentinel preserves behavior.
	if portals is None:
		portals = []
	
	light = ob.data
	
	lg_gain = 1.0
	light_group = light.luxrender_lamp.lightgroup
	
	if not checkLightEnabled(scene, light):
		return False
	
	if light_group in scene.luxrender_lightgroups.lightgroups:
		lg_gain = scene.luxrender_lightgroups.lightgroups[light_group].gain
	
	if scene.luxrender_lightgroups.ignore or light.luxrender_lamp.lightgroup == "" or scene.luxrender_integrator.surfaceintegrator == 'exphotonmap': #Light groups don't work with exphotonmap
		light_group = 'default'
	
	# Params common to all light types
	light_params = ParamSet() \
		.add_float('gain', light.energy*lg_gain) \
		.add_float('importance', light.luxrender_lamp.importance)
	
	# Optional IES photometric data; only merged into the sub-types that
	# support it (SPOT, POINT, AREA below).
	ies_data = ParamSet()
	if light.luxrender_lamp.iesname != '':
		if light.library is not None:
			# Lamp comes from a linked library: resolve the IES path
			# relative to the library file, not the current blend.
			iespath = bpy.path.abspath(light.luxrender_lamp.iesname, light.library.filepath)
		else:
			iespath = light.luxrender_lamp.iesname
		ies_data = ParamSet().add_string('iesname', efutil.path_relative_to_export(iespath))
	
	# Params from light sub-types
	light_params.update( getattr(light.luxrender_lamp, 'luxrender_lamp_%s'%light.type.lower() ).get_paramset(ob) )
	
	# Other lamp params from lamp object
	if light.type == 'SUN':
		invmatrix = matrix.inverted()
		invmatrix = fix_matrix_order(invmatrix) # matrix indexing hack
		# Sun direction is the lamp's -Z axis, taken from the inverted matrix
		if light.luxrender_lamp.luxrender_lamp_sun.sunsky_type != 'sky': light_params.add_vector('sundir', (invmatrix[2][0], invmatrix[2][1], invmatrix[2][2]))
		attr_light(scene, lux_context, light, ob.name, light_group, light.luxrender_lamp.luxrender_lamp_sun.sunsky_type, light_params, portals=portals)
		return True
	
	if light.type == 'HEMI':
		hemi_type = light.luxrender_lamp.luxrender_lamp_hemi.type
		if hemi_type == 'distant':
			# Distant light shines along -Z in lamp space
			light_params.add_point('from', (0,0,0))
			light_params.add_point('to', (0,0,-1))
		
		attr_light(scene, lux_context, light, ob.name, light_group, hemi_type, light_params, transform=matrix_to_list(matrix, apply_worldscale=True), portals=portals)
		return True
	
	if light.type == 'SPOT':
		light_params.update( ies_data )
		# Blender's spot_size is the full cone angle; Lux wants the half-angle
		coneangle = degrees(light.spot_size) * 0.5
		conedeltaangle = degrees(light.spot_size * 0.5 * light.spot_blend)
		
		if light.luxrender_lamp.luxrender_lamp_spot.projector:
			light_type = 'projection'
			light_params.add_float('fov', coneangle*2)
		else:
			light_type = 'spot'
			light_params.add_point('from', (0,0,0))
			light_params.add_point('to', (0,0,-1))
			light_params.add_float('coneangle', coneangle)
			light_params.add_float('conedeltaangle', conedeltaangle)
		
		attr_light(scene, lux_context, light, ob.name, light_group, light_type, light_params, transform=matrix_to_list(matrix, apply_worldscale=True), portals=portals)
		return True

	if light.type == 'POINT':
		light_params.update( ies_data )
		#Here the use sphere option kicks in. If true, export an spherical area light (using Lux's geometric sphere primitive) rather than a true point light
		if light.luxrender_lamp.luxrender_lamp_point.usesphere and scene.luxrender_rendermode.renderer != 'hybrid': #no sphere primitives with hybrid!
			# Overwrite gain scaled by worldscale^2 to account for the area of the sphere
			light_params.add_float('gain', light.energy * lg_gain * (get_worldscale(as_scalematrix=False)**2))
			light_params.add_integer('nsamples', [light.luxrender_lamp.luxrender_lamp_point.nsamples]) #Add this in manually, it is not used for the true point and thus is not in the normal parameter set
			lux_context.attributeBegin(ob.name, file=Files.MAIN)
			lux_context.transform(matrix_to_list(matrix, apply_worldscale=True))
			lux_context.lightGroup(light_group, [])
			
			if light.luxrender_lamp.Exterior_volume != '':
				lux_context.exterior(light.luxrender_lamp.Exterior_volume)
			elif scene.luxrender_world.default_exterior_volume != '':
				lux_context.exterior(scene.luxrender_world.default_exterior_volume)
				
			if light.luxrender_lamp.luxrender_lamp_point.null_lamp:
				# Invisible 'null' material so the emitter sphere itself does
				# not shade like a regular surface
				mat_params = ParamSet()
	
				mat_params.add_string('type', 'null')
					
				lux_context.makeNamedMaterial(ob.name, mat_params)
				
				lux_context.namedMaterial(ob.name)
			
			lux_context.areaLightSource('area', light_params)

			# always remove blender object_scale to avoid corona-effect
			# NOTE(review): this indexes bpy.data.objects by the lamp *data*
			# name; if the object and its data are named differently this
			# reads the wrong object's scale -- ob.scale may be intended.
			x,y,z = bpy.data.objects[light.name].scale.copy() # get scale from blender
			lux_context.scale( 1/x, 1/y, 1/z)
		
			shape_params = ParamSet()
		
			shape_params.add_float('radius', [light.luxrender_lamp.luxrender_lamp_point.pointsize]) #Fetch point light size and use it for the sphere primitive's radius param
			shape_params.add_string('name', light.name)
			lux_context.shape('sphere', shape_params)
					
			for portal in portals:
				lux_context.portalInstance(portal)
		
			lux_context.attributeEnd()
		
		else: #export an actual point light
			light_params.add_point('from', (0,0,0)) # (0,0,0) is correct since there is an active Transform
			attr_light(scene, lux_context, light, ob.name, light_group, 'point', light_params, transform=matrix_to_list(matrix, apply_worldscale=True), portals=portals)
		return True
	
	if light.type == 'AREA':
		light_params.update( ies_data )
		# overwrite gain with a gain scaled by ws^2 to account for change in lamp area
		light_params.add_float('gain', light.energy * lg_gain * (get_worldscale(as_scalematrix=False)**2))
		lux_context.attributeBegin(ob.name, file=Files.MAIN)
		lux_context.transform(matrix_to_list(matrix, apply_worldscale=True))
		lux_context.lightGroup(light_group, [])
		
		if light.luxrender_lamp.Exterior_volume != '':
			lux_context.exterior(light.luxrender_lamp.Exterior_volume)
		elif scene.luxrender_world.default_exterior_volume != '':
			lux_context.exterior(scene.luxrender_world.default_exterior_volume)
					
		if light.luxrender_lamp.luxrender_lamp_area.null_lamp:
			# Invisible 'null' material for the emitter quad (see POINT above)
			mat_params = ParamSet()

			mat_params.add_string('type', 'null')
				
			lux_context.makeNamedMaterial(ob.name, mat_params)
			
			lux_context.namedMaterial(ob.name)
			
		lux_context.areaLightSource('area', light_params)
		
		areax = light.size
		
		if light.shape == 'SQUARE':
			areay = areax
		elif light.shape == 'RECTANGLE':
			areay = light.size_y
		else:
			areay = areax # not supported yet
		
		# Emitter quad centered on the origin in lamp space (CCW winding)
		points = [-areax/2.0, areay/2.0, 0.0, areax/2.0, areay/2.0, 0.0, areax/2.0, -areay/2.0, 0.0, -areax/2.0, -areay/2.0, 0.0]
		
		shape_params = ParamSet()
		
		if lux_context.API_TYPE == 'PURE':
			# ntris isn't really the number of tris!!
			shape_params.add_integer('ntris', 6)
			shape_params.add_integer('nvertices', 4)
		
		shape_params.add_integer('indices', [0, 1, 2, 0, 2, 3])
		shape_params.add_point('P', points)
		shape_params.add_string('name', light.name)
		
		lux_context.shape('trianglemesh', shape_params)
		
		for portal in portals:
			lux_context.portalInstance(portal)
		
		lux_context.attributeEnd()
		
		return True

	return False
示例#34
0
	def handler_Duplis_PATH(self, obj, *args, **kwargs):
		"""Export a 'HAIR' particle system on obj as a Mitsuba 'hair' shape.
		
		Writes all strand points to a .hair text file (one 'x y z' line per
		point, strands separated by a blank line) inside a frame-numbered
		subfolder, then registers the shape through exportShapeInstances().
		The particle system must be passed in kwargs['particle_system'];
		non-HAIR systems and systems whose modifier is disabled for
		rendering are skipped.
		"""
		if 'particle_system' not in kwargs.keys():
			MtsLog('ERROR: handler_Duplis_PATH called without particle_system')
			return
		
		psys = kwargs['particle_system']
		
		if psys.settings.type != 'HAIR':
			MtsLog('ERROR: handler_Duplis_PATH can only handle Hair particle systems ("%s")' % psys.name)
			return
		
		# Skip the whole export if any particle-system modifier is disabled
		# for rendering.
		# NOTE(review): 'mod' deliberately leaks out of this loop and is the
		# modifier handed to psys.co_hair() below; with several modifiers on
		# the object this is the *last* one iterated -- confirm that is the
		# modifier belonging to psys.
		for mod in obj.modifiers:
			if mod.type == 'PARTICLE_SYSTEM' and mod.show_render == False:
				return
				
		MtsLog('Exporting Hair system "%s"...' % psys.name)
		
		# particle_size is a diameter; convert to radius and scale down
		size = psys.settings.particle_size / 2.0 / 1000.0
		psys.set_resolution(self.geometry_scene, obj, 'RENDER')
		steps = 2**psys.settings.render_step
		num_parents = len(psys.particles)
		num_children = len(psys.child_particles)
		
		partsys_name = '%s_%s'%(obj.name, psys.name)
		det = DupliExportProgressThread()
		det.start(num_parents + num_children)
		
		# Put Hair files in frame-numbered subfolders to avoid
		# clobbering when rendering animations
		sc_fr = '%s/%s/%s/%05d' % (self.mts_context.meshes_dir, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
		if not os.path.exists( sc_fr ):
			os.makedirs(sc_fr)
		
		hair_filename = '%s.hair' % bpy.path.clean_name(partsys_name)
		hair_file_path = '/'.join([sc_fr, hair_filename])
		
		shape_params = ParamSet().add_string(
			'filename',
			efutil.path_relative_to_export(hair_file_path)
		).add_float(
			'radius',
			size
		)
		mesh_definitions = []
		mesh_definition = (
			psys.name,
			psys.settings.material - 1,
			'hair',
			shape_params
		)
		mesh_definitions.append( mesh_definition )
		self.exportShapeInstances(obj, mesh_definitions)
		
		# Strand points are stored in object-local space
		transform = obj.matrix_world.inverted()
		# Hoisted loop invariant: number of B-spline evaluation samples
		# (and denominator+1 of the parameter 'u' below).
		segments = math.trunc(math.pow(2, psys.settings.render_step))
		
		# 'with' guarantees the file is closed even if the export fails
		# part-way (the original leaked the handle on exceptions).
		with open(hair_file_path, 'w') as hair_file:
			for pindex in range(num_parents + num_children):
				det.exported_objects += 1
				points = []
				
				for step in range(0, steps+1):
					co = psys.co_hair(obj, mod, pindex, step)
					# co == (0,0,0) marks an unused/invalid sample
					if not co.length_squared == 0:
						points.append(transform*co)
				
				if psys.settings.use_hair_bspline:
					# Resample the strand as a quadratic B-spline
					temp = []
					degree = 2
					dimension = 3
					for i in range(segments):
						if i > 0:
							# Epsilon keeps u strictly below the end of the knot span
							u = i*(len(points)- degree)/(segments-1)-0.0000000000001
						else:
							u = i*(len(points)- degree)/(segments-1)
						temp.append(self.BSpline(points, dimension, degree, u))
					points = temp
				
				for p in points:
					hair_file.write('%f %f %f\n' % (p[0], p[1], p[2]))
				
				# blank line terminates one strand
				hair_file.write('\n')
		
		# Restore viewport resolution after rendering at full detail
		psys.set_resolution(self.geometry_scene, obj, 'PREVIEW')
		det.stop()
		det.join()
		
		MtsLog('... done, exported %s hairs' % det.exported_objects)