Example #1
0
 def exportVoxelData(self, objName, scene):
     """Export the smoke-simulation point cache of *objName* as voxel files.

     Ensures the per-frame output directory exists, locates the object's
     Blender smoke point-cache (.bphys) file for the current frame, and
     delegates the actual conversion to volumes.smoke_convertion().

     Returns the list of generated voxel file names, or an empty list if
     the named object cannot be found.
     """
     try:
         obj = bpy.data.objects[objName]
     except KeyError:
         # Previously a bare except fell through with obj = None, which then
         # crashed below with an AttributeError on obj.modifiers; fail early.
         MtsLog("ERROR : assigning the object")
         return []
     # path where to put the VOXEL FILES
     scene_filename = efutil.scene_filename()
     geo = bpy.path.clean_name(scene.name)
     sc_fr = '%s/%s/%s/%05d' % (self.meshes_dir, scene_filename, geo,
                                scene.frame_current)
     if not os.path.exists(sc_fr):
         os.makedirs(sc_fr)
     # path to the .bphys point-cache file ("blendcache_<blendname>" next to
     # the .blend; [:-6] strips the ".blend" extension)
     dir_name = os.path.dirname(
         bpy.data.filepath) + "/blendcache_" + os.path.basename(
             bpy.data.filepath)[:-6]
     cachname = ("/%s_%06d_00.bphys" %
                 (obj.modifiers['Smoke'].domain_settings.point_cache.name,
                  scene.frame_current))
     cachFile = dir_name + cachname
     volume = volumes()
     filenames = volume.smoke_convertion(MtsLog, cachFile, sc_fr,
                                         scene.frame_current, obj)
     return filenames
Example #2
0
    def set_filename(self, scene, name, LXV=True):
        """Open all per-scene output files, naming them after *name*.

        Opens the main (.lxs), materials (.lxm) and geometry (.lxo) files,
        plus the volumes (.lxv) file when LXV is true, then selects the
        main file as the current output target.

        Returns None.
        """
        # Start from a clean slate: close any handles from a previous run.
        for handle in self.files:
            if handle is not None:
                handle.close()

        self.files = []
        self.file_names = []

        self.file_names.append('%s.lxs' % name)
        self.files.append(open(self.file_names[Files.MAIN], 'w'))
        self.wf(Files.MAIN, '# Main Scene File')

        # Auxiliary files go into a per-frame subdirectory.
        subdir = '%s%s/%s/%05d' % (efutil.export_path,
                                   efutil.scene_filename(),
                                   bpy.path.clean_name(scene.name),
                                   scene.frame_current)
        if not os.path.exists(subdir):
            os.makedirs(subdir)

        # Materials and geometry follow the same open-and-stamp pattern.
        for file_id, stem, banner in (
                (Files.MATS, 'Materials.lxm', '# Materials File'),
                (Files.GEOM, 'Geometry.lxo', '# Geometry File')):
            self.file_names.append('%s/LuxRender-%s' % (subdir, stem))
            self.files.append(open(self.file_names[file_id], 'w'))
            self.wf(file_id, banner)

        # The volume slot is always reserved so list positions stay aligned
        # with the Files enum; it holds None when volume export is disabled.
        self.file_names.append('%s/LuxRender-Volumes.lxv' % subdir)
        if LXV:
            self.files.append(open(self.file_names[Files.VOLM], 'w'))
            self.wf(Files.VOLM, '# Volume File')
        else:
            self.files.append(None)

        self.set_output_file(Files.MAIN)
Example #3
0
	def exportVoxelData(self, objName, scene):
		"""Export the smoke-simulation point cache of *objName* as voxel files.

		Ensures the per-frame output directory exists, locates the object's
		Blender smoke point-cache (.bphys) file for the current frame, and
		delegates the actual conversion to volumes.smoke_convertion().

		Returns the list of generated voxel file names, or an empty list if
		the named object cannot be found.
		"""
		try:
			obj = bpy.data.objects[objName]
		except KeyError:
			# Previously a bare except fell through with obj = None, which
			# then crashed below with an AttributeError; fail early instead.
			MtsLog("ERROR : assigning the object")
			return []
		# path where to put the VOXEL FILES
		scene_filename = efutil.scene_filename()
		geo = bpy.path.clean_name(scene.name)
		sc_fr = '%s/%s/%s/%05d' % (self.meshes_dir, scene_filename, geo, scene.frame_current)
		if not os.path.exists(sc_fr):
			os.makedirs(sc_fr)
		# path to the .bphys point-cache file ("blendcache_<blendname>" next
		# to the .blend; [:-6] strips the ".blend" extension)
		dir_name = os.path.dirname(bpy.data.filepath) + "/blendcache_" + os.path.basename(bpy.data.filepath)[:-6]
		cachname = ("/%s_%06d_00.bphys" % (obj.modifiers['Smoke'].domain_settings.point_cache.name, scene.frame_current))
		cachFile = dir_name + cachname
		volume = volumes()
		filenames = volume.smoke_convertion(MtsLog, cachFile, sc_fr, scene.frame_current, obj)
		return filenames
Example #4
0
	def set_filename(self, scene, name, LXV=True):
		"""Open the main, materials, geometry and (optionally) volume output
		files, deriving their names from *name*, then make the main file
		the current output target.

		Returns None.
		"""
		# Close anything left open by an earlier export before reopening.
		for open_file in self.files:
			if open_file is not None:
				open_file.close()

		self.files = []
		self.file_names = []

		# Main scene file sits at the export root.
		self.file_names.append('%s.lxs' % name)
		self.files.append(open(self.file_names[Files.MAIN], 'w'))
		self.wf(Files.MAIN, '# Main Scene File')

		# Auxiliary files are grouped into a per-frame subdirectory.
		frame_subdir = '%s%s/%s/%05d' % (
			efutil.export_path,
			efutil.scene_filename(),
			bpy.path.clean_name(scene.name),
			scene.frame_current,
		)
		if not os.path.exists(frame_subdir):
			os.makedirs(frame_subdir)

		self.file_names.append('%s/LuxRender-Materials.lxm' % frame_subdir)
		self.files.append(open(self.file_names[Files.MATS], 'w'))
		self.wf(Files.MATS, '# Materials File')

		self.file_names.append('%s/LuxRender-Geometry.lxo' % frame_subdir)
		self.files.append(open(self.file_names[Files.GEOM], 'w'))
		self.wf(Files.GEOM, '# Geometry File')

		# The volume slot is always reserved so list positions stay aligned
		# with the Files enum; it holds None when volume export is disabled.
		self.file_names.append('%s/LuxRender-Volumes.lxv' % frame_subdir)
		if LXV:
			self.files.append(open(self.file_names[Files.VOLM], 'w'))
			self.wf(Files.VOLM, '# Volume File')
		else:
			self.files.append(None)

		self.set_output_file(Files.MAIN)
Example #5
0
File: Base.py  Project: theres1/blendigo
    def get_channel(self, property_group, channel_name, channel_prop_name):
        """Build the export dictionary for a single material channel.

        Reads dynamically named attributes from *property_group* (the
        attribute ``<channel_prop_name>_type`` selects the channel kind) and
        returns ``{channel_name: {...}}`` describing either a constant
        spectrum (rgb / uniform / blackbody), a texture reference
        (registering the texture in self.found_textures on first use), or an
        inline shader.  Returns an empty dict when nothing applies.

        Raises Exception when a referenced texture or image is missing.
        """
        d = {}

        channel_type = getattr(property_group, channel_prop_name + '_type')

        if channel_type == 'spectrum':
            spectrum_type = getattr(property_group,
                                    channel_prop_name + '_SP_type')
            if spectrum_type == 'rgb':
                # NOTE(review): the RGB property is multiplied by the gain
                # before being iterated -- assumes it is a vector type that
                # supports scalar multiplication; confirm against the
                # property definitions.
                d[channel_name] = {
                    'constant':
                    rgb([
                        i for i in getattr(property_group, channel_prop_name +
                                           '_SP_rgb') *
                        getattr(property_group, channel_prop_name +
                                '_SP_rgb_gain', 1.0)
                    ])
                }
            elif spectrum_type == 'uniform':
                # Uniform value is stored as mantissa * 10**exponent.
                d[channel_name] = {
                    'constant': uniform([
                        getattr(property_group, channel_prop_name + '_SP_uniform_val') * \
                        10**getattr(property_group, channel_prop_name + '_SP_uniform_exp')
                    ])
                }
            elif spectrum_type == 'blackbody':
                d[channel_name] = {
                    'constant':
                    blackbody([
                        getattr(property_group,
                                channel_prop_name + '_SP_blackbody_temp')
                    ], [
                        getattr(property_group,
                                channel_prop_name + '_SP_blackbody_gain')
                    ])
                }

        elif channel_type == 'texture':
            tex_name = getattr(property_group,
                               channel_prop_name + '_TX_texture')

            if tex_name:  # string is not empty
                # Register each channel's texture only once; the position in
                # found_texture_indices doubles as the exported texture index
                # (see the lookup at the end of this branch).
                if channel_prop_name not in self.found_texture_indices:
                    self.found_texture_indices.append(channel_prop_name)

                    if not tex_name in bpy.data.textures:
                        raise Exception(
                            "Texture \"%s\" assigned to material \"%s\" doesn't exist!"
                            % (tex_name, self.material_name))

                    tex_property_group = bpy.data.textures[
                        tex_name].indigo_texture

                    if tex_property_group.image_ref == 'file':
                        relative_texture_path = efutil.path_relative_to_export(
                            getattr(tex_property_group, 'path'))
                    elif tex_property_group.image_ref == 'blender':
                        if not tex_property_group.image in bpy.data.images:
                            raise Exception(
                                "Error with image reference on texture \"%s\""
                                % tex_name)

                        img = bpy.data.images[tex_property_group.image]

                        # Generated images have no filepath; invent one.
                        if img.filepath == '':
                            bl_img_path = 'blendigo_extracted_image_%s.png' % bpy.path.clean_name(
                                tex_name)
                        else:
                            bl_img_path = img.filepath

                        # Non-file or packed images must be extracted: render
                        # them out into the per-frame export directory first.
                        if img.source != 'FILE' or img.packed_file:
                            bl_file_formatted = os.path.splitext(
                                os.path.basename(bl_img_path))[0]
                            bl_file_formatted = '%s.%s' % (
                                bl_file_formatted,
                                self.scene.render.image_settings.file_format)
                            bl_img_path = os.path.join(
                                efutil.export_path, efutil.scene_filename(),
                                bpy.path.clean_name(self.scene.name),
                                '%05d' % self.scene.frame_current,
                                bl_file_formatted)
                            img.save_render(bl_img_path, self.scene)

                        relative_texture_path = efutil.path_relative_to_export(
                            bl_img_path)

                    # A/B/C mapping values come either from the channel's own
                    # _TX_ properties or from the texture itself.
                    if not getattr(property_group,
                                   channel_prop_name + '_TX_abc_from_tex'):
                        abc_property_group = property_group
                        abc_prefix = channel_prop_name + '_TX_'
                    else:
                        abc_property_group = tex_property_group
                        abc_prefix = ''

                    uv_set_name = getattr(property_group,
                                          channel_prop_name + '_TX_uvset')
                    try:
                        uv_set_index = self.obj.data.uv_textures.keys().index(
                            uv_set_name)
                    except:
                        # Named UV set not found (or object has no UV data):
                        # silently fall back to the first UV set.
                        uv_set_index = 0

                    self.found_textures.append({
                        'uv_set_index': [
                            uv_set_index
                        ],  #getattr(property_group, channel_prop_name + '_TX_uv_index')],
                        'path': [relative_texture_path],
                        'exponent': [getattr(tex_property_group, 'gamma')],
                        'a': [getattr(abc_property_group, abc_prefix + 'A')],
                        'b': [getattr(abc_property_group, abc_prefix + 'B')],
                        'c': [getattr(abc_property_group, abc_prefix + 'C')],
                        'smooth': [
                            str(
                                getattr(property_group, channel_prop_name +
                                        '_TX_smooth')).lower()
                        ]
                    })

                d[channel_name] = {
                    'texture': {
                        'texture_index':
                        [self.found_texture_indices.index(channel_prop_name)],
                    }
                }

        elif channel_type == 'shader':
            try:
                shader_name = getattr(property_group,
                                      channel_prop_name + '_SH_text')
                if not shader_name in bpy.data.texts:
                    raise Exception(
                        'Referenced Text "%s" for shader on material "%s" not found'
                        % (shader_name, self.material_name))

                shader_text = '\n' + bpy.data.texts[shader_name].as_string()
                d[channel_name] = {
                    'shader': {
                        'shader': xml_cdata(shader_text)
                    }
                }
            except:
                # NOTE(review): bare except silently drops the channel on any
                # failure, including the missing-text Exception raised above.
                pass

        return d
Example #6
0
    def execute(self, master_scene):
        """Export *master_scene* (plus its background set, if any) to an
        Indigo .igs scene file.

        Builds the scene XML in order: render settings, tonemapping,
        default materials, per-frame geometry (with optional motion-blur
        keyframes), background light, camera, light layers, lamps, media,
        used materials and meshes.  Object instances are written to a
        separate objects.igs file and pulled in via an <include> element.
        Optionally writes the selected OpenCL compute devices into
        Indigo's settings file.

        Returns {'FINISHED'} on success or {'CANCELLED'} on any error.
        """
        try:
            if master_scene is None:
                #indigo_log('Scene context is invalid')
                raise Exception('Scene context is invalid')

            #------------------------------------------------------------------------------
            # Init stats
            if self.verbose: indigo_log('Indigo export started ...')
            export_start_time = time.time()

            igs_filename = self.check_output_path(self.properties.directory)
            export_scenes = [master_scene.background_set, master_scene]

            if self.verbose: indigo_log('Export render settings')

            #------------------------------------------------------------------------------
            # Start with render settings, this also creates the root <scene>
            self.scene_xml = master_scene.indigo_engine.build_xml_element(
                master_scene)

            #------------------------------------------------------------------------------
            # Tonemapping
            self.export_tonemapping(master_scene)

            #------------------------------------------------------------------------------
            # Materials - always export the default clay material and a null material
            self.export_default_materials(master_scene)

            # Initialise values used for motion blur export.
            fps = master_scene.render.fps / master_scene.render.fps_base
            start_frame = master_scene.frame_current
            # NOTE(review): assumes a non-zero camera exposure -- a zero
            # value would raise ZeroDivisionError here.
            exposure = 1 / master_scene.camera.data.indigo_camera.exposure
            camera = (master_scene.camera, [])

            # Make a relative igs and mesh dir path like "TheAnimation/00002"
            rel_mesh_dir = efutil.scene_filename()
            rel_frame_dir = '%s/%05i' % (
                rel_mesh_dir, start_frame
            )  #bpy.path.clean_name(master_scene.name),
            mesh_dir = '/'.join([efutil.export_path, rel_mesh_dir])
            frame_dir = '/'.join([efutil.export_path, rel_frame_dir])

            # Initialise GeometryExporter.
            geometry_exporter = geometry.GeometryExporter()
            geometry_exporter.mesh_dir = mesh_dir
            geometry_exporter.rel_mesh_dir = rel_mesh_dir
            geometry_exporter.skip_existing_meshes = master_scene.indigo_engine.skip_existing_meshes
            geometry_exporter.verbose = self.verbose

            # Make frame_dir directory if it does not exist yet.
            if not os.path.exists(frame_dir):
                os.makedirs(frame_dir)

            if master_scene.indigo_engine.motionblur:
                # When motion blur is on, calculate the number of frames covered by the exposure time
                start_time = start_frame / fps
                end_time = start_time + exposure
                end_frame = math.ceil(end_time * fps)

                # end_frame + 1 because range is max excl
                frame_list = [x for x in range(start_frame, end_frame + 1)]
            else:
                frame_list = [start_frame]

            #indigo_log('frame_list: %s'%frame_list)

            #------------------------------------------------------------------------------
            # Process all objects in all frames in all scenes.
            for cur_frame in frame_list:
                # Calculate normalised time for keyframes.
                normalised_time = (cur_frame - start_frame) / fps / exposure
                if self.verbose:
                    indigo_log('Processing frame: %i time: %f' %
                               (cur_frame, normalised_time))

                geometry_exporter.normalised_time = normalised_time

                if master_scene.indigo_engine.motionblur:
                    bpy.context.scene.frame_set(
                        cur_frame, 0.0
                    )  # waaay too slow for many objects (probably dupli_list gets recreated). Obligatory for motion blur.
                else:
                    bpy.context.scene.frame_current = cur_frame  # is it enough?

                # Add Camera matrix.
                camera[1].append(
                    (normalised_time, camera[0].matrix_world.copy()))

                for ex_scene in export_scenes:
                    if ex_scene is None: continue

                    if self.verbose:
                        indigo_log('Processing objects for scene %s' %
                                   ex_scene.name)
                    geometry_exporter.iterateScene(ex_scene)

            # Export background light if no light exists.
            self.export_default_background_light(
                geometry_exporter.isLightingValid())

            #------------------------------------------------------------------------------
            # Export camera
            if self.verbose: indigo_log('Exporting camera')
            self.scene_xml.append(
                camera[0].data.indigo_camera.build_xml_element(
                    master_scene, camera[1]))
            #------------------------------------------------------------------------------
            # Export light layers
            from ..export.light_layer import light_layer_xml
            # TODO:
            # light_layer_count was supposed to export correct indices when there
            # is a background_set with emitters on light layers -
            # however, the re-indexing at material export time is non-trivial for
            # now and probably not worth it.
            #light_layer_count = 0
            xml_render_settings = self.scene_xml.find('renderer_settings')
            for ex_scene in export_scenes:
                if ex_scene is None: continue

                # Light layer names
                lls = ex_scene.indigo_lightlayers.enumerate()

                for layer_name, idx in sorted(lls.items(), key=lambda x: x[1]):
                    if self.verbose:
                        indigo_log('Light layer %i: %s' % (idx, layer_name))
                    xml_render_settings.append(
                        light_layer_xml().build_xml_element(
                            ex_scene, idx, layer_name))

            if self.verbose: indigo_log('Exporting lamps')

            # use special n==1 case due to bug in indigo <sum> material
            num_lamps = len(geometry_exporter.ExportedLamps)

            if num_lamps == 1:
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_mat = ET.Element(
                    'background_material')
                scene_background_settings.append(scene_background_settings_mat)

                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        scene_background_settings_mat.append(xml)

                self.scene_xml.append(scene_background_settings)

            if num_lamps > 1:

                # Multiple lamps are combined through a <sum> material.
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_fmt = {
                    'background_material': {
                        'material': {
                            'name': ['background_material'],
                            'sum': {
                                'mat': xml_multichild()
                            }
                        }
                    }
                }

                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        self.scene_xml.append(xml)

                    scene_background_settings_fmt['background_material'][
                        'material']['sum']['mat'].append({
                            'mat_name': [ck],
                            'weight': {
                                'constant': [1]
                            }
                        })
                scene_background_settings_obj = xml_builder()
                scene_background_settings_obj.build_subelements(
                    None, scene_background_settings_fmt,
                    scene_background_settings)
                self.scene_xml.append(scene_background_settings)

            #------------------------------------------------------------------------------
            # Export Medium
            from ..export.materials.medium import medium_xml
            # TODO:
            # check if medium is currently used by any material and add
            # basic medium for SpecularMaterial default

            for ex_scene in export_scenes:
                if ex_scene is None: continue

                indigo_material_medium = ex_scene.indigo_material_medium
                medium = indigo_material_medium.medium

                if len(indigo_material_medium.medium.items()) == 0: continue

                for medium_name, medium_data in medium.items():

                    medium_index = ex_scene.indigo_material_medium.medium.find(
                        medium_name)  # more precise if same name

                    indigo_log('Exporting medium: %s ' % (medium_name))
                    self.scene_xml.append(
                        medium_xml(ex_scene, medium_name, medium_index,
                                   medium_data).build_xml_element(
                                       ex_scene, medium_name, medium_data))
                # NOTE(review): medium_name here is simply the last key from
                # the loop above; the log line repeats the final medium.
                indigo_log('Exporting Medium: %s ' % (medium_name))
                # TODO:
                # check for unused medium
            # The fallback "basic" medium is always appended, independent of
            # whether any scene defined media above.
            basic_medium = ET.fromstring("""
                                <medium>
                                   <uid>4294967294</uid>
		                             <name>basic</name>
			                           <precedence>10</precedence>
			                             <basic>
				                           <ior>1.5</ior>
				                           <cauchy_b_coeff>0</cauchy_b_coeff>
				                           <max_extinction_coeff>1</max_extinction_coeff>
				                           <absorption_coefficient>
					                         <constant>
						                      <uniform>
							                   <value>0</value>
						                      </uniform>
					                         </constant>
				                           </absorption_coefficient>
			                             </basic>
	                            </medium>   
                         """)

            self.scene_xml.append(basic_medium)

            #------------------------------------------------------------------------------
            # Export used materials.
            if self.verbose: indigo_log('Exporting used materials')
            material_count = 0
            for ck, ci in geometry_exporter.ExportedMaterials.items():
                for xml in ci:
                    self.scene_xml.append(xml)
                material_count += 1
            if self.verbose:
                indigo_log('Exported %i materials' % material_count)

            # Export used meshes.
            if self.verbose: indigo_log('Exporting meshes')
            mesh_count = 0
            for ck, ci in geometry_exporter.MeshesOnDisk.items():
                mesh_name, xml = ci
                self.scene_xml.append(xml)
                mesh_count += 1
            if self.verbose: indigo_log('Exported %i meshes' % mesh_count)

            #------------------------------------------------------------------------------
            # We write object instances to a separate file
            oc = 0
            scene_data_xml = ET.Element('scenedata')
            for ck, ci in geometry_exporter.ExportedObjects.items():
                obj_type = ci[0]

                if obj_type == 'OBJECT':
                    obj = ci[1]
                    mesh_name = ci[2]
                    obj_matrices = ci[3]
                    scene = ci[4]

                    xml = geometry.model_object(scene).build_xml_element(
                        obj, mesh_name, obj_matrices)
                else:
                    xml = ci[1]
                scene_data_xml.append(xml)
                oc += 1

            objects_file_name = '%s/objects.igs' % (frame_dir)
            objects_file = open(objects_file_name, 'wb')
            ET.ElementTree(element=scene_data_xml).write(objects_file,
                                                         encoding='utf-8')
            objects_file.close()
            # indigo_log('Exported %i object instances to %s' % (oc,objects_file_name))
            scene_data_include = include.xml_include(
                efutil.path_relative_to_export(objects_file_name))
            self.scene_xml.append(
                scene_data_include.build_xml_element(master_scene))

            #------------------------------------------------------------------------------
            # Write formatted XML for settings, materials and meshes
            out_file = open(igs_filename, 'w')
            xml_str = ET.tostring(self.scene_xml, encoding='utf-8').decode()

            # substitute back characters protected from entity encoding in CDATA nodes
            xml_str = xml_str.replace('{_LESSTHAN_}', '<')
            xml_str = xml_str.replace('{_GREATERTHAN_}', '>')

            xml_dom = MD.parseString(xml_str)
            xml_dom.writexml(out_file,
                             addindent='\t',
                             newl='\n',
                             encoding='utf-8')
            out_file.close()

            #------------------------------------------------------------------------------
            # Computing devices
            if len(master_scene.indigo_engine.render_devices):
                from ..core.util import getSettingsPath
                settings_file = getSettingsPath()

                outermark = \
                """<selected_opencl_devices>
                {}
                </selected_opencl_devices>"""

                devicemark = \
                """<device>
                        <device_name><![CDATA[{}]]></device_name>
                        <vendor_name><![CDATA[{}]]></vendor_name>
                        <id>{}</id>
                    </device>"""
                devices = ''
                for d in bpy.context.scene.indigo_engine.render_devices:
                    if d.use:
                        devices += devicemark.format(d.device, d.vendor, d.id)
                selected_devices_xml = outermark.format(devices)

                if os.path.exists(settings_file):
                    # settings file exists
                    with open(settings_file, 'r') as f:
                        xml_string = f.read()

                    import re
                    pattern = r'<settings>.*</settings>'
                    if re.search(pattern, xml_string,
                                 re.DOTALL | re.IGNORECASE):
                        # <settings> tag exists (file seems to be correct)
                        pattern = r'<selected_opencl_devices>.*</selected_opencl_devices>'
                        if re.search(pattern, xml_string,
                                     re.DOTALL | re.IGNORECASE):
                            # computing devices already exists
                            xml_string = re.sub(pattern,
                                                selected_devices_xml,
                                                xml_string,
                                                flags=re.DOTALL
                                                | re.IGNORECASE)
                        else:
                            # computing devices does not exists yet
                            xml_string = re.sub(
                                r'</settings>',
                                selected_devices_xml + "</settings>",
                                xml_string,
                                flags=re.DOTALL | re.IGNORECASE)
                    else:
                        # settings tag does not exist. create new body
                        xml_string =\
                """<?xml version="1.0" encoding="utf-8"?>
                <settings>
                    {}
                </settings>""".format(selected_devices_xml)

                else:
                    # create new file
                    xml_string =\
                """<?xml version="1.0" encoding="utf-8"?>
                <settings>
                    {}
                </settings>""".format(selected_devices_xml)

                with open(settings_file, 'w') as f:
                    f.write(xml_string)

            #------------------------------------------------------------------------------
            # Print stats
            export_end_time = time.time()
            if self.verbose:
                indigo_log('Total mesh export time: %f seconds' %
                           (geometry_exporter.total_mesh_export_time))
            indigo_log('Export finished; took %f seconds' %
                       (export_end_time - export_start_time))

            # Reset to start_frame.
            if len(frame_list) > 1:
                bpy.context.scene.frame_set(start_frame)

            return {'FINISHED'}

        except Exception as err:
            # Top-level boundary: log the failure and report CANCELLED to
            # Blender; re-raise only when debugging via B25_OBJECT_ANALYSIS.
            indigo_log('%s' % err, message_type='ERROR')
            if os.getenv('B25_OBJECT_ANALYSIS', False):
                raise err
            return {'CANCELLED'}
Example #7
0
    def render(self, scene):
        """Export *scene* and launch an external Sunflow render process.

        Resolves the output directory, runs the exporter, builds the Java
        command line from the config file, then blocks until the Sunflow
        subprocess exits or the user cancels, periodically refreshing the
        Blender framebuffer via sunflowFilmDisplay.  Returns None.
        """
        if self is None or scene is None:
            sunflowLog('ERROR: Scene is missing!')
            return

        scene.render.use_placeholder = False

        with self.render_lock:  # just render one thing at a time

            if scene.name == 'preview':
                self.render_preview(scene)
                return

            # Resolve the output directory from the render filepath.
            scene_path = efutil.filesystem_path(scene.render.filepath)
            if os.path.isdir(scene_path):
                output_dir = scene_path
            else:
                output_dir = os.path.dirname(scene_path)

            output_dir = os.path.abspath(
                os.path.join(output_dir, efutil.scene_filename()))
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
            #----------- sunflowLog('Sunflow: Current directory = "%s"' % output_dir)

            #--------------------------------------- if DEBUG: pydevd.settrace()

            if not getExporter(output_dir, scene.name, scene.frame_current):
                return

            if self.is_animation:
                return

            arguments = self.getCommandLineArgs(scene)

            # External tool locations and JVM memory from the config file.
            jarpath = efutil.find_config_value('sunflow', 'defaults',
                                               'jar_path', '')
            javapath = efutil.find_config_value('sunflow', 'defaults',
                                                'java_path', '')
            memory = "-Xmx%sm" % efutil.find_config_value(
                'sunflow', 'defaults', 'memoryalloc', '')
            image_name = "%s.%03d.%s" % (scene.name, scene.frame_current,
                                         arguments['format'])

            if scene.sunflow_performance.useRandom:
                image_name = self.check_randomname(output_dir, image_name)

            sunflow_file = "%s.%03d.sc" % (scene.name, scene.frame_current)
            image_file = os.path.abspath(os.path.join(output_dir, image_name))
            sc_file_path = os.path.abspath(
                os.path.join(output_dir, sunflow_file))

            cmd_line = [javapath, memory, '-server', '-jar', jarpath]
            final_line = ['-o', image_file, sc_file_path]

            # Flatten remaining non-empty argument values into CLI switches.
            extra = []
            for key in arguments:
                if key == 'format':
                    continue
                if arguments[key] != '':
                    extra.extend(arguments[key].split())

            if arguments['format'] != 'png':
                extra.append('-nogui')

            cmd_line.extend(extra)
            cmd_line.extend(final_line)

            sunflow_process = subprocess.Popen(cmd_line)
            refresh_interval = 5

            framebuffer_thread = sunflowFilmDisplay()
            framebuffer_thread.set_kick_period(refresh_interval)
            framebuffer_thread.begin(self, image_file, resolution(scene))
            render_update_timer = None
            # `is None` replaces `== None` (identity test); is_alive()
            # replaces isAlive(), which was removed in Python 3.9.
            while sunflow_process.poll() is None and not self.test_break():
                render_update_timer = threading.Timer(1,
                                                      self.process_wait_timer)
                render_update_timer.start()
                if render_update_timer.is_alive(): render_update_timer.join()

            # If we exit the wait loop (user cancelled) and sunflow is still
            # running, terminate it.  Popen.terminate() sends SIGTERM on
            # POSIX and TerminateProcess on Windows, matching the intent of
            # the old subprocess.signal.SIGTERM usage without relying on
            # subprocess's private re-export of the signal module.
            if sunflow_process.poll() is None:
                sunflow_process.terminate()

            # Stop updating the render result and load the final image
            framebuffer_thread.stop()
            framebuffer_thread.join()

            if sunflow_process.poll() is not None and sunflow_process.returncode != 0:
                sunflowLog("Sunflow: Rendering failed -- check the console")
            else:
                framebuffer_thread.kick(render_end=True)
            framebuffer_thread.shutdown()
예제 #8
0
    def render(self, context):
        '''
        Export the frame(s) to an Indigo .igs scene file (plus an .igq
        queue file when rendering an animation) and conditionally launch
        an Indigo process to render the result.

        context: the Blender scene being rendered.

        Returns None in all cases; failures are reported via self.report()
        and indigo_log().
        '''

        with RENDERENGINE_indigo.render_lock:  # Just render one thing at a time.
            # Reset per-render state.
            self.renderer = None
            self.message_thread = None
            self.stats_thread = None
            self.framebuffer_thread = None
            self.render_update_timer = None
            self.rendering = False

            # force scene update to current rendering frame
            # Not sure why - Yves
            #context.frame_set(context.frame_current)

            #------------------------------------------------------------------------------
            # Export the Scene

            # Get the frame path.
            frame_path = efutil.filesystem_path(context.render.frame_path())

            # Get the filename for the frame sans extension.
            image_out_path = os.path.splitext(frame_path)[0]

            # Generate the name for the scene file(s).
            if context.indigo_engine.use_output_path == True:
                # Get the output path from the frame path.
                output_path = os.path.dirname(frame_path)

                # Generate the output filename
                output_filename = '%s.%s.%05i.igs' % (
                    efutil.scene_filename(), bpy.path.clean_name(
                        context.name), context.frame_current)
            else:
                # Get export path from the indigo_engine.
                export_path = efutil.filesystem_path(
                    context.indigo_engine.export_path)

                # Get the directory name from the output path.
                output_path = os.path.dirname(export_path)

                # Get the filename from the output path and remove the extension.
                output_filename = os.path.splitext(
                    os.path.basename(export_path))[0]

                # Count contiguous # chars and replace them with the frame number.
                # If the hash count is 0 and we are exporting an animation, append the frame numbers.
                hash_count = util.count_contiguous('#', output_filename)
                if hash_count != 0:
                    output_filename = output_filename.replace(
                        '#' * hash_count,
                        ('%%0%0ii' % hash_count) % context.frame_current)
                elif self.is_animation:
                    output_filename = output_filename + (
                        '%%0%0ii' % 4) % context.frame_current

                # Add .igs extension.
                output_filename += '.igs'

            # The full path of the exported scene file.
            exported_file = '/'.join([output_path, output_filename])

            # Create output_path if it does not exist.
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            # If an animation is rendered, write an indigo queue file (.igq).
            if self.is_animation:
                igq_filename = '%s/%s.%s.igq' % (
                    output_path, efutil.scene_filename(),
                    bpy.path.clean_name(context.name))

                if context.frame_current == context.frame_start:
                    # Start a new igq file.
                    igq_file = open(igq_filename, 'w')
                    igq_file.write(
                        '<?xml version="1.0" encoding="utf-8" standalone="no" ?>\n'
                    )
                    igq_file.write('<render_queue>\n')
                else:
                    # Append to existing igq.
                    igq_file = open(igq_filename, 'a')

                # Seed the per-frame render seed from the frame number so
                # re-exports of the same frame are reproducible.
                rnd = random.Random()
                rnd.seed(context.frame_current)

                # Write igq item.
                igq_file.write('\t<item>\n')
                igq_file.write('\t\t<scene_path>%s</scene_path>\n' %
                               exported_file)
                igq_file.write('\t\t<halt_time>%d</halt_time>\n' %
                               context.indigo_engine.halttime)
                igq_file.write('\t\t<halt_spp>%d</halt_spp>\n' %
                               context.indigo_engine.haltspp)
                igq_file.write('\t\t<output_path>%s</output_path>\n' %
                               image_out_path)
                igq_file.write('\t\t<seed>%s</seed>\n' %
                               rnd.randint(1, 1000000))
                igq_file.write('\t</item>\n')

                # If this is the last frame, write the closing tag.
                if context.frame_current == context.frame_end:
                    igq_file.write('</render_queue>\n')

                igq_file.close()

                # Calculate the progress by frame with frame range (fr) and frame offset (fo).
                # Guard fr == 0 (single-frame range) to avoid ZeroDivisionError.
                fr = context.frame_end - context.frame_start
                fo = context.frame_current - context.frame_start
                self.update_progress(fo / fr if fr != 0 else 1.0)

            scene_writer = indigo.operators._Impl_OT_indigo(
                directory=output_path,
                filename=output_filename).set_report(self.report)

            # Write the scene file.
            export_result = scene_writer.execute(context)

            # Return if the export didn't finish.
            if 'FINISHED' not in export_result:
                return

            #------------------------------------------------------------------------------
            # Update indigo defaults config file .
            config_updates = {
                'auto_start': context.indigo_engine.auto_start,
                'console_output': context.indigo_engine.console_output
            }

            if context.indigo_engine.use_console:
                indigo_path = getConsolePath(context)
            else:
                indigo_path = getGuiPath(context)

            if os.path.exists(indigo_path):
                config_updates['install_path'] = getInstallPath(context)

            try:
                for k, v in config_updates.items():
                    efutil.write_config_value('indigo', 'defaults', k, v)
            except Exception as err:
                indigo_log('Saving indigo config failed: %s' % err,
                           message_type='ERROR')

            # Make sure that the Indigo we are going to launch is at least as
            # new as the exporter version.
            version_ok = True
            if not context.indigo_engine.skip_version_check:
                iv = getVersion(context)
                for i in range(3):
                    version_ok &= iv[i] >= bl_info['version'][i]

            #------------------------------------------------------------------------------
            # Conditionally Spawn Indigo.
            if context.indigo_engine.auto_start:

                exe_path = efutil.filesystem_path(indigo_path)

                if not os.path.exists(exe_path):
                    print("Failed to find indigo at '" + str(exe_path) + "'")
                    msg = "Failed to find indigo at '" + str(exe_path) + "'."
                    # BUGFIX: was `msg + "\n  "`, a no-op expression -- the
                    # separator was never actually appended to the message.
                    msg += "\n  "
                    msg += "Please make sure you have Indigo installed, and that the path to indigo in the 'Indigo Render Engine Settings' is set correctly."
                    self.report({'ERROR'}, msg)

                #if not version_ok:
                #indigo_log("Unsupported version v%s; Cannot start Indigo with this scene" % ('.'.join(['%s'%i for i in iv])), message_type='ERROR')
                #return

                # if it's an animation, don't execute until final frame
                if self.is_animation and context.frame_current != context.frame_end:
                    return

                # if animation and final frame, launch queue instead of single frame
                if self.is_animation and context.frame_current == context.frame_end:
                    exported_file = igq_filename
                    indigo_args = [exe_path, exported_file]
                else:
                    indigo_args = [
                        exe_path, exported_file, '-o', image_out_path + '.png'
                    ]

                # Set master or working master command line args.
                if context.indigo_engine.network_mode == 'master':
                    indigo_args.extend(['-n', 'm'])
                elif context.indigo_engine.network_mode == 'working_master':
                    indigo_args.extend(['-n', 'wm'])

                # Set port arg if network rendering is enabled.
                if context.indigo_engine.network_mode in [
                        'master', 'working_master'
                ]:
                    indigo_args.extend(
                        ['-p', '%i' % context.indigo_engine.network_port])

                # Set hostname and port arg.
                if context.indigo_engine.network_mode == 'manual':
                    indigo_args.extend([
                        '-h',
                        '%s:%i' % (context.indigo_engine.network_host,
                                   context.indigo_engine.network_port)
                    ])

                # indigo_log("Starting indigo: %s" % indigo_args)

                # If we're starting a console or should wait for the process, listen to the output.
                if context.indigo_engine.use_console or context.indigo_engine.wait_for_process:
                    f_stdout = subprocess.PIPE
                else:
                    f_stdout = None

                # Launch the Indigo process.
                indigo_proc = subprocess.Popen(indigo_args, stdout=f_stdout)
                indigo_pid = indigo_proc.pid
                indigo_log('Started Indigo process, PID: %i' % indigo_pid)

                # Wait for the render to finish if we use the console or should wait for the process.
                if context.indigo_engine.use_console or context.indigo_engine.wait_for_process:
                    while indigo_proc.poll() is None:
                        indigo_proc.communicate()
                        time.sleep(2)

                    indigo_proc.wait()
                    if not indigo_proc.stdout.closed:
                        indigo_proc.communicate()
                    if indigo_proc.returncode == -1:
                        sys.exit(-1)

            else:
                indigo_log("Scene was exported to %s" % exported_file)

            #------------------------------------------------------------------------------
            # Finished
            return
예제 #9
0
	def buildBinaryPLYMesh(self, obj):
		"""
		Convert supported blender objects into a MESH, and then split into parts
		according to vertex material assignment, and construct a mesh_name and
		ParamSet for each part which will become a LuxRender PLYShape statement
		wrapped within objectBegin..objectEnd or placed in an
		attributeBegin..attributeEnd scope, depending if instancing is allowed.
		The actual geometry will be dumped to a binary ply file.
		
		Returns a list of (mesh_name, material_index, 'plymesh', ParamSet)
		tuples, one per exported material part (empty on failure).
		"""
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate faces by mat index
			ffaces_mats = {}
			for f in mesh.faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			number_of_mats = len(mesh.materials)
			if number_of_mats > 0:
				iterator_range = range(number_of_mats)
			else:
				iterator_range = [0]
			
			for i in iterator_range:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put PLY files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_plyfilename():
						ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ply_serial, i)
						ply_filename = '%s.ply' % bpy.path.clean_name(mesh_name)
						ply_path = '/'.join([sc_fr, ply_filename])
						return mesh_name, ply_path
					
					mesh_name, ply_path = make_plyfilename()
					
					# Ensure that all PLY files have unique names
					while self.ExportedPLYs.have(ply_path):
						mesh_name, ply_path = make_plyfilename()
					
					self.ExportedPLYs.add(ply_path, None)
					
					# skip writing the PLY file if the box is checked
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ply_path) or not (self.visibility_scene.luxrender_engine.partial_ply and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# BUGFIX: uv_layer was left unbound when uv_textures was
						# non-empty but had no active layer data; default to None
						# so the later `if uv_layer:` checks are always safe.
						uv_layer = None
						uv_textures = get_uv_textures(mesh)
						if len(uv_textures) > 0 and uv_textures.active and uv_textures.active.data:
							uv_layer = uv_textures.active.data
						
						# Here we work out exactly which vert+normal combinations
						# we need to export. This is done first, and the export
						# combinations cached before writing to file because the
						# number of verts needed needs to be written in the header
						# and that number is not known before this is done.
						
						# Export data
						co_no_uv_cache = []
						face_vert_indices = {}		# mapping of face index to list of exported vert indices for that face
						
						# Caches
						vert_vno_indices = {}		# mapping of vert index to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert indices that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if face.use_smooth:
									
									# Smooth faces use per-vertex normals, which can be
									# shared between faces, so deduplicate via the cache.
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], v.normal[:])
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add( vert_data )
										
										co_no_uv_cache.append( vert_data )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									
									if uv_layer:
										vert_data = (v.co[:], face.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], face.normal[:])
									
									# All face-vert-co-no are unique, we cannot
									# cache them
									co_no_uv_cache.append( vert_data )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							face_vert_indices[face.index] = fvi
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ply_path, 'wb') as ply:
							ply.write(b'ply\n')
							ply.write(b'format binary_little_endian 1.0\n')
							ply.write(b'comment Created by LuxBlend 2.5 exporter for LuxRender - www.luxrender.net\n')
							
							# vert_index == the number of actual verts needed
							ply.write( ('element vertex %d\n' % vert_index).encode() )
							ply.write(b'property float x\n')
							ply.write(b'property float y\n')
							ply.write(b'property float z\n')
							
							ply.write(b'property float nx\n')
							ply.write(b'property float ny\n')
							ply.write(b'property float nz\n')
							
							if uv_layer:
								ply.write(b'property float s\n')
								ply.write(b'property float t\n')
							
							ply.write( ('element face %d\n' % len(ffaces_mats[i])).encode() )
							ply.write(b'property list uchar uint vertex_indices\n')
							
							ply.write(b'end_header\n')
							
							# dump cached co/no/uv
							if uv_layer:
								for co,no,uv in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
									ply.write( struct.pack('<2f', *uv) )
							else:
								for co,no in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
							
							# dump face vert indices
							for face in ffaces_mats[i]:
								lfvi = len(face_vert_indices[face.index])
								ply.write( struct.pack('<B', lfvi) )
								ply.write( struct.pack('<%dI'%lfvi, *face_vert_indices[face.index]) )
							
							del co_no_uv_cache
							del face_vert_indices
						
						LuxLog('Binary PLY file written: %s' % (ply_path))
					else:
						LuxLog('Skipping already exported PLY: %s' % mesh_name)
					
					# Export the shape definition to LXO
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ply_path)
					)
					
					# Add subdiv etc options
					shape_params.update( obj.data.luxrender_mesh.get_paramset() )
					
					mesh_definition = (
						mesh_name,
						i,
						'plymesh',
						shape_params
					)
					mesh_definitions.append( mesh_definition )
					
					# Only export objectBegin..objectEnd and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj):
						self.exportShapeDefinition(obj, mesh_definition)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
				
				except InvalidGeometryException as err:
					LuxLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			LuxLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
예제 #10
0
    def buildBinaryPLYMesh(self, obj):
        """
        Convert supported blender objects into a MESH, and then split into parts
        according to vertex material assignment, and create a binary PLY file.

        Returns a list of (mesh_name, material_index, type, ParamSet) tuples,
        one per exported material part (empty on failure).
        """
        try:
            mesh_definitions = []
            mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
            if mesh is None:
                raise UnexportableObjectException(
                    'Cannot create render/export mesh')

            # collate faces by mat index
            ffaces_mats = {}
            mesh_faces = mesh.tessfaces
            for f in mesh_faces:
                mi = f.material_index
                if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
                ffaces_mats[mi].append(f)
            material_indices = ffaces_mats.keys()

            if len(mesh.materials) > 0 and mesh.materials[0] != None:
                mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
            else:
                mats = [(0, None)]

            for i, mat in mats:
                try:
                    if i not in material_indices: continue

                    # If this mesh/mat combo has already been processed, get it from the cache
                    mesh_cache_key = (self.geometry_scene, obj.data, i)
                    if self.allow_instancing(obj) and self.ExportedMeshes.have(
                            mesh_cache_key):
                        mesh_definitions.append(
                            self.ExportedMeshes.get(mesh_cache_key))
                        continue

                    # Put PLY files in frame-numbered subfolders to avoid
                    # clobbering when rendering animations
                    #sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
                    sc_fr = '%s/%s/%s/%05d' % (
                        self.mts_context.meshes_dir, efutil.scene_filename(),
                        bpy.path.clean_name(self.geometry_scene.name),
                        self.visibility_scene.frame_current)
                    if not os.path.exists(sc_fr):
                        os.makedirs(sc_fr)

                    def make_plyfilename():
                        ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
                        mesh_name = '%s_%04d_m%03d' % (obj.data.name,
                                                       ply_serial, i)
                        ply_filename = '%s.ply' % bpy.path.clean_name(
                            mesh_name)
                        ply_path = '/'.join([sc_fr, ply_filename])
                        return mesh_name, ply_path

                    mesh_name, ply_path = make_plyfilename()

                    # Ensure that all PLY files have unique names
                    while self.ExportedPLYs.have(ply_path):
                        mesh_name, ply_path = make_plyfilename()

                    self.ExportedPLYs.add(ply_path, None)

                    # skip writing the PLY file if the box is checked
                    skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
                    if not os.path.exists(ply_path) or not (
                            self.visibility_scene.mitsuba_engine.partial_export
                            and skip_exporting):

                        GeometryExporter.NewExportedObjects.add(obj)

                        # BUGFIX: uv_layer was left unbound when uv_textures
                        # was non-empty but had no active layer data; default
                        # to None so the later `if uv_layer:` checks are safe.
                        uv_layer = None
                        uv_textures = mesh.tessface_uv_textures
                        if len(uv_textures) > 0 and uv_textures.active and uv_textures.active.data:
                            uv_layer = uv_textures.active.data

                        # Here we work out exactly which vert+normal combinations
                        # we need to export. This is done first, and the export
                        # combinations cached before writing to file because the
                        # number of verts needed needs to be written in the header
                        # and that number is not known before this is done.

                        # Export data
                        ntris = 0
                        co_no_uv_cache = []
                        face_vert_indices = [
                        ]  # mapping of face index to list of exported vert indices for that face

                        # Caches
                        vert_vno_indices = {
                        }  # mapping of vert index to exported vert index for verts with vert normals
                        vert_use_vno = set(
                        )  # Set of vert indices that use vert normals

                        vert_index = 0  # exported vert index
                        for face in ffaces_mats[i]:
                            fvi = []
                            for j, vertex in enumerate(face.vertices):
                                v = mesh.vertices[vertex]

                                if uv_layer:
                                    # Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
                                    uv_coord = (uv_layer[face.index].uv[j][0],
                                                1.0 -
                                                uv_layer[face.index].uv[j][1])

                                if face.use_smooth:

                                    if uv_layer:
                                        vert_data = (v.co[:], v.normal[:],
                                                     uv_coord)
                                    else:
                                        vert_data = (v.co[:], v.normal[:])

                                    if vert_data not in vert_use_vno:
                                        vert_use_vno.add(vert_data)

                                        co_no_uv_cache.append(vert_data)

                                        vert_vno_indices[
                                            vert_data] = vert_index
                                        fvi.append(vert_index)

                                        vert_index += 1
                                    else:
                                        fvi.append(vert_vno_indices[vert_data])

                                else:

                                    # BUGFIX: use the Y-flipped uv_coord here
                                    # too; the flat-shaded branch previously
                                    # wrote the raw (unflipped) Blender UV,
                                    # inconsistent with the smooth branch.
                                    if uv_layer:
                                        vert_data = (v.co[:], face.normal[:],
                                                     uv_coord)
                                    else:
                                        vert_data = (v.co[:], face.normal[:])

                                    # All face-vert-co-no are unique, we cannot
                                    # cache them
                                    co_no_uv_cache.append(vert_data)

                                    fvi.append(vert_index)

                                    vert_index += 1

                            # For Mitsuba, we need to triangulate quad faces
                            face_vert_indices.append(fvi[0:3])
                            ntris += 3
                            if len(fvi) == 4:
                                face_vert_indices.append(
                                    (fvi[0], fvi[2], fvi[3]))
                                ntris += 3

                        del vert_vno_indices
                        del vert_use_vno

                        with open(ply_path, 'wb') as ply:
                            ply.write(b'ply\n')
                            ply.write(b'format binary_little_endian 1.0\n')
                            ply.write(
                                b'comment Created by MtsBlend 2.5 exporter for Mitsuba - www.mitsuba.net\n'
                            )

                            # vert_index == the number of actual verts needed
                            ply.write(
                                ('element vertex %d\n' % vert_index).encode())
                            ply.write(b'property float x\n')
                            ply.write(b'property float y\n')
                            ply.write(b'property float z\n')

                            ply.write(b'property float nx\n')
                            ply.write(b'property float ny\n')
                            ply.write(b'property float nz\n')

                            if uv_layer:
                                ply.write(b'property float s\n')
                                ply.write(b'property float t\n')

                            ply.write(('element face %d\n' %
                                       int(ntris / 3)).encode())
                            ply.write(
                                b'property list uchar uint vertex_indices\n')

                            ply.write(b'end_header\n')

                            # dump cached co/no/uv
                            if uv_layer:
                                for co, no, uv in co_no_uv_cache:
                                    ply.write(struct.pack('<3f', *co))
                                    ply.write(struct.pack('<3f', *no))
                                    ply.write(struct.pack('<2f', *uv))
                            else:
                                for co, no in co_no_uv_cache:
                                    ply.write(struct.pack('<3f', *co))
                                    ply.write(struct.pack('<3f', *no))

                            # dump face vert indices (always triangles)
                            for face in face_vert_indices:
                                ply.write(struct.pack('<B', 3))
                                ply.write(struct.pack('<3I', *face))

                            del co_no_uv_cache
                            del face_vert_indices

                        MtsLog('Binary PLY file written: %s' % (ply_path))
                    else:
                        MtsLog('Skipping already exported PLY: %s' % mesh_name)

                    shape_params = ParamSet().add_string(
                        'filename', efutil.path_relative_to_export(ply_path))
                    if obj.data.mitsuba_mesh.normals == 'facenormals':
                        shape_params.add_boolean('faceNormals',
                                                 {'value': 'true'})

                    mesh_definition = (mesh_name, i, 'ply', shape_params)
                    # Only export Shapegroup and cache this mesh_definition if we plan to use instancing
                    if self.allow_instancing(
                            obj) and self.exportShapeDefinition(
                                obj, mesh_definition):
                        shape_params = ParamSet().add_reference(
                            'id', '', mesh_name + '-shapegroup_%i' % (i))

                        mesh_definition = (mesh_name, i, 'instance',
                                           shape_params)
                        self.ExportedMeshes.add(mesh_cache_key,
                                                mesh_definition)

                    mesh_definitions.append(mesh_definition)

                except InvalidGeometryException as err:
                    MtsLog('Mesh export failed, skipping this mesh: %s' % err)

            del ffaces_mats
            bpy.data.meshes.remove(mesh)

        except UnexportableObjectException as err:
            MtsLog('Object export failed, skipping this object: %s' % err)

        return mesh_definitions
예제 #11
0
def convert_texture(scene, texture, variant_hint=None):
    """Convert a Blender texture datablock into LuxRender texture parameters.

    Args:
        scene: bpy.types.Scene -- supplies the current frame number, render
            image settings and the LuxRender mapping/transform parameter
            groups.
        texture: bpy.types.Texture -- the Blender texture to convert.
        variant_hint: optional variant requested by the caller ('float' or
            'color'); only honoured for image-based textures (IMAGE/OCEAN).

    Returns:
        (variant, lux_tex_name, paramset) where variant is 'float' or
        'color', lux_tex_name is the LuxRender texture plugin name and
        paramset is the assembled ParamSet.

    Raises:
        Exception: if an image file referenced by the texture does not exist.
    """
    # Lux only supports blender's textures in float variant (except for
    # image/ocean, but both of these are exported as imagemap)
    variant = 'float'
    paramset = ParamSet()

    lux_tex_name = 'blender_%s' % texture.type.lower()

    # Procedural textures use 3D mapping; image-based ones switch to 2D below
    mapping_type = '3D'

    if texture.type not in ('IMAGE', 'OCEAN'):
        paramset.add_float('bright', texture.intensity)
        paramset.add_float('contrast', texture.contrast)

    if texture.type == 'BLEND':
        progression_map = {
            'LINEAR': 'lin',
            'QUADRATIC': 'quad',
            'EASING': 'ease',
            'DIAGONAL': 'diag',
            'SPHERICAL': 'sphere',
            'QUADRATIC_SPHERE': 'halo',
            'RADIAL': 'radial',
        }
        paramset.add_bool('flipxy', texture.use_flip_axis) \
          .add_string('type', progression_map[texture.progression])

    if texture.type == 'CLOUDS':
        paramset.add_string('noisetype', texture.noise_type.lower() ) \
          .add_string('noisebasis', texture.noise_basis.lower() ) \
          .add_float('noisesize', texture.noise_scale) \
          .add_integer('noisedepth', texture.noise_depth)

    if texture.type == 'DISTORTED_NOISE':
        lux_tex_name = 'blender_distortednoise'
        paramset.add_string('type', texture.noise_distortion.lower()) \
          .add_string('noisebasis', texture.noise_basis.lower() ) \
          .add_float('distamount', texture.distortion) \
          .add_float('noisesize', texture.noise_scale) \
          .add_float('nabla', texture.nabla)

    if texture.type == 'MAGIC':
        paramset.add_integer('noisedepth', texture.noise_depth) \
          .add_float('turbulence', texture.turbulence)

    if texture.type == 'MARBLE':
        paramset.add_string('type', texture.marble_type.lower() ) \
          .add_string('noisetype', texture.noise_type.lower() ) \
          .add_string('noisebasis', texture.noise_basis.lower() ) \
          .add_string('noisebasis2', texture.noise_basis_2.lower() ) \
          .add_float('noisesize', texture.noise_scale) \
          .add_float('turbulence', texture.turbulence) \
          .add_integer('noisedepth', texture.noise_depth)

    if texture.type == 'MUSGRAVE':
        paramset.add_string('type', texture.musgrave_type.lower() ) \
          .add_float('h', texture.dimension_max) \
          .add_float('lacu', texture.lacunarity) \
          .add_string('noisebasis', texture.noise_basis.lower() ) \
          .add_float('noisesize', texture.noise_scale) \
          .add_float('octs', texture.octaves)

    # NOISE shows no params ?

    if texture.type == 'STUCCI':
        paramset.add_string('type', texture.stucci_type.lower() ) \
          .add_string('noisetype', texture.noise_type.lower() ) \
          .add_string('noisebasis', texture.noise_basis.lower() ) \
          .add_float('noisesize', texture.noise_scale) \
          .add_float('turbulence', texture.turbulence)

    if texture.type == 'VORONOI':
        distancem_map = {
            'DISTANCE': 'actual_distance',
            'DISTANCE_SQUARED': 'distance_squared',
            'MANHATTAN': 'manhattan',
            'CHEBYCHEV': 'chebychev',
            'MINKOVSKY_HALF': 'minkovsky_half',
            'MINKOVSKY_FOUR': 'minkovsky_four',
            'MINKOVSKY': 'minkovsky'
        }
        paramset.add_string('distmetric', distancem_map[texture.distance_metric]) \
          .add_float('minkovsky_exp', texture.minkovsky_exponent) \
          .add_float('noisesize', texture.noise_scale) \
          .add_float('nabla', texture.nabla) \
          .add_float('w1', texture.weight_1) \
          .add_float('w2', texture.weight_2) \
          .add_float('w3', texture.weight_3) \
          .add_float('w4', texture.weight_4)

    if texture.type == 'WOOD':
        paramset.add_string('noisebasis', texture.noise_basis.lower() ) \
          .add_string('noisebasis2', texture.noise_basis_2.lower() ) \
          .add_float('noisesize', texture.noise_scale) \
          .add_string('noisetype', texture.noise_type.lower() ) \
          .add_float('turbulence', texture.turbulence) \
          .add_string('type', texture.wood_type.lower() )

    # Translate Blender Image/movie into lux tex
    if texture.type == 'IMAGE' and texture.image and texture.image.source in [
            'GENERATED', 'FILE', 'SEQUENCE'
    ]:

        # Frame-numbered subfolder for baked/extracted images, relative to
        # the export directory (the exporter chdir()s there before exporting)
        extract_path = os.path.join(efutil.scene_filename(),
                                    bpy.path.clean_name(scene.name),
                                    '%05d' % scene.frame_current)

        def _ensure_extract_dir():
            # save_render() does not create the target folder, so make sure
            # it exists before baking/extracting an image into it
            if not os.path.exists(extract_path):
                os.makedirs(extract_path)

        if texture.image.source == 'GENERATED':
            _ensure_extract_dir()
            tex_image = 'luxblend_baked_image_%s.%s' % (bpy.path.clean_name(
                texture.name), scene.render.image_settings.file_format)
            tex_image = os.path.join(extract_path, tex_image)
            texture.image.save_render(tex_image, scene)

        if texture.image.source == 'FILE':
            if texture.image.packed_file:
                # Packed image: extract it to disk so LuxRender can read it
                _ensure_extract_dir()
                tex_image = 'luxblend_extracted_image_%s.%s' % (
                    bpy.path.clean_name(
                        texture.name), scene.render.image_settings.file_format)
                tex_image = os.path.join(extract_path, tex_image)
                texture.image.save_render(tex_image, scene)
            else:
                if texture.library is not None:
                    # Resolve the path relative to the library the texture
                    # was linked from
                    f_path = efutil.filesystem_path(
                        bpy.path.abspath(texture.image.filepath,
                                         texture.library.filepath))
                else:
                    f_path = efutil.filesystem_path(texture.image.filepath)
                if not os.path.exists(f_path):
                    raise Exception(
                        'Image referenced in blender texture %s doesn\'t exist: %s'
                        % (texture.name, f_path))
                tex_image = efutil.filesystem_path(f_path)

        if texture.image.source == 'SEQUENCE':
            if texture.image.packed_file:
                _ensure_extract_dir()
                tex_image = 'luxblend_extracted_image_%s.%s' % (
                    bpy.path.clean_name(
                        texture.name), scene.render.image_settings.file_format)
                tex_image = os.path.join(extract_path, tex_image)
                texture.image.save_render(tex_image, scene)
            else:
                # sequence params from blender
                sequence = bpy.data.textures[(texture.name).replace(
                    '.001', ''
                )].image_user  # remove tex_preview extension to avoid error
                seqframes = sequence.frame_duration
                seqoffset = sequence.frame_offset
                seqstartframe = sequence.frame_start  # the global frame at which the imagesequence starts
                seqcyclic = sequence.use_cyclic
                currentframe = scene.frame_current

                if texture.library is not None:
                    f_path = efutil.filesystem_path(
                        bpy.path.abspath(texture.image.filepath,
                                         texture.library.filepath))
                else:
                    f_path = efutil.filesystem_path(texture.image.filepath)

                # Map the scene frame to a 1-based index into the sequence,
                # honouring the sequence offset
                if currentframe < seqstartframe:
                    fnumber = 1 + seqoffset
                else:
                    fnumber = currentframe - (seqstartframe - 1) + seqoffset

                if fnumber > seqframes:
                    if seqcyclic == False:
                        # Clamp on the last frame when not cycling
                        fnumber = seqframes
                    else:
                        # Wrap around for cyclic sequences
                        fnumber = (currentframe -
                                   (seqstartframe - 1)) % seqframes
                        if fnumber == 0:
                            fnumber = seqframes

                import re

                def get_seq_filename(number, f_path):
                    # Substitute the rightmost digit-run in the path with the
                    # zero-padded frame number
                    m = re.findall(r'(\d+)', f_path)
                    if len(m) == 0:
                        return "ERR: Can't find pattern"

                    rightmost_number = m[len(m) - 1]
                    seq_length = len(rightmost_number)

                    nstr = "%i" % number
                    new_seq_number = nstr.zfill(seq_length)

                    return f_path.replace(rightmost_number, new_seq_number)

                f_path = get_seq_filename(fnumber, f_path)

                if not os.path.exists(f_path):
                    raise Exception(
                        'Image referenced in blender texture %s doesn\'t exist: %s'
                        % (texture.name, f_path))
                tex_image = efutil.filesystem_path(f_path)

        lux_tex_name = 'imagemap'
        sampling = texture.luxrender_texture.luxrender_tex_imagesampling
        if variant_hint:
            variant = variant_hint
        else:
            variant = 'color'
        paramset.add_string('filename', tex_image)
        # BUGFIX: the original compared variant_hint against the builtin
        # *type* `float` (variant_hint == float), which can never equal the
        # string hints passed in, so the channel selection was always skipped
        if variant_hint == 'float':
            paramset.add_string('channel', sampling.channel)
        paramset.add_integer('discardmipmaps', sampling.discardmipmaps)
        paramset.add_float('gain', sampling.gain)
        paramset.add_float('gamma', sampling.gamma)
        paramset.add_float('maxanisotropy', sampling.maxanisotropy)
        paramset.add_string('wrap', sampling.wrap)
        mapping_type = '2D'

    # Similar to image handler, but for Ocean tex
    if texture.type == 'OCEAN':
        if texture.ocean.output == 'FOAM':

            ocean_mods = [
                m for m in texture.ocean.ocean_object.modifiers
                if m.type == 'OCEAN'
            ]
            if len(ocean_mods) == 0:
                # BUGFIX: without an Ocean modifier there are no baked foam
                # maps to reference; fall back to a constant texture instead
                # of raising NameError on the unbound ocean_mod below
                print('No ocean modifiers!')
                lux_tex_name = 'constant'
            else:
                ocean_mod = ocean_mods[0]
                tex_image = efutil.filesystem_path(
                    os.path.join(ocean_mod.filepath,
                                 'foam_%04d.exr' % scene.frame_current))
                #SOON! (until 3D disp support...)
                #elif texture.ocean.output == 'DISPLACEMENT':
                #tex_image = os.path.join(ocean_mod.filepath, 'disp_%04d.exr' % scene.frame_current)

                lux_tex_name = 'imagemap'
                if variant_hint:
                    variant = variant_hint
                else:
                    variant = 'color'
                paramset.add_string('filename', tex_image)
                paramset.add_float('gamma', 1.0)
                mapping_type = '2D'

        else:
            lux_tex_name = 'constant'

    # Attach 3D transform params for procedurals, 2D mapping for imagemaps
    if mapping_type == '3D':
        paramset.update(
            texture.luxrender_texture.luxrender_tex_transform.get_paramset(
                scene))
    else:
        paramset.update(
            texture.luxrender_texture.luxrender_tex_mapping.get_paramset(
                scene))

    return variant, lux_tex_name, paramset
예제 #12
0
def convert_texture(scene, texture, variant_hint=None):
	"""Convert a Blender texture datablock into LuxRender texture parameters.

	scene         bpy.types.Scene; supplies the current frame, render image
	              settings and the mapping/transform parameter groups.
	texture       bpy.types.Texture to convert.
	variant_hint  optional variant requested by the caller ('float' or
	              'color'); only honoured for image-based textures.

	Returns (variant, lux_tex_name, paramset).
	Raises Exception if a referenced image file does not exist on disk.
	"""
	# Lux only supports blender's textures in float variant (except for image/ocean (both of these are exported as imagemap)
	variant = 'float'
	paramset = ParamSet()
	
	lux_tex_name = 'blender_%s' % texture.type.lower()
	
	# Procedural textures use 3D mapping; image-based ones switch to 2D below
	mapping_type = '3D'
	
	if texture.type not in ('IMAGE', 'OCEAN'):
		paramset.add_float('bright', texture.intensity)
		paramset.add_float('contrast', texture.contrast)
	
	if texture.type == 'BLEND':
		progression_map = {
			'LINEAR':			'lin',
			'QUADRATIC':		'quad',
			'EASING':			'ease',
			'DIAGONAL':			'diag',
			'SPHERICAL':		'sphere',
			'QUADRATIC_SPHERE':	'halo',
			'RADIAL':			'radial',
		}
		paramset.add_bool('flipxy', texture.use_flip_axis) \
				.add_string('type', progression_map[texture.progression])
	
	if texture.type == 'CLOUDS':
		paramset.add_string('noisetype', texture.noise_type.lower() ) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_integer('noisedepth', texture.noise_depth)
	
	if texture.type == 'DISTORTED_NOISE':
		lux_tex_name = 'blender_distortednoise'
		paramset.add_string('type', texture.noise_distortion.lower()) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('distamount', texture.distortion) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('nabla', texture.nabla)
	
	if texture.type == 'MAGIC':
		paramset.add_integer('noisedepth', texture.noise_depth) \
				.add_float('turbulence', texture.turbulence)
	
	if texture.type == 'MARBLE':
		paramset.add_string('type', texture.marble_type.lower() ) \
				.add_string('noisetype', texture.noise_type.lower() ) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_string('noisebasis2', texture.noise_basis_2.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('turbulence', texture.turbulence) \
				.add_integer('noisedepth', texture.noise_depth)
	
	if texture.type == 'MUSGRAVE':
		paramset.add_string('type', texture.musgrave_type.lower() ) \
				.add_float('h', texture.dimension_max) \
				.add_float('lacu', texture.lacunarity) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('octs', texture.octaves)
	
	# NOISE shows no params ?
	
	if texture.type == 'STUCCI':
		paramset.add_string('type', texture.stucci_type.lower() ) \
				.add_string('noisetype', texture.noise_type.lower() ) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('turbulence', texture.turbulence)
	
	if texture.type == 'VORONOI':
		distancem_map = {
			'DISTANCE': 'actual_distance',
			'DISTANCE_SQUARED': 'distance_squared',
			'MANHATTAN': 'manhattan',
			'CHEBYCHEV': 'chebychev',
			'MINKOVSKY_HALF': 'minkovsky_half',
			'MINKOVSKY_FOUR': 'minkovsky_four',
			'MINKOVSKY': 'minkovsky'
		}
		paramset.add_string('distmetric', distancem_map[texture.distance_metric]) \
				.add_float('minkovsky_exp', texture.minkovsky_exponent) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('nabla', texture.nabla) \
				.add_float('w1', texture.weight_1) \
				.add_float('w2', texture.weight_2) \
				.add_float('w3', texture.weight_3) \
				.add_float('w4', texture.weight_4)
	
	if texture.type == 'WOOD':
		paramset.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_string('noisebasis2', texture.noise_basis_2.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_string('noisetype', texture.noise_type.lower() ) \
				.add_float('turbulence', texture.turbulence) \
				.add_string('type', texture.wood_type.lower() )
	
	# Translate Blender Image/movie into lux tex
	if texture.type == 'IMAGE' and texture.image and texture.image.source in ['GENERATED', 'FILE']:
		
		# Frame-numbered subfolder for baked/extracted images, relative to
		# the export directory
		extract_path = os.path.join(
			efutil.scene_filename(),
			bpy.path.clean_name(scene.name),
			'%05d' % scene.frame_current
		)
		
		if texture.image.source == 'GENERATED':
			# save_render() does not create the target folder itself
			if not os.path.exists(extract_path):
				os.makedirs(extract_path)
			tex_image = 'luxblend_baked_image_%s.%s' % (bpy.path.clean_name(texture.name), scene.render.image_settings.file_format)
			tex_image = os.path.join(extract_path, tex_image)
			texture.image.save_render(tex_image, scene)
		
		if texture.image.source == 'FILE':
			if texture.image.packed_file:
				# Packed image: extract it to disk so LuxRender can read it
				if not os.path.exists(extract_path):
					os.makedirs(extract_path)
				tex_image = 'luxblend_extracted_image_%s.%s' % (bpy.path.clean_name(texture.name), scene.render.image_settings.file_format)
				tex_image = os.path.join(extract_path, tex_image)
				texture.image.save_render(tex_image, scene)
			else:
				if texture.library is not None:
					f_path = efutil.filesystem_path(bpy.path.abspath( texture.image.filepath, texture.library.filepath))
				else:
					f_path = efutil.filesystem_path(texture.image.filepath)
				if not os.path.exists(f_path):
					raise Exception('Image referenced in blender texture %s doesn\'t exist: %s' % (texture.name, f_path))
				tex_image = efutil.filesystem_path(f_path)
		
		lux_tex_name = 'imagemap'
		if variant_hint:
			variant = variant_hint
		else:
			variant = 'color'
		paramset.add_string('filename', tex_image)
		paramset.add_float('gamma', 2.2)
		mapping_type = '2D'
	
	# Similar to image handler, but for Ocean tex
	if texture.type == 'OCEAN':
		if texture.ocean.output == 'FOAM':

			ocean_mods = [m for m in texture.ocean.ocean_object.modifiers if m.type == 'OCEAN']
			if len(ocean_mods) == 0:
				# BUGFIX: without an Ocean modifier there are no baked foam
				# maps to reference; fall back to a constant texture instead
				# of raising NameError on the unbound ocean_mod below
				print ('No ocean modifiers!')
				lux_tex_name = 'constant'
			else:
				ocean_mod = ocean_mods[0]
				tex_image = efutil.filesystem_path(os.path.join(ocean_mod.filepath, 'foam_%04d.exr' % scene.frame_current))
				#SOON! (until 3D disp support...)
				#elif texture.ocean.output == 'DISPLACEMENT':
					#tex_image = os.path.join(ocean_mod.filepath, 'disp_%04d.exr' % scene.frame_current)
				
				lux_tex_name = 'imagemap'
				if variant_hint:
					variant = variant_hint
				else:
					variant = 'color'
				paramset.add_string('filename', tex_image)
				paramset.add_float('gamma', 1.0)
				mapping_type = '2D'
			
		else:
			lux_tex_name = 'constant'
		
	# Attach 3D transform params for procedurals, 2D mapping for imagemaps
	if mapping_type == '3D':
		paramset.update( texture.luxrender_texture.luxrender_tex_transform.get_paramset(scene) )
	else:
		paramset.update( texture.luxrender_texture.luxrender_tex_mapping.get_paramset(scene) )
	
	return variant, lux_tex_name, paramset
예제 #13
0
	def handler_Duplis_PATH(self, obj, *args, **kwargs):
		"""Export a Hair particle system on obj to LuxRender.

		Requires a 'particle_system' keyword argument (the particle system to
		export); only systems of type 'HAIR' are handled.  Depending on the
		system's LuxRender settings the hair is written either as a binary
		.hair file referenced by a 'hairfile' shape, or as instanced
		cylinder/sphere primitives, one per segment/junction.

		Returns None.
		"""
		if not 'particle_system' in kwargs.keys():
			LuxLog('ERROR: handler_Duplis_PATH called without particle_system')
			return
		
		psys = kwargs['particle_system']
		
		if not psys.settings.type == 'HAIR':
			LuxLog('ERROR: handler_Duplis_PATH can only handle Hair particle systems ("%s")' % psys.name)
			return
	
		if bpy.context.scene.luxrender_engine.export_hair == False:
			return
			
		# Locate the particle-system modifier that owns psys; it is needed by
		# the co_hair / uv_on_emitter / mcol_on_emitter calls below
		for mod in obj.modifiers:
			if mod.type == 'PARTICLE_SYSTEM':
				if mod.particle_system.name == psys.name:
					break;

		if not (mod.type == 'PARTICLE_SYSTEM'):
			return
		elif not mod.particle_system.name == psys.name or mod.show_render == False:
			return
				
		LuxLog('Exporting Hair system "%s"...' % psys.name)

		# hair_size is a diameter; LuxRender shapes take a radius
		size = psys.settings.luxrender_hair.hair_size / 2.0
		psys.set_resolution(self.geometry_scene, obj, 'RENDER')
		steps = 2**psys.settings.render_step
		num_parents = len(psys.particles)
		num_children = len(psys.child_particles)
		if num_children == 0:
			start = 0
		else:
			# Number of virtual parents reduces the number of exported children
			num_virtual_parents = math.trunc(0.3 * psys.settings.virtual_parents * psys.settings.child_nbr * num_parents)
			start = num_parents + num_virtual_parents
		
		partsys_name = '%s_%s'%(obj.name, psys.name)
		det = DupliExportProgressThread()
		det.start(num_parents + num_children)

		if psys.settings.luxrender_hair.use_binary_output:
			# Put HAIR_FILES files in frame-numbered subfolders to avoid
			# clobbering when rendering animations
			sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
			if not os.path.exists( sc_fr ):
				os.makedirs(sc_fr)
					
			hair_filename = '%s.hair' % bpy.path.clean_name(partsys_name)
			hair_file_path = '/'.join([sc_fr, hair_filename])

			segments = []
			points = []
			thickness = []
			colors = []
			uv_coords = []
			total_segments_count = 0
			vertex_color_layer = None
			uv_tex = None
			colorflag = 0
			uvflag = 0
			image_width = 0
			image_height = 0
			image_pixels = []
			
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			uv_textures = mesh.tessface_uv_textures
			vertex_color =  mesh.tessface_vertex_colors

			# Hair colour can come from the emitter's vertex colours, or be
			# sampled from the active UV texture image
			if psys.settings.luxrender_hair.export_color == 'vertex_color':
				if vertex_color.active and vertex_color.active.data:
					vertex_color_layer = vertex_color.active.data
					colorflag = 1

			if uv_textures.active and uv_textures.active.data:
				uv_tex = uv_textures.active.data
				if psys.settings.luxrender_hair.export_color == 'uv_texture_map':
					if uv_tex[0].image:
						image_width = uv_tex[0].image.size[0]
						image_height = uv_tex[0].image.size[1]
						image_pixels = uv_tex[0].image.pixels[:]
						colorflag = 1
				uvflag = 1

			info = 'Created by LuxBlend 2.6 exporter for LuxRender - www.luxrender.net'

			# Points are stored in object space; obj.matrix_world is re-applied
			# through the shape transform when the hair file is referenced
			transform = obj.matrix_world.inverted()
			total_strand_count = 0
				
			for pindex in range(start, num_parents + num_children):
				det.exported_objects += 1
				point_count = 0
				i = 0

				if num_children == 0:
					i = pindex
		
				# A small optimization in order to speedup the export
				# process: cache the uv_co and color value
				uv_co = None
				col = None
				seg_length = 1.0
				for step in range(0, steps):
					co = psys.co_hair(obj, mod, pindex, step)
					# Skip degenerate points (zero coordinate or zero-length segment)
					if (step > 0): seg_length = (co-obj.matrix_world*points[len(points)-1]).length_squared 
					if not (co.length_squared == 0 or seg_length == 0):
						points.append(transform*co)
						point_count = point_count + 1

						if uvflag:
							if not uv_co:
								uv_co = psys.uv_on_emitter(mod, psys.particles[i], pindex, uv_textures.active_index)
							uv_coords.append(uv_co)

						if psys.settings.luxrender_hair.export_color == 'uv_texture_map' and not len(image_pixels) == 0:
							if not col:
								# Sample the emitter image at the strand's UV
								# coordinate (RGBA pixels, flat list)
								x_co = round(uv_co[0] * (image_width - 1))
								y_co = round(uv_co[1] * (image_height - 1))
							
								pixelnumber = (image_width * y_co) + x_co
							
								r = image_pixels[pixelnumber*4]
								g = image_pixels[pixelnumber*4+1]
								b = image_pixels[pixelnumber*4+2]
								col = (r,g,b)
							colors.append(col)
						elif psys.settings.luxrender_hair.export_color == 'vertex_color':
							if not col:
								col = psys.mcol_on_emitter(mod, psys.particles[i], pindex, vertex_color.active_index)
							colors.append(col)

				# A single point cannot form a segment; drop such strands
				if point_count == 1:
					points.pop()
					point_count = point_count - 1
				elif point_count > 1:
					segments.append(point_count - 1)
					total_strand_count = total_strand_count + 1
					total_segments_count = total_segments_count + point_count - 1
			hair_file_path = efutil.path_relative_to_export(hair_file_path)
			with open(hair_file_path, 'wb') as hair_file:
				## Binary hair file format from
				## http://www.cemyuksel.com/research/hairmodels/
				##
				##File header
				hair_file.write(b'HAIR')        #magic number
				hair_file.write(struct.pack('<I', total_strand_count)) #total strand count
				hair_file.write(struct.pack('<I', len(points))) #total point count 
				hair_file.write(struct.pack('<I', 1+2+16*colorflag+32*uvflag)) #bit array for configuration
				hair_file.write(struct.pack('<I', steps))       #default segments count
				hair_file.write(struct.pack('<f', size*2))      #default thickness
				hair_file.write(struct.pack('<f', 0.0))         #default transparency
				color = (0.65, 0.65, 0.65)
				hair_file.write(struct.pack('<3f', *color))     #default color
				hair_file.write(struct.pack('<88s', info.encode())) #information
				
				##hair data
				hair_file.write(struct.pack('<%dH'%(len(segments)), *segments))
				for point in points:
					hair_file.write(struct.pack('<3f', *point))
				if colorflag:
					for col in colors:
						hair_file.write(struct.pack('<3f', *col))
				if uvflag:
					for uv in uv_coords:
						hair_file.write(struct.pack('<2f', *uv))
					
			LuxLog('Binary hair file written: %s' % (hair_file_path))
			
			# NOTE(review): psys.settings.material is a 1-based slot index and
			# the slot is assumed to hold a material -- confirm against callers
			hair_mat = obj.material_slots[psys.settings.material - 1].material

			#Shape parameters
			hair_shape_params = ParamSet()
			
			hair_shape_params.add_string('filename', hair_file_path)
			hair_shape_params.add_string('name', bpy.path.clean_name(partsys_name))
			hair_shape_params.add_point('camerapos', bpy.context.scene.camera.location)
			hair_shape_params.add_string('tesseltype', psys.settings.luxrender_hair.tesseltype)
			hair_shape_params.add_string('acceltype', psys.settings.luxrender_hair.acceltype)
		
			if psys.settings.luxrender_hair.tesseltype in ['ribbonadaptive', 'solidadaptive']:
				hair_shape_params.add_integer('adaptive_maxdepth', psys.settings.luxrender_hair.adaptive_maxdepth)
				hair_shape_params.add_float('adaptive_error', psys.settings.luxrender_hair.adaptive_error)
	
			if psys.settings.luxrender_hair.tesseltype in ['solid', 'solidadaptive']:
				hair_shape_params.add_integer('solid_sidecount', psys.settings.luxrender_hair.solid_sidecount)
				hair_shape_params.add_bool('solid_capbottom', psys.settings.luxrender_hair.solid_capbottom)
				hair_shape_params.add_bool('solid_captop', psys.settings.luxrender_hair.solid_captop)
			
			# Export shape definition to .LXO file
			self.lux_context.attributeBegin('hairfile_%s'%partsys_name)
			self.lux_context.transform( matrix_to_list(obj.matrix_world, apply_worldscale=True) )
			self.lux_context.namedMaterial(hair_mat.name)
			self.lux_context.shape('hairfile', hair_shape_params)
			self.lux_context.attributeEnd()
			self.lux_context.set_output_file(Files.MATS)
			mat_export_result = hair_mat.luxrender_material.export(self.visibility_scene, self.lux_context, hair_mat, mode='indirect')
			self.lux_context.set_output_file(Files.GEOM)
	
		else:
			#Old export with cylinder and sphere primitives
			# This should force the strand/junction objects to be instanced
			self.objects_used_as_duplis.add(obj)
			hair_Junction = (
				(
					'HAIR_Junction_%s'%partsys_name,
					psys.settings.material - 1,
					'sphere',
					ParamSet().add_float('radius', size)
				),
			)
			hair_Strand = (
				(
					'HAIR_Strand_%s'%partsys_name,
					psys.settings.material - 1,
					'cylinder',
					ParamSet() \
						.add_float('radius', size) \
						.add_float('zmin', 0.0) \
						.add_float('zmax', 1.0)
				),
			)
		
			for sn, si, st, sp in hair_Junction:
				self.lux_context.objectBegin(sn)
				self.lux_context.shape(st, sp)
				self.lux_context.objectEnd()
		
			for sn, si, st, sp in hair_Strand:
				self.lux_context.objectBegin(sn)
				self.lux_context.shape(st, sp)
				self.lux_context.objectEnd()
				
			for pindex in range(num_parents + num_children):
				det.exported_objects += 1
				points = []

				for step in range(0,steps):
					co = psys.co_hair(obj, mod, pindex, step)
					if not co.length_squared == 0:
						points.append(co)
						
				if psys.settings.use_hair_bspline:
					# Resample the B-spline so the exported polyline follows
					# the rendered hair shape
					temp = []
					degree = 2
					dimension = 3
					for i in range(math.trunc(math.pow(2,psys.settings.render_step))):
						if i > 0:
							u = i*(len(points)- degree)/math.trunc(math.pow(2,psys.settings.render_step)-1)-0.0000000000001
						else:
							u = i*(len(points)- degree)/math.trunc(math.pow(2,psys.settings.render_step)-1)
						temp.append(self.BSpline(points, dimension, degree, u))
					points = temp
			
				for j in range(len(points)-1):
					# Build a basis whose Z axis follows the segment so the
					# unit cylinder can be oriented along it
					# transpose SB so we can extract columns
					# TODO - change when matrix.col is available
					SB = obj.matrix_basis.transposed().to_3x3()
					SB = fix_matrix_order(SB) # matrix indexing hack
					v1 = points[j+1] - points[j]
					v2 = SB[2].cross(v1)
					v3 = v1.cross(v2)
					v2.normalize()
					v3.normalize()
					if any(v.length_squared == 0 for v in (v1, v2, v3)):
						#Use standard basis and scale according to segment length
						M = SB
						v = v1+v2+v3
						scale = v.length
						v.normalize()
						M = mathutils.Matrix.Scale(abs(scale),3,v)*M
					else:
						# v1, v2, v3 are the new columns
						# set as rows, transpose later
						M = mathutils.Matrix( (v3,v2,v1) )
						M = fix_matrix_order(M) # matrix indexing hack
					M = M.transposed().to_4x4()
					
					Mtrans = mathutils.Matrix.Translation(points[j])
					matrix = Mtrans * M
				
					self.exportShapeInstances(
						obj,
						hair_Strand,
						matrix=[matrix,None]
					)
					
					self.exportShapeInstances(
						obj,
						hair_Junction,
						matrix=[Mtrans,None]
					)

				# Cap the last point of the strand with a junction sphere
				matrix = mathutils.Matrix.Translation(points[len(points)-1])
				self.exportShapeInstances(
					obj,
					hair_Junction,
					matrix=[matrix,None]
				)
		
		# Restore viewport resolution and finish progress reporting
		psys.set_resolution(self.geometry_scene, obj, 'PREVIEW')
		det.stop()
		det.join()
		
		LuxLog('... done, exported %s hairs' % det.exported_objects)
예제 #14
0
    def render(self, scene):
        '''
        scene:  bpy.types.Scene

        Export the given scene to LuxRender.
        Choose from one of several methods depending on what needs to be
        rendered.

        Returns None
        '''

        with RENDERENGINE_luxrender.render_lock:  # just render one thing at a time
            prev_cwd = os.getcwd()
            try:
                self.LuxManager = None
                self.render_update_timer = None
                self.output_dir = efutil.temp_directory()
                self.output_file = 'default.png'

                if scene is None:
                    LuxLog('ERROR: Scene to render is not valid')
                    return

                if scene.name == 'preview':
                    self.render_preview(scene)
                    return

                if scene.display_settings.display_device != "sRGB":
                    LuxLog(
                        'WARNING: Colour Management not set to sRGB, render results may look too dark.'
                    )

                api_type, write_files = self.set_export_path(scene)

                # Exported files use paths relative to the export directory
                os.chdir(efutil.export_path)

                is_animation = hasattr(self,
                                       'is_animation') and self.is_animation
                # External luxrender renders of animations are collected into
                # a queue (.lxq) file and run after the last frame is exported
                make_queue = scene.luxrender_engine.export_type == 'EXT' and scene.luxrender_engine.binary_name == 'luxrender' and write_files

                if is_animation and make_queue:
                    queue_file = efutil.export_path + '%s.%s.lxq' % (
                        efutil.scene_filename(), bpy.path.clean_name(
                            scene.name))

                    # Open/reset a queue file
                    if scene.frame_current == scene.frame_start:
                        open(queue_file, 'w').close()

                    if hasattr(self, 'update_progress'):
                        fr = scene.frame_end - scene.frame_start
                        fo = scene.frame_current - scene.frame_start
                        # BUGFIX: guard against ZeroDivisionError when the
                        # animation is a single frame (frame_start == frame_end)
                        if fr > 0:
                            self.update_progress(fo / fr)

                exported_file = self.export_scene(scene)
                if exported_file == False:
                    return  # Export frame failed, abort rendering (cwd restored in finally)

                if is_animation and make_queue:
                    self.LuxManager = LuxManager.GetActive()
                    self.LuxManager.lux_context.worldEnd()
                    with open(queue_file, 'a') as qf:
                        qf.write("%s\n" % exported_file)

                    if scene.frame_current == scene.frame_end:
                        # run the queue
                        self.render_queue(scene, queue_file)
                else:
                    self.render_start(scene)

            except Exception as err:
                LuxLog('%s' % err)
                self.report({'ERROR'}, '%s' % err)
            finally:
                # BUGFIX: always restore the previous working directory; the
                # original only restored it on the fall-through path, so an
                # early return after a failed export leaked the chdir
                os.chdir(prev_cwd)
예제 #15
0
파일: Base.py 프로젝트: Zurkdahool/blendigo
 def get_channel(self, property_group, channel_name, channel_prop_name):
     """Build the export dict for one material channel.

     Reads the properties named '<channel_prop_name>_*' from
     property_group and returns a dict of the form {channel_name: ...}
     for the 'spectrum', 'texture' and 'shader' channel types.
     Returns an empty dict when the channel type is unrecognised, the
     texture name is empty, or the shader text lookup fails.

     Raises Exception when a referenced texture or image does not exist.
     """
     d = {}
     
     channel_type = getattr(property_group, channel_prop_name + '_type')
     
     if channel_type == 'spectrum':
         spectrum_type = getattr(property_group, channel_prop_name + '_SP_type')
         if spectrum_type == 'rgb':
             # RGB colour, pre-multiplied by an optional gain property
             d[channel_name] = {
                 'constant': rgb([i for i in getattr(property_group, channel_prop_name + '_SP_rgb') * getattr(property_group, channel_prop_name + '_SP_rgb_gain', 1.0)])
             }
         elif spectrum_type == 'uniform':
             # Uniform spectrum: base value scaled by a power-of-ten exponent
             d[channel_name] = {
                 'constant': uniform([
                     getattr(property_group, channel_prop_name + '_SP_uniform_val') * \
                     10**getattr(property_group, channel_prop_name + '_SP_uniform_exp')
                 ])
             }
         elif spectrum_type == 'blackbody':
             d[channel_name] = {
                 'constant': blackbody(
                     [getattr(property_group, channel_prop_name + '_SP_blackbody_temp')],
                     [getattr(property_group, channel_prop_name + '_SP_blackbody_gain')]
                 )
             }
     
     elif channel_type == 'texture':
         tex_name = getattr(property_group, channel_prop_name + '_TX_texture')
         
         if tex_name: # string is not empty
             # Register the texture only once per channel property name;
             # later calls reuse the cached index in found_texture_indices.
             if channel_prop_name not in self.found_texture_indices:
                 self.found_texture_indices.append(channel_prop_name)
                 
                 if not tex_name in bpy.data.textures:
                     raise Exception("Texture \"%s\" assigned to material \"%s\" doesn't exist!" %(tex_name, self.material_name))
                 
                 tex_property_group = bpy.data.textures[tex_name].indigo_texture
                 
                 if tex_property_group.image_ref == 'file':
                     relative_texture_path = efutil.path_relative_to_export(
                         getattr(tex_property_group, 'path')
                     )
                 elif tex_property_group.image_ref == 'blender':
                     if not tex_property_group.image in bpy.data.images:
                         raise Exception("Error with image reference on texture \"%s\"" % tex_name)
                     
                     img = bpy.data.images[tex_property_group.image]
                     
                     if img.filepath == '':
                         bl_img_path = 'blendigo_extracted_image_%s.png' % bpy.path.clean_name(tex_name)
                     else:
                         bl_img_path = img.filepath
                     
                     # Generated or packed images have no usable file on
                     # disk; render-save them into the frame's export folder.
                     if img.source != 'FILE' or img.packed_file:
                         bl_file_formatted = os.path.splitext(os.path.basename(bl_img_path))[0]
                         bl_file_formatted = '%s.%s' % (bl_file_formatted, self.scene.render.image_settings.file_format)
                         bl_img_path = os.path.join(
                             efutil.export_path,
                             efutil.scene_filename(),
                             bpy.path.clean_name(self.scene.name),
                             '%05d' % self.scene.frame_current,
                             bl_file_formatted
                         )
                         img.save_render(bl_img_path, self.scene)
                     
                     relative_texture_path = efutil.path_relative_to_export(bl_img_path)
                 
                 # ABC (brightness/scale/offset) values come either from the
                 # channel's own _TX_ properties or from the texture itself.
                 if not getattr(property_group, channel_prop_name + '_TX_abc_from_tex'):
                     abc_property_group = property_group
                     abc_prefix = channel_prop_name + '_TX_'
                 else:
                     abc_property_group = tex_property_group
                     abc_prefix = ''
                 
                 uv_set_name  = getattr(property_group, channel_prop_name + '_TX_uvset')
                 try:
                     uv_set_index = self.obj.data.uv_textures.keys().index(uv_set_name)
                 except:
                     # unknown / missing UV set name: fall back to the first set
                     uv_set_index = 0
                 
                 # NOTE(review): relative_texture_path is never assigned when
                 # image_ref is neither 'file' nor 'blender', which would
                 # raise NameError here — confirm image_ref is restricted to
                 # those two values.
                 self.found_textures.append({
                     'uv_set_index':    [uv_set_index], #getattr(property_group, channel_prop_name + '_TX_uv_index')],
                     'path':            [relative_texture_path],
                     'exponent':        [getattr(tex_property_group, 'gamma')],
                     'a':            [getattr(abc_property_group, abc_prefix + 'A')],
                     'b':            [getattr(abc_property_group, abc_prefix + 'B')],
                     'c':            [getattr(abc_property_group, abc_prefix + 'C')],
                     'smooth':        [str(getattr(property_group, channel_prop_name + '_TX_smooth')).lower()]
                 })
             
             # Reference the texture by its cached index
             d[channel_name] = {
                 'texture': {
                     'texture_index': [ self.found_texture_indices.index(channel_prop_name) ],
                 }
             }
     
     elif channel_type == 'shader':
         try:
             shader_name = getattr(property_group, channel_prop_name + '_SH_text')
             if not shader_name in bpy.data.texts:
                 raise Exception('Referenced Text "%s" for shader on material "%s" not found' % (shader_name, self.material_name))
             
             shader_text = '\n' + bpy.data.texts[shader_name].as_string()
             d[channel_name] = {
                 'shader': {
                     'shader': xml_cdata(shader_text)
                 }
             }
         except:
             # NOTE(review): bare except silently drops the shader channel,
             # including the missing-text error raised just above
             pass
     
     return d
예제 #16
0
    def handler_Duplis_PATH(self, obj, *args, **kwargs):
        """Export a Blender Hair particle system on ``obj`` to a Mitsuba
        .hair file and register the resulting 'hair' shape instance.

        Expects the particle system in kwargs['particle_system']. Logs an
        error and returns early when the kwarg is missing, when the system
        is not of type 'HAIR', or when a particle-system modifier on the
        object is disabled for rendering.

        Fix: the hair file is now opened with a context manager so the
        handle is closed even if an exception occurs mid-export (the
        original leaked the open file on error).
        """
        if not 'particle_system' in kwargs.keys():
            MtsLog('ERROR: handler_Duplis_PATH called without particle_system')
            return

        psys = kwargs['particle_system']

        if not psys.settings.type == 'HAIR':
            MtsLog(
                'ERROR: handler_Duplis_PATH can only handle Hair particle systems ("%s")'
                % psys.name)
            return

        # NOTE(review): this returns if ANY particle-system modifier is
        # hidden for render, not only the one owning psys; and the loop
        # variable 'mod' intentionally leaks out of the loop and is reused
        # in co_hair() below, where it refers to the LAST modifier on the
        # object. Behaviour preserved as-is — confirm against the target
        # Blender API before changing.
        for mod in obj.modifiers:
            if mod.type == 'PARTICLE_SYSTEM' and mod.show_render == False:
                return

        MtsLog('Exporting Hair system "%s"...' % psys.name)

        # particle_size is a diameter; halve it and scale down for the
        # exporter's unit convention (presumably mm -> m — TODO confirm).
        size = psys.settings.particle_size / 2.0 / 1000.0
        psys.set_resolution(self.geometry_scene, obj, 'RENDER')
        steps = 2**psys.settings.render_step
        num_parents = len(psys.particles)
        num_children = len(psys.child_particles)

        partsys_name = '%s_%s' % (obj.name, psys.name)
        det = DupliExportProgressThread()
        det.start(num_parents + num_children)

        # Put Hair files in frame-numbered subfolders to avoid
        # clobbering when rendering animations
        sc_fr = '%s/%s/%s/%05d' % (
            self.mts_context.meshes_dir, efutil.scene_filename(),
            bpy.path.clean_name(
                self.geometry_scene.name), self.visibility_scene.frame_current)
        if not os.path.exists(sc_fr):
            os.makedirs(sc_fr)

        hair_filename = '%s.hair' % bpy.path.clean_name(partsys_name)
        hair_file_path = '/'.join([sc_fr, hair_filename])

        shape_params = ParamSet().add_string(
            'filename',
            efutil.path_relative_to_export(hair_file_path)).add_float(
                'radius', size)
        mesh_definitions = []
        mesh_definition = (psys.name, psys.settings.material - 1, 'hair',
                           shape_params)
        mesh_definitions.append(mesh_definition)
        self.exportShapeInstances(obj, mesh_definitions)

        # strand points are written in object-local space
        transform = obj.matrix_world.inverted()
        with open(hair_file_path, 'w') as hair_file:
            for pindex in range(num_parents + num_children):
                det.exported_objects += 1
                points = []

                for step in range(0, steps + 1):
                    co = psys.co_hair(obj, mod, pindex, step)
                    if not co.length_squared == 0:
                        points.append(transform * co)

                if psys.settings.use_hair_bspline:
                    # Resample the B-spline guide curve at 2**render_step
                    # evenly spaced parameter values.
                    temp = []
                    degree = 2
                    dimension = 3
                    for i in range(
                            math.trunc(math.pow(2, psys.settings.render_step))):
                        if i > 0:
                            # epsilon keeps u strictly inside the valid domain
                            u = i * (len(points) - degree) / math.trunc(
                                math.pow(2, psys.settings.render_step) -
                                1) - 0.0000000000001
                        else:
                            u = i * (len(points) - degree) / math.trunc(
                                math.pow(2, psys.settings.render_step) - 1)
                        temp.append(self.BSpline(points, dimension, degree, u))
                    points = temp

                for p in points:
                    hair_file.write('%f %f %f\n' % (p[0], p[1], p[2]))

                # a blank line terminates one strand
                hair_file.write('\n')

        psys.set_resolution(self.geometry_scene, obj, 'PREVIEW')
        det.stop()
        det.join()

        MtsLog('... done, exported %s hairs' % det.exported_objects)
예제 #17
0
    def buildNativeMesh(self, obj):
        """Convert a supported blender object into a MESH, split it into
        parts according to face material assignment, and write a Mitsuba
        binary .serialized file for each part.

        Returns a list of mesh_definition tuples
        (mesh_name, material_index, shape_type, ParamSet); the list is
        empty when the object could not be converted to a mesh.

        Fix: uv_layer is now initialised to None up front — previously it
        was left unbound (NameError at the first 'if uv_layer' test) when
        UV textures existed but there was no active UV data.
        """

        try:
            mesh_definitions = []
            mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
            if mesh is None:
                raise UnexportableObjectException(
                    'Cannot create render/export mesh')

            # collate faces by material index
            ffaces_mats = {}
            mesh_faces = mesh.tessfaces
            for f in mesh_faces:
                mi = f.material_index
                if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
                ffaces_mats[mi].append(f)
            material_indices = ffaces_mats.keys()

            if len(mesh.materials) > 0 and mesh.materials[0] != None:
                mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
            else:
                mats = [(0, None)]

            for i, mat in mats:
                try:
                    if i not in material_indices: continue

                    # If this mesh/mat-index combo has already been processed, get it from the cache
                    mesh_cache_key = (self.geometry_scene, obj.data, i)
                    if self.allow_instancing(obj) and self.ExportedMeshes.have(
                            mesh_cache_key):
                        mesh_definitions.append(
                            self.ExportedMeshes.get(mesh_cache_key))
                        continue

                    # Put Serialized files in frame-numbered subfolders to avoid
                    # clobbering when rendering animations
                    sc_fr = '%s/%s/%s/%05d' % (
                        self.mts_context.meshes_dir, efutil.scene_filename(),
                        bpy.path.clean_name(self.geometry_scene.name),
                        self.visibility_scene.frame_current)
                    if not os.path.exists(sc_fr):
                        os.makedirs(sc_fr)

                    def make_serfilename():
                        # Serial-numbered name for this mesh/material part
                        ser_serial = self.ExportedSERs.serial(mesh_cache_key)
                        mesh_name = '%s_%04d_m%03d' % (obj.data.name,
                                                       ser_serial, i)
                        ser_filename = '%s.serialized' % bpy.path.clean_name(
                            mesh_name)
                        ser_path = '/'.join([sc_fr, ser_filename])
                        return mesh_name, ser_path

                    mesh_name, ser_path = make_serfilename()

                    # Ensure that all Serialized files have unique names
                    while self.ExportedSERs.have(ser_path):
                        mesh_name, ser_path = make_serfilename()

                    self.ExportedSERs.add(ser_path, None)

                    # skip writing the Serialized file when partial export is
                    # enabled and the object is unchanged since last export
                    skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
                    if not os.path.exists(ser_path) or not (
                            self.visibility_scene.mitsuba_engine.partial_export
                            and skip_exporting):

                        GeometryExporter.NewExportedObjects.add(obj)

                        # FIX: default to no UV layer; only use the active
                        # layer when it actually has data.
                        uv_layer = None
                        uv_textures = mesh.tessface_uv_textures
                        if len(uv_textures) > 0:
                            if uv_textures.active and uv_textures.active.data:
                                uv_layer = uv_textures.active.data

                        # Export data
                        points = array.array('d', [])
                        normals = array.array('d', [])
                        uvs = array.array('d', [])
                        ntris = 0
                        face_vert_indices = array.array(
                            'I', [])  # list of face vert indices

                        # Caches
                        vert_vno_indices = {
                        }  # mapping of vert index to exported vert index for verts with vert normals
                        vert_use_vno = set(
                        )  # Set of vert indices that use vert normals

                        vert_index = 0  # exported vert index
                        for face in ffaces_mats[i]:
                            fvi = []
                            for j, vertex in enumerate(face.vertices):
                                v = mesh.vertices[vertex]

                                if uv_layer:
                                    # Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
                                    uv_coord = (uv_layer[face.index].uv[j][0],
                                                1.0 -
                                                uv_layer[face.index].uv[j][1])

                                if face.use_smooth:
                                    # Smooth-shaded faces: identical
                                    # co/normal/uv tuples are de-duplicated
                                    if uv_layer:
                                        vert_data = (v.co[:], v.normal[:],
                                                     uv_coord)
                                    else:
                                        vert_data = (v.co[:], v.normal[:],
                                                     tuple())

                                    if vert_data not in vert_use_vno:
                                        vert_use_vno.add(vert_data)

                                        points.extend(vert_data[0])
                                        normals.extend(vert_data[1])
                                        uvs.extend(vert_data[2])

                                        vert_vno_indices[
                                            vert_data] = vert_index
                                        fvi.append(vert_index)

                                        vert_index += 1
                                    else:
                                        fvi.append(vert_vno_indices[vert_data])

                                else:
                                    # all face-vert-co-no are unique, we cannot
                                    # cache them
                                    points.extend(v.co[:])
                                    normals.extend(face.normal[:])
                                    if uv_layer: uvs.extend(uv_coord)

                                    fvi.append(vert_index)

                                    vert_index += 1

                            # For Mitsuba, we need to triangulate quad faces
                            face_vert_indices.extend(fvi[0:3])
                            ntris += 3
                            if len(fvi) == 4:
                                face_vert_indices.extend(
                                    (fvi[0], fvi[2], fvi[3]))
                                ntris += 3

                        del vert_vno_indices
                        del vert_use_vno

                        with open(ser_path, 'wb') as ser:
                            # create mesh flags
                            flags = 0
                            # turn on double precision
                            flags = flags | 0x2000
                            # turn on vertex normals
                            flags = flags | 0x0001
                            # turn on uv layer
                            if uv_layer:
                                flags = flags | 0x0002

                            # begin serialized mesh data (magic + version)
                            ser.write(struct.pack('<HH', 0x041C, 0x0004))

                            # zlib-compress the mesh payload
                            encoder = zlib.compressobj()
                            ser.write(
                                encoder.compress(struct.pack('<I', flags)))
                            ser.write(
                                encoder.compress(
                                    bytes(mesh_name + "_serialized\0",
                                          'latin-1')))
                            ser.write(
                                encoder.compress(
                                    struct.pack('<QQ', vert_index,
                                                int(ntris / 3))))
                            ser.write(encoder.compress(points.tostring()))
                            ser.write(encoder.compress(normals.tostring()))
                            if uv_layer:
                                ser.write(encoder.compress(uvs.tostring()))
                            ser.write(
                                encoder.compress(face_vert_indices.tostring()))
                            ser.write(encoder.flush())

                            # trailing dictionary: offset of mesh 0 and
                            # total mesh count
                            ser.write(struct.pack('<Q', 0))
                            ser.write(struct.pack('<I', 1))
                            # (redundant ser.close() removed — 'with' closes)

                        MtsLog('Binary Serialized file written: %s' %
                               (ser_path))
                    else:
                        MtsLog(
                            'Skipping already exported Serialized mesh: %s' %
                            mesh_name)

                    shape_params = ParamSet().add_string(
                        'filename', efutil.path_relative_to_export(ser_path))
                    if obj.data.mitsuba_mesh.normals == 'facenormals':
                        shape_params.add_boolean('faceNormals',
                                                 {'value': 'true'})

                    mesh_definition = (mesh_name, i, 'serialized',
                                       shape_params)
                    # Only export Shapegroup and cache this mesh_definition if we plan to use instancing
                    if self.allow_instancing(
                            obj) and self.exportShapeDefinition(
                                obj, mesh_definition):
                        shape_params = ParamSet().add_reference(
                            'id', '', mesh_name + '-shapegroup_%i' % (i))

                        mesh_definition = (mesh_name, i, 'instance',
                                           shape_params)
                        self.ExportedMeshes.add(mesh_cache_key,
                                                mesh_definition)

                    mesh_definitions.append(mesh_definition)

                except InvalidGeometryException as err:
                    MtsLog('Mesh export failed, skipping this mesh: %s' % err)

            del ffaces_mats
            bpy.data.meshes.remove(mesh)

        except UnexportableObjectException as err:
            MtsLog('Object export failed, skipping this object: %s' % err)

        return mesh_definitions
예제 #18
0
def get_output_filename(scene):
    """Return the output basename '<scenefile>.<scenename>.<frame>' for *scene*."""
    base = efutil.scene_filename()
    clean_scene_name = bpy.path.clean_name(scene.name)
    frame = scene.frame_current
    return '%s.%s.%05d' % (base, clean_scene_name, frame)
예제 #19
0
    def render(self, scene):
        """RenderEngine entry point: export *scene* to Mitsuba format and,
        depending on export_mode/render_mode, launch the renderer in GUI
        or CLI mode while streaming the framebuffer back to Blender.

        Logs an error and returns early when the scene or the Mitsuba
        binary path is missing; preview scenes are delegated to
        render_preview().

        Fix: Timer.isAlive() (removed in Python 3.9) replaced with the
        long-supported is_alive(); None comparisons use 'is'/'is not'.
        """
        if self is None or scene is None:
            MtsLog('ERROR: Scene is missing!')
            return
        if scene.mitsuba_engine.binary_path == '':
            MtsLog('ERROR: The binary path is unspecified!')
            return

        with self.render_lock:  # just render one thing at a time
            if scene.name == 'preview':
                self.render_preview(scene)
                return

            # Persist a valid binary path to the user config so later
            # sessions pick it up automatically.
            config_updates = {}
            binary_path = os.path.abspath(
                efutil.filesystem_path(scene.mitsuba_engine.binary_path))
            if os.path.isdir(binary_path) and os.path.exists(binary_path):
                config_updates['binary_path'] = binary_path

            try:
                for k, v in config_updates.items():
                    efutil.write_config_value('mitsuba', 'defaults', k, v)
            except Exception as err:
                MtsLog(
                    'WARNING: Saving Mitsuba configuration failed, please set your user scripts dir: %s'
                    % err)

            scene_path = efutil.filesystem_path(scene.render.filepath)
            if os.path.isdir(scene_path):
                output_dir = scene_path
            else:
                output_dir = os.path.dirname(scene_path)

            MtsLog('MtsBlend: Current directory = "%s"' % output_dir)
            output_basename = efutil.scene_filename() + '.%s.%05i' % (
                scene.name, scene.frame_current)

            result = SceneExporter(
                directory=output_dir,
                filename=output_basename,
            ).export(scene)

            if not result:
                MtsLog(
                    'Error while exporting -- check the console for details.')
                return

            if scene.mitsuba_engine.export_mode == 'render':

                MtsLog("MtsBlend: Launching renderer ..")
                if scene.mitsuba_engine.render_mode == 'gui':
                    MtsLaunch(scene.mitsuba_engine.binary_path, output_dir,
                              ['mtsgui', efutil.export_path])
                elif scene.mitsuba_engine.render_mode == 'cli':
                    # Derive the image path from the exported scene path by
                    # swapping the 4-char extension for the film's extension
                    output_file = efutil.export_path[:
                                                     -4] + "." + scene.camera.data.mitsuba_film.fileExtension
                    mitsuba_process = MtsLaunch(
                        scene.mitsuba_engine.binary_path, output_dir, [
                            'mitsuba', '-r',
                            str(scene.mitsuba_engine.refresh_interval), '-o',
                            output_file, efutil.export_path
                        ])
                    # Periodically pull the partial image while mitsuba runs
                    framebuffer_thread = MtsFilmDisplay()
                    framebuffer_thread.set_kick_period(
                        scene.mitsuba_engine.refresh_interval)
                    framebuffer_thread.begin(self, output_file,
                                             resolution(scene))
                    render_update_timer = None
                    while mitsuba_process.poll() is None and not self.test_break():
                        render_update_timer = threading.Timer(
                            1, self.process_wait_timer)
                        render_update_timer.start()
                        # is_alive() replaces isAlive(), removed in Py 3.9
                        if render_update_timer.is_alive():
                            render_update_timer.join()

                    # If we exit the wait loop (user cancelled) and mitsuba is still running, then send SIGINT
                    if mitsuba_process.poll() is None:
                        # Use SIGTERM because that's the only one supported on Windows
                        mitsuba_process.send_signal(subprocess.signal.SIGTERM)

                    # Stop updating the render result and load the final image
                    framebuffer_thread.stop()
                    framebuffer_thread.join()

                    if mitsuba_process.poll() is not None and mitsuba_process.returncode != 0:
                        MtsLog(
                            "MtsBlend: Rendering failed -- check the console")
                    else:
                        framebuffer_thread.kick(render_end=True)
                    framebuffer_thread.shutdown()
예제 #20
0
	def render(self, scene):
		"""RenderEngine entry point: export *scene* to Mitsuba format and,
		depending on export_mode/render_mode, launch the renderer in GUI
		or CLI mode while streaming the framebuffer back to Blender.

		Logs an error and returns early when the scene or the Mitsuba
		binary path is missing; preview scenes are delegated to
		render_preview().

		Fix: Timer.isAlive() (removed in Python 3.9) replaced with the
		long-supported is_alive(); None comparisons use 'is'/'is not'.
		"""
		if self is None or scene is None:
			MtsLog('ERROR: Scene is missing!')
			return
		if scene.mitsuba_engine.binary_path == '':
			MtsLog('ERROR: The binary path is unspecified!')
			return
		
		with self.render_lock:	# just render one thing at a time
			if scene.name == 'preview':
				self.render_preview(scene)
				return
			
			# Persist a valid binary path to the user config so later
			# sessions pick it up automatically.
			config_updates = {}
			binary_path = os.path.abspath(efutil.filesystem_path(scene.mitsuba_engine.binary_path))
			if os.path.isdir(binary_path) and os.path.exists(binary_path):
				config_updates['binary_path'] = binary_path
			
			try:
				for k, v in config_updates.items():
					efutil.write_config_value('mitsuba', 'defaults', k, v)
			except Exception as err:
				MtsLog('WARNING: Saving Mitsuba configuration failed, please set your user scripts dir: %s' % err)
			
			scene_path = efutil.filesystem_path(scene.render.filepath)
			if os.path.isdir(scene_path):
				output_dir = scene_path
			else:
				output_dir = os.path.dirname(scene_path)		
			
			MtsLog('MtsBlend: Current directory = "%s"' % output_dir)
			output_basename = efutil.scene_filename() + '.%s.%05i' % (scene.name, scene.frame_current)
			
			result = SceneExporter(
				directory = output_dir,
				filename = output_basename,
			).export(scene)
			
			if not result:
				MtsLog('Error while exporting -- check the console for details.')
				return
			
			if scene.mitsuba_engine.export_mode == 'render':
				
				MtsLog("MtsBlend: Launching renderer ..")
				if scene.mitsuba_engine.render_mode == 'gui':
					MtsLaunch(scene.mitsuba_engine.binary_path, output_dir,
						['mtsgui', efutil.export_path])
				elif scene.mitsuba_engine.render_mode == 'cli':
					# Derive the image path from the exported scene path by
					# swapping the 4-char extension for the film's extension
					output_file = efutil.export_path[:-4] + "." + scene.camera.data.mitsuba_film.fileExtension
					mitsuba_process = MtsLaunch(scene.mitsuba_engine.binary_path, output_dir,
						['mitsuba', '-r', str(scene.mitsuba_engine.refresh_interval),
							'-o', output_file, efutil.export_path]
					)
					# Periodically pull the partial image while mitsuba runs
					framebuffer_thread = MtsFilmDisplay()
					framebuffer_thread.set_kick_period(scene.mitsuba_engine.refresh_interval) 
					framebuffer_thread.begin(self, output_file, resolution(scene))
					render_update_timer = None
					while mitsuba_process.poll() is None and not self.test_break():
						render_update_timer = threading.Timer(1, self.process_wait_timer)
						render_update_timer.start()
						# is_alive() replaces isAlive(), removed in Py 3.9
						if render_update_timer.is_alive(): render_update_timer.join()
					
					# If we exit the wait loop (user cancelled) and mitsuba is still running, then send SIGINT
					if mitsuba_process.poll() is None:
						# Use SIGTERM because that's the only one supported on Windows
						mitsuba_process.send_signal(subprocess.signal.SIGTERM)
					
					# Stop updating the render result and load the final image
					framebuffer_thread.stop()
					framebuffer_thread.join()
					
					if mitsuba_process.poll() is not None and mitsuba_process.returncode != 0:
						MtsLog("MtsBlend: Rendering failed -- check the console")
					else:
						framebuffer_thread.kick(render_end=True)
					framebuffer_thread.shutdown()
예제 #21
0
    def execute(self, master_scene):
        try:
            if master_scene is None:
                #indigo_log('Scene context is invalid')
                raise Exception('Scene context is invalid')
            
            #------------------------------------------------------------------------------
            # Init stats
            if self.verbose: indigo_log('Indigo export started ...')
            export_start_time = time.time()
            
            igs_filename = self.check_output_path(self.properties.directory)
            export_scenes = [master_scene.background_set, master_scene]
            
            if self.verbose: indigo_log('Export render settings')
            
            #------------------------------------------------------------------------------
            # Start with render settings, this also creates the root <scene>
            self.scene_xml = master_scene.indigo_engine.build_xml_element(master_scene)
            
            # Export background light if no light exists.
            self.export_default_background_light(export_scenes)
            
            #------------------------------------------------------------------------------
            # Tonemapping
            self.export_tonemapping(master_scene)
            
            #------------------------------------------------------------------------------
            # Materials - always export the default clay material and a null material
            self.export_default_materials(master_scene)
            
            # Initialise values used for motion blur export.
            fps = master_scene.render.fps / master_scene.render.fps_base
            start_frame = master_scene.frame_current
            exposure = 1 / master_scene.camera.data.indigo_camera.exposure
            camera = (master_scene.camera, [])
            
            # Make a relative igs and mesh dir path like "TheAnimation/00002"
            rel_mesh_dir = efutil.scene_filename()
            rel_frame_dir = '%s/%05i' % (rel_mesh_dir, start_frame) #bpy.path.clean_name(master_scene.name), 
            mesh_dir = '/'.join([efutil.export_path, rel_mesh_dir])
            frame_dir = '/'.join([efutil.export_path, rel_frame_dir])
            
            # Initialise GeometryExporter.
            geometry_exporter = geometry.GeometryExporter()
            geometry_exporter.mesh_dir = mesh_dir
            geometry_exporter.rel_mesh_dir = rel_mesh_dir
            geometry_exporter.skip_existing_meshes = master_scene.indigo_engine.skip_existing_meshes
            geometry_exporter.verbose = self.verbose
            
            # Make frame_dir directory if it does not exist yet.
            if not os.path.exists(frame_dir):
                os.makedirs(frame_dir)
            
            if master_scene.indigo_engine.motionblur:
                # When motion blur is on, calculate the number of frames covered by the exposure time
                start_time = start_frame / fps
                end_time = start_time + exposure
                end_frame = math.ceil(end_time * fps)
                
                # end_frame + 1 because range is max excl
                frame_list = [x for x in range(start_frame, end_frame+1)]
            else:
                frame_list = [start_frame]
                
            #indigo_log('frame_list: %s'%frame_list)
            
            #------------------------------------------------------------------------------
            # Process all objects in all frames in all scenes.
            for cur_frame in frame_list:
                # Calculate normalised time for keyframes.
                normalised_time = (cur_frame - start_frame) / fps / exposure
                if self.verbose: indigo_log('Processing frame: %i time: %f'%(cur_frame, normalised_time))
                
                geometry_exporter.normalised_time = normalised_time
                bpy.context.scene.frame_set(cur_frame, 0.0)

                # Add Camera matrix.
                camera[1].append((normalised_time, camera[0].matrix_world.copy()))
            
                for ex_scene in export_scenes:
                    if ex_scene is None: continue
                    
                    if self.verbose: indigo_log('Processing objects for scene %s' % ex_scene.name)
                    geometry_exporter.iterateScene(ex_scene)
            
            #------------------------------------------------------------------------------
            # Export camera
            if self.verbose: indigo_log('Exporting camera')
            self.scene_xml.append(
                camera[0].data.indigo_camera.build_xml_element(master_scene, camera[1])
            )
            
            #------------------------------------------------------------------------------
            # Export light layers
            from indigo.export.light_layer import light_layer_xml
            # TODO:
            # light_layer_count was supposed to export correct indices when there
            # is a background_set with emitters on light layers -
            # however, the re-indexing at material export time is non-trivial for
            # now and probably not worth it.
            #light_layer_count = 0
            for ex_scene in export_scenes:
                if ex_scene is None: continue
                
                # Light layer names
                for layer_name, idx in ex_scene.indigo_lightlayers.enumerate().items():
                    if self.verbose: indigo_log('Light layer %i: %s' % (idx, layer_name))
                    self.scene_xml.append(
                        light_layer_xml().build_xml_element(ex_scene, idx, layer_name)
                    )
                    # light_layer_count += 1
            
            if self.verbose: indigo_log('Exporting lamps')
            
            # use special n==1 case due to bug in indigo <sum> material
            num_lamps = len(geometry_exporter.ExportedLamps)
            
            if num_lamps == 1:
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_mat = ET.Element('background_material')
                scene_background_settings.append(scene_background_settings_mat)
                
                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        scene_background_settings_mat.append(xml)
                
                self.scene_xml.append(scene_background_settings)
            
            if num_lamps > 1:
                
                scene_background_settings = ET.Element('background_settings')
                scene_background_settings_fmt = {
                    'background_material': {
                        'material': {
                            'name': ['background_material'],
                            'sum': { 'mat': xml_multichild() }
                        }
                    }
                }
                
                for ck, ci in geometry_exporter.ExportedLamps.items():
                    for xml in ci:
                        self.scene_xml.append(xml)
                    
                    scene_background_settings_fmt['background_material']['material']['sum']['mat'].append({
                        'mat_name': [ck],
                        'weight': {'constant': [1]}
                    })
                scene_background_settings_obj = xml_builder()
                scene_background_settings_obj.build_subelements(None, scene_background_settings_fmt, scene_background_settings)
                self.scene_xml.append(scene_background_settings)
            
            #------------------------------------------------------------------------------
            # Export Medium
            from indigo.export.materials.medium import medium_xml
            # TODO:
            # check if medium is currently used by any material and add 
            # basic medium for SpecularMaterial default

            for ex_scene in export_scenes:
                if ex_scene is None: continue
                
                indigo_material_medium = ex_scene.indigo_material_medium
                medium = indigo_material_medium.medium
                
                if len(indigo_material_medium.medium.items()) == 0 : continue
                
                for medium_name, medium_data in medium.items():
                    
                    medium_index = ex_scene.indigo_material_medium.medium.find(medium_name) # more precise if same name
                    
                    indigo_log('Exporting medium: %s ' % (medium_name))
                    self.scene_xml.append(
                        medium_xml(ex_scene, medium_name, medium_index, medium_data).build_xml_element(ex_scene, medium_name, medium_data)
                    )
                indigo_log('Exporting Medium: %s ' % (medium_name))         
                # TODO: 
                # check for unused medium	
            basic_medium = ET.fromstring("""
                                <medium>
                                   <uid>10200137</uid>
		                             <name>basic</name>
			                           <precedence>10</precedence>
			                             <basic>
				                           <ior>1.5</ior>
				                           <cauchy_b_coeff>0</cauchy_b_coeff>
				                           <max_extinction_coeff>1</max_extinction_coeff>
				                           <absorption_coefficient>
					                         <constant>
						                      <uniform>
							                   <value>0</value>
						                      </uniform>
					                         </constant>
				                           </absorption_coefficient>
			                             </basic>
	                            </medium>   
                         """)
            
            self.scene_xml.append(basic_medium)
            
            #------------------------------------------------------------------------------
            # Export used materials.
            if self.verbose: indigo_log('Exporting used materials')
            material_count = 0
            for ck, ci in geometry_exporter.ExportedMaterials.items():
                for xml in ci:
                    self.scene_xml.append(xml)
                material_count += 1
            if self.verbose: indigo_log('Exported %i materials' % material_count)
            
            # Export used meshes.
            if self.verbose: indigo_log('Exporting meshes')
            mesh_count = 0
            for ck, ci in geometry_exporter.MeshesOnDisk.items():
                mesh_name, xml = ci
                self.scene_xml.append(xml)
                mesh_count += 1
            if self.verbose: indigo_log('Exported %i meshes' % mesh_count)
            
            #------------------------------------------------------------------------------
            # We write object instances to a separate file
            oc = 0
            scene_data_xml = ET.Element('scenedata')
            for ck, ci in geometry_exporter.ExportedObjects.items():
                obj_type = ci[0]
                
                if obj_type == 'OBJECT':
                    obj = ci[1]
                    mesh_name = ci[2]
                    obj_matrices = ci[3]
                    scene = ci[4]
                    
                    xml = geometry.model_object(scene).build_xml_element(obj, mesh_name, obj_matrices)
                else:
                    xml = ci[1]
                    
                scene_data_xml.append(xml)
                oc += 1
            
            objects_file_name = '%s/objects.igs' % (
                frame_dir
            )
            
            objects_file = open(objects_file_name, 'wb')
            ET.ElementTree(element=scene_data_xml).write(objects_file, encoding='utf-8')
            objects_file.close()
            # indigo_log('Exported %i object instances to %s' % (oc,objects_file_name))
            scene_data_include = include.xml_include( efutil.path_relative_to_export(objects_file_name) )
            self.scene_xml.append( scene_data_include.build_xml_element(master_scene) )
            
            #------------------------------------------------------------------------------
            # Write formatted XML for settings, materials and meshes
            out_file = open(igs_filename, 'w')
            xml_str = ET.tostring(self.scene_xml, encoding='utf-8').decode()
            
            # substitute back characters protected from entity encoding in CDATA nodes
            xml_str = xml_str.replace('{_LESSTHAN_}', '<')
            xml_str = xml_str.replace('{_GREATERTHAN_}', '>')
            
            xml_dom = MD.parseString(xml_str)
            xml_dom.writexml(out_file, addindent='\t', newl='\n', encoding='utf-8')
            out_file.close()
            
            #------------------------------------------------------------------------------
            # Print stats
            export_end_time = time.time()
            if self.verbose: indigo_log('Total mesh export time: %f seconds' % (geometry_exporter.total_mesh_export_time))
            indigo_log('Export finished; took %f seconds' % (export_end_time-export_start_time))
            
            # Reset to start_frame.
            if len(frame_list) > 1:
                bpy.context.scene.frame_set(start_frame)
            
            return {'FINISHED'}
        
        except Exception as err:
            indigo_log('%s' % err, message_type='ERROR')
            if os.getenv('B25_OBJECT_ANALYSIS', False):
                raise err
            return {'CANCELLED'}
예제 #22
0
def convert_texture(scene, texture, variant_hint=None):
	"""Translate a Blender texture datablock into a LuxRender texture.
	
	Parameters:
		scene        -- the Blender scene being exported (used for frame
		                number, output paths and mapping params)
		texture      -- the bpy texture datablock to convert
		variant_hint -- optional 'float' or 'color' to force the variant
		                for image-backed textures; defaults to 'color'
	
	Returns:
		(variant, lux_tex_name, paramset) tuple describing the LuxRender
		texture plugin to instantiate.
	
	Raises:
		Exception if a referenced image file does not exist on disk.
	"""
	# Lux only supports blender's textures in float variant (except for image/ocean, but both of these are exported as imagemap)
	variant = 'float'
	paramset = ParamSet()
	
	lux_tex_name = 'blender_%s' % texture.type.lower()
	
	# Procedural textures are sampled in 3D; image-backed ones switch to 2D below.
	mapping_type = '3D'
	
	if texture.type not in ('IMAGE', 'OCEAN'):
		paramset.add_float('bright', texture.intensity)
		paramset.add_float('contrast', texture.contrast)
	
	if texture.type == 'BLEND':
		progression_map = {
			'LINEAR':			'lin',
			'QUADRATIC':		'quad',
			'EASING':			'ease',
			'DIAGONAL':			'diag',
			'SPHERICAL':		'sphere',
			'QUADRATIC_SPHERE':	'halo',
			'RADIAL':			'radial',
		}
		paramset.add_bool('flipxy', texture.use_flip_axis) \
				.add_string('type', progression_map[texture.progression])
	
	if texture.type == 'CLOUDS':
		paramset.add_string('noisetype', texture.noise_type.lower() ) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_integer('noisedepth', texture.noise_depth)
	
	if texture.type == 'DISTORTED_NOISE':
		lux_tex_name = 'blender_distortednoise'
		paramset.add_string('type', texture.noise_distortion.lower()) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('distamount', texture.distortion) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('nabla', texture.nabla)
	
	if texture.type == 'MAGIC':
		paramset.add_integer('noisedepth', texture.noise_depth) \
				.add_float('turbulence', texture.turbulence)
	
	if texture.type == 'MARBLE':
		paramset.add_string('type', texture.marble_type.lower() ) \
				.add_string('noisetype', texture.noise_type.lower() ) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_string('noisebasis2', texture.noise_basis_2.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('turbulence', texture.turbulence) \
				.add_integer('noisedepth', texture.noise_depth)
	
	if texture.type == 'MUSGRAVE':
		paramset.add_string('type', texture.musgrave_type.lower() ) \
				.add_float('h', texture.dimension_max) \
				.add_float('lacu', texture.lacunarity) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('octs', texture.octaves)
	
	# NOISE shows no params ?
	
	if texture.type == 'STUCCI':
		paramset.add_string('type', texture.stucci_type.lower() ) \
				.add_string('noisetype', texture.noise_type.lower() ) \
				.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('turbulence', texture.turbulence)
	
	if texture.type == 'VORONOI':
		distancem_map = {
			'DISTANCE': 'actual_distance',
			'DISTANCE_SQUARED': 'distance_squared',
			'MANHATTAN': 'manhattan',
			'CHEBYCHEV': 'chebychev',
			'MINKOVSKY_HALF': 'minkovsky_half',
			'MINKOVSKY_FOUR': 'minkovsky_four',
			'MINKOVSKY': 'minkovsky'
		}
		paramset.add_string('distmetric', distancem_map[texture.distance_metric]) \
				.add_float('minkovsky_exp', texture.minkovsky_exponent) \
				.add_float('noisesize', texture.noise_scale) \
				.add_float('nabla', texture.nabla) \
				.add_float('w1', texture.weight_1) \
				.add_float('w2', texture.weight_2) \
				.add_float('w3', texture.weight_3) \
				.add_float('w4', texture.weight_4)
	
	if texture.type == 'WOOD':
		paramset.add_string('noisebasis', texture.noise_basis.lower() ) \
				.add_string('noisebasis2', texture.noise_basis_2.lower() ) \
				.add_float('noisesize', texture.noise_scale) \
				.add_string('noisetype', texture.noise_type.lower() ) \
				.add_float('turbulence', texture.turbulence) \
				.add_string('type', texture.wood_type.lower() )
	
	# Translate Blender Image/movie into lux tex
	if texture.type == 'IMAGE' and texture.image and texture.image.source in ['GENERATED', 'FILE', 'SEQUENCE']:
		
		# Frame-numbered extraction dir avoids clobbering across animation frames.
		extract_path = os.path.join(
			efutil.scene_filename(),
			bpy.path.clean_name(scene.name),
			'%05d' % scene.frame_current
		)
		
		if texture.image.source == 'GENERATED':
			tex_image = 'luxblend_baked_image_%s.%s' % (bpy.path.clean_name(texture.name), scene.render.image_settings.file_format)
			tex_image = os.path.join(extract_path, tex_image)
			texture.image.save_render(tex_image, scene)
		
		if texture.image.source == 'FILE':
			if texture.image.packed_file:
				# Packed images must be extracted to disk for the renderer.
				tex_image = 'luxblend_extracted_image_%s.%s' % (bpy.path.clean_name(texture.name), scene.render.image_settings.file_format)
				tex_image = os.path.join(extract_path, tex_image)
				texture.image.save_render(tex_image, scene)
			else:
				if texture.library is not None:
					f_path = efutil.filesystem_path(bpy.path.abspath( texture.image.filepath, texture.library.filepath))
				else:
					f_path = efutil.filesystem_path(texture.image.filepath)
				if not os.path.exists(f_path):
					raise Exception('Image referenced in blender texture %s doesn\'t exist: %s' % (texture.name, f_path))
				tex_image = efutil.filesystem_path(f_path)

		if texture.image.source == 'SEQUENCE':
			if texture.image.packed_file:
				tex_image = 'luxblend_extracted_image_%s.%s' % (bpy.path.clean_name(texture.name), scene.render.image_settings.file_format)
				tex_image = os.path.join(extract_path, tex_image)
				texture.image.save_render(tex_image, scene)
			else:
				# sequence params from blender
				sequence = bpy.data.textures[(texture.name).replace('.001', '')].image_user # remove tex_preview extension to avoid error
				seqframes = sequence.frame_duration
				seqoffset = sequence.frame_offset
				seqstartframe = sequence.frame_start # the global frame at which the imagesequence starts
				seqcyclic = sequence.use_cyclic
				currentframe = scene.frame_current
				
				if texture.library is not None:
					f_path = efutil.filesystem_path(bpy.path.abspath( texture.image.filepath, texture.library.filepath))
				else:
					f_path = efutil.filesystem_path(texture.image.filepath)

				# Map the global frame to a 1-based sequence frame number,
				# clamping (or wrapping, when cyclic) outside the sequence range.
				if currentframe < seqstartframe:
					fnumber = 1 + seqoffset
				else:
					fnumber = currentframe - (seqstartframe-1) + seqoffset

				if fnumber > seqframes:
					if seqcyclic == False:
						fnumber = seqframes
					else:
						fnumber = (currentframe - (seqstartframe-1)) % seqframes
						if fnumber == 0:
							fnumber = seqframes

				import re
				def get_seq_filename(number, f_path):
					# Replace the rightmost digit-run in the path with the
					# zero-padded frame number (same padding width).
					m = re.findall(r'(\d+)', f_path)
					if len(m) == 0:
						return "ERR: Can't find pattern"
					
					rightmost_number = m[len(m)-1]
					seq_length = len(rightmost_number)
					
					nstr = "%i" %number
					new_seq_number = nstr.zfill(seq_length)
					
					return f_path.replace(rightmost_number, new_seq_number)

				f_path = get_seq_filename(fnumber, f_path)

				if not os.path.exists(f_path):
					raise Exception('Image referenced in blender texture %s doesn\'t exist: %s' % (texture.name, f_path))
				tex_image = efutil.filesystem_path(f_path)


		lux_tex_name = 'imagemap'
		sampling = texture.luxrender_texture.luxrender_tex_imagesampling
		if variant_hint:
			variant = variant_hint
		else:
			variant = 'color'
		paramset.add_string('filename', tex_image)
		# BUGFIX: was `variant_hint == float` (the builtin type), which could
		# never be true, so the 'channel' parameter was silently dropped.
		if variant_hint == 'float':
			paramset.add_string('channel', sampling.channel)
		paramset.add_integer('discardmipmaps', sampling.discardmipmaps)
		paramset.add_float('gain', sampling.gain)
		paramset.add_float('gamma', sampling.gamma)
		paramset.add_float('maxanisotropy', sampling.maxanisotropy)
		paramset.add_string('wrap', sampling.wrap)
		mapping_type = '2D'
	
	# Similar to image handler, but for Ocean tex
	if texture.type == 'OCEAN':
		if texture.ocean.output == 'FOAM':

			ocean_mods = [m for m in texture.ocean.ocean_object.modifiers if m.type == 'OCEAN']
			if len(ocean_mods) == 0:
				print ('No ocean modifiers!')
			else:
				ocean_mod = ocean_mods[0]
			
			# NOTE(review): if there are no ocean modifiers, ocean_mod is
			# unbound here and the next line raises NameError — confirm
			# whether an early return/skip is wanted in that case.
			if texture.ocean.output == 'FOAM':
				tex_image = efutil.filesystem_path(os.path.join(ocean_mod.filepath, 'foam_%04d.exr' % scene.frame_current)) 
			#SOON! (until 3D disp support...)
			#elif texture.ocean.output == 'DISPLACEMENT':
				#tex_image = os.path.join(ocean_mod.filepath, 'disp_%04d.exr' % scene.frame_current) 
			
			lux_tex_name = 'imagemap'
			if variant_hint:
				variant = variant_hint
			else:
				variant = 'color'
			paramset.add_string('filename', tex_image)
			paramset.add_float('gamma', 1.0)
			mapping_type = '2D'
			
		else:
			lux_tex_name = 'constant'
		
	# Attach the appropriate texture-space mapping parameters.
	if mapping_type == '3D':
		paramset.update( texture.luxrender_texture.luxrender_tex_transform.get_paramset(scene) )
	else:
		paramset.update( texture.luxrender_texture.luxrender_tex_mapping.get_paramset(scene) )
	
	return variant, lux_tex_name, paramset
예제 #23
0
  def render(self, scene):
      """Export the scene to a Sunflow .sc file and launch the renderer.

      Holds render_lock so only one render runs at a time, spawns the
      Sunflow java process, and polls it while a framebuffer thread
      periodically refreshes Blender's render result.  Returns None.
      """
      # Defensive guard kept from the original implementation.
      if self is None or scene is None:
          sunflowLog('ERROR: Scene is missing!')
          return

      scene.render.use_placeholder = False

      with self.render_lock:  # just render one thing at a time

          if scene.name == 'preview':
              self.render_preview(scene)
              return

          # Resolve the output directory from the render filepath.
          scene_path = efutil.filesystem_path(scene.render.filepath)
          if os.path.isdir(scene_path):
              output_dir = scene_path
          else:
              output_dir = os.path.dirname(scene_path)

          output_dir = os.path.abspath(os.path.join(output_dir, efutil.scene_filename()))
          if not os.path.exists(output_dir):
              os.mkdir(output_dir)

          # Export the scene file; bail out if the exporter failed.
          if not getExporter(output_dir, scene.name, scene.frame_current):
              return

          if self.is_animation:
              return

          arguments = self.getCommandLineArgs(scene)

          # Assemble the java command line from user configuration.
          jarpath = efutil.find_config_value('sunflow', 'defaults', 'jar_path', '')
          javapath = efutil.find_config_value('sunflow', 'defaults', 'java_path', '')
          memory = "-Xmx%sm" % efutil.find_config_value('sunflow', 'defaults', 'memoryalloc', '')
          image_name = "%s.%03d.%s" % (scene.name, scene.frame_current, arguments['format'])

          if scene.sunflow_performance.useRandom:
              image_name = self.check_randomname(output_dir, image_name)

          sunflow_file = "%s.%03d.sc" % (scene.name, scene.frame_current)
          image_file = os.path.abspath(os.path.join(output_dir, image_name))
          sc_file_path = os.path.abspath(os.path.join(output_dir, sunflow_file))

          cmd_line = [javapath, memory, '-server', '-jar', jarpath]
          final_line = ['-o', image_file, sc_file_path]

          extra = []
          for key in arguments:
              if key == 'format':
                  continue
              if arguments[key] != '':
                  values = arguments[key].split()
                  extra.extend(values)

          if arguments['format'] != 'png':
              extra.append('-nogui')

          cmd_line.extend(extra)
          cmd_line.extend(final_line)

          sunflow_process = subprocess.Popen(cmd_line)
          refresh_interval = 5

          framebuffer_thread = sunflowFilmDisplay()
          framebuffer_thread.set_kick_period(refresh_interval)
          framebuffer_thread.begin(self, image_file, resolution(scene))
          render_update_timer = None
          # BUGFIX: use 'is None' (identity) instead of '== None', and
          # is_alive() — the isAlive() alias was removed in Python 3.9.
          while sunflow_process.poll() is None and not self.test_break():
              render_update_timer = threading.Timer(1, self.process_wait_timer)
              render_update_timer.start()
              if render_update_timer.is_alive():
                  render_update_timer.join()

          # If we exit the wait loop (user cancelled) and sunflow is still running, then send SIGTERM
          if sunflow_process.poll() is None:
              # Use SIGTERM because that's the only one supported on Windows
              sunflow_process.send_signal(subprocess.signal.SIGTERM)

          # Stop updating the render result and load the final image
          framebuffer_thread.stop()
          framebuffer_thread.join()

          if sunflow_process.poll() is not None and sunflow_process.returncode != 0:
              sunflowLog("Sunflow: Rendering failed -- check the console")
          else:
              framebuffer_thread.kick(render_end=True)
          framebuffer_thread.shutdown()
예제 #24
0
    def render(self, context):
        '''
        Render the scene file, or in our case, export the frame(s)
        and launch an Indigo process.

        context -- the scene being rendered (Blender passes the scene
                   as 'context' to RenderEngine.render).
        Returns None.
        '''

        with RENDERENGINE_indigo.render_lock:    # Just render one thing at a time.
            self.renderer            = None
            self.message_thread      = None
            self.stats_thread        = None
            self.framebuffer_thread  = None
            self.render_update_timer = None
            self.rendering           = False

            # force scene update to current rendering frame
            # Not sure why - Yves
            #context.frame_set(context.frame_current)

            #------------------------------------------------------------------------------
            # Export the Scene

            # Get the frame path.
            frame_path = efutil.filesystem_path(context.render.frame_path())

            # Get the filename for the frame sans extension.
            image_out_path = os.path.splitext(frame_path)[0]

            # Generate the name for the scene file(s).
            if context.indigo_engine.use_output_path == True:
                # Get the output path from the frame path.
                output_path = os.path.dirname(frame_path)

                # Generate the output filename
                output_filename = '%s.%s.%05i.igs' % (efutil.scene_filename(), bpy.path.clean_name(context.name), context.frame_current)
            else:
                # Get export path from the indigo_engine.
                export_path = efutil.filesystem_path(context.indigo_engine.export_path)

                # Get the directory name from the output path.
                output_path = os.path.dirname(export_path)

                # Get the filename from the output path and remove the extension.
                output_filename = os.path.splitext(os.path.basename(export_path))[0]

                # Count contiguous # chars and replace them with the frame number.
                # If the hash count is 0 and we are exporting an animation, append the frame numbers.
                hash_count = util.count_contiguous('#', output_filename)
                if hash_count != 0:
                    output_filename = output_filename.replace('#'*hash_count, ('%%0%0ii'%hash_count)%context.frame_current)
                elif self.is_animation:
                    output_filename = output_filename + ('%%0%0ii'%4)%context.frame_current

                # Add .igs extension.
                output_filename += '.igs'


            # The full path of the exported scene file.
            exported_file = '/'.join([
                output_path,
                output_filename
            ])

            # Create output_path if it does not exist.
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            # If an animation is rendered, write an indigo queue file (.igq).
            if self.is_animation:
                igq_filename = '%s/%s.%s.igq'%(output_path, efutil.scene_filename(), bpy.path.clean_name(context.name))

                if context.frame_current == context.frame_start:
                    # Start a new igq file.
                    igq_file = open(igq_filename, 'w')
                    igq_file.write('<?xml version="1.0" encoding="utf-8" standalone="no" ?>\n')
                    igq_file.write('<render_queue>\n')
                else:
                    # Append to existing igq.
                    igq_file = open(igq_filename, 'a')

                # Seed the RNG with the frame number so re-exports are reproducible.
                rnd = random.Random()
                rnd.seed(context.frame_current)

                # Write igq item.
                igq_file.write('\t<item>\n')
                igq_file.write('\t\t<scene_path>%s</scene_path>\n' % exported_file)
                igq_file.write('\t\t<halt_time>%d</halt_time>\n' % context.indigo_engine.halttime)
                igq_file.write('\t\t<halt_spp>%d</halt_spp>\n' % context.indigo_engine.haltspp)
                igq_file.write('\t\t<output_path>%s</output_path>\n' % image_out_path)
                igq_file.write('\t\t<seed>%s</seed>\n' % rnd.randint(1, 1000000))
                igq_file.write('\t</item>\n')

                # If this is the last frame, write the closing tag.
                if context.frame_current == context.frame_end:
                    igq_file.write('</render_queue>\n')

                igq_file.close()

                # Calculate the progress by frame with frame range (fr) and frame offset (fo).
                fr = context.frame_end - context.frame_start
                fo = context.frame_current - context.frame_start
                # BUGFIX: guard against ZeroDivisionError for a 1-frame animation range.
                self.update_progress(fo/fr if fr > 0 else 1.0)

            scene_writer = indigo.operators._Impl_OT_indigo(
                directory = output_path,
                filename = output_filename
            ).set_report(self.report)

            # Write the scene file.
            export_result = scene_writer.execute(context)

            # Return if the export didn't finish.
            if not 'FINISHED' in export_result:
                return

            #------------------------------------------------------------------------------
            # Update indigo defaults config file .
            config_updates = {
                'auto_start': context.indigo_engine.auto_start,
                'console_output': context.indigo_engine.console_output
            }

            if context.indigo_engine.use_console:
                indigo_path = getConsolePath(context)
            else:
                indigo_path = getGuiPath(context)

            if os.path.exists(indigo_path):
                config_updates['install_path'] = getInstallPath(context)

            try:
                for k,v in config_updates.items():
                    efutil.write_config_value('indigo', 'defaults', k, v)
            except Exception as err:
                indigo_log('Saving indigo config failed: %s' % err, message_type='ERROR')

            # Make sure that the Indigo we are going to launch is at least as
            # new as the exporter version.
            version_ok = True
            if not context.indigo_engine.skip_version_check:
                iv = getVersion(context)
                for i in range(3):
                    version_ok &= iv[i]>=bl_info['version'][i]

            #------------------------------------------------------------------------------
            # Conditionally Spawn Indigo.
            if context.indigo_engine.auto_start:

                exe_path = efutil.filesystem_path( indigo_path )

                if not os.path.exists(exe_path):
                    print("Failed to find indigo at '" + str(exe_path) + "'")
                    msg = "Failed to find indigo at '" + str(exe_path) + "'."
                    # BUGFIX: was `msg + "\n  "` — an expression statement with
                    # no effect, so the newline was silently dropped.
                    msg += "\n  "
                    msg += "Please make sure you have Indigo installed, and that the path to indigo in the 'Indigo Render Engine Settings' is set correctly."
                    self.report({'ERROR'}, msg)

                #if not version_ok:
                    #indigo_log("Unsupported version v%s; Cannot start Indigo with this scene" % ('.'.join(['%s'%i for i in iv])), message_type='ERROR')
                    #return

                # if it's an animation, don't execute until final frame
                if self.is_animation and context.frame_current != context.frame_end:
                    return

                # if animation and final frame, launch queue instead of single frame
                if self.is_animation and context.frame_current == context.frame_end:
                    exported_file = igq_filename
                    indigo_args = [
                        exe_path,
                        exported_file
                    ]
                else:
                    indigo_args = [
                        exe_path,
                        exported_file,
                        '-o',
                        image_out_path + '.png'
                    ]

                # Set master or working master command line args.
                if context.indigo_engine.network_mode == 'master':
                    indigo_args.extend(['-n', 'm'])
                elif context.indigo_engine.network_mode == 'working_master':
                    indigo_args.extend(['-n', 'wm'])

                # Set port arg if network rendering is enabled.
                if context.indigo_engine.network_mode in ['master', 'working_master']:
                    indigo_args.extend([
                        '-p',
                        '%i' % context.indigo_engine.network_port
                    ])

                # Set hostname and port arg.
                if context.indigo_engine.network_mode == 'manual':
                    indigo_args.extend([
                        '-h',
                        '%s:%i' % (context.indigo_engine.network_host, context.indigo_engine.network_port)
                ])

                # indigo_log("Starting indigo: %s" % indigo_args)

                # If we're starting a console or should wait for the process, listen to the output.
                if context.indigo_engine.use_console or context.indigo_engine.wait_for_process:
                    f_stdout = subprocess.PIPE
                else:
                    f_stdout = None

                # Launch the Indigo process.
                indigo_proc = subprocess.Popen(indigo_args, stdout=f_stdout)
                indigo_pid = indigo_proc.pid
                indigo_log('Started Indigo process, PID: %i' % indigo_pid)

                # Wait for the render to finish if we use the console or should wait for the process.
                if context.indigo_engine.use_console or context.indigo_engine.wait_for_process:
                    # 'is None' identity check instead of '== None'.
                    while indigo_proc.poll() is None:
                        indigo_proc.communicate()
                        time.sleep(2)

                    indigo_proc.wait()
                    if not indigo_proc.stdout.closed:
                        indigo_proc.communicate()
                    if indigo_proc.returncode == -1:
                        sys.exit(-1)

            else:
                indigo_log("Scene was exported to %s" % exported_file)

            #------------------------------------------------------------------------------
            # Finished
            return
예제 #25
0
def get_output_filename(scene):
	"""Build the frame-specific output base name: <scenefile>.<scene>.<frame>."""
	frame_tag = '%05d' % scene.frame_current
	return '.'.join([efutil.scene_filename(), bpy.path.clean_name(scene.name), frame_tag])
예제 #26
0
	def buildBinaryPLYMesh(self, obj):
		"""
		Convert a supported blender object into a render MESH, split it into
		parts by face material index, and write each part to a binary PLY
		file (skipped when an identical file was already exported and partial
		export allows it).
		
		obj -- blender object to export.
		
		Returns a list of mesh_definition tuples:
			(mesh_name, material_index, 'ply' | 'instance', ParamSet)
		'instance' entries are emitted when the mesh was wrapped in a
		shapegroup for instancing.
		"""
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate tessellated faces by material index
			ffaces_mats = {}
			mesh_faces = mesh.tessfaces
			for f in mesh_faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			if len(mesh.materials) > 0 and mesh.materials[0] != None:
				mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
			else:
				mats = [(0, None)]
			
			for i, mat in mats:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put PLY files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (self.mts_context.meshes_dir, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_plyfilename():
						# serial number keeps repeated exports of the same mesh unique
						ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ply_serial, i)
						ply_filename = '%s.ply' % bpy.path.clean_name(mesh_name)
						ply_path = '/'.join([sc_fr, ply_filename])
						return mesh_name, ply_path
					
					mesh_name, ply_path = make_plyfilename()
					
					# Ensure that all PLY files have unique names
					while self.ExportedPLYs.have(ply_path):
						mesh_name, ply_path = make_plyfilename()
					
					self.ExportedPLYs.add(ply_path, None)
					
					# skip writing the PLY file if partial export is enabled and
					# the object is known-exported and unmodified
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ply_path) or not (self.visibility_scene.mitsuba_engine.partial_export and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# FIX: uv_layer previously stayed unbound (UnboundLocalError)
						# when uv_textures was non-empty but had no active data layer;
						# default to None so the mesh simply exports without UVs.
						uv_layer = None
						uv_textures = mesh.tessface_uv_textures
						if len(uv_textures) > 0 and uv_textures.active and uv_textures.active.data:
							uv_layer = uv_textures.active.data
						
						# Here we work out exactly which vert+normal combinations
						# we need to export. This is done first, and the export
						# combinations cached before writing to file because the
						# number of verts needed needs to be written in the header
						# and that number is not known before this is done.
						
						# Export data
						ntris = 0
						co_no_uv_cache = []
						face_vert_indices = []		# mapping of face index to list of exported vert indices for that face
						
						# Caches
						vert_vno_indices = {}		# mapping of vert data to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert data tuples that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if uv_layer:
									# Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
									uv_coord = (uv_layer[face.index].uv[j][0], 1.0 - uv_layer[face.index].uv[j][1])
								
								if face.use_smooth:
									# smooth faces share verts via vertex normals, so dedupe
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_coord )
									else:
										vert_data = (v.co[:], v.normal[:] )
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add(vert_data)
										
										co_no_uv_cache.append( vert_data )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									# flat faces use the face normal per corner
									if uv_layer:
										vert_data = (v.co[:], face.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], face.normal[:])
									
									# All face-vert-co-no are unique, we cannot
									# cache them
									co_no_uv_cache.append( vert_data )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							# For Mitsuba, we need to triangulate quad faces
							face_vert_indices.append( fvi[0:3] )
							ntris += 3
							if len(fvi) == 4:
								face_vert_indices.append(( fvi[0], fvi[2], fvi[3] ))
								ntris += 3
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ply_path, 'wb') as ply:
							ply.write(b'ply\n')
							ply.write(b'format binary_little_endian 1.0\n')
							ply.write(b'comment Created by MtsBlend 2.5 exporter for Mitsuba - www.mitsuba.net\n')
							
							# vert_index == the number of actual verts needed
							ply.write( ('element vertex %d\n' % vert_index).encode() )
							ply.write(b'property float x\n')
							ply.write(b'property float y\n')
							ply.write(b'property float z\n')
							
							ply.write(b'property float nx\n')
							ply.write(b'property float ny\n')
							ply.write(b'property float nz\n')
							
							if uv_layer:
								ply.write(b'property float s\n')
								ply.write(b'property float t\n')
							
							# ntris counts triangle corners (3 per tri), hence / 3
							ply.write( ('element face %d\n' % int(ntris / 3)).encode() )
							ply.write(b'property list uchar uint vertex_indices\n')
							
							ply.write(b'end_header\n')
							
							# dump cached co/no/uv
							if uv_layer:
								for co,no,uv in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
									ply.write( struct.pack('<2f', *uv) )
							else:
								for co,no in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
							
							# dump face vert indices (always triangles)
							for face in face_vert_indices:
								ply.write( struct.pack('<B', 3) )
								ply.write( struct.pack('<3I', *face) )
							
							del co_no_uv_cache
							del face_vert_indices
						
						MtsLog('Binary PLY file written: %s' % (ply_path))
					else:
						MtsLog('Skipping already exported PLY: %s' % mesh_name)
					
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ply_path)
					)
					if obj.data.mitsuba_mesh.normals == 'facenormals':
						shape_params.add_boolean('faceNormals', {'value' : 'true'})
					
					mesh_definition = (
						mesh_name,
						i,
						'ply',
						shape_params
					)
					# Only export Shapegroup and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj) and self.exportShapeDefinition(obj, mesh_definition):
						shape_params = ParamSet().add_reference(
							'id',
							'',
							mesh_name + '-shapegroup_%i' % (i)
						)
						
						mesh_definition = (
							mesh_name,
							i,
							'instance',
							shape_params
						)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
					
					mesh_definitions.append( mesh_definition )
					
				except InvalidGeometryException as err:
					MtsLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			# free the temporary render mesh created by to_mesh()
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			MtsLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
예제 #27
0
	def render(self, scene):
		'''
		scene:	bpy.types.Scene
		
		Export the given scene to LuxRender.
		Choose from one of several methods depending on what needs to be rendered:
		material preview, animation queue, or single-frame render.
		
		Returns None
		'''
		
		with RENDERENGINE_luxrender.render_lock:	# just render one thing at a time
			prev_cwd = os.getcwd()
			try:
				# Reset per-render state; output defaults are overwritten later
				self.LuxManager				= None
				self.render_update_timer	= None
				self.output_dir				= efutil.temp_directory()
				self.output_file			= 'default.png'
				
				if scene is None:
					LuxLog('ERROR: Scene to render is not valid')
					return
				
				if scene.name == 'preview':
					# Material preview rendering takes a dedicated code path
					self.render_preview(scene)
					return

				if scene.display_settings.display_device != "sRGB":
					LuxLog('WARNING: Colour Management not set to sRGB, render results may look too dark.')
				
				api_type, write_files = self.set_export_path(scene)
				
				# Export is performed relative to the export path
				os.chdir(efutil.export_path)
				
				is_animation = hasattr(self, 'is_animation') and self.is_animation
				# Queue mode: external luxrender binary + scene files written to disk
				make_queue = scene.luxrender_engine.export_type == 'EXT' and scene.luxrender_engine.binary_name == 'luxrender' and write_files
				
				if is_animation and make_queue:
					queue_file = efutil.export_path + '%s.%s.lxq' % (efutil.scene_filename(), bpy.path.clean_name(scene.name))
					
					# Open/reset a queue file
					if scene.frame_current == scene.frame_start:
						open(queue_file, 'w').close()
					
					# Report animation progress as fraction of the frame range
					if hasattr(self, 'update_progress'):
						fr = scene.frame_end - scene.frame_start
						fo = scene.frame_current - scene.frame_start
						self.update_progress(fo/fr)
				
				exported_file = self.export_scene(scene)
				if exported_file == False:
					return	# Export frame failed, abort rendering
				
				if is_animation and make_queue:
					# Append this frame's scene file to the queue; render the
					# whole queue once the last frame has been exported
					self.LuxManager = LuxManager.GetActive()
					self.LuxManager.lux_context.worldEnd()
					with open(queue_file, 'a') as qf:
						qf.write("%s\n" % exported_file)
					
					if scene.frame_current == scene.frame_end:
						# run the queue
						self.render_queue(scene, queue_file)
				else:
					self.render_start(scene)
			
			except Exception as err:
				LuxLog('%s'%err)
				self.report({'ERROR'}, '%s'%err)
			
			# Always restore the previous working directory
			os.chdir(prev_cwd)
예제 #28
0
	def buildNativeMesh(self, obj):
		"""
		Convert a supported blender object into a render MESH, split it into
		parts by face material index, and write each part to a Mitsuba binary
		serialized mesh file (skipped when an identical file was already
		exported and partial export allows it).
		
		obj -- blender object to export.
		
		Returns a list of mesh_definition tuples:
			(mesh_name, material_index, 'serialized' | 'instance', ParamSet)
		'instance' entries are emitted when the mesh was wrapped in a
		shapegroup for instancing.
		"""
		
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate tessellated faces by material index
			ffaces_mats = {}
			mesh_faces = mesh.tessfaces
			for f in mesh_faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			if len(mesh.materials) > 0 and mesh.materials[0] != None:
				mats = [(i, mat) for i, mat in enumerate(mesh.materials)]
			else:
				mats = [(0, None)]
			
			for i, mat in mats:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat-index combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put Serialized files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (self.mts_context.meshes_dir, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_serfilename():
						# serial number keeps repeated exports of the same mesh unique
						ser_serial = self.ExportedSERs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ser_serial, i)
						ser_filename = '%s.serialized' % bpy.path.clean_name(mesh_name)
						ser_path = '/'.join([sc_fr, ser_filename])
						return mesh_name, ser_path
					
					mesh_name, ser_path = make_serfilename()
					
					# Ensure that all Serialized files have unique names
					while self.ExportedSERs.have(ser_path):
						mesh_name, ser_path = make_serfilename()
					
					self.ExportedSERs.add(ser_path, None)
					
					# skip writing the Serialized file if partial export is enabled
					# and the object is known-exported and unmodified
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ser_path) or not (self.visibility_scene.mitsuba_engine.partial_export and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# FIX: uv_layer previously stayed unbound (UnboundLocalError)
						# when uv_textures was non-empty but had no active data layer;
						# default to None so the mesh simply exports without UVs.
						uv_layer = None
						uv_textures = mesh.tessface_uv_textures
						if len(uv_textures) > 0 and uv_textures.active and uv_textures.active.data:
							uv_layer = uv_textures.active.data
						
						# Export data
						points = array.array('d',[])
						normals = array.array('d',[])
						uvs = array.array('d',[])
						ntris = 0
						face_vert_indices = array.array('I',[])		# list of face vert indices
						
						# Caches
						vert_vno_indices = {}		# mapping of vert data to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert data tuples that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if uv_layer:
									# Flip UV Y axis. Blender UV coord is bottom-left, Mitsuba is top-left.
									uv_coord = (uv_layer[face.index].uv[j][0], 1.0 - uv_layer[face.index].uv[j][1])
								
								if face.use_smooth:
									# smooth faces share verts via vertex normals, so dedupe
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_coord )
									else:
										vert_data = (v.co[:], v.normal[:], tuple() )
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add(vert_data)
										
										points.extend( vert_data[0] )
										normals.extend( vert_data[1] )
										uvs.extend( vert_data[2] )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									# all face-vert-co-no are unique, we cannot
									# cache them
									points.extend( v.co[:] )
									normals.extend( face.normal[:] )
									if uv_layer: uvs.extend( uv_coord )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							# For Mitsuba, we need to triangulate quad faces
							face_vert_indices.extend( fvi[0:3] )
							ntris += 3
							if len(fvi) == 4:
								face_vert_indices.extend(( fvi[0], fvi[2], fvi[3] ))
								ntris += 3
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ser_path, 'wb') as ser:
							# create mesh flags
							flags = 0
							# turn on double precision
							flags = flags | 0x2000
							# turn on vertex normals
							flags = flags | 0x0001
							# turn on uv layer
							if uv_layer:
								flags = flags | 0x0002
							
							# begin serialized mesh data (format magic and version words)
							ser.write(struct.pack('<HH', 0x041C, 0x0004))
							
							# encode serialized mesh as a zlib-compressed payload
							encoder = zlib.compressobj()
							ser.write(encoder.compress(struct.pack('<I', flags)))
							ser.write(encoder.compress(bytes(mesh_name + "_serialized\0",'latin-1')))
							ser.write(encoder.compress(struct.pack('<QQ', vert_index, int(ntris/3))))
							# FIX: array.tostring() is a deprecated alias removed in
							# Python 3.9; tobytes() is the identical replacement.
							ser.write(encoder.compress(points.tobytes()))
							ser.write(encoder.compress(normals.tobytes()))
							if uv_layer:
								ser.write(encoder.compress(uvs.tobytes()))
							ser.write(encoder.compress(face_vert_indices.tobytes()))
							ser.write(encoder.flush())
							
							# trailer words: presumably the mesh offset table (single
							# mesh at offset 0) and mesh count -- per Mitsuba's
							# serialized format; verify against format docs.
							ser.write(struct.pack('<Q', 0))
							ser.write(struct.pack('<I', 1))
							# NOTE: redundant explicit ser.close() removed; the
							# with-block already closes the file.
						
						MtsLog('Binary Serialized file written: %s' % (ser_path))
					else:
						MtsLog('Skipping already exported Serialized mesh: %s' % mesh_name)
					
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ser_path)
					)
					if obj.data.mitsuba_mesh.normals == 'facenormals':
						shape_params.add_boolean('faceNormals', {'value' : 'true'})
					
					mesh_definition = (
						mesh_name,
						i,
						'serialized',
						shape_params
					)
					# Only export Shapegroup and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj) and self.exportShapeDefinition(obj, mesh_definition):
						shape_params = ParamSet().add_reference(
							'id',
							'',
							mesh_name + '-shapegroup_%i' % (i)
						)
						
						mesh_definition = (
							mesh_name,
							i,
							'instance',
							shape_params
						)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
					
					mesh_definitions.append( mesh_definition )
					
				except InvalidGeometryException as err:
					MtsLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			# free the temporary render mesh created by to_mesh()
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			MtsLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions
예제 #29
0
	def handler_Duplis_PATH(self, obj, *args, **kwargs):
		"""
		Export a HAIR particle system on obj to a Mitsuba 'hair' shape and
		write the strand points to a .hair text file.
		
		Expects kwargs['particle_system'] to be a Hair particle system;
		logs an error and returns otherwise.
		"""
		if not 'particle_system' in kwargs.keys():
			MtsLog('ERROR: handler_Duplis_PATH called without particle_system')
			return
		
		psys = kwargs['particle_system']
		
		if not psys.settings.type == 'HAIR':
			MtsLog('ERROR: handler_Duplis_PATH can only handle Hair particle systems ("%s")' % psys.name)
			return
		
		# Abort export if any particle-system modifier is disabled for render
		for mod in obj.modifiers:
			if mod.type == 'PARTICLE_SYSTEM' and mod.show_render == False:
				return
				
		MtsLog('Exporting Hair system "%s"...' % psys.name)
		
		# strand radius from particle size; the /2 halves the diameter and
		# /1000 presumably rescales to scene units -- confirm against Mitsuba
		size = psys.settings.particle_size / 2.0 / 1000.0
		psys.set_resolution(self.geometry_scene, obj, 'RENDER')
		steps = 2**psys.settings.render_step
		num_parents = len(psys.particles)
		num_children = len(psys.child_particles)
		
		partsys_name = '%s_%s'%(obj.name, psys.name)
		det = DupliExportProgressThread()
		det.start(num_parents + num_children)
		
		# Put Hair files in frame-numbered subfolders to avoid
		# clobbering when rendering animations
		sc_fr = '%s/%s/%s/%05d' % (self.mts_context.meshes_dir, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
		if not os.path.exists( sc_fr ):
			os.makedirs(sc_fr)
		
		hair_filename = '%s.hair' % bpy.path.clean_name(partsys_name)
		hair_file_path = '/'.join([sc_fr, hair_filename])
		
		shape_params = ParamSet().add_string(
			'filename',
			efutil.path_relative_to_export(hair_file_path)
		).add_float(
			'radius',
			size
		)
		mesh_definitions = []
		mesh_definition = (
			psys.name,
			psys.settings.material - 1,
			'hair',
			shape_params
		)
		mesh_definitions.append( mesh_definition )
		self.exportShapeInstances(obj, mesh_definitions)
		
		hair_file = open(hair_file_path, 'w')
		
		# Strand points are written in object-local coordinates
		transform = obj.matrix_world.inverted()
		for pindex in range(num_parents + num_children):
			det.exported_objects += 1
			points = []
			
			# NOTE(review): 'mod' below is the leftover loop variable from the
			# modifier scan above (i.e. the LAST modifier on obj), which is not
			# necessarily the modifier owning psys -- verify against callers.
			for step in range(0,steps+1):
				co = psys.co_hair(obj, mod, pindex, step)
				if not co.length_squared == 0:
					points.append(transform*co)
			
			if psys.settings.use_hair_bspline:
				# Resample the strand as a quadratic B-spline at 2**render_step
				# points; the tiny epsilon keeps the last parameter value just
				# inside the valid domain.
				temp = []
				degree = 2
				dimension = 3
				for i in range(math.trunc(math.pow(2,psys.settings.render_step))):
					if i > 0:
						u = i*(len(points)- degree)/math.trunc(math.pow(2,psys.settings.render_step)-1)-0.0000000000001
					else:
						u = i*(len(points)- degree)/math.trunc(math.pow(2,psys.settings.render_step)-1)
					temp.append(self.BSpline(points, dimension, degree, u))
				points = temp
			
			# One "x y z" line per point; blank line terminates the strand
			for p in points:
				hair_file.write('%f %f %f\n' % (p[0], p[1], p[2]))
			
			hair_file.write('\n')
		
		hair_file.close()
		
		# Restore viewport resolution and finish progress reporting
		psys.set_resolution(self.geometry_scene, obj, 'PREVIEW')
		det.stop()
		det.join()
		
		MtsLog('... done, exported %s hairs' % det.exported_objects)
예제 #30
0
	def buildBinaryPLYMesh(self, obj):
		"""
		Convert supported blender objects into a MESH, and then split into parts
		according to vertex material assignment, and construct a mesh_name and
		ParamSet for each part which will become a LuxRender PLYShape statement
		wrapped within objectBegin..objectEnd or placed in an
		attributeBegin..attributeEnd scope, depending if instancing is allowed.
		The actual geometry will be dumped to a binary ply file.
		
		Returns a list of mesh_definition tuples:
			(mesh_name, material_index, 'plymesh', ParamSet)
		"""
		try:
			mesh_definitions = []
			mesh = obj.to_mesh(self.geometry_scene, True, 'RENDER')
			if mesh is None:
				raise UnexportableObjectException('Cannot create render/export mesh')
			
			# collate faces by mat index; tessfaces replaced faces when bmesh
			# was introduced (Blender 2.63)
			ffaces_mats = {}
			mesh_faces = mesh.tessfaces if bpy.app.version > (2, 62, 1 ) else mesh.faces # bmesh
			for f in mesh_faces:
				mi = f.material_index
				if mi not in ffaces_mats.keys(): ffaces_mats[mi] = []
				ffaces_mats[mi].append( f )
			material_indices = ffaces_mats.keys()
			
			number_of_mats = len(mesh.materials)
			if number_of_mats > 0:
				iterator_range = range(number_of_mats)
			else:
				iterator_range = [0]
			
			for i in iterator_range:
				try:
					if i not in material_indices: continue
					
					# If this mesh/mat combo has already been processed, get it from the cache
					mesh_cache_key = (self.geometry_scene, obj.data, i)
					if self.allow_instancing(obj) and self.ExportedMeshes.have(mesh_cache_key):
						mesh_definitions.append( self.ExportedMeshes.get(mesh_cache_key) )
						continue
					
					# Put PLY files in frame-numbered subfolders to avoid
					# clobbering when rendering animations
					sc_fr = '%s/%s/%s/%05d' % (efutil.export_path, efutil.scene_filename(), bpy.path.clean_name(self.geometry_scene.name), self.visibility_scene.frame_current)
					if not os.path.exists( sc_fr ):
						os.makedirs(sc_fr)
					
					def make_plyfilename():
						# serial number keeps repeated exports of the same mesh unique
						ply_serial = self.ExportedPLYs.serial(mesh_cache_key)
						mesh_name = '%s_%04d_m%03d' % (obj.data.name, ply_serial, i)
						ply_filename = '%s.ply' % bpy.path.clean_name(mesh_name)
						ply_path = '/'.join([sc_fr, ply_filename])
						return mesh_name, ply_path
					
					mesh_name, ply_path = make_plyfilename()
					
					# Ensure that all PLY files have unique names
					while self.ExportedPLYs.have(ply_path):
						mesh_name, ply_path = make_plyfilename()
					
					self.ExportedPLYs.add(ply_path, None)
					
					# skip writing the PLY file if partial export is enabled and
					# the object is known-exported and unmodified
					skip_exporting = obj in self.KnownExportedObjects and not obj in self.KnownModifiedObjects
					if not os.path.exists(ply_path) or not (self.visibility_scene.luxrender_engine.partial_ply and skip_exporting):
						
						GeometryExporter.NewExportedObjects.add(obj)
						
						# FIX: uv_layer previously stayed unbound (UnboundLocalError)
						# when uv_textures was non-empty but had no active data layer.
						# Also use the version-gated uv_textures reference consistently
						# instead of mixing in mesh.uv_textures for the active check.
						uv_layer = None
						uv_textures = mesh.tessface_uv_textures if bpy.app.version > (2, 62, 0 ) else mesh.uv_textures # bmesh
						if len(uv_textures) > 0 and uv_textures.active and uv_textures.active.data:
							uv_layer = uv_textures.active.data
						
						# Here we work out exactly which vert+normal combinations
						# we need to export. This is done first, and the export
						# combinations cached before writing to file because the
						# number of verts needed needs to be written in the header
						# and that number is not known before this is done.
						
						# Export data
						co_no_uv_cache = []
						face_vert_indices = {}		# mapping of face index to list of exported vert indices for that face
						
						# Caches
						vert_vno_indices = {}		# mapping of vert data to exported vert index for verts with vert normals
						vert_use_vno = set()		# Set of vert data tuples that use vert normals
						
						vert_index = 0				# exported vert index
						for face in ffaces_mats[i]:
							fvi = []
							for j, vertex in enumerate(face.vertices):
								v = mesh.vertices[vertex]
								
								if face.use_smooth:
									# smooth faces share verts via vertex normals, so dedupe
									if uv_layer:
										vert_data = (v.co[:], v.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], v.normal[:])
									
									if vert_data not in vert_use_vno:
										vert_use_vno.add( vert_data )
										
										co_no_uv_cache.append( vert_data )
										
										vert_vno_indices[vert_data] = vert_index
										fvi.append(vert_index)
										
										vert_index += 1
									else:
										fvi.append(vert_vno_indices[vert_data])
									
								else:
									# flat faces use the face normal per corner
									if uv_layer:
										vert_data = (v.co[:], face.normal[:], uv_layer[face.index].uv[j][:])
									else:
										vert_data = (v.co[:], face.normal[:])
									
									# All face-vert-co-no are unique, we cannot
									# cache them
									co_no_uv_cache.append( vert_data )
									
									fvi.append(vert_index)
									
									vert_index += 1
							
							face_vert_indices[face.index] = fvi
						
						del vert_vno_indices
						del vert_use_vno
						
						with open(ply_path, 'wb') as ply:
							ply.write(b'ply\n')
							ply.write(b'format binary_little_endian 1.0\n')
							ply.write(b'comment Created by LuxBlend 2.6 exporter for LuxRender - www.luxrender.net\n')
							
							# vert_index == the number of actual verts needed
							ply.write( ('element vertex %d\n' % vert_index).encode() )
							ply.write(b'property float x\n')
							ply.write(b'property float y\n')
							ply.write(b'property float z\n')
							
							ply.write(b'property float nx\n')
							ply.write(b'property float ny\n')
							ply.write(b'property float nz\n')
							
							if uv_layer:
								ply.write(b'property float s\n')
								ply.write(b'property float t\n')
							
							ply.write( ('element face %d\n' % len(ffaces_mats[i])).encode() )
							ply.write(b'property list uchar uint vertex_indices\n')
							
							ply.write(b'end_header\n')
							
							# dump cached co/no/uv
							if uv_layer:
								for co,no,uv in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
									ply.write( struct.pack('<2f', *uv) )
							else:
								for co,no in co_no_uv_cache:
									ply.write( struct.pack('<3f', *co) )
									ply.write( struct.pack('<3f', *no) )
							
							# dump face vert indices; faces keep their original
							# arity (tris or quads) -- no triangulation here
							for face in ffaces_mats[i]:
								lfvi = len(face_vert_indices[face.index])
								ply.write( struct.pack('<B', lfvi) )
								ply.write( struct.pack('<%dI'%lfvi, *face_vert_indices[face.index]) )
							
							del co_no_uv_cache
							del face_vert_indices
						
						LuxLog('Binary PLY file written: %s' % (ply_path))
					else:
						LuxLog('Skipping already exported PLY: %s' % mesh_name)
					
					# Export the shape definition to LXO
					shape_params = ParamSet().add_string(
						'filename',
						efutil.path_relative_to_export(ply_path)
					)
					
					# Add subdiv etc options
					shape_params.update( obj.data.luxrender_mesh.get_paramset() )
					
					mesh_definition = (
						mesh_name,
						i,
						'plymesh',
						shape_params
					)
					mesh_definitions.append( mesh_definition )
					
					# Only export objectBegin..objectEnd and cache this mesh_definition if we plan to use instancing
					if self.allow_instancing(obj):
						self.exportShapeDefinition(obj, mesh_definition)
						self.ExportedMeshes.add(mesh_cache_key, mesh_definition)
				
				except InvalidGeometryException as err:
					LuxLog('Mesh export failed, skipping this mesh: %s' % err)
			
			del ffaces_mats
			# free the temporary render mesh created by to_mesh()
			bpy.data.meshes.remove(mesh)
			
		except UnexportableObjectException as err:
			LuxLog('Object export failed, skipping this object: %s' % err)
		
		return mesh_definitions