def render(argv):
    """Render 'scene.xml' with the object position taken from ``argv[0:3]``.

    Writes a high-dynamic-range 'output.exr', a tonemapped
    'output_<argv[3]>.jpg', and converts the result to a linear-RGB
    numpy array.
    """
    scene_path = 'scene.xml'
    # Make files referenced by the scene findable by the loader.
    Thread.thread().file_resolver().append(os.path.dirname(scene_path))
    # Instantiate the scene, substituting the position parameters.
    scene = load_file(scene_path, pos_x=argv[0], pos_y=argv[1], pos_z=argv[2])
    sensor = scene.sensors()[0]
    # Invoke the scene's integrator on the first sensor.
    scene.integrator().render(scene, sensor)
    # After rendering, the image data lives in the sensor's film.
    film = sensor.film()
    # High dynamic range OpenEXR output.
    film.set_destination_file('output.exr')
    film.develop()
    # Tonemapped 8-bit JPG of the same rendering.
    raw_bitmap = film.bitmap(raw=True)
    raw_bitmap.convert(Bitmap.PixelFormat.RGB, Struct.Type.UInt8,
                       srgb_gamma=True).write('output_{}.jpg'.format(argv[3]))
    # Linear pixel values as a numpy array for further processing.
    linear_bitmap = raw_bitmap.convert(Bitmap.PixelFormat.RGB,
                                       Struct.Type.Float32, srgb_gamma=False)
    image_np = np.array(linear_bitmap)
def compute_sh(obj_file, cam_pos, cam_lookat):
    """Render *obj_file* with the auxiliary SH integrator and collect the
    spherical-harmonics channels from the resulting EXR.

    Parameters
    ----------
    obj_file : str
        Path to the OBJ mesh substituted into the scene template.
    cam_pos : unused in the current implementation — TODO confirm whether
        the camera origin should also be forwarded to the template.
    cam_lookat : sequence of 3 floats
        Camera look-at target, forwarded as tx/ty/tz template parameters.

    Returns
    -------
    numpy.ndarray of shape (200, 200, 27), float64: the 9 SH bands x 3
    color channels read from 'render_output.exr'.
    """
    # Make the custom integrator available to the XML loader.
    register_integrator('auxintegrator', lambda props: AuxIntegrator(props))
    scene_template_file = './scene_template.xml'
    Thread.thread().file_resolver().append(
        os.path.dirname(scene_template_file))
    scene = load_file(scene_template_file, integrator='auxintegrator',
                      fov="40", tx=cam_lookat[0], ty=cam_lookat[1],
                      tz=cam_lookat[2], spp="100", width=200, height=200,
                      obj_file=obj_file)
    scene.integrator().render(scene, scene.sensors()[0])
    film = scene.sensors()[0].film()
    film.set_destination_file('./render_output.exr')
    film.develop()
    # Channel names follow the integrator's convention: sh_<color>_<band>.
    sh_channels_list = ['sh_%s_%d' % (c, i)
                        for i in range(9) for c in ['r', 'g', 'b']]
    # NOTE: np.float was removed in NumPy 1.24; use the explicit float64 dtype.
    f_sh = np.zeros((200, 200, 27), dtype=np.float64)
    exrfile = pyexr.open('render_output.exr')
    for i, channel in enumerate(sh_channels_list):
        ch = exrfile.get(channel)
        # Each channel arrives as (200, 200, 1); accumulate into its slice.
        f_sh[:, :, i:i + 1] += ch
    return f_sh
def test22_fileresolver_unchanged(variant_scalar_rgb):
    """Loading a scene containing a <path> tag must leave the thread's file
    resolver unchanged once loading completes."""
    from mitsuba.core import xml, Thread
    resolver_before = Thread.thread().file_resolver()
    xml.load_string("""<scene version="2.0.0"> <path value="/tmp"/> </scene>""")
    assert resolver_before == Thread.thread().file_resolver()
def main():
    """
    Generate reference images for all the scenes contained within the
    TEST_SCENE_DIR directory, and for all the color mode having their
    `scalar_*` mode enabled.
    """
    parser = argparse.ArgumentParser(prog='RenderReferenceImages')
    parser.add_argument(
        '--overwrite',
        action='store_true',
        help=
        'Force rerendering of all reference images. Otherwise, only missing references will be rendered.'
    )
    parser.add_argument('--spp', default=256, type=int,
                        help='Samples per pixel')
    args = parser.parse_args()

    for scene_fname in scenes:
        scene_dir = dirname(scene_fname)
        for variant in mitsuba.variants():
            # Only scalar, non-double-precision variants are rendered.
            if variant.split('_')[0] != 'scalar' or variant.endswith('double'):
                continue
            mitsuba.set_variant(variant)
            from mitsuba.core import Bitmap, Struct, Thread

            ref_fname = get_ref_fname(scene_fname)
            # Skip references that already exist unless forced.
            if os.path.exists(ref_fname) and not args.overwrite:
                continue

            Thread.thread().file_resolver().append(scene_dir)
            scene = mitsuba.core.xml.load_file(
                scene_fname, parameters=[('spp', str(args.spp))])
            scene.integrator().render(scene, scene.sensors()[0])

            # Convert the film to linear RGB and write it as the reference.
            film = scene.sensors()[0].film()
            ref_bitmap = film.bitmap(raw=True).convert(
                Bitmap.PixelFormat.RGB, Struct.Type.Float32, False)
            ref_bitmap.write(ref_fname)
            print('Saved rendered image to: ' + ref_fname)
def test09_xml_decompose_transform(variant_scalar_rgb):
    """A look_at to_world transform round-tripped through dict_to_xml must
    match the transform of the directly-loaded dict."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Transform4f, Vector3f, Thread
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict09/dict.xml')
    resolver.append(os.path.dirname(filepath))
    scene_dict = {
        'type': 'scene',
        'cam': {
            'type': 'perspective',
            'fov_axis': 'x',
            'fov': 35,
            'to_world': Transform4f.look_at(Vector3f(15, 42.3, 25),
                                            Vector3f(1.0, 0.0, 0.5),
                                            Vector3f(1.0, 0.0, 0.0))
        }
    }
    dict_to_xml(scene_dict, filepath)
    from_file = load_file(filepath)
    from_dict = load_dict(scene_dict)
    tr_file = from_file.sensors()[0].world_transform().eval(0)
    tr_dict = from_dict.sensors()[0].world_transform().eval(0)
    # Compare the action of both transforms on the three unit axes.
    for axis in (Vector3f(0, 0, 1), Vector3f(0, 1, 0), Vector3f(1, 0, 0)):
        assert tr_file.transform_point(axis) == tr_dict.transform_point(axis)
    rmtree(os.path.split(filepath)[0])
def _get_logger():
    """Return the Mitsuba Logger attached to the current thread."""
    from mitsuba.core import Thread
    current_thread = Thread.thread()
    return current_thread.logger()
def test13_xml_multiple_defaults(variant_scalar_rgb):
    """When two sensors both use the exported 'spp' default, a single value
    passed at load time must apply to both."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_file
    from mitsuba.core import Thread
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict13/dict.xml')
    resolver.append(os.path.dirname(filepath))

    def perspective_cam(count):
        # Helper: perspective sensor with an independent sampler.
        return {
            'type': 'perspective',
            'sampler': {
                'type': 'independent',
                'sample_count': count
            }
        }

    scene_dict = {
        'type': 'scene',
        'cam1': perspective_cam(150),
        'cam2': perspective_cam(250)
    }
    dict_to_xml(scene_dict, filepath)
    scene = load_file(filepath, spp=45)
    sensors = scene.sensors()
    assert sensors[0].sampler().sample_count() == \
        sensors[1].sampler().sample_count()
    assert sensors[1].sampler().sample_count() == 45
    rmtree(os.path.split(filepath)[0])
def test12_xml_duplicate_files(variants_scalar_all):
    """Exporting a dict must deduplicate referenced data files: the same
    spectrum file used twice is copied once; two distinct files sharing a
    basename get disambiguated names ('Al.eta.spd' / 'Al.eta(1).spd')."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    fr = Thread.thread().file_resolver()
    # Last resolver entry is assumed to be the mitsuba root folder.
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict12/dict.xml')
    fr.append(os.path.dirname(filepath))
    spectrum_path = os.path.join(mts_root, 'resources/data/ior/Al.eta.spd')
    #Export the same file twice, this should only copy it once
    scene_dict = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": spectrum_path
            }
        },
        'light2': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": spectrum_path
            }
        }
    }
    dict_to_xml(scene_dict, filepath)
    # dict_to_xml copies referenced spectra into a 'spectra' folder next to
    # the exported XML file.
    spectra_files = os.listdir(
        os.path.join(os.path.split(filepath)[0], 'spectra'))
    assert len(spectra_files) == 1 and spectra_files[0] == "Al.eta.spd"
    spectrum_path2 = os.path.join(mts_root,
                                  'resources/data/scenes/dict12/Al.eta.spd')
    copy(spectrum_path, spectrum_path2)
    #Export two files having the same name
    scene_dict = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": spectrum_path
            }
        },
        'light2': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": spectrum_path2
            }
        }
    }
    dict_to_xml(scene_dict, filepath)
    spectra_files = os.listdir(
        os.path.join(os.path.split(filepath)[0], 'spectra'))
    # NOTE(review): this relies on os.listdir() returning the two entries in
    # this exact order, which is not guaranteed on all filesystems — TODO
    # confirm / consider sorting before asserting.
    assert len(spectra_files) == 2 and spectra_files[
        0] == "Al.eta.spd" and spectra_files[1] == "Al.eta(1).spd"
    # Clean up the generated scene folder.
    rmtree(os.path.split(filepath)[0])
def test05_xml_split(variant_scalar_rgb):
    """Exporting with split_files=True must load to a scene identical to the
    single-file export."""
    from mitsuba.core import xml, Point3f
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    import numpy as np
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict05/dict.xml')
    resolver.append(os.path.dirname(filepath))
    scene_dict = {
        'type': 'scene',
        'bsdf1': {
            'type': 'diffuse'
        },
        'envmap': {
            'type': 'envmap',
            'filename': 'resources/data/common/textures/museum.exr'
        },
        'shape': {
            'type': 'sphere',
            'bsdf': {
                'type': 'ref',
                'id': 'bsdf1'
            }
        }
    }
    # Export once as a single file, once split across several files.
    dict_to_xml(scene_dict, filepath)
    single = xml.load_file(filepath)
    dict_to_xml(scene_dict, filepath, split_files=True)
    split = xml.load_file(filepath)
    assert str(single) == str(split)
    rmtree(os.path.split(filepath)[0])
def test01_xml_save_plugin(variant_scalar_rgb):
    """A single plugin dict written to XML must load to the same object as
    an equivalent scene dict loaded directly."""
    from mitsuba.core import xml
    from mitsuba.core import Thread
    from mitsuba.python.xml import dict_to_xml
    resolver = Thread.thread().file_resolver()
    # Add the path to the mitsuba root folder, so that files are always saved
    # in mitsuba/resources/... This way we know where to look for the file in
    # case the unit test fails.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict01/dict.xml')
    resolver.append(os.path.dirname(filepath))
    scene_dict = {
        "type": "sphere",
        "center": [0, 0, -10],
        "radius": 10.0,
    }
    dict_to_xml(scene_dict, filepath)
    from_dict = xml.load_dict({
        'type': 'scene',
        'sphere': {
            "type": "sphere",
            "center": [0, 0, -10],
            "radius": 10.0,
        }
    })
    from_file = xml.load_file(filepath)
    assert str(from_dict) == str(from_file)
    rmtree(os.path.split(filepath)[0])
def test10_xml_rgb(variants_scalar_all):
    """'rgb' properties given as a ScalarColor3f, a 3-element list, or a
    single float must export/load identically to the directly-loaded dict."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Thread, ScalarColor3f
    fr = Thread.thread().file_resolver()
    # Last resolver entry is assumed to be the mitsuba root folder.
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict10/dict.xml')
    fr.append(os.path.dirname(filepath))
    d1 = {
        'type': 'scene',
        'point': {
            "type": "point",
            "intensity": {
                "type": "rgb",
                "value": ScalarColor3f(0.5, 0.2, 0.5)
            }
        }
    }
    d2 = {
        'type': 'scene',
        'point': {
            "type": "point",
            "intensity": {
                "type": "rgb",
                "value": [0.5, 0.2, 0.5]  # list -> ScalarColor3f
            }
        }
    }
    dict_to_xml(d1, filepath)
    s1 = load_file(filepath)
    dict_to_xml(d2, filepath)
    s2 = load_file(filepath)
    s3 = load_dict(d1)
    # All three construction paths must yield the same scene.
    assert str(s1) == str(s2)
    assert str(s1) == str(s3)
    d1 = {
        'type': 'scene',
        'point': {
            "type": "point",
            "intensity": {
                "type": "rgb",
                "value": 0.5  # float -> ScalarColor3f
            }
        }
    }
    dict_to_xml(d1, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d1)
    assert str(s1) == str(s2)
    # Clean up the generated scene folder.
    rmtree(os.path.split(filepath)[0])
def main():
    """Render args.input using the custom 'measuredbtf' BSDF plugin and
    write the tonemapped 8-bit result to args.output."""
    # Make the custom BTF plugin available to the XML loader.
    register_bsdf('measuredbtf', lambda props: MeasuredBTF(props))

    src_path = args.input
    dst_path = args.output

    # Resolve resources relative to the input scene file.
    Thread.thread().file_resolver().append(os.path.dirname(src_path))
    scene = load_file(src_path)

    # Render with the scene's first sensor.
    scene.integrator().render(scene, scene.sensors()[0])

    # Convert the film to 8-bit sRGB and save it.
    film = scene.sensors()[0].film()
    film.bitmap(raw=True).convert(Bitmap.PixelFormat.RGB, Struct.Type.UInt8,
                                  srgb_gamma=True).write(dst_path)
def load_scene(filename, *args, **kwargs):
    """Prepares the file resolver and loads a Mitsuba scene from the given path."""
    resolver = Thread.thread().file_resolver()
    here = os.path.dirname(__file__)
    # Search this module's directory, the file path relative to it, and the
    # scene file's own directory.
    for entry in (here, os.path.join(here, filename),
                  os.path.dirname(filename)):
        resolver.append(entry)
    scene = load_file(filename, *args, **kwargs)
    assert scene is not None
    return scene
def test_render(variants_all, scene_fname):
    """Render scene_fname at 32spp and compare the mean absolute error
    against the stored reference; on failure, dump the rendering to disk."""
    from mitsuba.core import Bitmap, Struct, Thread
    scene_dir = dirname(scene_fname)
    if os.path.split(scene_dir)[1] in EXCLUDE_FOLDERS:
        pytest.skip(f"Skip rendering scene {scene_fname}")
    Thread.thread().file_resolver().append(scene_dir)
    ref_fname = get_ref_fname(scene_fname)
    assert os.path.exists(ref_fname)
    scene = mitsuba.core.xml.load_file(scene_fname,
                                       parameters=[('spp', str(32))])
    scene.integrator().render(scene, scene.sensors()[0])
    # Rendered and reference images, both as linear-RGB float arrays.
    rendered_bmp = scene.sensors()[0].film().bitmap(raw=True).convert(
        Bitmap.PixelFormat.RGB, Struct.Type.Float32, False)
    rendered = np.array(rendered_bmp, copy=False)
    reference_bmp = Bitmap(ref_fname).convert(Bitmap.PixelFormat.RGB,
                                              Struct.Type.Float32, False)
    reference = np.array(reference_bmp, copy=False)
    # Accept while the mean absolute error stays below half the reference
    # image's mean value.
    error = np.mean(np.mean(np.abs(reference - rendered)))
    threshold = 0.5 * np.mean(np.mean(reference))
    if error < threshold:
        return
    print("Failed. error: {} // threshold: {}".format(error, threshold))
    # Write rendered image to a file
    cur_fname = os.path.splitext(
        scene_fname)[0] + '_render_' + mitsuba.variant() + '.exr'
    rendered_bmp.write(cur_fname)
    print('Saved rendered image to: ' + cur_fname)
    assert False
def test02_xml_missing_type(variant_scalar_rgb):
    """Exporting a dict whose root entry lacks a 'type' key must raise."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict02/dict.xml')
    resolver.append(os.path.dirname(filepath))
    broken_dict = {'my_bsdf': {'type': 'diffuse'}}
    with pytest.raises(ValueError) as e:
        dict_to_xml(broken_dict, filepath)
    e.match("Missing key: 'type'!")
    rmtree(os.path.split(filepath)[0])
def test03_xml_references(variant_scalar_rgb):
    """Named plugins exported to XML must remain resolvable through 'ref'
    nodes, so that file- and dict-loaded scenes produce the same BSDFs."""
    from mitsuba.core import xml
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    fr = Thread.thread().file_resolver()
    # Last resolver entry is assumed to be the mitsuba root folder.
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict03/dict.xml')
    fr.append(os.path.dirname(filepath))
    scene_dict = {
        'type': 'scene',
        'bsdf1': {
            'type': 'diffuse',
            'reflectance': {
                'type': 'rgb',
                'value': [1.0, 0.0, 0.0]
            }
        },
        'texture1': {
            'type': 'checkerboard'
        },
        # 'bsdf4' carries an explicit id ('bsdf2') and itself references
        # 'texture1' by id.
        'bsdf4': {
            'type': 'dielectric',
            'specular_reflectance': {
                'type': 'ref',
                'id': 'texture1'
            },
            'id': 'bsdf2'
        },
        'shape1': {
            'type': 'sphere',
            'bsdf': {
                'type': 'ref',
                'id': 'bsdf1'
            }
        },
        'shape2': {
            'type': 'cylinder',
            'bsdf': {
                'type': 'ref',
                'id': 'bsdf2'
            }
        }
    }
    s1 = xml.load_dict(scene_dict)
    dict_to_xml(scene_dict, filepath)
    s2 = xml.load_file(filepath)
    # Both load paths must attach the same BSDF to the first shape.
    assert str(s1.shapes()[0].bsdf()) == str(s2.shapes()[0].bsdf())
    # Clean up the generated scene folder.
    rmtree(os.path.split(filepath)[0])
def test13_duplicate_parameter(variant_scalar_rgb):
    """Specifying the same property name twice in a scene must raise."""
    from mitsuba.core import xml
    from mitsuba.core import Thread, LogLevel
    logger = Thread.thread().logger()
    saved_level = logger.error_level()
    try:
        # Escalate warnings to errors so the duplicate triggers an exception.
        logger.set_error_level(LogLevel.Warn)
        with pytest.raises(Exception) as e:
            xml.load_string("""<scene version="2.0.0"> <integer name="a" value="1"/> <integer name="a" value="1"/> </scene>""")
    finally:
        # Always restore the previous error level for later tests.
        logger.set_error_level(saved_level)
    e.match('Property "a" was specified multiple times')
def test08_xml_defaults(variant_scalar_rgb):
    """Exported scenes expose spp/resx/resy as XML defaults that can be
    overridden through load_file keyword arguments."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    from mitsuba.core.xml import load_dict, load_file
    fr = Thread.thread().file_resolver()
    # Last resolver entry is assumed to be the mitsuba root folder.
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict08/dict.xml')
    fr.append(os.path.dirname(filepath))
    spp = 250
    resx = 1920
    resy = 1080
    scene_dict = {
        'type': 'scene',
        'cam': {
            'type': 'perspective',
            'fov_axis': 'x',
            'fov': 35,
            'sampler': {
                'type': 'independent',
                'sample_count': spp  # --> default
            },
            'film': {
                'type': 'hdrfilm',
                'width': resx,  # --> default
                'height': resy  # --> default
            }
        }
    }
    dict_to_xml(scene_dict, filepath)
    # Load a file using default values
    s1 = load_file(filepath)
    s2 = load_dict(scene_dict)
    assert str(s1.sensors()[0].film()) == str(s2.sensors()[0].film())
    assert str(s1.sensors()[0].sampler()) == str(s2.sensors()[0].sampler())
    # Set new parameters
    spp = 45
    resx = 2048
    resy = 485
    # Load a file with options for the rendering parameters
    s3 = load_file(filepath, spp=spp, resx=resx, resy=resy)
    # Mirror the overrides in the dict so both scenes should agree again.
    scene_dict['cam']['sampler']['sample_count'] = spp
    scene_dict['cam']['film']['width'] = resx
    scene_dict['cam']['film']['height'] = resy
    s4 = load_dict(scene_dict)
    assert str(s3.sensors()[0].film()) == str(s4.sensors()[0].film())
    assert str(s3.sensors()[0].sampler()) == str(s4.sensors()[0].sampler())
    # Clean up the generated scene folder.
    rmtree(os.path.split(filepath)[0])
def f(*args, **kwargs):
    """Run the wrapped function with a file resolver extended by the test
    directory and project root, restoring the original resolver afterwards.

    Returns whatever the wrapped ``func`` returns.
    """
    thread = Thread.thread()
    fres_old = thread.file_resolver()
    # Work on a copy so the original resolver object stays untouched.
    fres = FileResolver(fres_old)
    # Append current test directory and project root to the search path.
    fres.append(caller_path)
    fres.append(root_path)
    thread.set_file_resolver(fres)
    try:
        # Run actual function
        return func(*args, **kwargs)
    finally:
        # BUGFIX: restore the previous resolver even when the wrapped
        # function raises; otherwise a failing test leaks the extended
        # resolver into every subsequent test on this thread.
        thread.set_file_resolver(fres_old)
def test06_xml_duplicate_id(variant_scalar_rgb):
    """Exporting two plugins that resolve to the same id must raise."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict06/dict.xml')
    resolver.append(os.path.dirname(filepath))
    # The dict key 'my-bsdf' and the explicit id of 'bsdf1' collide.
    clashing_dict = {
        'type': 'scene',
        'my-bsdf': {
            'type': 'diffuse'
        },
        'bsdf1': {
            'type': 'roughdielectric',
            'id': 'my-bsdf'
        }
    }
    with pytest.raises(ValueError) as e:
        dict_to_xml(clashing_dict, filepath)
    e.match("Id: my-bsdf is already used!")
    rmtree(os.path.split(filepath)[0])
def test04_xml_point(variant_scalar_rgb):
    """A point light position given as a list, a Point3f, or a numpy array
    must export to identical XML scenes."""
    from mitsuba.core import xml, Point3f
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    import numpy as np
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict04/dict.xml')
    resolver.append(os.path.dirname(filepath))
    # Export/load once per position representation.
    loaded = []
    for position in ([0.0, 1.0, 2.0], Point3f(0, 1, 2), np.array([0, 1, 2])):
        dict_to_xml({
            'type': 'scene',
            'light': {
                'type': 'point',
                'position': position
            }
        }, filepath)
        loaded.append(xml.load_file(filepath))
    assert str(loaded[0]) == str(loaded[1])
    assert str(loaded[0]) == str(loaded[2])
    rmtree(os.path.split(filepath)[0])
def test01_custom(variant_scalar_rgb):
    """Install a custom Formatter/Appender pair, emit one log message, and
    check the formatted text; restore the logger state afterwards."""
    from mitsuba.core import Thread, Appender, Formatter, Log, LogLevel
    # Install a custom formatter and appender and process a log message
    messages = []
    logger = Thread.thread().logger()
    formatter = logger.formatter()
    # Detach (and remember) the existing appenders so the test message is
    # only delivered to our custom appender.
    appenders = []
    while logger.appender_count() > 0:
        app = logger.appender(0)
        appenders.append(app)
        logger.remove_appender(app)
    try:
        class MyFormatter(Formatter):
            def format(self, level, theClass, thread, filename, line, msg):
                # BUGFIX: the original string continuation was mangled into a
                # single literal containing a stray "' \ '", which injected
                # garbage between 'filename=' and 'line=' in every formatted
                # message. Restore the intended two-part string.
                return "%i: class=%s, thread=%s, text=%s, filename=%s, " \
                       "line=%i" % (level, str(theClass), thread.name(), msg,
                                    filename, line)

        class MyAppender(Appender):
            def append(self, level, text):
                messages.append(text)

        logger.set_formatter(MyFormatter())
        logger.add_appender(MyAppender())
        Log(LogLevel.Info, "This is a test message")
        assert len(messages) == 1
        # LogLevel.Info formats as 200; class is None for free functions.
        assert messages[0].startswith(
            '200: class=None, thread=main, text=test01_custom(): This is a'
            ' test message, filename=')
    finally:
        # Restore the original appenders and formatter.
        logger.clear_appenders()
        for app in appenders:
            logger.add_appender(app)
        logger.set_formatter(formatter)
def test07_xml_invalid_ref(variant_scalar_rgb):
    """A 'ref' pointing at an id that was never exported must raise."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict07/dict.xml')
    resolver.append(os.path.dirname(filepath))
    # 'my-bsdf' is referenced but never declared under that id.
    dangling_dict = {
        'type': 'scene',
        'bsdf': {
            'type': 'diffuse'
        },
        'sphere': {
            'type': 'sphere',
            'bsdf': {
                'type': 'ref',
                'id': 'my-bsdf'
            }
        }
    }
    with pytest.raises(ValueError) as e:
        dict_to_xml(dangling_dict, filepath)
    e.match("Id: my-bsdf referenced before export.")
    rmtree(os.path.split(filepath)[0])
import os
import numpy as np
import mitsuba

mitsuba.set_variant("scalar_rgb")

from mitsuba.core import xml, Thread

# BUGFIX: resolve the common resource directory with os.path.join —
# os.path.dirname() has no trailing separator, so the previous string
# concatenation produced a wrong path like '.../dir../../../common'.
Thread.thread().file_resolver().append(
    os.path.join(os.path.dirname(__file__), '../../../common'))

# Load the bunny mesh from the shared resource directory.
m = xml.load_string("""
<shape type="ply" version="0.5.0">
    <string name="filename" value="meshes/bunny.ply"/>
</shape>
""")

# Attach random per-face and per-vertex RGB attributes (3 floats each).
m.add_attribute("face_color", 3, np.random.rand(3 * m.face_count()))
m.add_attribute("vertex_color", 3, np.random.rand(3 * m.vertex_count()))

# Write the mesh including the new attributes back out as PLY.
m.write_ply("bunny_attribute_color.ply")
def test11_xml_spectrum(variants_scalar_all):
    """'spectrum' properties given as (wavelength, value) pairs, a uniform
    float, or an .spd file must export/load identically; unsorted
    wavelengths and missing files must raise."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Thread
    fr = Thread.thread().file_resolver()
    # Last resolver entry is assumed to be the mitsuba root folder.
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict11/dict.xml')
    fr.append(os.path.dirname(filepath))
    # Spectrum given as sorted (wavelength, value) pairs.
    d1 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "value": [(400, 0.1), (500, 0.2), (600, 0.4), (700, 0.1)]
            }
        }
    }
    dict_to_xml(d1, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d1)
    assert str(s1.emitters()[0]) == str(s2.emitters()[0])
    # Uniform spectrum given as a single float.
    d2 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "value": 0.44
            }
        }
    }
    dict_to_xml(d2, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d2)
    assert str(s1.emitters()[0]) == str(s2.emitters()[0])
    # Unsorted wavelengths must be rejected at export time.
    d3 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "value": [(400, 0.1), (500, 0.2), (300, 0.4)]
            }
        }
    }
    with pytest.raises(ValueError) as e:
        dict_to_xml(d3, filepath)
    e.match("Wavelengths must be sorted in strictly increasing order!")
    #wavelengths file
    d4 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": os.path.join(mts_root,
                                         'resources/data/ior/Al.eta.spd')
            }
        }
    }
    dict_to_xml(d4, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d4)
    assert str(s1.emitters()[0]) == str(s2.emitters()[0])
    # A non-existent spectrum file must be rejected at export time.
    d5 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": os.path.join(mts_root,
                                         'resources/data/blabla/Pr.spd')
            }
        }
    }
    with pytest.raises(ValueError) as e:
        dict_to_xml(d5, filepath)
    e.match("File '%s' not found!" %
            os.path.abspath(d5['light']['intensity']['filename']))
    # Clean up the generated scene folder.
    rmtree(os.path.split(filepath)[0])
# Advanced inverse rendering example: render a bunny reference image, # then replace one of the scene parameters and try to recover it using # differentiable rendering and gradient-based optimization. import enoki as ek import mitsuba mitsuba.set_variant('gpu_autodiff_rgb') from mitsuba.core import Float, Thread from mitsuba.core.xml import load_file from mitsuba.python.util import traverse from mitsuba.python.autodiff import render, write_bitmap, Adam import time # Load example scene Thread.thread().file_resolver().append('bunny') scene = load_file('bunny/bunny.xml') # Find differentiable scene parameters params = traverse(scene) # Make a backup copy param_res = params['my_envmap.resolution'] param_ref = Float(params['my_envmap.data']) # Discard all parameters except for one we want to differentiate params.keep(['my_envmap.data']) # Render a reference image (no derivatives used yet) image_ref = render(scene, spp=16) crop_size = scene.sensors()[0].film().crop_size()
# NOTE(review): this chunk is the body of an outer loop over scene files
# (the `for scene_fname ...` header is outside this view); the top-level
# `continue` statements skip excluded folders and unwanted variants.
if os.path.split(scene_dir)[1] in EXCLUDE_FOLDERS:
    continue
for variant in mitsuba.variants():
    # Only scalar, non-double-precision variants are rendered.
    if not variant.split('_')[0] == 'scalar' or variant.endswith('double'):
        continue
    mitsuba.set_variant(variant)
    from mitsuba.core import Bitmap, Struct, Thread, set_thread_count
    ref_fname, var_fname = get_ref_fname(scene_fname)
    # Skip scenes whose mean and variance references already exist.
    if exists(ref_fname) and exists(var_fname) and not overwrite:
        continue
    Thread.thread().file_resolver().append(scene_dir)
    scene = mitsuba.core.xml.load_file(scene_fname, spp=ref_spp)
    scene.integrator().render(scene, scene.sensors()[0])
    # raw=False keeps the multi-channel film; bitmap_extract splits it into
    # mean and variance images.
    bmp = scene.sensors()[0].film().bitmap(raw=False)
    img, var_img = bitmap_extract(bmp)
    # Write rendered image to a file
    os.makedirs(dirname(ref_fname), exist_ok=True)
    xyz_to_rgb_bmp(img).write(ref_fname)
    print('Saved rendered image to: ' + ref_fname)
    # Write variance image to a file
    xyz_to_rgb_bmp(var_img).write(var_fname)
    print('Saved variance image to: ' + var_fname)
# Simple inverse rendering example: render a cornell box reference image, # then replace one of the scene parameters and try to recover it using # differentiable rendering and gradient-based optimization. (PyTorch) import enoki as ek import mitsuba mitsuba.set_variant('gpu_autodiff_rgb') from mitsuba.core import Thread, Vector3f from mitsuba.core.xml import load_file from mitsuba.python.util import traverse from mitsuba.python.autodiff import render_torch, write_bitmap import torch import time Thread.thread().file_resolver().append('cbox') scene = load_file('cbox/cbox.xml') # Find differentiable scene parameters params = traverse(scene) # Discard all parameters except for one we want to differentiate params.keep(['red.reflectance.value']) # Print the current value and keep a backup copy param_ref = params['red.reflectance.value'].torch() print(param_ref) # Render a reference image (no derivatives used yet) image_ref = render_torch(scene, spp=8) crop_size = scene.sensors()[0].film().crop_size()
# Output folder is derived from the first command-line argument.
img_path = sys.argv[1]
outpath = os.path.join('outputs/ref', img_path)
os.makedirs(outpath, exist_ok=True)
out_config(outpath, render_config)
filename = 'xml/{}'.format(render_config['scene'])
# Fixed camera setup written into the scene XML before loading.
cam_origin = [0, 0, 100]
cam_target = [0, 0, 40]
xml_util.set_perspective(filepath=filename, origin=cam_origin,
                         target=cam_target, spp=128)
# Add the scene directory to the FileResolver's search path
Thread.thread().file_resolver().append(os.path.dirname(filename))
# Load the actual scene
scene = load_file(filename)
# ========== Start time watch ==========
start_time = time.time()
# Call the scene's integrator to render the loaded scene
scene.integrator().render(scene, scene.sensors()[0])
end_time = time.time()
elapsed_time = end_time - start_time
print_time(elapsed_time)
# ========== End time watch ==========
def test_render(variants_all, scene_fname):
    """Statistical regression test: render each scene with a fixed sample
    budget and run a per-pixel Z-test against stored reference mean and
    variance images; on rejection, dump diagnostic images and fail."""
    from mitsuba.core import Bitmap, Struct, Thread, set_thread_count
    scene_dir = dirname(scene_fname)
    if os.path.split(scene_dir)[1] in EXCLUDE_FOLDERS:
        pytest.skip(f"Skip rendering scene {scene_fname}")
    Thread.thread().file_resolver().prepend(scene_dir)
    ref_fname, ref_var_fname = get_ref_fname(scene_fname)
    if not (exists(ref_fname) and exists(ref_var_fname)):
        pytest.skip("Non-existent reference data.")
    # Reference mean and variance images, converted to XYZ.
    ref_bmp = read_rgb_bmp_to_xyz(ref_fname)
    ref_img = np.array(ref_bmp, copy=False)
    ref_var_bmp = read_rgb_bmp_to_xyz(ref_var_fname)
    ref_var_img = np.array(ref_var_bmp, copy=False)
    significance_level = 0.01
    # Compute spp budget: a fixed total sample count spread over all pixels.
    sample_budget = int(2e6)
    pixel_count = ek.hprod(ref_bmp.size())
    spp = sample_budget // pixel_count
    # Load and render
    scene = mitsuba.core.xml.load_file(scene_fname, spp=spp)
    scene.integrator().render(scene, scene.sensors()[0])
    # Compute variance image
    bmp = scene.sensors()[0].film().bitmap(raw=False)
    img, var_img = bitmap_extract(bmp)
    # Compute Z-test p-value
    p_value = z_test(img, spp, ref_img, ref_var_img)
    # Apply the Sidak correction term, since we'll be conducting multiple independent
    # hypothesis tests. This accounts for the fact that the probability of a failure
    # increases quickly when several hypothesis tests are run in sequence.
    alpha = 1.0 - (1.0 - significance_level) ** (1.0 / pixel_count)
    success = (p_value > alpha)
    # Accept when at least 99.75% of per-pixel tests pass (success holds 3
    # channels per pixel, hence the division by 3).
    if (np.count_nonzero(success) / 3) >= (0.9975 * pixel_count):
        print('Accepted the null hypothesis (min(p-value) = %f, significance level = %f)' % (np.min(p_value), alpha))
    else:
        print('Reject the null hypothesis (min(p-value) = %f, significance level = %f)' % (np.min(p_value), alpha))
        # On rejection, dump the rendering, variance, error mask and
        # p-values next to the scene for inspection, then fail the test.
        output_dir = join(scene_dir, 'error_output')
        if not exists(output_dir):
            os.makedirs(output_dir)
        output_prefix = join(output_dir, splitext(
            basename(scene_fname))[0] + '_' + mitsuba.variant())
        img_rgb_bmp = xyz_to_rgb_bmp(img)
        fname = output_prefix + '_img.exr'
        img_rgb_bmp.write(fname)
        print('Saved rendered image to: ' + fname)
        var_fname = output_prefix + '_var.exr'
        xyz_to_rgb_bmp(var_img).write(var_fname)
        print('Saved variance image to: ' + var_fname)
        err_fname = output_prefix + '_error.exr'
        # Error visualization: dim copy of the image, failed pixels set to 1.
        err_img = 0.02 * np.array(img_rgb_bmp)
        err_img[~success] = 1.0
        err_bmp = Bitmap(err_img).write(err_fname)
        print('Saved error image to: ' + err_fname)
        pvalue_fname = output_prefix + '_pvalue.exr'
        xyz_to_rgb_bmp(p_value).write(pvalue_fname)
        print('Saved error image to: ' + pvalue_fname)
        assert False