Ejemplo n.º 1
0
def test05_xml_split(variant_scalar_rgb):
    """Writing a scene dict with ``split_files=True`` must produce the same
    scene as writing it to a single XML file.

    Fix: removed the unused ``Point3f`` and ``numpy`` imports.
    """
    from mitsuba.core import xml
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    fr = Thread.thread().file_resolver()
    # The last resolver entry is the mitsuba root folder, so generated files
    # always land under mitsuba/resources/... where we can find them on failure.
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict05/dict.xml')
    fr.append(os.path.dirname(filepath))
    scene_dict = {
        'type': 'scene',
        'bsdf1': {
            'type': 'diffuse'
        },
        'envmap': {
            'type': 'envmap',
            'filename': 'resources/data/common/textures/museum.exr'
        },
        'shape': {
            'type': 'sphere',
            'bsdf': {
                'type': 'ref',
                'id': 'bsdf1'
            }
        }
    }
    # Write once monolithically, once split across multiple files; the loaded
    # scenes must stringify identically.
    dict_to_xml(scene_dict, filepath)
    s1 = xml.load_file(filepath)
    dict_to_xml(scene_dict, filepath, split_files=True)
    s2 = xml.load_file(filepath)
    assert str(s1) == str(s2)
    # Clean up the generated scene folder
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 2
0
def test10_xml_rgb(variants_scalar_all):
    """An 'rgb' value given as ScalarColor3f, a 3-element list, or a single
    float must all round-trip through dict_to_xml/load_file to the same scene
    as loading the dict directly."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Thread, ScalarColor3f
    fr = Thread.thread().file_resolver()
    # The last resolver entry is the mitsuba root folder
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict10/dict.xml')
    fr.append(os.path.dirname(filepath))

    d1 = {
        'type': 'scene',
        'point': {
            "type": "point",
            "intensity": {
                "type": "rgb",
                "value": ScalarColor3f(0.5, 0.2, 0.5)
            }
        }
    }

    d2 = {
        'type': 'scene',
        'point': {
            "type": "point",
            "intensity": {
                "type": "rgb",
                "value": [0.5, 0.2, 0.5]  # list -> ScalarColor3f
            }
        }
    }

    # Both representations are serialized to the same file in turn, and must
    # also match the scene obtained without the XML round trip.
    dict_to_xml(d1, filepath)
    s1 = load_file(filepath)
    dict_to_xml(d2, filepath)
    s2 = load_file(filepath)
    s3 = load_dict(d1)
    assert str(s1) == str(s2)
    assert str(s1) == str(s3)

    d1 = {
        'type': 'scene',
        'point': {
            "type": "point",
            "intensity": {
                "type": "rgb",
                "value": 0.5  # float -> ScalarColor3f
            }
        }
    }

    dict_to_xml(d1, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d1)
    assert str(s1) == str(s2)
    # Clean up the generated scene folder
    rmtree(os.path.split(filepath)[0])
def test13_xml_multiple_defaults(variant_scalar_rgb):
    """Two sensors whose samplers share a defaultable parameter must be
    overridden together by a single keyword argument to ``load_file``."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_file
    from mitsuba.core import Thread

    resolver = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict13/dict.xml')
    resolver.append(os.path.dirname(filepath))

    scene_dict = {
        'type': 'scene',
        'cam1': {
            'type': 'perspective',
            'sampler': {'type': 'independent', 'sample_count': 150}
        },
        'cam2': {
            'type': 'perspective',
            'sampler': {'type': 'independent', 'sample_count': 250}
        }
    }
    dict_to_xml(scene_dict, filepath)

    # spp=45 must override the sample count of *both* sensors
    scene = load_file(filepath, spp=45)
    counts = [sensor.sampler().sample_count() for sensor in scene.sensors()]
    assert counts[0] == counts[1]
    assert counts[1] == 45

    # Clean up the generated scene folder
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 4
0
def test09_xml_decompose_transform(variant_scalar_rgb):
    """A look-at camera transform written to XML must decompose/recompose to
    the same world transform as when the dict is loaded directly."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Transform4f, Vector3f, Thread

    resolver = Thread.thread().file_resolver()
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict09/dict.xml')
    resolver.append(os.path.dirname(filepath))

    to_world = Transform4f.look_at(Vector3f(15, 42.3, 25),
                                   Vector3f(1.0, 0.0, 0.5),
                                   Vector3f(1.0, 0.0, 0.0))
    scene_dict = {
        'type': 'scene',
        'cam': {
            'type': 'perspective',
            'fov_axis': 'x',
            'fov': 35,
            'to_world': to_world
        }
    }
    dict_to_xml(scene_dict, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(scene_dict)

    # Compare the evaluated sensor transforms on the three canonical axes
    t1 = s1.sensors()[0].world_transform().eval(0)
    t2 = s2.sensors()[0].world_transform().eval(0)
    for axis in (Vector3f(0, 0, 1), Vector3f(0, 1, 0), Vector3f(1, 0, 0)):
        assert t1.transform_point(axis) == t2.transform_point(axis)

    # Clean up the generated scene folder
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 5
0
def render(argv):
    """Render ``scene.xml`` with the camera position taken from *argv*.

    argv[0..2] are the pos_x/pos_y/pos_z parameters substituted into the
    scene file; argv[3] tags the tonemapped JPG output filename.

    Returns the linear-RGB rendering as a float32 numpy array.  (The array
    was previously computed and silently discarded — it is now returned so
    callers can post-process it.)
    """
    filename = 'scene.xml'

    # Add the scene directory to the FileResolver's search path
    Thread.thread().file_resolver().append(os.path.dirname(filename))

    # Load the actual scene, substituting the camera-position parameters
    scene = load_file(filename, pos_x=argv[0], pos_y=argv[1], pos_z=argv[2])

    # Call the scene's integrator to render the loaded scene
    scene.integrator().render(scene, scene.sensors()[0])

    # After rendering, the rendered data is stored in the film
    film = scene.sensors()[0].film()

    # Write out rendering as high dynamic range OpenEXR file
    film.set_destination_file('output.exr')
    film.develop()

    # Write out a tonemapped JPG of the same rendering
    bmp = film.bitmap(raw=True)
    bmp.convert(Bitmap.PixelFormat.RGB, Struct.Type.UInt8,
                srgb_gamma=True).write('output_{}.jpg'.format(argv[3]))

    # Get linear pixel values as a numpy array for further processing
    bmp_linear_rgb = bmp.convert(Bitmap.PixelFormat.RGB, Struct.Type.Float32,
                                 srgb_gamma=False)
    return np.array(bmp_linear_rgb)
Ejemplo n.º 6
0
def compute_sh(obj_file, cam_pos, cam_lookat):
    """Render *obj_file* with the auxiliary SH integrator and return the
    spherical-harmonics coefficient image.

    Returns a (200, 200, 27) float array: 9 SH bands x RGB, read back from
    the rendered EXR's ``sh_{r,g,b}_{0..8}`` channels.

    NOTE(review): ``cam_pos`` is currently unused; only ``cam_lookat`` is
    forwarded to the scene template — confirm whether that is intended.
    """
    register_integrator('auxintegrator', lambda props: AuxIntegrator(props))

    scene_template_file = './scene_template.xml'
    Thread.thread().file_resolver().append(
        os.path.dirname(scene_template_file))

    scene = load_file(scene_template_file, integrator='auxintegrator',
                      fov="40", tx=cam_lookat[0], ty=cam_lookat[1],
                      tz=cam_lookat[2], spp="100", width=200, height=200,
                      obj_file=obj_file)

    scene.integrator().render(scene, scene.sensors()[0])
    film = scene.sensors()[0].film()
    film.set_destination_file('./render_output.exr')
    film.develop()

    # EXR channel names, in read order: sh_r_0, sh_g_0, sh_b_0, sh_r_1, ...
    sh_channels_list = []
    for i in range(9):
        for c in ['r', 'g', 'b']:
            sh_channels_list.append('sh_%s_%d' % (c, i))

    # Fix: `np.float` was removed in NumPy 1.20+; plain `float` (float64) is
    # the equivalent dtype.
    f_sh = np.zeros((200, 200, 27), dtype=float)
    exrfile = pyexr.open('render_output.exr')

    for i, channel in enumerate(sh_channels_list):
        ch = exrfile.get(channel)
        f_sh[:, :, i:i + 1] += ch

    return f_sh
Ejemplo n.º 7
0
def test01_xml_save_plugin(variant_scalar_rgb):
    """Saving a single (non-scene) plugin dict and loading the file back must
    match the equivalent scene built directly with ``load_dict``."""
    from mitsuba.core import xml
    from mitsuba.core import Thread
    from mitsuba.python.xml import dict_to_xml

    resolver = Thread.thread().file_resolver()
    # Add the path to the mitsuba root folder, so that files are always saved
    # in mitsuba/resources/... — this way we know where to look for the file
    # in case the unit test fails.
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict01/dict.xml')
    resolver.append(os.path.dirname(filepath))

    sphere = {
        "type": "sphere",
        "center": [0, 0, -10],
        "radius": 10.0,
    }
    dict_to_xml(sphere, filepath)

    s1 = xml.load_dict({'type': 'scene', 'sphere': sphere})
    s2 = xml.load_file(filepath)
    assert str(s1) == str(s2)

    # Clean up the generated scene folder
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 8
0
def test08_xml_defaults(variant_scalar_rgb):
    """Parameters emitted as XML defaults (spp, resx, resy) must be
    overridable through keyword arguments of ``load_file``."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    from mitsuba.core.xml import load_dict, load_file
    fr = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict08/dict.xml')
    fr.append(os.path.dirname(filepath))
    spp = 250
    resx = 1920
    resy = 1080
    scene_dict = {
        'type': 'scene',
        'cam': {
            'type': 'perspective',
            'fov_axis': 'x',
            'fov': 35,
            'sampler': {
                'type': 'independent',
                'sample_count': spp  # --> default
            },
            'film': {
                'type': 'hdrfilm',
                'width': resx,  # --> default
                'height': resy  # --> default
            }
        }
    }
    dict_to_xml(scene_dict, filepath)
    # Load a file using default values
    s1 = load_file(filepath)
    s2 = load_dict(scene_dict)
    assert str(s1.sensors()[0].film()) == str(s2.sensors()[0].film())
    assert str(s1.sensors()[0].sampler()) == str(s2.sensors()[0].sampler())
    # Set new parameters
    spp = 45
    resx = 2048
    resy = 485
    # Load a file with options for the rendering parameters
    s3 = load_file(filepath, spp=spp, resx=resx, resy=resy)
    # Mirror the keyword overrides in the dict so both loads should agree
    scene_dict['cam']['sampler']['sample_count'] = spp
    scene_dict['cam']['film']['width'] = resx
    scene_dict['cam']['film']['height'] = resy
    s4 = load_dict(scene_dict)
    assert str(s3.sensors()[0].film()) == str(s4.sensors()[0].film())
    assert str(s3.sensors()[0].sampler()) == str(s4.sensors()[0].sampler())
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 9
0
def load_scene_from_params(train_img_list, fname, default_params):
    """Load one Mitsuba scene per image configuration.

    Returns the loaded scenes together with the matching target images
    produced by ``load_img_with_cfg``.  Per-image parameters override the
    defaults when both define the same key.
    """
    import mitsuba
    from mitsuba.core.xml import load_file

    loading_param_list, target_im_list = load_img_with_cfg(
        train_img_list, default_params)
    scene_list = [load_file(fname, **default_params, **params)
                  for params in loading_param_list]
    return scene_list, target_im_list
def load_scene(filename, *args, **kwargs):
    """Prepares the file resolver and loads a Mitsuba scene from the given path."""
    resolver = Thread.thread().file_resolver()
    base_dir = os.path.dirname(__file__)
    # Make resources findable next to this module, inside the scene folder,
    # and next to the scene file itself.
    for search_path in (base_dir,
                        os.path.join(base_dir, filename),
                        os.path.dirname(filename)):
        resolver.append(search_path)

    scene = load_file(filename, *args, **kwargs)
    assert scene is not None
    return scene
Ejemplo n.º 11
0
def test04_xml_point(variant_scalar_rgb):
    """A point position given as a Python list, a Point3f or a numpy array
    must all serialize to identical XML scenes."""
    from mitsuba.core import xml, Point3f
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    import numpy as np

    resolver = Thread.thread().file_resolver()
    mts_root = str(resolver[len(resolver) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict04/dict.xml')
    resolver.append(os.path.dirname(filepath))

    # Write and reload the same light once per position representation
    scenes = []
    for position in ([0.0, 1.0, 2.0], Point3f(0, 1, 2), np.array([0, 1, 2])):
        dict_to_xml({
            'type': 'scene',
            'light': {
                'type': 'point',
                'position': position
            }
        }, filepath)
        scenes.append(xml.load_file(filepath))

    assert str(scenes[0]) == str(scenes[1])
    assert str(scenes[0]) == str(scenes[2])

    # Clean up the generated scene folder
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 12
0
def test03_xml_references(variant_scalar_rgb):
    """References ('type': 'ref') and explicit 'id' overrides must survive
    the dict -> XML -> scene round trip."""
    from mitsuba.core import xml
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core import Thread
    fr = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict03/dict.xml')
    fr.append(os.path.dirname(filepath))
    scene_dict = {
        'type': 'scene',
        'bsdf1': {
            'type': 'diffuse',
            'reflectance': {
                'type': 'rgb',
                'value': [1.0, 0.0, 0.0]
            }
        },
        'texture1': {
            'type': 'checkerboard'
        },
        # Declared under key 'bsdf4' but carries the explicit id 'bsdf2',
        # which is what 'shape2' references below.
        'bsdf4': {
            'type': 'dielectric',
            'specular_reflectance': {
                'type': 'ref',
                'id': 'texture1'
            },
            'id': 'bsdf2'
        },
        'shape1': {
            'type': 'sphere',
            'bsdf': {
                'type': 'ref',
                'id': 'bsdf1'
            }
        },
        'shape2': {
            'type': 'cylinder',
            'bsdf': {
                'type': 'ref',
                'id': 'bsdf2'
            }
        }
    }

    s1 = xml.load_dict(scene_dict)
    dict_to_xml(scene_dict, filepath)
    s2 = xml.load_file(filepath)

    # The resolved BSDF of the first shape must match in both scenes
    assert str(s1.shapes()[0].bsdf()) == str(s2.shapes()[0].bsdf())
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 13
0
def run_rendering(xml, save_name):
    """Render the scene file *xml* and write a tonemapped image to
    ``dataset_output_dir + save_name``.

    Parameters
    ----------
    xml : str
        Path of the Mitsuba scene XML file to render.
    save_name : str
        Output filename, appended to the module-level ``dataset_output_dir``.
    """
    scene = load_file(xml)
    sensor = scene.sensors()[0]
    scene.integrator().render(scene, sensor)
    film = sensor.film()

    result = film.bitmap(raw=False)
    # Fix: `np.float` was removed in NumPy 1.20+; plain `float` (float64)
    # is the equivalent dtype.
    result = np.array(result, copy=False).astype(float)
    # Reorder RGB -> BGR for OpenCV
    image = np.stack([result[:, :, 2], result[:, :, 1], result[:, :, 0]],
                     axis=-1)
    image = np.array(
        Bitmap(image, Bitmap.PixelFormat.RGB).convert(Bitmap.PixelFormat.RGB,
                                                      Struct.Type.UInt8,
                                                      srgb_gamma=True))

    cv2.imwrite(dataset_output_dir + save_name, image)
    cv2.waitKey()
Ejemplo n.º 14
0
def main():
    """Register the measured-BTF BSDF, render the input scene and save the
    tonemapped result to the output path."""
    # Register MeasuredBTF
    register_bsdf('measuredbtf', lambda props: MeasuredBTF(props))

    # Input / output filenames from the parsed command-line arguments
    filename_src = args.input
    filename_dst = args.output

    # Make resources referenced by the scene resolvable, then load the XML
    Thread.thread().file_resolver().append(os.path.dirname(filename_src))
    scene = load_file(filename_src)

    # Render with the first sensor
    scene.integrator().render(scene, scene.sensors()[0])

    # Tonemap the film to 8-bit sRGB and write the image
    film = scene.sensors()[0].film()
    raw_bitmap = film.bitmap(raw=True)
    converted = raw_bitmap.convert(Bitmap.PixelFormat.RGB, Struct.Type.UInt8,
                                   srgb_gamma=True)
    converted.write(filename_dst)
Ejemplo n.º 15
0
def run(prefix):
    """Render the graycode pattern sequence for *prefix* and produce the
    corresponding flow maps.

    Reads the mask/reflectance inputs from ``dataset_output_dir``, writes the
    22 pattern renderings to ``output_dir``, then runs ``f_run()`` and copies
    the resulting flow files back next to the inputs.
    """
    # Pattern 1: inverted object mask
    img = cv2.imread(dataset_output_dir + prefix + 'mask_2x.png')
    img_i = cv2.bitwise_not(img)
    cv2.imwrite(output_dir + 'graycode_01.png', img_i)

    # Pattern 2: reflectance map, copied unchanged
    copyfile(dataset_output_dir + prefix + 'rho_2x.png',
             output_dir + 'graycode_02.png')

    tree = et.parse('img_gen.xml')
    root = tree.getroot()
    for i in range(1, 21):
        # Point the emitter texture at the i-th graycode pattern, force a
        # 1024px film resolution, and rewrite the scene file before loading.
        r = root.find('shape/emitter/texture/string')
        r.set('value', graycode_dir + 'graycode_' + str(i) + '.png')
        # Fix: side-effect-only list comprehension replaced by a plain loop
        for rr in root.findall('sensor/film/integer'):
            rr.set('value', '1024')
        tree.write('img_gen.xml')

        scene = load_file("img_gen.xml")
        sensor = scene.sensors()[0]
        scene.integrator().render(scene, sensor)
        film = sensor.film()

        result = film.bitmap(raw=False)
        # Fix: `np.float` was removed in NumPy 1.20+; `float` is equivalent
        result = np.array(result, copy=False).astype(float)
        # RGB -> BGR for OpenCV
        image = np.stack([result[:, :, 2], result[:, :, 1], result[:, :, 0]],
                         axis=-1)
        image = np.array(Bitmap(image, Bitmap.PixelFormat.RGB).convert(
            Bitmap.PixelFormat.RGB, Struct.Type.UInt8, srgb_gamma=True))

        #cv2.imshow("image", image)
        # f-strings already yield str; the old str(...) wrapper was redundant
        cv2.imwrite(output_dir + 'graycode_' + f'{i+2:02d}' + '.png', image)
        cv2.waitKey()

    f_run()

    copyfile('flow_gen/flow/flow.flo',
             dataset_output_dir + prefix + 'flow_2x.flo')
    copyfile('flow_gen/flow/flow.png',
             dataset_output_dir + prefix + 'flow_2x.png')
    'mmaps_size': 30,  # The size of MMAPs side
    'object': 'sphere',  # The filename of transparent object
    'object_transform': 'translate 0, 0, 40, scale 2.5',
    'ior': 1.49,  # Index of refraction of transparent object
    'learning_rate': 1e-3,  # Learning rate
    'num_iteration': 1001,  # The number of iteration of optimization process
    'mmapsbsdf': True,
    # 'dielectric' : "ignore reflection"
}

# Output directory for rendered images, passed on the command line
outimg_dir = sys.argv[1]

# Load the Cornell Box
Thread.thread().file_resolver().append('xml')
xmlfilename = os.path.join('xml', render_config['scene_xml'])
scene = load_file(xmlfilename)
width, height = scene.sensors()[0].film().crop_size()

# Load a reference image (no derivatives used yet); flattened to a 1-D
# linear-RGB float array for comparison against rendered results
bitmap_ref = Bitmap(os.path.join('outputs', render_config['ref'])).convert(
    Bitmap.PixelFormat.RGB, Struct.Type.Float32, srgb_gamma=False)
image_ref = np.array(bitmap_ref).flatten()

# Find differentiable scene parameters
params = traverse(scene)
print(params)

# The emitter texture is the parameter being optimized
opt_param_name = 'textured_lightsource.emitter.radiance.data'
# Make a backup copy
param_res = params['textured_lightsource.emitter.radiance.resolution']
param_ref = Float(params[opt_param_name])
Ejemplo n.º 17
0
def test11_xml_spectrum(variants_scalar_all):
    """Spectrum intensities — (wavelength, value) pairs, a uniform float, or
    an .spd file — must round-trip through dict_to_xml/load_file; malformed
    inputs must raise ValueError."""
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Thread
    fr = Thread.thread().file_resolver()
    # Last resolver entry is the mitsuba root folder
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict11/dict.xml')
    fr.append(os.path.dirname(filepath))
    # Sorted (wavelength, value) pairs: the valid tabulated case
    d1 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "value": [(400, 0.1), (500, 0.2), (600, 0.4), (700, 0.1)]
            }
        }
    }
    dict_to_xml(d1, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d1)

    assert str(s1.emitters()[0]) == str(s2.emitters()[0])

    # Uniform spectrum given as a single float
    d2 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "value": 0.44
            }
        }
    }

    dict_to_xml(d2, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d2)

    assert str(s1.emitters()[0]) == str(s2.emitters()[0])

    # Unsorted wavelengths must be rejected
    d3 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "value": [(400, 0.1), (500, 0.2), (300, 0.4)]
            }
        }
    }
    with pytest.raises(ValueError) as e:
        dict_to_xml(d3, filepath)
    e.match("Wavelengths must be sorted in strictly increasing order!")

    #wavelengths file
    d4 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": os.path.join(mts_root,
                                         'resources/data/ior/Al.eta.spd')
            }
        }
    }

    dict_to_xml(d4, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(d4)

    assert str(s1.emitters()[0]) == str(s2.emitters()[0])

    # A non-existent .spd file must be rejected with the resolved path
    d5 = {
        'type': 'scene',
        'light': {
            "type": "point",
            "intensity": {
                "type": "spectrum",
                "filename": os.path.join(mts_root,
                                         'resources/data/blabla/Pr.spd')
            }
        }
    }
    with pytest.raises(ValueError) as e:
        dict_to_xml(d5, filepath)
    e.match("File '%s' not found!" %
            os.path.abspath(d5['light']['intensity']['filename']))
    rmtree(os.path.split(filepath)[0])
Ejemplo n.º 18
0
    def get_scene(self, config):
        """
        Generate a scene object from this object's attributes.

        Parameters
        ----------
        config : object
            Must provide ``mode`` ('sample', 'visual', 'test' or 'abs') plus
            any mode-specific options such as ``coeff_range``.

        Returns
        -------
        The scene produced by ``load_file``, or ``None`` when ``config.mode``
        matches none of the known modes (previously this raised a confusing
        ``NameError``).

        Bug fix: the mode comparisons used ``is``, which tests object
        identity, not string equality — it only worked by accident of
        CPython's string interning.  They now use ``==``.
        """
        if self.serialized is None:
            sys.exit("Please set serialized file path before generating scene")

        scene = None  # returned unchanged if the mode is unrecognised

        if config.mode == "sample":
            scene = load_file(self.xml_path,
                              out_path=self.out_path,
                              coeff_range=config.coeff_range,
                              spp=self.spp,
                              seed=self.seed,
                              scale_m=self.scale_m,
                              sigma_t=self.sigmat,
                              albedo=self.albedo,
                              g=self.g,
                              eta=self.eta,
                              serialized=self.serialized,
                              mat=self.mat)

        elif config.mode == "visual":
            scene = load_file(self.xml_path,
                              spp=self.spp,
                              seed=self.seed,
                              scale_m=self.scale_m,
                              sigma_t=self.sigmat,
                              albedo=self.albedo,
                              g=self.g,
                              eta=self.eta,
                              serialized=self.serialized,
                              mat=self.mat)

        elif config.mode == "test":
            # Fixed initial direction for the test mode
            init_d = "0, 0, 1"
            scene = load_file(self.xml_path,
                              out_path=self.out_path,
                              init_d=init_d,
                              spp=self.spp,
                              seed=self.seed,
                              scale_m=self.scale_m,
                              sigma_t=self.sigmat,
                              albedo=self.albedo,
                              g=self.g,
                              eta=self.eta)

        elif config.mode == "abs":
            # Format the current initial direction, then advance the counter
            init_d = f"{self.init_d[self.cnt_d, 0]:.5f} {self.init_d[self.cnt_d, 1]:.5f} {self.init_d[self.cnt_d, 2]:.5f}"
            scene = load_file(self.xml_path,
                              out_path=self.out_path,
                              init_d=init_d,
                              spp=self.spp,
                              seed=self.seed,
                              scale_m=self.scale_m,
                              sigma_t=self.sigmat,
                              albedo=self.albedo,
                              g=self.g,
                              eta=self.eta)

            self.cnt_d += 1

        return scene
Ejemplo n.º 19
0
# Simple forward differentiation example: show how a perturbation
# of a single scene parameter changes the rendered image.

import enoki as ek
import mitsuba

mitsuba.set_variant('gpu_autodiff_rgb')

from mitsuba.core import Thread, Float
from mitsuba.core.xml import load_file
from mitsuba.python.util import traverse
from mitsuba.python.autodiff import render, write_bitmap

# Load the Cornell Box
# NOTE(review): hard-coded absolute Windows path — not portable; consider a
# path relative to this script.
Thread.thread().file_resolver().append('cbox')
scene = load_file(
    'C:/MyFile/code/ray tracing/misuba2/test/gpu_autodiff/cbox/cbox.xml')

# Find differentiable scene parameters
params = traverse(scene)

# Keep track of derivatives with respect to one parameter
param_0 = params['red.reflectance.value']
ek.set_requires_gradient(param_0)

# Differentiable simulation
image = render(scene, spp=4)

# Assign the gradient [1, 1, 1] to the 'red.reflectance.value' input
# (backward=False: the gradient will be forward-propagated)
ek.set_gradient(param_0, [1, 1, 1], backward=False)

# Forward-propagate previously assigned gradients
Ejemplo n.º 20
0
from file_list_cfgs.flatgel import fname, img_list, project_name

cdir = osp.dirname(osp.abspath(__file__))

# Register any searchs path needed to load scene resources (optional)
dname = osp.dirname(fname)
Thread.thread().file_resolver().append(join(cdir, dname))

# load render params
render_params = load_render_cfg(join(cdir, "render_cfgs", "focussed.cfg"))
loading_param_list, target_im_list = load_img_with_cfg(img_list, render_params)

# Render each configured view; per-image params override the shared ones
for scene_id, params in enumerate(loading_param_list):
  print(f"Rendering {scene_id}/{len(loading_param_list)} fn:{params['baseFn']}")

  # Load the scene from an XML file
  scene = load_file(fname, **render_params, **params)

  outFn = f"{params['baseFn']}_sim.exr"

  # create output dir
  create_folder(join("results", "flatgel"))
  outFn = join("results", "flatgel", outFn)

  scene.integrator().render(scene, scene.sensors()[0])

  # After rendering, the rendered data is stored in the film
  film = scene.sensors()[0].film()
  film.set_destination_file(outFn)
  film.develop()
Ejemplo n.º 21
0
# then replace one of the scene parameters and try to recover it using
# differentiable rendering and gradient-based optimization. (PyTorch)

import enoki as ek
import mitsuba
mitsuba.set_variant('gpu_autodiff_rgb')

from mitsuba.core import Thread, Vector3f
from mitsuba.core.xml import load_file
from mitsuba.python.util import traverse
from mitsuba.python.autodiff import render_torch, write_bitmap
import torch
import time

# Load the Cornell Box scene
Thread.thread().file_resolver().append('cbox')
scene = load_file('cbox/cbox.xml')

# Find differentiable scene parameters
params = traverse(scene)

# Discard all parameters except for one we want to differentiate
params.keep(['red.reflectance.value'])

# Print the current value and keep a backup copy (as a torch tensor)
param_ref = params['red.reflectance.value'].torch()
print(param_ref)

# Render a reference image (no derivatives used yet)
image_ref = render_torch(scene, spp=8)
crop_size = scene.sensors()[0].film().crop_size()
write_bitmap('out_ref.png', image_ref, crop_size)
## filepath of new mesh. Absolute path is preferred
new_mesh_fn = join("models", "meshes", "gelpad.obj")
##

# Split off the extension to validate the mesh format
bname = osp.basename(new_mesh_fn)
mesh_bname, ext = osp.splitext(bname)

if ext != '.obj':
	print("Currently only obj mesh file format is supported")
	exit(1)

# Make the mesh and the model's own resources resolvable
mesh_dname = osp.dirname(new_mesh_fn)
Thread.thread().file_resolver().append(mesh_dname)

dname = osp.dirname(model_fn)
Thread.thread().file_resolver().append(dname)
render_params = load_render_cfg(render_params_fn)
# 'hfName' substitutes the mesh basename into the scene template
params = {
	"hfName" : mesh_bname
}
scene = load_file(model_fn, **render_params, **params)
integrator = scene.integrator()
cam = scene.sensors()[0]

create_folder(_outdir)

# Render and write the EXR result
integrator.render(scene, cam)
film = cam.film()
outFn = join(_outdir, "outfile.exr")
film.set_destination_file(outFn)
film.develop()
# Persist the render configuration next to the outputs
out_config(outpath, render_config)

filename = 'xml/{}'.format(render_config['scene'])

# Rewrite the camera in the scene file before loading
cam_origin = [0, 0, 100]
cam_target = [0, 0, 40]
xml_util.set_perspective(filepath=filename,
                         origin=cam_origin,
                         target=cam_target,
                         spp=128)

# Add the scene directory to the FileResolver's search path
Thread.thread().file_resolver().append(os.path.dirname(filename))

# Load the actual scene
scene = load_file(filename)

# ========== Start time watch ==========
start_time = time.time()

# Call the scene's integrator to render the loaded scene
scene.integrator().render(scene, scene.sensors()[0])

end_time = time.time()
elapsed_time = end_time - start_time
print_time(elapsed_time)
# ========== End time watch ==========

# After rendering, the rendered data is stored in the file
film = scene.sensors()[0].film()
Ejemplo n.º 24
0
# then replace one of the scene parameters and try to recover it using
# differentiable rendering and gradient-based optimization.

import enoki as ek
import mitsuba
mitsuba.set_variant('gpu_autodiff_rgb')

from mitsuba.core import Float, Thread
from mitsuba.core.xml import load_file
from mitsuba.python.util import traverse
from mitsuba.python.autodiff import render, write_bitmap, Adam
import time

# Load example scene
Thread.thread().file_resolver().append('bunny')
scene = load_file('bunny/bunny.xml')

# Find differentiable scene parameters
params = traverse(scene)

# Make a backup copy of the environment map (resolution + texel data)
param_res = params['my_envmap.resolution']
param_ref = Float(params['my_envmap.data'])

# Discard all parameters except for one we want to differentiate
params.keep(['my_envmap.data'])

# Render a reference image (no derivatives used yet)
image_ref = render(scene, spp=16)
crop_size = scene.sensors()[0].film().crop_size()
write_bitmap('out_ref.png', image_ref, crop_size)