def build_and_render(scene):
    lm.reset()
    lmscene.load(ft.env.scene_path, scene)
    lm.build('accel::nanort', {})
    lm.asset('film_output', 'film::bitmap', {'w': 1920, 'h': 1080})
    lm.render('renderer::raycast', {'output': lm.asset('film_output')})
    return np.copy(lm.buffer(lm.asset('film_output')))
def render_and_visualize():
    lm.build('accel::nanort', {})
    lm.asset('film_output', 'film::bitmap', {'w': 1920, 'h': 1080})
    lm.render(
        'renderer::raycast', {
            'output': lm.asset('film_output'),
            'visualize_normal': True,
            'bg_color': [1, 1, 1]
        })
    img = np.copy(lm.buffer(lm.asset('film_output')))
    f = plt.figure(figsize=(15, 15))
    ax = f.add_subplot(111)
    ax.imshow(np.clip(np.power(img, 1 / 2.2), 0, 1), origin='lower')
    plt.show()
Example #3
def breakfast_room(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [0.195303, 2.751188, 7.619322],
        'center': [0.139881, 2.681201, 6.623315],
        'up': [0,1,0],
        'vfov': 39.384486
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'breakfast_room/breakfast_room.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #4
def vokselia_spawn(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [1.525444, 0.983225, 0.731648],
        'center': [0.780862, 0.377208, 0.451751],
        'up': [0,1,0],
        'vfov': 49.857803
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'vokselia_spawn/vokselia_spawn.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #5
def bedroom(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [22.634083, -19.868534, 24.155586],
        'center': [22.027370, -19.116102, 23.899178],
        'up': [0,0,1],
        'vfov': 57.978600
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'bedroom/iscv2.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #6
def cornell_box_sphere(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [0,1,5],
        'center': [0,1,0],
        'up': [0,1,0],
        'vfov': 30
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'cornell_box/CornellBox-Sphere.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #7
def fireplace_room(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [5.101118, 1.083746, -2.756308],
        'center': [4.167568, 1.078925, -2.397892],
        'up': [0,1,0],
        'vfov': 43.001194
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'fireplace_room/fireplace_room.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #8
def powerplant(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [-200000, 200000, 200000],
        'center': [0, 0, 0],
        'up': [0,1,0],
        'vfov': 30
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'powerplant/powerplant.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #9
def cube(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [2, 2, 2],
        'center': [0, 0, 0],
        'up': [0,1,0],
        'vfov': 30
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'cube/cube.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #10
def conference(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [926.391602, 329.094299, 141.714554],
        'center': [925.628784, 328.858490, 141.112488],
        'up': [0,1,0],
        'vfov': 50.717898
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'conference/conference.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #11
def cloud(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [22.288721, 32.486145, 85.542023],
        'center': [22.218456, 32.084743, 84.628822],
        'up': [0,1,0],
        'vfov': 53.023230
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'cloud/altostratus00.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #12
def bunny(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [-0.191925, 2.961061, 4.171464],
        'center': [-0.185709, 2.478091, 3.295850],
        'up': [0,1,0],
        'vfov': 28.841546
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'bunny/bunny.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
Example #13
def buddha(scene_path):
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [0.027255, 0.941126, -2.217943],
        'center': [0.001645, 0.631268, -1.267505],
        'up': [0,1,0],
        'vfov': 19.297509
    })
    lm.asset('model_obj', 'model::wavefrontobj', {
        'path': os.path.join(scene_path, 'buddha/buddha.obj')
    })
    lm.primitive(lm.identity(), {
        'camera': lm.asset('camera_main')
    })
    lm.primitive(lm.identity(), {
        'model': lm.asset('model_obj')
    })
def scene_setup():
    lm.reset()
    lm.asset(
        'mesh_sphere', 'mesh::raw', {
            'ps': vs.flatten().tolist(),
            'ns': ns.flatten().tolist(),
            'ts': ts.flatten().tolist(),
            'fs': {
                'p': fs.flatten().tolist(),
                't': fs.flatten().tolist(),
                'n': fs.flatten().tolist()
            }
        })
    lm.asset('camera_main', 'camera::pinhole', {
        'position': [0, 0, 50],
        'center': [0, 0, 0],
        'up': [0, 1, 0],
        'vfov': 30
    })
    lm.asset('material_white', 'material::diffuse', {'Kd': [1, 1, 1]})
    lm.primitive(lm.identity(), {'camera': lm.asset('camera_main')})
Example #15
# ### Master process

import _lm_renderer_ao

lm.init()
lm.log.init('logger::jupyter', {})
lm.progress.init('progress::jupyter', {})
lm.dist.init('dist::master::default', {
    'port': 5000
})
lm.dist.printWorkerInfo()

lmscene.load(ft.env.scene_path, 'fireplace_room')
lm.build('accel::sahbvh', {})
lm.asset('film_output', 'film::bitmap', {'w': 320, 'h': 180})
lm.renderer('renderer::ao', {
    'output': lm.asset('film_output'),
    'spp': 3
})

lm.dist.allowWorkerConnection(False)
lm.dist.sync()
lm.render()
lm.dist.gatherFilm(lm.asset('film_output'))
lm.dist.allowWorkerConnection(True)

img = np.copy(lm.buffer(lm.asset('film_output')))
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img,1/2.2),0,1), origin='lower')


# ### Without instancing

scene_setup()
for y in np.linspace(-10, 10, 10):
    for x in np.linspace(-10, 10, 10):
        p = lm.primitiveNode({
            'mesh': lm.asset('mesh_sphere'),
            'material': lm.asset('material_white')
        })
        t = lm.transformNode(lm.translate(np.array([x, y, 0])))
        lm.addChild(t, p)
        lm.addChild(lm.rootNode(), t)

render_and_visualize()

# ### Single-level

# +
scene_setup()

# Instance group
g = lm.instanceGroupNode()
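
# A hedged sketch of how the instance group could be populated and referenced,
# reusing the node helpers (primitiveNode, transformNode, addChild, rootNode)
# from the non-instanced version above; the exact wiring is an assumption:
lm.addChild(g, lm.primitiveNode({
    'mesh': lm.asset('mesh_sphere'),
    'material': lm.asset('material_white')
}))
for y in np.linspace(-10, 10, 10):
    for x in np.linspace(-10, 10, 10):
        # Each transform node references the shared instance group
        t = lm.transformNode(lm.translate(np.array([x, y, 0])))
        lm.addChild(t, g)
        lm.addChild(lm.rootNode(), t)

render_and_visualize()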
from _run_worker_process import *
if __name__ == '__main__':
    pool = mp.Pool(4, run_worker_process)

# ### Master process

lm.init()
lm.log.init('logger::jupyter', {})
lm.progress.init('progress::jupyter', {})
lm.dist.init('dist::master::default', {'port': 5000})
lm.dist.printWorkerInfo()

lmscene.load(ft.env.scene_path, 'fireplace_room')
lm.build('accel::sahbvh', {})
lm.asset('film_output', 'film::bitmap', {'w': 1920, 'h': 1080})
lm.renderer('renderer::raycast', {'output': lm.asset('film_output')})

lm.dist.allowWorkerConnection(False)
lm.dist.sync()
lm.render()
lm.dist.gatherFilm(lm.asset('film_output'))
lm.dist.allowWorkerConnection(True)

img = np.copy(lm.buffer(lm.asset('film_output')))
f = plt.figure(figsize=(15, 15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img, 1 / 2.2), 0, 1), origin='lower')
plt.show()

# Terminating the worker processes is necessary on Windows
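
# A minimal cleanup sketch, assuming the `pool` created with mp.Pool above:
pool.terminate()
pool.join()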
Example #18
lm.comp.loadPlugin(os.path.join(ft.env.bin_path, 'accel_nanort'))
lm.comp.loadPlugin(os.path.join(ft.env.bin_path, 'accel_embree'))

accels = [
    'accel::sahbvh', 'accel::nanort', 'accel::embree', 'accel::embreeinstanced'
]
scenes = lmscene.scenes_small()

build_time_df = pd.DataFrame(columns=accels, index=scenes)
render_time_df = pd.DataFrame(columns=accels, index=scenes)
for scene in scenes:
    lm.reset()
    lmscene.load(ft.env.scene_path, scene)
    for accel in accels:
        lm.asset('film_output', 'film::bitmap', {'w': 1920, 'h': 1080})

        def build():
            lm.build(accel, {})

        build_time = timeit.timeit(stmt=build, number=1)
        build_time_df.loc[scene, accel] = build_time

        def render():
            lm.render('renderer::raycast', {'output': lm.asset('film_output')})

        render_time = timeit.timeit(stmt=render, number=1)
        render_time_df.loc[scene, accel] = render_time

build_time_df
Example #19
def render():
    lm.render('renderer::raycast', {'output': lm.asset('film_output')})
Example #20
import matplotlib.pyplot as plt
import lmfunctest as ft
import lightmetrica as lm
# %load_ext lightmetrica_jupyter

lm.init()
lm.log.init('logger::jupyter')
lm.progress.init('progress::jupyter')
lm.info()

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Similarly, we define the assets. In addition to ``film``, we define ``camera``, ``mesh``, and ``material``. Although the asset types are different, we can use the same consistent interface to define them. Here we prepare a pinhole camera (``camera::pinhole``), a raw mesh (``mesh::raw``), and a diffuse material (``material::diffuse``) with the corresponding parameters. Please refer to :ref:`component_ref` for a detailed description of the parameters.

# +
# Film for the rendered image
lm.asset('film1', 'film::bitmap', {'w': 1920, 'h': 1080})

# Pinhole camera
lm.asset('camera1', 'camera::pinhole', {
    'position': [0, 0, 5],
    'center': [0, 0, 0],
    'up': [0, 1, 0],
    'vfov': 30
})

# Load mesh with raw vertex data
lm.asset(
    'mesh1', 'mesh::raw', {
        'ps': [-1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1],
        'ns': [0, 0, 1],
        'ts': [0, 0, 1, 0, 1, 1, 0, 1],
    def pdf(self, geom, comp, wi, wo):
        return 0

    def eval(self, geom, comp, wi, wo):
        return np.zeros(3)


lm.init('user::default', {})
lm.parallel.init('parallel::openmp', {'numThreads': 1})
lm.log.init('logger::jupyter')
lm.progress.init('progress::jupyter')
lm.info()

# +
# Original material
lm.asset('mat_vis_normal', 'material::visualize_normal', {})

# Scene
lm.asset(
    'camera_main', 'camera::pinhole', {
        'position': [5.101118, 1.083746, -2.756308],
        'center': [4.167568, 1.078925, -2.397892],
        'up': [0, 1, 0],
        'vfov': 43.001194
    })
lm.asset(
    'model_obj', 'model::wavefrontobj', {
        'path':
        os.path.join(ft.env.scene_path, 'fireplace_room/fireplace_room.obj'),
        'base_material':
        lm.asset('mat_vis_normal')
Example #22
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lmfunctest as ft
import lmscene
import lightmetrica as lm

# %load_ext lightmetrica_jupyter

lm.init('user::default', {
    'numThreads': -1,
    'logger': 'logger::jupyter'
})

lm.asset('camera_main', 'camera::pinhole', {
    'position': [5.101118, 1.083746, -2.756308],
    'center': [4.167568, 1.078925, -2.397892],
    'up': [0,1,0],
    'vfov': 43.001194
})
lm.asset('obj_base_mat', 'material::diffuse', {
    'Kd': [.8,.2,.2]
})
lm.asset('model_obj', 'model::wavefrontobj', {
    'path': os.path.join(ft.env.scene_path, 'fireplace_room/fireplace_room.obj'),
    'base_material': lm.asset('obj_base_mat')
})
lm.primitive(lm.identity(), {
    'camera': lm.asset('camera_main')
})
lm.primitive(lm.identity(), {
    'model': lm.asset('model_obj')
})
# %matplotlib inline
import matplotlib.pyplot as plt
import lmfunctest as ft
import lightmetrica as lm
# %load_ext lightmetrica_jupyter

lm.init()
lm.log.init('logger::jupyter')
lm.progress.init('progress::jupyter')
lm.info()

lm.comp.loadPlugin(os.path.join(ft.env.bin_path, 'functest_renderer_ao'))

# +
# Film for the rendered image
lm.asset('film1', 'film::bitmap', {'w': 1920, 'h': 1080})

# Pinhole camera
lm.asset(
    'camera1', 'camera::pinhole', {
        'position': [5.101118, 1.083746, -2.756308],
        'center': [4.167568, 1.078925, -2.397892],
        'up': [0, 1, 0],
        'vfov': 43.001194
    })

# OBJ model
lm.asset('obj1', 'model::wavefrontobj', {
    'path':
    os.path.join(ft.env.scene_path, 'fireplace_room/fireplace_room.obj')
})
Example #24
def build_and_render(accel):
    lm.build(accel, {})
    lm.asset('film_output', 'film::bitmap', {'w': 1920, 'h': 1080})
    lm.render('renderer::raycast', {'output': lm.asset('film_output')})
    return np.copy(lm.buffer(lm.asset('film_output')))
Example #25
        for i in range(w * h):
            process(i)
        lm.progress.end()


lm.init('user::default', {})

lm.parallel.init('parallel::openmp', {'numThreads': 1})

lm.log.init('logger::jupyter', {})

lm.progress.init('progress::jupyter', {})

lm.info()

# Scene
lm.asset('film_output', 'film::bitmap', {'w': 640, 'h': 360})
lmscene.load(ft.env.scene_path, 'fireplace_room')

lm.build('accel::sahbvh', {})

lm.render('renderer::ao', {'output': lm.asset('film_output'), 'spp': 5})

img = np.flip(np.copy(lm.buffer(lm.asset('film_output'))), axis=0)

f = plt.figure(figsize=(15, 15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img, 1 / 2.2), 0, 1))
plt.show()
Example #26
# No output in Jupyter notebook
lm.info()

# Initialize the logger with logger::jupyter
lm.log.init('logger::jupyter', {})

lm.info()

# ### Wrong asset name
#
# If you specify a wrong asset type name (``film:bitmap`` instead of ``film::bitmap``), the framework will raise an exception.

try:
    # Wrong: film:bitmap
    lm.asset('film1', 'film:bitmap', {'w': 1920, 'h': 1080})
except Exception:
    traceback.print_exc()

# Correct: film::bitmap
lm.asset('film1', 'film::bitmap', {'w': 1920, 'h': 1080})

# ### Invalid parameter
#
# The framework raises an exception if you try to create an asset with invalid parameters.
# Note that it will *not* generate an error for unnecessary parameters.

try:
    # vfov is missing
    lm.asset('camera1', 'camera::pinhole', {
        'position': [0,0,5],
        'center': [0,0,0],
        'up': [0,1,0]})  # remaining values follow the earlier pinhole example; vfov omitted
except Exception:
    traceback.print_exc()
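
# Unnecessary parameters, on the other hand, are accepted without an error.
# A hedged sketch ('camera2' and 'unknown_param' are made up for illustration):
lm.asset('camera2', 'camera::pinhole', {
    'position': [0,0,5],
    'center': [0,0,0],
    'up': [0,1,0],
    'vfov': 30,
    'unknown_param': 42  # not a camera::pinhole parameter; silently ignored per the note above
})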
Example #27
# -

lm.log.init('logger::jupyter')
lm.progress.init('progress::jupyter')

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Once the framework has been initialized properly, you can print a splash message with the :cpp:func:`lm::info()` function.
# -

lm.info()

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Next we define the `assets` necessary to dispatch the renderer, such as materials and meshes. In this example, we only need a `film` to which the renderer outputs the image. Assets are defined with the :cpp:func:`lm::asset` function. The first argument (``film``) specifies the name by which the asset is referenced. The second argument (``film::bitmap``) specifies the type of the asset, formatted as ``<type of asset>::<implementation>``, and the last argument (``{...}``) specifies the parameters passed to the instance. ``film::bitmap`` takes two parameters, ``w`` and ``h``, which specify the width and height of the film respectively.
# -

lm.asset('film', 'film::bitmap', {'w': 1920, 'h': 1080})

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Now we are ready for rendering. The :cpp:func:`lm::render` function dispatches rendering, where we specify the type of the renderer and its parameters as arguments. ``renderer::blank`` is a toy renderer that simply writes a blank image to the film specified by the ``output`` parameter, for which we can use the ID of a previously defined asset. We can also specify the background color with the ``color`` parameter.
# -

lm.render('renderer::blank', {'output': lm.asset('film'), 'color': [1, 1, 1]})

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# After rendering, the generated image is kept in ``film``. The :cpp:func:`lm::save` function writes this film to disk as an image file.
# -

lm.save(lm.asset('film'), 'blank.png')

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# You can also visualize the film directly in the Jupyter notebook. :cpp:func:`lm::buffer` returns the internal image data as a numpy array, which you can visualize with matplotlib. We rendered a blank white image, so the output below is exactly what we expect.
# -
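
# A minimal visualization sketch (assuming numpy is imported as np), following the
# matplotlib pattern used in the other examples of this collection:
img = np.copy(lm.buffer(lm.asset('film')))
f = plt.figure(figsize=(15, 15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img, 1 / 2.2), 0, 1), origin='lower')
plt.show()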
Example #28
lm.init('user::default', {})

lm.parallel.init('parallel::openmp', {
    'numThreads': 1
})

lm.log.init('logger::jupyter', {})

lm.progress.init('progress::jupyter', {})

lm.info()

# Scene
lm.asset('film_output', 'film::bitmap', {
    'w': 640,
    'h': 360
})
lmscene.load(ft.env.scene_path, 'fireplace_room')

lm.build('accel::sahbvh', {})

lm.render('renderer::ao', {
    'output': lm.asset('film_output'),
    'spp': 5
})

img = np.flip(np.copy(lm.buffer(lm.asset('film_output'))), axis=0)

f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img,1/2.2),0,1))
Example #29
# %load_ext lightmetrica_jupyter

lm.init('user::default', {})
lm.parallel.init('parallel::openmp', {'numThreads': -1})
lm.log.init('logger::jupyter', {})
lm.info()

scenes = lmscene.scenes_small()

rmse_series = pd.Series(index=scenes, dtype=float)
for scene in scenes:
    print("Testing [scene='{}']".format(scene))

    lm.reset()

    lm.asset('film_output', 'film::bitmap', {'w': 1920, 'h': 1080})

    # Load scene and render
    lmscene.load(ft.env.scene_path, scene)
    lm.build('accel::sahbvh', {})
    lm.render('renderer::raycast', {'output': lm.asset('film_output')})
    img_orig = np.copy(lm.buffer(lm.asset('film_output')))

    # Serialize, reset, deserialize, and render
    lm.serialize('lm.serialized')
    lm.reset()
    lm.deserialize('lm.serialized')
    lm.render('renderer::raycast', {'output': lm.asset('film_output')})
    img_serial = np.copy(lm.buffer(lm.asset('film_output')))

    # Compare two images
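    # A hedged sketch of the comparison step, storing the per-scene RMSE in the
    # rmse_series defined above:
    diff = img_orig - img_serial
    rmse_series.loc[scene] = np.sqrt(np.mean(diff * diff))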
lm.log.init('logger::jupyter')
lm.progress.init('progress::jupyter')

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Once the framework has been initialized properly, you can print a splash message with the :cpp:func:`lm::info()` function.
# -

lm.info()

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Next we define the `assets` necessary to dispatch the renderer, such as materials and meshes. In this example, we only need a `film` to which the renderer outputs the image. Assets are defined with the :cpp:func:`lm::asset` function. The first argument (``film``) specifies the name by which the asset is referenced. The second argument (``film::bitmap``) specifies the type of the asset, formatted as ``<type of asset>::<implementation>``, and the last argument (``{...}``) specifies the parameters passed to the instance. ``film::bitmap`` takes two parameters, ``w`` and ``h``, which specify the width and height of the film respectively.
# -

lm.asset('film', 'film::bitmap', {
    'w': 1920,
    'h': 1080
})

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# Now we are ready for rendering. The :cpp:func:`lm::render` function dispatches rendering, where we specify the type of the renderer and its parameters as arguments. ``renderer::blank`` is a toy renderer that simply writes a blank image to the film specified by the ``output`` parameter, for which we can use the ID of a previously defined asset. We can also specify the background color with the ``color`` parameter.
# -

lm.render('renderer::blank', {
    'output': lm.asset('film'),
    'color': [1,1,1]
})

# + {"raw_mimetype": "text/restructuredtext", "active": ""}
# After rendering, the generated image is kept in ``film``. The :cpp:func:`lm::save` function writes this film to disk as an image file.
# -
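
# A minimal sketch mirroring the save call shown earlier in this tutorial:
lm.save(lm.asset('film'), 'blank.png')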