# ---- Example #1 ----
import nvisii

# Attribute bag for render options: a function object accepts arbitrary
# attributes, so it doubles as a lightweight namespace.
opt = lambda: None
opt.nb_objects = 50
opt.spp = 256
opt.width = 500
opt.height = 500
opt.out = "04_load_obj_file.png"

# # # # # # # # # # # # # # # # # # # # # # # # #
nvisii.initialize(headless=True, verbose=True)
nvisii.enable_denoiser()

# Camera entity built from a transform and a camera component.
aspect_ratio = float(opt.width) / float(opt.height)
camera = nvisii.entity.create(
    name="camera",
    transform=nvisii.transform.create("camera"),
    camera=nvisii.camera.create(name="camera", aspect=aspect_ratio),
)

camera.get_transform().look_at(
    eye=(1, 0.7, 0.2),
    at=(0, 0.1, 0.1),
    up=(0, 0, 1),
)
nvisii.set_camera_entity(camera)

# Procedural sky dome lighting.
nvisii.set_dome_light_sky(sun_position=(10, 10, 1), saturation=2)
nvisii.set_dome_light_exposure(1)

# # # # # # # # # # # # # # # # # # # # # # # # #
# ---- Example #2 ----
# 00.helloworld.py
#
# Minimal nvisii session: open a window and idle until the user closes
# it. With nothing in the scene you should only see a gaussian noise
# pattern.

import nvisii

nvisii.initialize()

# Busy-wait until the window's close button is pressed.
while not nvisii.should_window_close():
    pass

nvisii.deinitialize()
# ---- Example #3 ----
# Several overlapping volumes: some use the NanoVDB format, others a raw
# volume, and some are generated procedurally. The scene tests how
# volumes are lit by light sources and how they overlap.
#
# Note, the API here is subject to change with future versions...

import nvisii
import numpy as np

# Attribute bag for render options (function objects accept attributes).
opt = lambda: None
opt.spp = 512
opt.width = 1024
opt.height = 1024
opt.out = '22_volumes.png'

nvisii.initialize(headless=False, verbose=True, window_on_top=True)
nvisii.enable_denoiser()
# Albedo / normal denoiser guides are noisy for volumes, so configure
# the denoiser without them.
nvisii.configure_denoiser(False, False, True)

# Camera entity assembled piecewise: transform first, then the camera
# component built from a field of view.
camera = nvisii.entity.create(name="camera")
camera.set_transform(nvisii.transform.create(name="camera_transform"))
fov_radians = 0.785398  # note, this is in radians (~45 degrees)
camera.set_camera(
    nvisii.camera.create_from_fov(
        name="camera_camera",
        field_of_view=fov_radians,
        aspect=opt.width / float(opt.height),
    )
)
nvisii.set_camera_entity(camera)
# ---- Example #4 ----
# NOTE(review): `opt`, `os`, and `nvisii` are defined earlier in the
# original script; this is a fragment.
opt.height = 500
opt.noise = False
opt.frame_freq = 8
opt.nb_frames = 300
opt.outf = '12_pybullet_motion_blur/'


# # # # # # # # # # # # # # # # # # # # # # # # #
# Create the output folder if it does not already exist.
if os.path.isdir(opt.outf):
    print(f'folder {opt.outf}/ exists')
else:
    os.mkdir(opt.outf)
    print(f'created folder {opt.outf}/')
# # # # # # # # # # # # # # # # # # # # # # # # #

# lazy_updates defers scene updates, useful when many objects move.
nvisii.initialize(headless=False, lazy_updates=True)

# Idiom fix: `opt.noise is not True` replaces `not opt.noise is True`
# (identical semantics -- `not x is y` parses as `not (x is y)` -- but
# the PEP 8 spelling).
if opt.noise is not True:
    nvisii.enable_denoiser()

    # Since objects are under motion, we'll disable albedo / normal guides
    nvisii.configure_denoiser(
        use_albedo_guide=False,
        use_normal_guide=False)


# Create a camera
camera = nvisii.entity.create(
    name = "camera",
    transform = nvisii.transform.create("camera"),
    camera = nvisii.camera.create_perspective_from_fov(
# NOTE(review): this fragment assumes an `output/` directory already
# exists and that `opt.outf`, `opt.seed`, `os`, and `random` are defined
# earlier in the script.
if os.path.isdir(f'output/{opt.outf}'):
    print(f'folder output/{opt.outf}/ exists')
else:
    os.mkdir(f'output/{opt.outf}')
    print(f'created folder output/{opt.outf}/')

# Prefix the path once so later file writes land under output/.
opt.outf = f'output/{opt.outf}'

# Seed the RNG for reproducible scenes when a seed was supplied.
if not opt.seed is None:
    random.seed(int(opt.seed))

# # # # # # # # # # # # # # # # # # # # # # # # #

# # # # # # # # # # # # # # # # # # # # # # # # #

# NOTE(review): this fragment uses the older `visii` module name while the
# surrounding examples use `nvisii` -- presumably an older example; confirm
# before renaming.
visii.initialize(headless=not opt.interactive)

# Without motion blur, sample every ray at the same time instant.
if not opt.motionblur:
    visii.sample_time_interval((1, 1))

# Pin pixel samples to the pixel center (no sub-pixel jitter).
visii.sample_pixel_area(x_sample_interval=(.5, .5), y_sample_interval=(.5, .5))

# visii.set_max_bounce_depth(1)

visii.enable_denoiser()

camera = visii.entity.create(name="camera",
                             transform=visii.transform.create("camera"),
                             camera=visii.camera.create_perspective_from_fov(
                                 name="camera",
                                 field_of_view=0.785398,
    def __init__(self,
                 env,
                 img_path='images/',
                 width=500,
                 height=500,
                 spp=256,
                 use_noise=False,
                 debug_mode=False,
                 video_mode=False,
                 video_path='videos/',
                 video_name='robosuite_video_0.mp4',
                 video_fps=60,
                 verbose=1,
                 vision_modalities=None):
        """
        Initializes the nvisii wrapper. Wrapping any MuJoCo environment in this
        wrapper will use the NVISII wrapper for rendering.

        Args:
            env (MujocoEnv instance): The environment to wrap.

            img_path (string): Path to images.

            width (int, optional): Width of the rendered image. Defaults to 500.

            height (int, optional): Height of the rendered image. Defaults to 500.

            spp (int, optional): Sample-per-pixel for each image. Larger spp will result
                                 in higher quality images but will take more time to render
                                 each image. Higher quality images typically use an spp of
                                 around 512.

            use_noise (bool, optional): Use noise or denoise. Defaults to False.

            debug_mode (bool, optional): Use debug mode for nvisii. Defaults to False.

            video_mode (bool, optional): By default, the NVISII wrapper saves the results as
                                         images. If video_mode is set to true, a video is
                                         produced and will be stored in the directory defined
                                         by video_path. Defaults to False.

            video_path (string, optional): Path to store the video. Required if video_mode is
                                           set to true. Defaults to 'videos/'.

            video_name (string, optional): Name for the file for the video. Defaults to
                                           'robosuite_video_0.mp4'.

            video_fps (int, optional): Frames per second for video. Defaults to 60.

            verbose (int, optional): If verbose is set to 1, the wrapper will print the image
                                     number for each image rendered. If verbose is set to 0,
                                     nothing will be printed. Defaults to 1.

            vision_modalities (string, optional): Options to render image with different ground truths
                                              for NVISII. Options include "normal", "texture_coordinates",
                                              "position", "depth".
        """

        super().__init__(env, renderer_type="nvisii")

        self.env = env
        self.img_path = img_path
        self.width = width
        self.height = height
        self.spp = spp
        self.use_noise = use_noise

        self.video_mode = video_mode
        self.video_path = video_path
        self.video_name = video_name
        self.video_fps = video_fps

        self.verbose = verbose
        self.vision_modalities = vision_modalities

        # Counter of images rendered so far.
        self.img_cntr = 0

        env._setup_references()

        # Enable interactive mode when debugging.
        if debug_mode:
            nvisii.initialize_interactive()
        else:
            nvisii.initialize(headless=True)

        self.segmentation_type = self.env.camera_segmentations

        # Add denoiser to nvisii if not using noise.
        # (A previous bare `nvisii.configure_denoiser()` call was redundant:
        # it was immediately overridden by the configured call below.)
        if not use_noise:
            nvisii.enable_denoiser()
            # NOTE(review): argument meaning inferred from other examples'
            # keyword usage (albedo guide, normal guide, third flag) --
            # confirm against the nvisii API.
            nvisii.configure_denoiser(True, True, False)

        # Ensure output directories exist (idempotent).
        os.makedirs(img_path, exist_ok=True)

        if video_mode:
            os.makedirs(video_path, exist_ok=True)
            self.video = cv2.VideoWriter(video_path + video_name,
                                         cv2.VideoWriter_fourcc(*'MP4V'),
                                         video_fps, (self.width, self.height))
            print('video mode enabled')

        # Jitter samples across the whole pixel for regular RGB renders;
        # pin them to the pixel center when rendering ground-truth
        # modalities or segmentations.
        if vision_modalities is None and self.segmentation_type[0] is None:
            nvisii.sample_pixel_area(x_sample_interval=(0.0, 1.0),
                                     y_sample_interval=(0.0, 1.0))
        else:
            nvisii.sample_pixel_area(x_sample_interval=(0.5, 0.5),
                                     y_sample_interval=(0.5, 0.5))

        self._init_nvisii_components()
# ---- Example #7 ----
import noise
import random
import numpy as np
import PIL
from PIL import Image
import math

# Attribute bag for render options (function objects accept attributes).
# NOTE(review): the `nvisii` import appears earlier in the original script.
opt = lambda: None
opt.spp = 1024
opt.width = 500
opt.height = 500
opt.noise = False

# # # # # # # # # # # # # # # # # # # # # # # # #

nvisii.initialize(headless=True, verbose=True, lazy_updates=True)

# Idiom fix: `opt.noise is not True` replaces the non-idiomatic
# `not opt.noise is True` (identical semantics, PEP 8 spelling).
if opt.noise is not True:
    nvisii.enable_denoiser()

camera = nvisii.entity.create(name="camera",
                              transform=nvisii.transform.create("camera"),
                              camera=nvisii.camera.create(
                                  name="camera",
                                  aspect=float(opt.width) / float(opt.height)))

camera.get_transform().look_at(at=(0, 0, 0), up=(0, 0, 1), eye=(0, 1, 1))
nvisii.set_camera_entity(camera)

# # # # # # # # # # # # # # # # # # # # # # # # #
# ---- Example #8 ----
# NOTE(review): `opt` and the `nvisii` import are defined earlier in the
# original script; this fragment is truncated mid camera creation.
opt.nb_objs = 10000 
opt.spp = 16 
opt.width = 1920
opt.height = 1080 
opt.out = '02_random_scene.png'

# nvisii uses sets of components to represent a scene. 
# We can increase the max component limit here if necessary.
# In this case, we'll need 16 meshes, a material for each object,
# and finally a transform for each object as well as one more for the camera.
nvisii.initialize(
    headless = True, 
    verbose = True, 
    lazy_updates = True,
    max_entities = opt.nb_objs + 1,
    max_transforms = opt.nb_objs + 1,  
    max_materials = opt.nb_objs,
    max_meshes = 16
    # these are also available
    # max_lights, max_textures, & max_cameras
)

# Turn on the denoiser
nvisii.enable_denoiser()

# Create a camera
camera = nvisii.entity.create(
    name = "camera",
    transform = nvisii.transform.create("camera"),
    camera = nvisii.camera.create(
        name = "camera",  
# ---- Example #9 ----
# PyBullet + nvisii interactive example: camera, a single area light,
# and state variables for interactive camera control.
import pybullet as p
import time
import math
import time  # NOTE(review): duplicate `import time` (already imported above)

import nvisii as nv
nv.initialize(window_on_top=True)

# Camera entity: camera + transform components.
camera = nv.entity.create(name="camera",
                          camera=nv.camera.create("camera"),
                          transform=nv.transform.create("camera"))
nv.set_camera_entity(camera)

nv.enable_denoiser()
# Light the scene only with the explicit light below: disable dome
# sampling and make the dome black.
nv.disable_dome_light_sampling()

nv.set_dome_light_color((0, 0, 0))
# Small emissive plane acting as an area light. flip_z presumably flips
# the plane's normal so it emits downward -- confirm against nvisii docs.
light = nv.entity.create(name="light",
                         transform=nv.transform.create("light"),
                         mesh=nv.mesh.create_plane("light", flip_z=True),
                         light=nv.light.create("light"))
light.get_transform().set_position((0, 0, 1.5))
light.get_transform().set_scale((.25, .25, .25))
light.get_light().set_temperature(4500)  # warm white, in kelvin
light.get_light().set_exposure(2)

# Enable nvisii interactions
camera.get_transform().look_at(eye=(-2., 2., .4), at=(0, 0, .4), up=(0, 0, 1))
# Mutable state used later in the script for fly-through camera control.
cursor = nv.vec4()
speed_camera = .20
rot = nv.vec2(0, 0)