Example #1
mesh1.get_material().set_base_color((1.0, 0.0, 0.0))

# keyframe the object's motion for this frame: the previous key places it at
# x = -0.05 and the current key at x = +0.05; scale is identical at both keys,
# so only the translation is blurred
mesh1.get_transform().set_position((-0.05, 0.0, 0), previous=True)
mesh1.get_transform().set_scale((0.1, 0.1, 0.1), previous=True)
mesh1.get_transform().set_position((0.05, 0.0, 0), previous=False)
mesh1.get_transform().set_scale((0.1, 0.1, 0.1), previous=False)

tex = nvisii.texture.create_from_file("dome",
                                      "./content/teatro_massimo_2k.hdr")
nvisii.set_dome_light_texture(tex, enable_cdf=True)
nvisii.set_dome_light_intensity(0.8)

# clamp direct and indirect lighting contributions to reduce fireflies,
# limit the bounce depth, and sample only pixel centers
nvisii.set_direct_lighting_clamp(10.0)
nvisii.set_indirect_lighting_clamp(10.0)
nvisii.set_max_bounce_depth(0, 0)
nvisii.sample_pixel_area((.5, .5), (.5, .5))

# # # # # # # # # # # # # # # # # # # # # # # # #
# First, let's render out the scene with motion blur to understand
# how the object is moving
nvisii.sample_time_interval((0.0, 1.0))
nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path=f"{opt.outf}/motion_blur.png")


def save_image(data, name):
    # gamma-correct the raw float buffer, convert it to 8-bit, flip it
    # vertically to match image orientation, and write it to disk
    img = Image.fromarray(
        np.clip((np.abs(data)**(1.0 / 2.2)) * 255, 0,
                255).astype(np.uint8)).transpose(PIL.Image.FLIP_TOP_BOTTOM)
    img.save(name)
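
# (Added usage sketch, not in the original example.) save_image can be paired
# with nvisii.render_data, which returns a flat float buffer; the reshape to
# (height, width, 4) assumes nvisii's RGBA layout.
normal_data = nvisii.render_data(width=opt.width,
                                 height=opt.height,
                                 start_frame=0,
                                 frame_count=1,
                                 bounce=int(0),
                                 options="normal")
normal_data = np.array(normal_data).reshape(opt.height, opt.width, 4)
save_image(normal_data, f"{opt.outf}/normal_preview.png")
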
Example #2
    def __init__(self,
                 env,
                 img_path='images/',
                 width=500,
                 height=500,
                 spp=256,
                 use_noise=False,
                 debug_mode=False,
                 video_mode=False,
                 video_path='videos/',
                 video_name='robosuite_video_0.mp4',
                 video_fps=60,
                 verbose=1,
                 vision_modalities=None):
        """
        Initializes the nvisii wrapper. Wrapping any MuJoCo environment in this 
        wrapper will use the NVISII wrapper for rendering.

        Args:
            env (MujocoEnv instance): The environment to wrap.

            img_path (string): Path to images.

            width (int, optional): Width of the rendered image. Defaults to 500.

            height (int, optional): Height of the rendered image. Defaults to 500.

            spp (int, optional): Sample-per-pixel for each image. Larger spp will result
                                 in higher quality images but will take more time to render
                                 each image. Higher quality images typically use an spp of
                                 around 512.

            use_noise (bool, optional): Render with noise (True) or enable the denoiser
                                        (False). Defaults to False.

            debug_mode (bool, optional): Use interactive debug mode for NVISII. Defaults
                                         to False.

            video_mode (bool, optional): By default, the NVISII wrapper saves the results as
                                         images. If video_mode is set to True, a video is
                                         produced instead and stored in the directory defined
                                         by video_path. Defaults to False.

            video_path (string, optional): Path to store the video. Required if video_mode is
                                           set to True. Defaults to 'videos/'.

            video_name (string, optional): Name for the file for the video. Defaults to
                                           'robosuite_video_0.mp4'.
            
            video_fps (int, optional): Frames per second for video. Defaults to 60.

            verbose (int, optional): If verbose is set to 1, the wrapper will print the image
                                     number for each image rendered. If verbose is set to 0, 
                                     nothing will be printed. Defaults to 1.

            vision_modalities (string, optional): Option to render the image with a different
                                                  ground-truth buffer from NVISII. Options
                                                  include "normal", "texture_coordinates",
                                                  "position", and "depth".
        """

        super().__init__(env, renderer_type="nvisii")

        self.env = env
        self.img_path = img_path
        self.width = width
        self.height = height
        self.spp = spp
        self.use_noise = use_noise

        self.video_mode = video_mode
        self.video_path = video_path
        self.video_name = video_name
        self.video_fps = video_fps

        self.verbose = verbose
        self.vision_modalities = vision_modalities

        self.img_cntr = 0

        env._setup_references()

        # enable interactive mode when debugging
        if debug_mode:
            nvisii.initialize_interactive()
        else:
            nvisii.initialize(headless=True)

        self.segmentation_type = self.env.camera_segmentations

        # add denoiser to nvisii if not using noise
        if not use_noise:
            nvisii.enable_denoiser()
            nvisii.configure_denoiser(True, True, False)

        if not os.path.exists(img_path):
            os.makedirs(img_path)

        if video_mode:
            if not os.path.exists(video_path):
                os.makedirs(video_path)
            self.video = cv2.VideoWriter(os.path.join(video_path, video_name),
                                         cv2.VideoWriter_fourcc(*'MP4V'),
                                         video_fps, (self.width, self.height))
            print('video mode enabled')

        # sample the full pixel area for regular rendering, but only pixel
        # centers when exporting ground-truth buffers or segmentations
        if vision_modalities is None and self.segmentation_type[0] is None:
            nvisii.sample_pixel_area(x_sample_interval=(0.0, 1.0),
                                     y_sample_interval=(0.0, 1.0))
        else:
            nvisii.sample_pixel_area(x_sample_interval=(0.5, 0.5),
                                     y_sample_interval=(0.5, 0.5))

        self._init_nvisii_components()
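
# (Added usage sketch, not part of the original wrapper code.) Constructing the
# wrapper might look like this; the class name "NVISIIWrapper" is assumed here
# for illustration, since the class definition is not shown in this excerpt.
import robosuite

env = robosuite.make("Lift", robots="Panda", has_renderer=False)
wrapped_env = NVISIIWrapper(env, width=500, height=500, spp=256, video_mode=True)
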
Example #3
opt.outf = f'output/{opt.outf}'

if opt.seed is not None:
    random.seed(int(opt.seed))

# # # # # # # # # # # # # # # # # # # # # # # # #

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.initialize(headless=not opt.interactive)

if not opt.motionblur:
    visii.sample_time_interval((1, 1))

visii.sample_pixel_area(x_sample_interval=(.5, .5), y_sample_interval=(.5, .5))

# visii.set_max_bounce_depth(1)

visii.enable_denoiser()

camera = visii.entity.create(name="camera",
                             transform=visii.transform.create("camera"),
                             camera=visii.camera.create_perspective_from_fov(
                                 name="camera",
                                 field_of_view=0.785398,
                                 aspect=float(opt.width) / float(opt.height)))
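
# (Added sketch, not in the original example.) The camera entity is typically
# made active and aimed before rendering; the eye/at/up values here are
# placeholders.
camera.get_transform().look_at(at=visii.vec3(0, 0, 0.5),
                               up=visii.vec3(0, 0, 1),
                               eye=visii.vec3(3, 0, 1))
visii.set_camera_entity(camera)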

# data structure
random_camera_movement = {
    'at': visii.vec3(1, 0, 0),
Example #4
mesh1.get_material().set_base_color_texture(brick_base_color)

mesh1.get_transform().set_position((0.0, 0.0, 0))
mesh1.get_transform().set_scale((0.12, 0.12, 0.12))

nvisii.set_dome_light_intensity(0)

# # # # # # # # # # # # # # # # # # # # # # # # #

# nvisii offers different ways to export meta data
# these are exported as raw arrays of numbers

# for many segmentations, it might be beneficial to only
# sample pixel centers instead of the whole pixel area.
# to do so, call this function
nvisii.sample_pixel_area(x_sample_interval=(.5, .5),
                         y_sample_interval=(.5, .5))

nvisii.render_data_to_file(width=opt.width,
                           height=opt.height,
                           start_frame=0,
                           frame_count=1,
                           bounce=int(0),
                           options="depth",
                           file_path=f"{opt.outf}/depth.exr")

nvisii.render_data_to_file(width=opt.width,
                           height=opt.height,
                           start_frame=0,
                           frame_count=1,
                           bounce=int(0),
                           options="normal",