Exemplo n.º 1
0
# Use a procedural sky as the dome (environment) light.
nvisii.set_dome_light_sky(sun_position=(10, 10, 1), saturation=2)
nvisii.set_dome_light_exposure(1)

# # # # # # # # # # # # # # # # # # # # # # # # #

# This function loads a single obj mesh. It ignores
# the associated .mtl file
mesh = nvisii.mesh.create_from_file("obj", "./content/dragon/dragon.obj")

# An entity ties together a mesh, a transform, and a material under one name.
obj_entity = nvisii.entity.create(
    name="obj_entity",
    mesh=mesh,
    transform=nvisii.transform.create("obj_entity"),
    material=nvisii.material.create("obj_entity"))

# lets set the obj_entity up
# Quaternion is (w, x, y, z) with w first; this is ~90 degrees about x.
obj_entity.get_transform().set_rotation((0.7071, 0, 0, 0.7071))
# Reddish, fairly rough material with full specular and sheen.
obj_entity.get_material().set_base_color((0.9, 0.12, 0.08))
obj_entity.get_material().set_roughness(0.7)
obj_entity.get_material().set_specular(1)
obj_entity.get_material().set_sheen(1)

# # # # # # # # # # # # # # # # # # # # # # # # #

# Render at the resolution / sample count given on the command line (opt).
nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path=opt.out)

# let's clean up GPU resources
nvisii.deinitialize()
Exemplo n.º 2
0
# Load an HDR environment map and use it as the dome light.
tex = nvisii.texture.create_from_file("dome",
                                      "./content/teatro_massimo_2k.hdr")
# enable_cdf presumably builds an importance-sampling table for the
# texture — TODO confirm against the nvisii docs.
nvisii.set_dome_light_texture(tex, enable_cdf=True)
nvisii.set_dome_light_intensity(0.8)

# Clamp direct / indirect lighting contributions (likely to suppress
# fireflies — NOTE(review): confirm intent).
nvisii.set_direct_lighting_clamp(10.0)
nvisii.set_indirect_lighting_clamp(10.0)
nvisii.set_max_bounce_depth(0, 0)
# Sample only the centre of each pixel (no anti-aliasing jitter).
nvisii.sample_pixel_area((.5, .5), (.5, .5))

# # # # # # # # # # # # # # # # # # # # # # # # #
# First, let's render out the scene with motion blur to understand
# how the object is moving
nvisii.sample_time_interval((0.0, 1.0))
nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path=f"{opt.outf}/motion_blur.png")


def save_image(data, name):
    """Tone-map a float image buffer and write it to *name* as 8-bit.

    Applies |data|^(1/2.2) gamma correction, scales to [0, 255], and
    flips vertically before saving (render rows come out bottom-up).
    """
    tone_mapped = np.abs(data) ** (1.0 / 2.2)
    pixels = np.clip(tone_mapped * 255, 0, 255).astype(np.uint8)
    Image.fromarray(pixels).transpose(PIL.Image.FLIP_TOP_BOTTOM).save(name)


# Now let's render out where the object is at time = 0 and time = 1
nvisii.sample_time_interval((0.0, 0.0))  # only sample at t = 0
t0_array = nvisii.render(width=opt.width,
                         height=opt.height,
                         samples_per_pixel=opt.spp,
            nvisii.set_cursor_mode("NORMAL")
            rotate_camera = False

        if rotate_camera:
            rot.x -= (cursor[0] - cursor[2]) * 0.001
            rot.y -= (cursor[1] - cursor[3]) * 0.001
            init_rot = nvisii.angleAxis(nvisii.pi() * .5, (1, 0, 0))
            yaw = nvisii.angleAxis(rot.x, (0, 1, 0))
            pitch = nvisii.angleAxis(rot.y, (1, 0, 0))
            camera.get_transform().set_rotation(init_rot * yaw * pitch)

        # change speed movement
        if nvisii.is_button_pressed("UP"):
            speed_camera *= 0.5
            print('decrease speed camera', speed_camera)
        if nvisii.is_button_pressed("DOWN"):
            speed_camera /= 0.5
            print('increase speed camera', speed_camera)

    nvisii.register_pre_render_callback(interact)
    import time
    while not nvisii.should_window_close():
        time.sleep(.1)

# Final render at the command-line resolution and sample count.
nvisii.render_to_file(width=int(opt.width),
                      height=int(opt.height),
                      samples_per_pixel=int(opt.spp),
                      file_path=f"{opt.out}")

# let's clean up the GPU
nvisii.deinitialize()
Exemplo n.º 4
0
# Place and scale the teapot; give it a white, mirror-smooth metallic surface.
teapot.get_transform().set_position((1, 0, 0.7))
teapot.get_transform().set_scale((0.005, 0.005, 0.005))
teapot.get_material().set_base_color((1.0, 1.0, 1.0))
teapot.get_material().set_roughness(0.0)
teapot.get_material().set_metallic(1.0)
# Volume settings: full absorption, no scattering.
teapot.get_volume().set_gradient_factor(100)
teapot.get_volume().set_absorption(1)
teapot.get_volume().set_scattering(0)
teapot.get_volume().set_scale(250)
# Orient the model: -90 degrees about x, then 1.1*pi (~198 deg) about y.
teapot.get_transform().set_angle_axis(-nvisii.pi() * .5, (1, 0, 0))
teapot.get_transform().add_angle_axis(nvisii.pi() * 1.1, (0, 1, 0))

# Volumes can be lit up using light sources
light = nvisii.entity.create(name="light",
                             mesh=nvisii.mesh.create_sphere("light"),
                             transform=nvisii.transform.create("light"),
                             light=nvisii.light.create("light"))
light.get_transform().set_position((0, 1, 2.5))
light.get_transform().set_scale((.2, .2, .2))
# Warm white light (4000 K colour temperature).
light.get_light().set_temperature(4000)
light.get_light().set_intensity(20)

# Render out the image
print("rendering to", "22_volumes.png")
nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path="22_volumes.png")

nvisii.deinitialize()
Exemplo n.º 5
0
        obj_entity = nvisii.entity.get(ids['nvisii_id'])
        dpos = nvisii.vec3(_dpos[0],_dpos[1],_dpos[2])
        new_pos = nvisii.vec3(pos[0],pos[1],pos[2])
        obj_entity.get_transform().set_position(new_pos)

        # Use linear velocity to blur the object in motion.
        # We use frames per second here to internally convert velocity in meters / second into meters / frame.
        # The "mix" parameter smooths out the motion blur temporally, reducing flickering from linear motion blur
        obj_entity.get_transform().set_linear_velocity(dpos, frames_per_second, mix = .7)

        # nvisii quat expects w as the first argument
        new_rot = nvisii.quat(rot[3], rot[0], rot[1], rot[2])
        drot = nvisii.vec3(_drot[0],_drot[1],_drot[2])
        obj_entity.get_transform().set_rotation(new_rot)
        
        # Use angular velocity to blur the object in motion. Same concepts as above, but for 
        # angular velocity instead of scalar.
        obj_entity.get_transform().set_angular_velocity(nvisii.quat(1.0, drot), frames_per_second, mix = .7)

    print(f'rendering frame {str(i).zfill(5)}/{str(opt.nb_frames).zfill(5)}')
    nvisii.render_to_file(
        width=int(opt.width), 
        height=int(opt.height), 
        samples_per_pixel=int(opt.spp),
        file_path=f"{opt.outf}/{str(i).zfill(5)}.png"
    )

# Disconnect from the pybullet physics server, then release GPU resources.
p.disconnect()
nvisii.deinitialize()

# Stitch the numbered frames into an mp4 (ffmpeg runs inside the output folder).
subprocess.call(['ffmpeg', '-y', '-framerate', '30', '-i', r"%05d.png",  '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', '../pybullet_motion_blur.mp4'], cwd=os.path.realpath(opt.outf))
Exemplo n.º 6
0
 def render_to_file(self, img_file):
     """Render the current scene to *img_file*.

     Uses this object's configured width, height, and samples-per-pixel.
     """
     nvisii.render_to_file(file_path=img_file,
                           width=self.width,
                           height=self.height,
                           samples_per_pixel=self.spp)
Exemplo n.º 7
0
# Spin mesh1 about the z axis so it produces motion between frames.
mesh1.get_transform().set_angular_velocity(nvisii.angleAxis(0.5, (0, 0, 1)))
# Sample only at t = 1 when exporting the motion-vector AOV.
nvisii.sample_time_interval((1, 1))
nvisii.render_data_to_file(width=opt.width,
                           height=opt.height,
                           start_frame=0,
                           frame_count=1,
                           bounce=int(0),
                           options="diffuse_motion_vectors",
                           file_path=f"{opt.outf}/diffuse_motion_vectors.exr")

# for the final image, sample the entire pixel area to anti-alias the result
nvisii.sample_pixel_area(x_sample_interval=(0.0, 1.0),
                         y_sample_interval=(0.0, 1.0))

# Render the beauty image in three formats: 8-bit png, then exr and hdr
# (the format is chosen from the file extension).
nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path=f"{opt.outf}/img.png")

nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path=f"{opt.outf}/img.exr")

nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path=f"{opt.outf}/img.hdr")

# let's clean up the GPU
nvisii.deinitialize()
Exemplo n.º 8
0
def interact():
    """Pre-render callback implementing a fly-through camera.

    W/A/S/D/Q/E translate the camera in its local frame, left-mouse drag
    rotates it, UP/DOWN arrows halve or double the movement speed, and
    SPACE writes a numbered screenshot at the window resolution.
    """
    global prev_window_size
    global speed_camera
    global cursor
    global init_rot
    global rot
    global i

    # Re-fit the field of view whenever the window has been resized.
    size_now = nv.vec2(nv.get_window_size().x, nv.get_window_size().y)
    if nv.length(size_now - prev_window_size) > 0:
        camera.get_camera().set_fov(.8, size_now.x / float(size_now.y))
    prev_window_size = size_now

    # Build a camera-local movement vector from whichever keys are held.
    cam_matrix = camera.get_transform().get_local_to_world_matrix()
    move = nv.vec4(0, 0, 0, 0)
    if nv.is_button_held("W"):
        move[2] = -speed_camera
    if nv.is_button_held("S"):
        move[2] = speed_camera
    if nv.is_button_held("A"):
        move[0] = -speed_camera
    if nv.is_button_held("D"):
        move[0] = speed_camera
    if nv.is_button_held("Q"):
        move[1] = -speed_camera
    if nv.is_button_held("E"):
        move[1] = speed_camera

    # Transform the local offset into world space and apply it.
    if nv.length(move) > 0.0:
        camera.get_transform().add_position(nv.vec3(cam_matrix * move))

    # Cursor history: [0..1] hold the current position, [2..3] the previous.
    cursor[2], cursor[3] = cursor[0], cursor[1]
    cursor[0] = nv.get_cursor_pos().x
    cursor[1] = nv.get_cursor_pos().y

    # Mouse-look while the left button is held: accumulate yaw/pitch from
    # the cursor delta and compose with the initial orientation.
    if nv.is_button_held("MOUSE_LEFT"):
        rot.x -= (cursor[0] - cursor[2]) * 0.001
        rot.y -= (cursor[1] - cursor[3]) * 0.001
        yaw = nv.angleAxis(rot.x, (0, 1, 0))
        pitch = nv.angleAxis(rot.y, (1, 0, 0))
        camera.get_transform().set_rotation(init_rot * yaw * pitch)

    # Arrow keys halve / double the fly speed.
    if nv.is_button_pressed("UP"):
        speed_camera *= 0.5
        print('decrease speed camera', speed_camera)

    if nv.is_button_pressed("DOWN"):
        speed_camera /= 0.5
        print('increase speed camera', speed_camera)

    # SPACE saves a numbered 256-spp screenshot.
    if nv.is_button_pressed("SPACE"):
        i = i + 1
        nv.render_to_file(nv.get_window_size().x,
                          nv.get_window_size().y, 256,
                          str(i) + ".png")
Exemplo n.º 9
0
        c = [0,0,0]
        if magnitude > 0.000001:
            angle=py_ang(angle_vector)
            if use_magnitude:
                c = colorsys.hsv_to_rgb(angle/360,1,magnitude)
            else:
                c = colorsys.hsv_to_rgb(angle/360,1,1)
        # for i_c in range(3):
        image[i,j,0:3] = c
    return image



# Render the first frame with every object at its initial pose.
nvisii.render_to_file(
    width=int(opt.width), 
    height=int(opt.height), 
    samples_per_pixel=int(opt.spp),
    file_path=opt.outf + "20_frame1.png"
)

# Record each object's current pose as its "previous" keyframe, then move
# it, so the renderer can derive per-object motion between the two states.
obj1.get_transform().set_position(obj1.get_transform().get_position(),previous=True)
obj1.get_transform().add_position(nvisii.vec3(0,0.5,0))

obj2.get_transform().set_position(obj2.get_transform().get_position(),previous=True)
obj2.get_transform().add_position(nvisii.vec3(0,0,0.5))

# obj3 rotates instead of translating; quat is (w, x, y, z) with w first.
obj3.get_transform().set_rotation(obj3.get_transform().get_rotation(),previous=True)
obj3.get_transform().add_rotation(nvisii.quat(0,-1,0,0))

motion_vectors_array = nvisii.render_data(
    width=int(opt.width), 
    height=int(opt.height), 
Exemplo n.º 10
0
mat = floor.get_material()
# mat = nvisii.material.get("material_floor") # <- this also works

# Mirrors are smooth and "metallic".
mat.set_base_color((0.19, 0.16, 0.19))
mat.set_metallic(1)
mat.set_roughness(0)

# Make the floor large by scaling it
trans = floor.get_transform()
trans.set_scale((5, 5, 1))

# Let's also add a sphere
sphere = nvisii.entity.create(name="sphere",
                              mesh=nvisii.mesh.create_sphere("sphere"),
                              transform=nvisii.transform.create("sphere"),
                              material=nvisii.material.create("sphere"))
# Lift the sphere so it sits just above the floor plane.
sphere.get_transform().set_position((0, 0, 0.41))
sphere.get_transform().set_scale((0.4, 0.4, 0.4))
# Green, fairly rough (diffuse-looking) surface.
sphere.get_material().set_base_color((0.1, 0.9, 0.08))
sphere.get_material().set_roughness(0.7)

# Now that we have a simple scene, let's render it
print("rendering to", "01_simple_scene.png")
nvisii.render_to_file(width=opt.width,
                      height=opt.height,
                      samples_per_pixel=opt.spp,
                      file_path="01_simple_scene.png")

nvisii.deinitialize()