def rotateCamera(value):
    value = value / 100.0
    camera.get_transform().look_at(
        at=(0, 0, 0.9),  # at position
        up=(0, 0, 1),  # up vector
        eye=(5 * math.cos(value * 2 * visii.pi()),
             5 * math.sin(value * 2 * visii.pi()), 2)  # eye position
    )
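
# A hedged usage sketch (not part of the original snippet): calling rotateCamera once
# per frame sweeps the eye through a full orbit (value runs 0..99, i.e. 0..~2*pi).
# It assumes an initialized visii session plus the `camera` entity and `opt` settings
# defined in the snippets below; the output file names are hypothetical.
def render_orbit(n_frames=100):
    for i_frame in range(n_frames):
        rotateCamera(i_frame)
        visii.render_to_file(width=int(opt.width),
                             height=int(opt.height),
                             samples_per_pixel=int(opt.spp),
                             file_path="orbit_%03d.png" % i_frame)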
def interact():
    global speed_camera
    global cursor
    global rot

    # visii camera matrix
    cam_matrix = camera.get_transform().get_local_to_world_matrix()
    dt = visii.vec4(0, 0, 0, 0)

    # translation
    if visii.is_button_held("W"): dt[2] = -speed_camera
    if visii.is_button_held("S"): dt[2] =  speed_camera
    if visii.is_button_held("A"): dt[0] = -speed_camera
    if visii.is_button_held("D"): dt[0] =  speed_camera
    if visii.is_button_held("Q"): dt[1] = -speed_camera
    if visii.is_button_held("E"): dt[1] =  speed_camera

    # move the camera in its local frame
    if visii.length(dt) > 0.0:
        w_dt = cam_matrix * dt
        camera.get_transform().add_position(visii.vec3(w_dt))

    # camera rotation from mouse movement
    cursor[2] = cursor[0]
    cursor[3] = cursor[1]
    cursor[0] = visii.get_cursor_pos().x
    cursor[1] = visii.get_cursor_pos().y
    if visii.is_button_held("MOUSE_LEFT"):
        visii.set_cursor_mode("DISABLED")
        rotate_camera = True
    else:
        visii.set_cursor_mode("NORMAL")
        rotate_camera = False

    if rotate_camera:
        rot.x -= (cursor[0] - cursor[2]) * 0.001
        rot.y -= (cursor[1] - cursor[3]) * 0.001
        init_rot = visii.angleAxis(visii.pi() * .5, (1, 0, 0))
        yaw = visii.angleAxis(rot.x, (0, 1, 0))
        pitch = visii.angleAxis(rot.y, (1, 0, 0))
        camera.get_transform().set_rotation(init_rot * yaw * pitch)

    # change the movement speed
    if visii.is_button_pressed("UP"):
        speed_camera *= 0.5
        print('decrease speed camera', speed_camera)
    if visii.is_button_pressed("DOWN"):
        speed_camera /= 0.5
        print('increase speed camera', speed_camera)
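
# A hedged sketch (not from the original source) of the state interact() expects:
# the globals it mutates and how the callback could be hooked into the renderer via
# register_pre_render_callback. The initial values below are assumptions.
speed_camera = 4.0                # translation step per frame
cursor = visii.vec4(0, 0, 0, 0)   # current (x, y) and previous (z, w) cursor position
rot = visii.vec2(0, 0)            # accumulated yaw / pitch
visii.register_pre_render_callback(interact)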

# # # # # # # # # # # # # # # # # # # # # # # # #
# Example 3
# # # # # # # # # # # # # # # # # # # # # # # # #

obj3.get_material().set_sheen(1)

obj4.get_transform().set_position(visii.vec3(1.5, 0, 0))
obj4.get_transform().set_rotation(visii.quat(0.7071, 0.7071, 0, 0))
obj4.get_material().set_base_color(visii.vec3(.5, .5, .5))
obj4.get_material().set_roughness(0.7)
obj4.get_material().set_specular(1)
obj4.get_material().set_sheen(1)

# Use linear motion blur on the first object...
obj1.get_transform().set_linear_velocity(visii.vec3(.0, .0, .2))

# angular motion blur on the second object...
obj2.get_transform().set_angular_velocity(
    visii.quat(1,
               visii.pi() / 16,
               visii.pi() / 16,
               visii.pi() / 16))

# and scalar motion blur on the third object
obj3.get_transform().set_scalar_velocity(visii.vec3(-.5, -.5, -.5))

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.render_to_png(width=int(opt.width),
                    height=int(opt.height),
                    samples_per_pixel=int(opt.spp),
                    image_path=f"{opt.out}")

# let's clean up the GPU
visii.deinitialize()
opt.out = '15_camera_motion_car_blur.png'
opt.control = True

# # # # # # # # # # # # # # # # # # # # # # # # #
visii.initialize()
visii.set_dome_light_intensity(.8)
visii.resize_window(int(opt.width), int(opt.height))
# # # # # # # # # # # # # # # # # # # # # # # # #

# load the textures
dome = visii.texture.create_from_file("dome", "content/teatro_massimo_2k.hdr")

# we can add HDR images to act as dome
visii.set_dome_light_texture(dome)
visii.set_dome_light_rotation(
    visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

car_speed = 0
car_speed_x = car_speed
car_speed_y = -2 * car_speed

camera_height = 80
# # # # # # # # # # # # # # # # # # # # # # # # #

if opt.noise is not True:
    visii.enable_denoiser()

camera = visii.entity.create(name="camera",
                             transform=visii.transform.create("camera"),
                             camera=visii.camera.create(
                                 name="camera",

# # # # # # # # # # # # # # # # # # # # # # # # #
# Example 5
# # # # # # # # # # # # # # # # # # # # # # # # #

def f(frame_ids):
    # headless - no window
    # verbose - output the number of frames rendered, etc.
    visii.initialize(headless=True, verbose=False)

    # Use a neural network to denoise the ray traced image
    visii.enable_denoiser()

    # set up dome background
    negatives = list(glob.glob("negatives/*.jpg"))
    visii.set_dome_light_intensity(1)

    # create an entity that will serve as our camera.
    camera = visii.entity.create(name="camera")

    # To place the camera into our scene, we'll add a "transform" component.
    # (All visii objects have a "name" that can be used for easy lookup later.)
    camera.set_transform(visii.transform.create(name="camera_transform"))

    # To make our camera entity act like a "camera", we'll add a camera component
    camera.set_camera(
        visii.camera.create_from_fov(
            name="camera_camera",
            field_of_view=1.4,  # note, this is in radians
            aspect=opt.width / float(opt.height)))
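    # Aside (not in the original): the field of view above is in radians; to think in
    # degrees instead, convert explicitly, e.g. field_of_view=math.radians(80)
    # (80 degrees is a hypothetical value and would require `import math`).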

    # Finally, we'll select this entity to be the current camera entity.
    # (visii can only use one camera at a time)
    visii.set_camera_entity(camera)

    # let's store the camera look-at information so we can export it
    camera_struct_look_at = {
        'at': [0, 0, 0],
        'up': [0, 0, 1],
        'eye': [-1, 0, 0]
    }

    # Let's set the camera to look at an object.
    # We'll do this by editing the transform component.
    camera.get_transform().look_at(at=camera_struct_look_at['at'],
                                   up=camera_struct_look_at['up'],
                                   eye=camera_struct_look_at['eye'])

    # This function loads a mesh ignoring .mtl
    mesh = visii.mesh.create_from_file(opt.entity, opt.model)

    # create a visii entity using the loaded mesh
    obj_entity = visii.entity.create(
        name=opt.entity + "_entity",
        mesh=mesh,
        transform=visii.transform.create(opt.entity + "_entity"),
        material=visii.material.create(opt.entity + "_entity"),
    )

    # obj_entity.get_light().set_intensity(0.05)

    # you can also set the light color manually
    # obj_entity.get_light().set_color((1,0,0))

    # Add texture to the material
    material = visii.material.get(opt.entity + "_entity")
    texture = visii.texture.create_from_file(opt.entity, "./models/Cutie.PNG")
    material.set_base_color_texture(texture)

    # Let's add the cuboid to the object we want to export
    add_cuboid(opt.entity + "_entity", opt.debug)

    # let's keep track of the entities we want to export
    entities_to_export = [opt.entity + "_entity"]

    # Loop where we change and render each frame
    for i in tqdm(frame_ids):
        # load a random negative onto the dome
        negative = cv2.imread(random.choice(negatives))

        # Skip dark backgrounds (20/255)
        if np.mean(negative) < 20:
            continue

        # Fix lighting of background and make it small within the FOV
        background = make_background(negative)
        cv2.imwrite("test" + str(i) + ".png", background)
        dome = visii.texture.create_from_file("dome", "test" + str(i) + ".png")
        visii.set_dome_light_texture(dome)
        visii.set_dome_light_rotation(
            visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

        stretch_factor = 2
        scale = [
            random.uniform(1, stretch_factor),  # width
            random.uniform(1, stretch_factor),  # length
            random.uniform(1, stretch_factor)  # height 
        ]
        obj_entity.get_transform().set_scale(scale)

        # create a random rotation while making sure the entity faces forward in each frame
        rot = [
            random.uniform(-10, 10),  # Roll
            random.uniform(-15, 15),  # Pitch
            random.uniform(-45, 45)  # Yaw
        ]
        q = Quaternion.from_euler(rot[0], rot[1], rot[2], degrees=True)

        position = [
            random.uniform(0, 4),  # X Depth
            random.uniform(-1, 1),  # Y 
            random.uniform(-1, 1)  # Z
        ]
        # Scale the position based on depth into image to make sure it remains in frame
        position[1] *= position[0]
        position[2] *= position[0]

        obj_entity.get_transform().set_position(tuple(position))

        obj_entity.get_transform().set_rotation((q.x, q.y, q.z, q.w))

        # randomly decide whether this frame's data goes into the test folder or the
        # training folder (opt.test_percent controls the split)
        folder = ''
        if random.randint(0, 100) < opt.test_percent:
            folder = opt.entity + '_test/'
        else:
            folder = opt.entity + '_training/'

        # Render the scene
        visii.render_to_file(width=opt.width,
                             height=opt.height,
                             samples_per_pixel=opt.spp,
                             file_path=opt.out + folder + opt.entity + str(i) +
                             '.png')

        # set up JSON
        export_to_ndds_file(filename=opt.out + folder + opt.entity + str(i) +
                            '.json',
                            obj_names=entities_to_export,
                            width=opt.width,
                            height=opt.height,
                            camera_struct=camera_struct_look_at)

        # remove current negative from the dome
        visii.clear_dome_light_texture()
        visii.texture.remove("dome")

        os.remove("test" + str(i) + ".png")

    visii.deinitialize()
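
# A hedged driver for f() (not from the original source). The function above assumes
# imports (glob, os, random, cv2, numpy as np, tqdm, visii, a Quaternion helper) and
# the opt namespace plus the add_cuboid / make_background / export_to_ndds_file helpers,
# all defined elsewhere in the script. The frame count below is hypothetical.
if __name__ == "__main__":
    total_frames = 100
    f(list(range(total_frames)))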

# # # # # # # # # # # # # # # # # # # # # # # # #
# Example 6
# # # # # # # # # # # # # # # # # # # # # # # # #

obj3.get_material().set_base_color((0, 0, 1))
obj3.get_material().set_roughness(0.7)
obj3.get_material().set_specular(1)
obj3.get_material().set_sheen(1)

obj4.get_transform().set_position((1.5, 0, 0))
obj4.get_transform().set_rotation((0.7071, 0, 0, 0.7071))
obj4.get_material().set_base_color((.5, .5, .5))
obj4.get_material().set_roughness(0.7)
obj4.get_material().set_specular(1)
obj4.get_material().set_sheen(1)

# Use linear motion blur on the first object...
obj1.get_transform().set_linear_velocity((.0, .0, .2))

# angular motion blur on the second object...
obj2.get_transform().set_angular_velocity(
    (visii.pi() / 16, visii.pi() / 16, visii.pi() / 16, 1))

# and scalar motion blur on the third object
obj3.get_transform().set_scalar_velocity((-.5, -.5, -.5))

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.render_to_png(width=int(opt.width),
                    height=int(opt.height),
                    samples_per_pixel=int(opt.spp),
                    image_path=f"{opt.out}")

# let's clean up the GPU
visii.deinitialize()

# # # # # # # # # # # # # # # # # # # # # # # # #
# Example 7
# # # # # # # # # # # # # # # # # # # # # # # # #

camera = visii.entity.create(
    name = "camera",
    transform = visii.transform.create("camera"),
    camera = visii.camera.create(
        name = "camera",
        aspect = float(opt.width)/float(opt.height)
    )
)

# Add some motion to the camera
angle = 0
camera.get_transform().look_at(
    at = (0,0,.1),
    up = (0,0,1),
    eye = (math.sin(angle), math.cos(angle),.2),
    previous = True
)

angle = -visii.pi() * .05
camera.get_transform().look_at(
    at = (0,0,.1),
    up = (0,0,1),
    eye = (math.sin(angle), math.cos(angle),.2),
    previous = False
)

visii.set_camera_entity(camera)

# # # # # # # # # # # # # # # # # # # # # # # # #

floor = visii.entity.create(
    name="floor",
    mesh=visii.mesh.create_plane("mesh_floor"),
    transform=visii.transform.create("transform_floor"),
    material=visii.material.create("material_floor"))
floor.get_material().set_base_color((0.19, 0.16, 0.19))
floor.get_material().set_metallic(0)
floor.get_material().set_roughness(1)
floor.get_transform().set_scale((5, 5, 1))

# Teapot
teapotahedron = visii.entity.create(
    name="teapotahedron",
    mesh=visii.mesh.create_teapotahedron("teapotahedron", segments=32),
    transform=visii.transform.create("teapotahedron"),
    material=visii.material.create("teapotahedron"))
teapotahedron.get_transform().set_rotation(
    visii.angleAxis(visii.pi() / 4.0, (0, 0, 1)))
teapotahedron.get_transform().set_position((0, 0, 0.41))
teapotahedron.get_transform().set_scale((0.4, 0.4, 0.4))
teapotahedron.get_material().set_base_color((1.0, 1.0, 1.0))
teapotahedron.get_material().set_roughness(0.0)

# Objects can be made to be "alpha transparent", which simulates little holes in the
# mesh that let light through. The smaller the alpha, the more little holes.
teapotahedron.get_material().set_alpha(0.5)
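
# For contrast (a hedged aside, not part of the original scene): "transmission" models
# light passing through a solid, glass-like surface rather than through little holes.
# Swapping the alpha setting above for something like the lines below would make the
# teapot read as clear glass instead:
# teapotahedron.get_material().set_alpha(1.0)         # fully opaque surface...
# teapotahedron.get_material().set_transmission(1.0)  # ...but transmissive, like glass
# teapotahedron.get_material().set_roughness(0.0)     # keep it smooth for clear refraction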

# Make a Qt window to demonstrate the difference between alpha transparency and transmission
app = QApplication([])  # Start an application.
window = QWidget()  # Create a window.
layout = QVBoxLayout()  # Create a layout.