Example no. 1
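
# The indented lines below sit inside a double loop that builds a grid of
# spheres, one material per (x, y) cell; a minimal sketch of the assumed
# surrounding loop (grid size, entity names, and placement are illustrative):
for x in range(20):
    for y in range(20):
        name = f"sphere_{x}_{y}"
        sphere_entity = visii.entity.create(
            name=name,
            mesh=visii.mesh.create_sphere(name),
            transform=visii.transform.create(name),
            material=visii.material.create(name))
        sphere_entity.get_transform().set_position((x * 0.25, y * 0.25, 0.0))
        sphere_entity.get_transform().set_scale((0.1, 0.1, 0.1))
        mat = sphere_entity.get_material()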
        # Controls how much specular reflection occurs at head-on
        # (normal-incidence) reflections
        if y == 16 or y == 17:
            mat.set_roughness(0.0)
            mat.set_specular(x / 20.0)

        # Controls the probability of a ray passing through the material
        if y == 18 or y == 19:
            mat.set_alpha(x / 20.0)

# Create a camera
center = visii.get_scene_aabb_center()
camera = visii.entity.create(
    name="camera",
    transform=visii.transform.create(name="camera_transform"),
    camera=visii.camera.create(name="camera_camera",
                               aspect=opt.width / opt.height))
camera.get_transform().look_at(at=(center.x, 0, center.z),
                               up=(0, 0, 1),
                               eye=(center.x, -5, center.z))
visii.set_camera_entity(camera)

# Render out the final image
print("rendering to", opt.out)
visii.render_to_file(width=opt.width,
                     height=opt.height,
                     samples_per_pixel=opt.spp,
                     file_path=opt.out)

visii.deinitialize()
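
# The indented lines below come from a helper that converts a per-pixel motion
# vector field into an RGB image; image, i, j, magnitude, angle_vector and
# use_magnitude are assumed to be defined by the surrounding function and loop
# (not shown here).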
        # Use HSV to apply color as a function of the angle
        c = [0, 0, 0]
        if magnitude > 0.000001:
            angle = py_ang(angle_vector)
            if use_magnitude:
                c = colorsys.hsv_to_rgb(angle / 360, 1, magnitude)
            else:
                c = colorsys.hsv_to_rgb(angle / 360, 1, 1)
        # for i_c in range(3):
        image[i, j, 0:3] = c
    return image
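
# py_ang above is a project helper that is not shown here; a minimal sketch,
# assuming it returns the angle of a 2D vector in degrees measured from the
# +x axis:
import math

def py_ang(v):
    # angle of the 2D vector v, mapped into [0, 360) degrees
    return math.degrees(math.atan2(v[1], v[0])) % 360.0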


visii.render_to_file(width=int(opt.width),
                     height=int(opt.height),
                     samples_per_pixel=int(opt.spp),
                     file_path=f"20_frame1.png")
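
# Record the current transform as the "previous" state, then move each object;
# the difference between the previous and current transforms is what produces
# the motion vectors in the next render.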

obj1.get_transform().set_position(obj1.get_transform().get_position(),
                                  previous=True)
obj1.get_transform().add_position(visii.vec3(0, 0.5, 0))

obj2.get_transform().set_position(obj2.get_transform().get_position(),
                                  previous=True)
obj2.get_transform().add_position(visii.vec3(0, 0, 0.5))

obj3.get_transform().set_rotation(obj3.get_transform().get_rotation(),
                                  previous=True)
obj3.get_transform().add_rotation(visii.quat(0, -1, 0, 0))

motion_vectors_array = visii.render_data(width=int(opt.width),
                                         height=int(opt.height),
                                         start_frame=0,
                                         frame_count=1,
                                         bounce=int(0),
                                         options="diffuse_motion_vectors")
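
# render_data returns a flat list of floats; a sketch of reshaping it into an
# image-sized numpy array (assuming 4 channels per pixel and numpy imported
# as np):
motion_vectors = np.array(motion_vectors_array).reshape(int(opt.height),
                                                        int(opt.width),
                                                        4)
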
mesh1.get_transform().set_angular_velocity(visii.angleAxis(0.5, visii.vec3(0, 0, 1)))
visii.sample_time_interval((1, 1))
visii.render_data_to_file(width=opt.width,
                          height=opt.height,
                          start_frame=0,
                          frame_count=1,
                          bounce=int(0),
                          options="diffuse_motion_vectors",
                          file_path=f"{opt.outf}/diffuse_motion_vectors.exr")

# for the final image, sample the entire pixel area to anti-alias the result
visii.sample_pixel_area(x_sample_interval=(0.0, 1.0),
                        y_sample_interval=(0.0, 1.0))

visii.render_to_file(width=opt.width,
                     height=opt.height,
                     samples_per_pixel=opt.spp,
                     file_path=f"{opt.outf}/img.png")

visii.render_to_file(width=opt.width,
                     height=opt.height,
                     samples_per_pixel=opt.spp,
                     file_path=f"{opt.outf}/img.exr")

visii.render_to_file(width=opt.width,
                     height=opt.height,
                     samples_per_pixel=opt.spp,
                     file_path=f"{opt.outf}/img.hdr")

# let's clean up the GPU
visii.deinitialize()
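
# The next snippet builds a visii mesh from raw vertex data. It assumes a
# triangle mesh has already been loaded, e.g. with trimesh (the loader and the
# file name below are illustrative, not from the original script):
import numpy as np
import trimesh

# any loader exposing .vertices and .vertex_normals works here
mesh = trimesh.load("model.stl")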
normals = np.array(mesh.vertex_normals).flatten().tolist()
vertices = np.array(mesh.vertices).flatten().tolist()

mesh = visii.mesh.create_from_data('stl_mesh',
                                   positions=vertices,
                                   normals=normals)

# # # # # # # # # # # # # # # # # # # # # # # # #

obj_entity = visii.entity.create(name="obj_entity",
                                 mesh=mesh,
                                 transform=visii.transform.create("obj_entity",
                                                                  scale=(0.3,
                                                                         0.3,
                                                                         0.3)),
                                 material=visii.material.create("obj_entity"))

obj_entity.get_material().set_base_color((0.9, 0.12, 0.08))
obj_entity.get_material().set_roughness(0.7)
obj_entity.get_material().set_specular(1)
obj_entity.get_material().set_sheen(1)

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.render_to_file(width=int(opt.width),
                     height=int(opt.height),
                     samples_per_pixel=int(opt.spp),
                     file_path=f"{opt.out}")

# let's clean up the GPU
visii.deinitialize()
Example no. 5
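
# Example 5 is a self-contained render function. A sketch of the imports it
# assumes (module sources are inferred from the calls below, e.g. Quaternion
# from the squaternion package and cv2 from opencv-python); opt, add_cuboid,
# make_background and export_to_ndds_file are project-level helpers defined
# elsewhere and not shown:
import glob
import os
import random

import cv2
import numpy as np
import visii
from squaternion import Quaternion
from tqdm import tqdm
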
def f(frame_ids):
    # headless - no window
    # verbose - output number of frames rendered, etc..
    visii.initialize(headless=True, verbose=False)

    # Use a neural network to denoise the ray-traced image
    visii.enable_denoiser()

    # set up dome background
    negatives = list(glob.glob("negatives/*.jpg"))
    visii.set_dome_light_intensity(1)

    # create an entity that will serve as our camera.
    camera = visii.entity.create(name="camera")

    # To place the camera into our scene, we'll add a "transform" component.
    # (All visii objects have a "name" that can be used for easy lookup later.)
    camera.set_transform(visii.transform.create(name="camera_transform"))

    # To make our camera entity act like a "camera", we'll add a camera component
    camera.set_camera(
        visii.camera.create_from_fov(
            name="camera_camera",
            field_of_view=1.4,  # note, this is in radians
            aspect=opt.width / float(opt.height)))

    # Finally, we'll select this entity to be the current camera entity.
    # (visii can only use one camera at a time)
    visii.set_camera_entity(camera)

    # let's store the camera look-at information so we can export it
    camera_struct_look_at = {
        'at': [0, 0, 0],
        'up': [0, 0, 1],
        'eye': [-1, 0, 0]
    }

    # Let's set the camera to look at an object.
    # We'll do this by editing the transform component.
    camera.get_transform().look_at(at=camera_struct_look_at['at'],
                                   up=camera_struct_look_at['up'],
                                   eye=camera_struct_look_at['eye'])

    # This function loads a mesh ignoring .mtl
    mesh = visii.mesh.create_from_file(opt.entity, opt.model)

    # creates visii entity using loaded mesh
    obj_entity = visii.entity.create(
        name=opt.entity + "_entity",
        mesh=mesh,
        transform=visii.transform.create(opt.entity + "_entity"),
        material=visii.material.create(opt.entity + "_entity"),
    )

    # obj_entity.get_light().set_intensity(0.05)

    # you can also set the light color manually
    # obj_entity.get_light().set_color((1,0,0))

    # Add texture to the material
    material = visii.material.get(opt.entity + "_entity")
    texture = visii.texture.create_from_file(opt.entity, "./models/Cutie.PNG")
    material.set_base_color_texture(texture)

    # Lets add the cuboid to the object we want to export
    add_cuboid(opt.entity + "_entity", opt.debug)

    # lets keep track of the entities we want to export
    entities_to_export = [opt.entity + "_entity"]

    # Loop where we change and render each frame
    for i in tqdm(frame_ids):
        # load a random negative onto the dome
        negative = cv2.imread(random.choice(negatives))

        # Skip dark backgrounds (20/255)
        if np.mean(negative) < 20:
            continue

        # Fix lighting of background and make it small within the FOV
        background = make_background(negative)
        cv2.imwrite("test" + str(i) + ".png", background)
        dome = visii.texture.create_from_file("dome", "test" + str(i) + ".png")
        visii.set_dome_light_texture(dome)
        visii.set_dome_light_rotation(
            visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

        stretch_factor = 2
        scale = [
            random.uniform(1, stretch_factor),  # width
            random.uniform(1, stretch_factor),  # length
            random.uniform(1, stretch_factor)  # height 
        ]
        obj_entity.get_transform().set_scale(scale)

        # create a random rotation while making sure the entity is facing forward in each frame
        rot = [
            random.uniform(-10, 10),  # Roll
            random.uniform(-15, 15),  # Pitch
            random.uniform(-45, 45)  # Yaw
        ]
        q = Quaternion.from_euler(rot[0], rot[1], rot[2], degrees=True)

        position = [
            random.uniform(0, 4),  # X Depth
            random.uniform(-1, 1),  # Y 
            random.uniform(-1, 1)  # Z
        ]
        # Scale the position based on the depth into the image so the object stays in frame
        position[1] *= position[0]
        position[2] *= position[0]

        obj_entity.get_transform().set_position(tuple(position))

        obj_entity.get_transform().set_rotation((q.x, q.y, q.z, q.w))

        # Randomly decide whether this frame goes into the test folder
        # (opt.test_percent percent of frames) or the training folder
        folder = ''
        if random.randint(0, 100) < opt.test_percent:
            folder = opt.entity + '_test/'
        else:
            folder = opt.entity + '_training/'

        # Render the scene
        visii.render_to_file(width=opt.width,
                             height=opt.height,
                             samples_per_pixel=opt.spp,
                             file_path=opt.out + folder + opt.entity + str(i) +
                             '.png')

        # set up JSON
        export_to_ndds_file(filename=opt.out + folder + opt.entity + str(i) +
                            '.json',
                            obj_names=entities_to_export,
                            width=opt.width,
                            height=opt.height,
                            camera_struct=camera_struct_look_at)

        # remove current negative from the dome
        visii.clear_dome_light_texture()
        visii.texture.remove("dome")

        os.remove("test" + str(i) + ".png")

    visii.deinitialize()
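
# A hypothetical driver for f(frame_ids), assuming an opt.nb_frames argument;
# the original script's driver (e.g. a multiprocessing setup) is not shown.
if __name__ == "__main__":
    f(list(range(int(opt.nb_frames))))

# The indented lines below come from a separate pybullet-driven motion-blur
# example; obj_entity, dpos, rot, _drot and frames_per_second are assumed to
# come from the surrounding simulation loop (not shown here).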
        # We use frames per second here to internally convert the velocity from
        # meters / second into meters / frame. The "mix" parameter smooths the
        # motion blur temporally, reducing flickering in the linear motion blur.
        obj_entity.get_transform().set_linear_velocity(dpos,
                                                       frames_per_second,
                                                       mix=.7)

        # visii quat expects w as the first argument
        new_rot = visii.quat(rot[3], rot[0], rot[1], rot[2])
        drot = visii.vec3(_drot[0], _drot[1], _drot[2])
        obj_entity.get_transform().set_rotation(new_rot)

        # Use angular velocity to blur the rotating object. Same concept as above,
        # but for angular velocity instead of linear velocity.
        obj_entity.get_transform().set_angular_velocity(visii.quat(1.0, drot),
                                                        frames_per_second,
                                                        mix=.7)

    print(f'rendering frame {str(i).zfill(5)}/{str(opt.nb_frames).zfill(5)}')
    visii.render_to_file(width=int(opt.width),
                         height=int(opt.height),
                         samples_per_pixel=int(opt.spp),
                         file_path=f"{opt.outf}/{str(i).zfill(5)}.png")

p.disconnect()
visii.deinitialize()

subprocess.call([
    'ffmpeg', '-y', '-framerate', '30', '-i', r"%05d.png", '-vcodec',
    'libx264', '-pix_fmt', 'yuv420p', '../pybullet_motion_blur.mp4'
],
                cwd=os.path.realpath(opt.outf))
Example no. 7
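
# The snippet below assumes visii has already been initialized and a floor
# entity created; a minimal sketch of that assumed setup (the mesh and
# transform names are illustrative; "material_floor" matches the comment
# below, and the camera / opt setup is also omitted):
import visii

visii.initialize(headless=True)
visii.enable_denoiser()

# a large flat plane that will act as the mirror floor
floor = visii.entity.create(name="floor",
                            mesh=visii.mesh.create_plane("mesh_floor"),
                            transform=visii.transform.create("transform_floor"),
                            material=visii.material.create("material_floor"))
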
mat = floor.get_material()
# mat = visii.material.get("material_floor") # <- this also works

# Mirrors are smooth and "metallic".
mat.set_base_color((0.19, 0.16, 0.19))
mat.set_metallic(1)
mat.set_roughness(0)

# Make the floor large by scaling it
trans = floor.get_transform()
trans.set_scale((5, 5, 1))

# Let's also add a sphere
sphere = visii.entity.create(name="sphere",
                             mesh=visii.mesh.create_sphere("sphere"),
                             transform=visii.transform.create("sphere"),
                             material=visii.material.create("sphere"))
sphere.get_transform().set_position((0, 0, 0.41))
sphere.get_transform().set_scale((0.4, 0.4, 0.4))
sphere.get_material().set_base_color((0.1, 0.9, 0.08))
sphere.get_material().set_roughness(0.7)

# Now that we have a simple scene, let's render it
print("rendering to", "01_simple_scene.png")
visii.render_to_file(width=opt.width,
                     height=opt.height,
                     samples_per_pixel=opt.spp,
                     file_path="01_simple_scene.png")

visii.deinitialize()