Example #1
def render_big_gallery(results_dir,
                       nb=30,
                       pts_colors=[0.5, 0.5, 0.5],
                       draw_text=False):
    '''
    pts_colors: RGB triple in [0, 1] applied to every point, e.g. [0.5, 0.5, 0.5]
    return np array of a big image
    '''

    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames = get_all_filnames(results_dir, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for input_pf in input_ply_filenames:

        input_pc = read_ply_xyz(input_pf)

        colors = np.array(pts_colors)
        colors = np.tile(colors, (input_pc.shape[0], 1))

        input_pc_node = add_point_cloud_mesh_to_scene(input_pc, scene, pc_pose,
                                                      colors)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node)

        if draw_text:
            im_here = Image.fromarray(rendered_color)
            d = ImageDraw.Draw(im_here)
            fnt = ImageFont.truetype(
                font='/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
                size=100)
            d.text((0, 0),
                   input_pf.split('/')[-1],
                   fill=(0, 0, 0, 255),
                   font=fnt)
            rendered_color = np.array(im_here)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
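
A minimal usage sketch for the function above (the paths are hypothetical; it assumes the module-level constants YFOV, CAM_POSE, POINT_LIGHT_INTENSITY, POINT_SIZE, PC_POSE and the helpers it calls are defined, as in the source module):

from PIL import Image

# Hypothetical call: renders up to 10 point clouds from a results directory
# and stacks them vertically into one image.
gallery = render_big_gallery('results/plys', nb=10,
                             pts_colors=[0.2, 0.4, 0.8],
                             draw_text=True)
Image.fromarray(gallery).save('gallery.png')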
Example #2
 def __init__(self,
              render_scene=None,
              plane_grid_spacing=1.,
              plane_grid_num_of_lines=10,
              spheres_count=(8, 8)) -> None:
     """ Base class for PyRender viewer and offscreen renderer. """
     ViewerBase.__init__(self)
     self.spheres_count = spheres_count
     self.plane_grid_num_of_lines = plane_grid_num_of_lines
     self.plane_grid_spacing = plane_grid_spacing
     if render_scene is None:
         render_scene = PyRenderScene()
         render_scene.bg_color = np.array([0.75] * 3)
     if render_scene.main_camera_node is None:
         cam = PerspectiveCamera(yfov=np.deg2rad(60),
                                 aspectRatio=1.414,
                                 znear=0.005)
         cam_pose = np.eye(4)
         cam_pose[:3, 3] = RoboticTrackball.spherical_to_cartesian(
             3., 0., np.deg2rad(45.), target=np.zeros(3))
         cam_pose[:3, :3] = RoboticTrackball.look_at_rotation(
             eye=cam_pose[:3, 3], target=np.zeros(3), up=[0, 0, 1])
         nc = Node(camera=cam, matrix=cam_pose)
         render_scene.add_node(nc)
         render_scene.main_camera_node = nc
     self.render_scene = render_scene
     self.nodes_and_actors = []
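
RoboticTrackball is external to this snippet. Below is a minimal numpy sketch of what its two helpers plausibly compute (spherical coordinates to an eye position with Z up, and an OpenGL-style look-at rotation whose -Z axis points at the target); the exact conventions are assumptions:

import numpy as np

def spherical_to_cartesian(distance, azimuth, elevation, target):
    # Assumed convention: azimuth in the XY plane, elevation from it, Z up.
    x = distance * np.cos(elevation) * np.cos(azimuth)
    y = distance * np.cos(elevation) * np.sin(azimuth)
    z = distance * np.sin(elevation)
    return np.asarray(target, dtype=float) + np.array([x, y, z])

def look_at_rotation(eye, target, up):
    # Camera axes as columns: right, true up, backward (-forward).
    forward = np.asarray(target, dtype=float) - eye
    forward /= np.linalg.norm(forward)
    right = np.cross(forward, up)
    right /= np.linalg.norm(right)
    true_up = np.cross(right, forward)
    return np.stack([right, true_up, -forward], axis=1)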
Example #3
 def _build_scene(self, path, size, light, y_fov):
     self.scene = Scene(bg_color=[0, 0, 0, 0])
     self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
     self.camera = self.scene.add(
         PerspectiveCamera(y_fov, aspectRatio=np.divide(*size)))
     self.mesh = self.scene.add(
         Mesh.from_trimesh(trimesh.load(path), smooth=True))
     self.world_origin = self.mesh.mesh.centroid
Example #4
def render_big_gallery_overlay(dir_1,
                               dir_2,
                               pts_color_1=[0.5, 0.5, 0.5],
                               pts_color_2=[0.5, 0.5, 0.5],
                               nb=30):
    '''
    return np array of a big image
    '''
    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames_1 = get_all_filnames(dir_1, nb)
    input_ply_filenames_2 = get_all_filnames(dir_2, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for idx, input_pf in enumerate(input_ply_filenames_1):

        input_pc_1 = read_ply_xyz(input_pf)
        input_pc_2 = read_ply_xyz(input_ply_filenames_2[idx])

        color_1 = np.array(pts_color_1)
        color_1 = np.tile(color_1, (input_pc_1.shape[0], 1))

        color_2 = np.array(pts_color_2)
        color_2 = np.tile(color_2, (input_pc_2.shape[0], 1))

        input_pc_node_1 = add_point_cloud_mesh_to_scene(
            input_pc_1, scene, pc_pose, color_1)
        input_pc_node_2 = add_point_cloud_mesh_to_scene(
            input_pc_2, scene, pc_pose, color_2)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node_1)
        scene.remove_node(input_pc_node_2)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
Example #5
 def _create_scene(self):
     self._scene = Scene()
     camera = PerspectiveCamera(yfov=0.833,
                                znear=0.05,
                                zfar=3.0,
                                aspectRatio=1.0)
     cn = Node()
     cn.camera = camera
     pose_m = np.array([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, -1.0, 0.88], [0.0, 0.0, 0.0, 1.0]])
     pose_m[:, 1:3] *= -1.0
     cn.matrix = pose_m
     self._scene.add_node(cn)
     self._scene.main_camera_node = cn
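
The column flip above (pose_m[:, 1:3] *= -1.0) negates the camera's Y and Z axes, converting an OpenCV-style pose (y down, camera looks along +Z) into the OpenGL convention pyrender expects (y up, camera looks along -Z). A standalone sketch of the same conversion:

import numpy as np

def cv_to_gl_pose(pose_cv):
    # Negate the Y and Z rotation columns; the translation is unchanged.
    pose_gl = pose_cv.copy()
    pose_gl[:3, 1:3] *= -1.0
    return pose_gl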
Example #6
def render_sensor(
    point_set,
    render_sensor_path="/Users/macbookpro15/Documents/mujoco_hand_exps/data/sensor_render"
):
    """
    pointset: it is collectiono of sensor points for all timesteps
    """
    # first take one of the point, subtract the center from it which
    # I know is the 0-th position out of the 220 points
    # form the mesh from this
    if not os.path.exists(render_sensor_path):
        os.makedirs(render_sensor_path)
    time_steps = len(point_set)
    for t in range(time_steps):
        sensor = trimesh.load_mesh(
            f'../data/mesh_dir/mesh_{t}_out/mc_mesh_out.ply')
        sensor_mesh = Mesh.from_trimesh(sensor)
        # Light for the scene
        direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
        spot_l = SpotLight(color=np.ones(3),
                           intensity=10.0,
                           innerConeAngle=np.pi / 16,
                           outerConeAngle=np.pi / 6)
        point_l = PointLight(color=np.ones(3), intensity=10.0)

        # add camera to the scene
        cam = PerspectiveCamera(yfov=(np.pi / 3.0))
        cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                             [0.0, np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

        # create the scene
        scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
        point_mesh_node = scene.add(sensor_mesh)
        direc_l_node = scene.add(direc_l, pose=cam_pose)
        spot_l_node = scene.add(spot_l, pose=cam_pose)
        cam_node = scene.add(cam, pose=cam_pose)
        print('rendering the scene offline')
        r = OffscreenRenderer(viewport_width=640, viewport_height=480)
        color, depth = r.render(scene)
        r.delete()

        plt.figure()
        plt.imshow(color)
        plt.savefig(f'{render_sensor_path}/img_{t}.jpg')
        plt.close()  # avoid accumulating open figures across timesteps
Example #7
def render_mesh(mesh, h=256, w=256):
    """https://pyrender.readthedocs.io/en/latest/examples/quickstart.html"""
    mesh = pyrender.Mesh.from_trimesh(mesh.trimesh())
    scene = Scene()
    scene.add(mesh)

    # z-axis away from the scene, x-axis right, y-axis up
    pose = np.eye(4)
    pose[2, 3] = 250

    # add camera
    camera = PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    scene.add(camera, pose=pose)

    # add light
    # light = DirectionalLight(color=np.ones(3), intensity=5.0)
    light = PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
    scene.add(light, pose=pose)

    # OffscreenRenderer takes (viewport_width, viewport_height); passing
    # (h, w) positionally swaps them whenever h != w.
    r = OffscreenRenderer(viewport_width=w, viewport_height=h)
    color, depth = r.render(scene)
    r.delete()
    return color
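
A hypothetical call; my_mesh stands for any object exposing a .trimesh() method, which is what render_mesh expects:

from PIL import Image

color = render_mesh(my_mesh, h=512, w=512)  # my_mesh: hypothetical input
Image.fromarray(color).save('mesh_render.png')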
Example #8
 def __init__(self,
              filepath,
              viewport_size=(128, 128),
              y_fov=3.14159 / 4.,
              distance=0.30,
              top_only=False,
              light=5.0,
              theta_steps=10,
              phi_steps=10):
     self.scene = Scene(bg_color=[0, 0, 0])
     self.camera = self.scene.add(
         PerspectiveCamera(y_fov, aspectRatio=np.divide(*viewport_size)))
     self.mesh = self.scene.add(
         Mesh.from_trimesh(trimesh.load(filepath), smooth=True))
     self.world_origin = self.mesh.mesh.centroid
     self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
     self.distance = distance
     # 0.1 values are to avoid gimbal lock
     theta_max = np.pi / 2.0 if top_only else np.pi
     self.thetas = np.linspace(0.1, theta_max - 0.1, theta_steps)
     self.phis = np.linspace(0.1, 2 * np.pi - 0.1, phi_steps)
     self.renderer = OffscreenRenderer(*viewport_size)
     self.RGBA = RenderFlags.RGBA
Example #9
def test_nodes():

    x = Node()
    assert x.name is None
    assert x.camera is None
    assert x.children == []
    assert x.skin is None
    assert np.allclose(x.matrix, np.eye(4))
    assert x.mesh is None
    assert np.allclose(x.rotation, [0, 0, 0, 1])
    assert np.allclose(x.scale, np.ones(3))
    assert np.allclose(x.translation, np.zeros(3))
    assert x.weights is None
    assert x.light is None

    x.name = 'node'

    # Test node light/camera/mesh tests
    c = PerspectiveCamera(yfov=2.0)
    m = Mesh([])
    d = DirectionalLight()
    x.camera = c
    assert x.camera == c
    with pytest.raises(TypeError):
        x.camera = m
    with pytest.raises(TypeError):
        x.camera = d
    x.camera = None
    x.mesh = m
    assert x.mesh == m
    with pytest.raises(TypeError):
        x.mesh = c
    with pytest.raises(TypeError):
        x.mesh = d
    x.light = d
    assert x.light == d
    with pytest.raises(TypeError):
        x.light = m
    with pytest.raises(TypeError):
        x.light = c

    # Test transformations getters/setters/etc...
    # Set up test values
    x = np.array([1.0, 0.0, 0.0])
    y = np.array([0.0, 1.0, 0.0])
    t = np.array([1.0, 2.0, 3.0])
    s = np.array([0.5, 2.0, 1.0])

    Mx = transformations.rotation_matrix(np.pi / 2.0, x)
    qx = np.roll(transformations.quaternion_about_axis(np.pi / 2.0, x), -1)
    Mxt = Mx.copy()
    Mxt[:3, 3] = t
    S = np.eye(4)
    S[:3, :3] = np.diag(s)
    Mxts = Mxt.dot(S)

    My = transformations.rotation_matrix(np.pi / 2.0, y)
    qy = np.roll(transformations.quaternion_about_axis(np.pi / 2.0, y), -1)
    Myt = My.copy()
    Myt[:3, 3] = t

    x = Node(matrix=Mx)
    assert np.allclose(x.matrix, Mx)
    assert np.allclose(x.rotation, qx)
    assert np.allclose(x.translation, np.zeros(3))
    assert np.allclose(x.scale, np.ones(3))

    x.matrix = My
    assert np.allclose(x.matrix, My)
    assert np.allclose(x.rotation, qy)
    assert np.allclose(x.translation, np.zeros(3))
    assert np.allclose(x.scale, np.ones(3))
    x.translation = t
    assert np.allclose(x.matrix, Myt)
    assert np.allclose(x.rotation, qy)
    x.rotation = qx
    assert np.allclose(x.matrix, Mxt)
    x.scale = s
    assert np.allclose(x.matrix, Mxts)

    x = Node(matrix=Mxt)
    assert np.allclose(x.matrix, Mxt)
    assert np.allclose(x.rotation, qx)
    assert np.allclose(x.translation, t)
    assert np.allclose(x.scale, np.ones(3))

    x = Node(matrix=Mxts)
    assert np.allclose(x.matrix, Mxts)
    assert np.allclose(x.rotation, qx)
    assert np.allclose(x.translation, t)
    assert np.allclose(x.scale, s)

    # Individual element getters
    x.scale[0] = 0
    assert np.allclose(x.scale[0], 0)

    x.translation[0] = 0
    assert np.allclose(x.translation[0], 0)

    x.matrix = np.eye(4)
    x.matrix[0, 0] = 500
    assert x.matrix[0, 0] == 1.0

    # Failures
    with pytest.raises(ValueError):
        x.matrix = 5 * np.eye(4)
    with pytest.raises(ValueError):
        x.matrix = np.eye(5)
    with pytest.raises(ValueError):
        x.matrix = np.eye(4).dot([5, 1, 1, 1])
    with pytest.raises(ValueError):
        x.rotation = np.array([1, 2])
    with pytest.raises(ValueError):
        x.rotation = np.array([1, 2, 3])
    with pytest.raises(ValueError):
        x.rotation = np.array([1, 2, 3, 4])
    with pytest.raises(ValueError):
        x.translation = np.array([1, 2, 3, 4])
    with pytest.raises(ValueError):
        x.scale = np.array([1, 2, 3, 4])
Example #10
    # ==============================================================================
    # Light creation
    # ==============================================================================

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3),
                       intensity=10.0,
                       innerConeAngle=np.pi / 16,
                       outerConeAngle=np.pi / 6)
    point_l = PointLight(color=np.ones(3), intensity=10.0)

    # ==============================================================================
    # Camera creation
    # ==============================================================================

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    # ==============================================================================
    # Scene creation
    # ==============================================================================

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))

    # ==============================================================================
    # Adding objects to the scene
    # ==============================================================================
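
The hard-coded cam_pose above recurs in several of these examples. It is easiest to read by checking it directly: the 3x3 block must be orthonormal, the last column is the eye position, and a pyrender camera looks along its local -Z axis. A quick numpy check:

import numpy as np

cam_pose = np.array([[0.0, -np.sqrt(2) / 2, np.sqrt(2) / 2, 0.5],
                     [1.0, 0.0, 0.0, 0.0],
                     [0.0, np.sqrt(2) / 2, np.sqrt(2) / 2, 0.4],
                     [0.0, 0.0, 0.0, 1.0]])
R = cam_pose[:3, :3]
assert np.allclose(R @ R.T, np.eye(3))     # valid rotation
print('eye position  :', cam_pose[:3, 3])  # [0.5 0.  0.4]
print('view direction:', -R[:, 2])         # [-0.707 0. -0.707], a 45 deg look-down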
Example #11
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)

        camera_distance = 4
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)

            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Example #12
def dump_rendered_scene(input_path, output_path, cam_pose, width, height,
                        focal):
    #==============================================================================
    # Mesh creation
    #==============================================================================

    #------------------------------------------------------------------------------
    # Creating textured meshes from trimeshes
    #------------------------------------------------------------------------------
    object_trimesh = trimesh.load(input_path)
    # https://trimsh.org/trimesh.html#trimesh.PointCloud.bounds
    print("Object extents ", object_trimesh.bounds)
    print("Input path ", input_path)

    #==============================================================================
    # Camera creation
    #==============================================================================
    cam_angle = focal  # the focal argument is used as the vertical field of view (radians)
    cam = PerspectiveCamera(yfov=cam_angle)
    # cam_pose = np.array([
    #     [0.0,  -np.sqrt(2)/2, np.sqrt(2)/2, 0.5],
    #     [1.0, 0.0,           0.0,           0.0],
    #     [0.0,  np.sqrt(2)/2,  np.sqrt(2)/2, 0.4],
    #     [0.0,  0.0,           0.0,          1.0]
    # ])

    #==============================================================================
    # Scene creation
    #==============================================================================

    # scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    scene = Scene.from_trimesh_scene(object_trimesh,
                                     bg_color=np.array([0.0, 0.0, 0.0, 1.0]),
                                     ambient_light=np.array(
                                         [1.0, 1.0, 1.0, 1.0]))
    #==============================================================================
    # Rendering offscreen from that camera
    #==============================================================================

    cam_node = scene.add(cam, pose=cam_pose)
    r = OffscreenRenderer(viewport_width=width, viewport_height=height)

    flags = RenderFlags.RGBA
    # color, depth = r.render(scene, flags=flags)
    color, depth = r.render(scene)
    r.delete()

    depth_value = depth.copy()
    img_output = color.copy()
    # depth_value[depth_value <= 0.0001] = 1.5
    check_output = np.sum(color, axis=-1)
    print(color.shape, depth_value.shape, np.min(color), np.max(color),
          np.min(depth_value), np.max(depth_value), check_output.shape)
    print(color[check_output == 0].shape)
    # for i in range(width):
    # 	for j in range(height):
    # 		if(np.sum(color[j,i,:])==0):
    # 			img_output[j,i,0] = 255 - img_output[j,i,0]
    # 			img_output[j,i,1] = 255 - img_output[j,i,1]
    # 			img_output[j,i,2] = 255 - img_output[j,i,2]

    # import matplotlib.pyplot as plt
    # plt.figure(figsize=(20,20))
    # plt.imshow(color)

    img = Image.fromarray(img_output, 'RGB')
    img.save(output_path)

    return cam_angle
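
The depth buffer that comes back from OffscreenRenderer.render is a float array with zeros where no geometry was hit; the function above only prints its statistics. A small sketch for saving it as a normalized 8-bit image (the normalization scheme is an assumption, not part of the original code):

import numpy as np
from PIL import Image

def save_depth_png(depth, path):
    # Zeros mark background; normalize over hit pixels only.
    hit = depth > 0
    out = np.zeros_like(depth, dtype=np.float64)
    if hit.any():
        d_min, d_max = depth[hit].min(), depth[hit].max()
        out[hit] = (depth[hit] - d_min) / max(d_max - d_min, 1e-8)
    Image.fromarray((out * 255).astype(np.uint8)).save(path)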
Example #13
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            # initial_file_number starts at args.initial_file_number, so this
            # single check covers both lower bounds
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 4.5
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Example #14
 def _build_camera(self, y_fov, viewport_size, pose):
     aspect_ratio = np.divide(*viewport_size)
     camera = PerspectiveCamera(y_fov, aspectRatio=aspect_ratio)
     camera = self.scene.add(camera, pose=pose)
     return camera
Example #15
def test_perspective_camera():

    # Set up constants
    znear = 0.05
    zfar = 100
    yfov = np.pi / 3.0
    width = 1000.0
    height = 500.0
    aspectRatio = 640.0 / 480.0

    # Test basics
    with pytest.raises(TypeError):
        p = PerspectiveCamera()

    p = PerspectiveCamera(yfov=yfov)
    assert p.yfov == yfov
    assert p.znear == 0.05
    assert p.zfar is None
    assert p.aspectRatio is None
    p.name = 'asdf'
    p.name = None

    with pytest.raises(ValueError):
        p.yfov = 0.0

    with pytest.raises(ValueError):
        p.yfov = -1.0

    with pytest.raises(ValueError):
        p.znear = -1.0

    p.znear = 0.0
    p.znear = 0.05
    p.zfar = 100.0
    assert p.zfar == 100.0

    with pytest.raises(ValueError):
        p.zfar = 0.03

    with pytest.raises(ValueError):
        p.zfar = 0.05

    p.aspectRatio = 10.0
    assert p.aspectRatio == 10.0

    with pytest.raises(ValueError):
        p.aspectRatio = 0.0

    with pytest.raises(ValueError):
        p.aspectRatio = -1.0

    # Test matrix getting/setting

    # NF
    p.znear = 0.05
    p.zfar = 100
    p.aspectRatio = None

    with pytest.raises(ValueError):
        p.get_projection_matrix()

    assert np.allclose(
        p.get_projection_matrix(width, height),
        np.array([[1.0 / (width / height * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0],
                  [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0],
                  [
                      0.0, 0.0, (zfar + znear) / (znear - zfar),
                      (2 * zfar * znear) / (znear - zfar)
                  ], [0.0, 0.0, -1.0, 0.0]]))

    # NFA
    p.aspectRatio = aspectRatio
    assert np.allclose(
        p.get_projection_matrix(width, height),
        np.array([[1.0 / (aspectRatio * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0],
                  [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0],
                  [
                      0.0, 0.0, (zfar + znear) / (znear - zfar),
                      (2 * zfar * znear) / (znear - zfar)
                  ], [0.0, 0.0, -1.0, 0.0]]))
    assert np.allclose(p.get_projection_matrix(),
                       p.get_projection_matrix(width, height))

    # N
    p.zfar = None
    p.aspectRatio = None
    assert np.allclose(
        p.get_projection_matrix(width, height),
        np.array([[1.0 / (width / height * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0],
                  [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0],
                  [0.0, 0.0, -1.0, -2.0 * znear], [0.0, 0.0, -1.0, 0.0]]))
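
A quick numeric check, independent of pyrender, that the near/far matrix asserted above really maps the near plane to NDC depth -1 and the far plane to +1:

import numpy as np

znear, zfar, yfov, aspect = 0.05, 100.0, np.pi / 3.0, 640.0 / 480.0
f = 1.0 / np.tan(yfov / 2.0)
P = np.array([[f / aspect, 0.0, 0.0, 0.0],
              [0.0, f, 0.0, 0.0],
              [0.0, 0.0, (zfar + znear) / (znear - zfar),
               (2 * zfar * znear) / (znear - zfar)],
              [0.0, 0.0, -1.0, 0.0]])

def ndc_depth(z):
    clip = P @ np.array([0.0, 0.0, -z, 1.0])  # camera looks down -Z
    return clip[2] / clip[3]

assert np.isclose(ndc_depth(znear), -1.0)
assert np.isclose(ndc_depth(zfar), 1.0)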
Example #16
# Add lights for walls
for wall_light_pose in wall_light_poses:
    scene.add(spot_l_sides, pose=wall_light_pose, name='wall_light')

#==============================================================================
# Camera creation
#==============================================================================

# Camera properties
yfov = (np.pi / 3.0)
IMAGE_WIDTH = 600
IMAGE_HEIGHT = 600
aspect_ratio = IMAGE_WIDTH / IMAGE_HEIGHT

# Camera
camera = PerspectiveCamera(yfov=yfov, aspectRatio=aspect_ratio, znear=0.01)

# Specify parameters of camera of scene that will be previewed
alpha, beta, distance = 0, np.pi/3, 0.06

# Compute centroid of flower (3D mesh coordinates)
centroid = center_mesh.centroid

# Compute the edge of the flower (3D mesh coordinates)
flower_edge = random.choice(flower_mesh.bounds)

# Compute at the average of centroid and edge (3D mesh coordinates)
centroid_edge = [0] * 3
flower_edge_away = [0] * 3
for i in range(3):
    centroid_edge[i] = (centroid[i] + flower_edge[i])/2.0
Example #17
def test_scenes():

    # Basics
    s = Scene()
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))
    assert len(s.nodes) == 0
    assert s.name is None
    s.name = 'asdf'
    s.bg_color = None
    s.ambient_light = None
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))

    assert s.nodes == set()
    assert s.cameras == set()
    assert s.lights == set()
    assert s.point_lights == set()
    assert s.spot_lights == set()
    assert s.directional_lights == set()
    assert s.meshes == set()
    assert s.camera_nodes == set()
    assert s.light_nodes == set()
    assert s.point_light_nodes == set()
    assert s.spot_light_nodes == set()
    assert s.directional_light_nodes == set()
    assert s.mesh_nodes == set()
    assert s.main_camera_node is None
    assert np.all(s.bounds == 0)
    assert np.all(s.centroid == 0)
    assert np.all(s.extents == 0)
    assert np.all(s.scale == 0)

    # From trimesh scene
    tms = trimesh.load('tests/data/WaterBottle.glb')
    s = Scene.from_trimesh_scene(tms)
    assert len(s.meshes) == 1
    assert len(s.mesh_nodes) == 1

    # Test bg color formatting
    s = Scene(bg_color=[0, 1.0, 0])
    assert np.allclose(s.bg_color, np.array([0.0, 1.0, 0.0, 1.0]))

    # Test constructor for nodes
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2, n3]
    s = Scene(nodes=nodes)
    n1.children.append(n2)
    s = Scene(nodes=nodes)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)
    n3.children = []
    n2.children.append(n3)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)

    # Test node accessors
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2]
    s = Scene(nodes=nodes)
    assert s.has_node(n1)
    assert s.has_node(n2)
    assert not s.has_node(n3)

    # Test node poses
    for n in nodes:
        assert np.allclose(s.get_pose(n), np.eye(4))
    with pytest.raises(ValueError):
        s.get_pose(n3)
    with pytest.raises(ValueError):
        s.set_pose(n3, np.eye(4))
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), np.eye(4))

    nodes = [n1, n2, n3]
    tf2 = np.eye(4)
    tf2[:3, :3] = np.diag([-1, -1, 1])
    n1.children.append(n2)
    n1.matrix = tf
    n2.matrix = tf2
    s = Scene(nodes=nodes)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))
    assert np.allclose(s.get_pose(n3), np.eye(4))

    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    s = Scene()
    s.add_node(n1)
    with pytest.raises(ValueError):
        s.add_node(n2)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf)
    s.set_pose(n2, tf2)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))

    # Test node removal
    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    n2.children.append(n3)
    s = Scene(nodes=[n1, n2, n3])
    s.remove_node(n2)
    assert len(s.nodes) == 1
    assert n1 in s.nodes
    assert len(n1.children) == 0
    assert len(n2.children) == 1
    s.add_node(n2, parent_node=n1)
    assert len(n1.children) == 1
    n1.matrix = tf
    n3.matrix = tf2
    assert np.allclose(s.get_pose(n3), tf.dot(tf2))

    # Now test ADD function
    s = Scene()
    m = Mesh([], name='m')
    cp = PerspectiveCamera(yfov=2.0)
    co = OrthographicCamera(xmag=1.0, ymag=1.0)
    dl = DirectionalLight()
    pl = PointLight()
    sl = SpotLight()

    n1 = s.add(m, name='mn')
    assert n1.mesh == m
    assert len(s.nodes) == 1
    assert len(s.mesh_nodes) == 1
    assert n1 in s.mesh_nodes
    assert len(s.meshes) == 1
    assert m in s.meshes
    assert len(s.get_nodes(node=n2)) == 0
    n2 = s.add(m, pose=tf)
    assert len(s.nodes) == len(s.mesh_nodes) == 2
    assert len(s.meshes) == 1
    assert len(s.get_nodes(node=n1)) == 1
    assert len(s.get_nodes(node=n1, name='mn')) == 1
    assert len(s.get_nodes(name='mn')) == 1
    assert len(s.get_nodes(obj=m)) == 2
    assert len(s.get_nodes(obj=m, obj_name='m')) == 2
    assert len(s.get_nodes(obj=co)) == 0
    nsl = s.add(sl, name='sln')
    npl = s.add(pl, parent_name='sln')
    assert nsl.children[0] == npl
    ndl = s.add(dl, parent_node=npl)
    assert npl.children[0] == ndl
    nco = s.add(co)
    ncp = s.add(cp)

    assert len(s.light_nodes) == len(s.lights) == 3
    assert len(s.point_light_nodes) == len(s.point_lights) == 1
    assert npl in s.point_light_nodes
    assert len(s.spot_light_nodes) == len(s.spot_lights) == 1
    assert nsl in s.spot_light_nodes
    assert len(s.directional_light_nodes) == len(s.directional_lights) == 1
    assert ndl in s.directional_light_nodes
    assert len(s.cameras) == len(s.camera_nodes) == 2
    assert s.main_camera_node == nco
    s.main_camera_node = ncp
    s.remove_node(ncp)
    assert len(s.cameras) == len(s.camera_nodes) == 1
    assert s.main_camera_node == nco
    s.remove_node(n2)
    assert len(s.meshes) == 1
    s.remove_node(n1)
    assert len(s.meshes) == 0
    s.remove_node(nsl)
    assert len(s.lights) == 0
    s.remove_node(nco)
    assert s.main_camera_node is None

    s.add_node(n1)
    s.clear()
    assert len(s.nodes) == 0

    # Trigger final errors
    with pytest.raises(ValueError):
        s.main_camera_node = None
    with pytest.raises(ValueError):
        s.main_camera_node = ncp
    with pytest.raises(ValueError):
        s.add(m, parent_node=n1)
    with pytest.raises(ValueError):
        s.add(m, name='asdf')
        s.add(m, name='asdf')
        # two nodes named 'asdf' make the parent_name lookup ambiguous
        s.add(m, parent_name='asdf')
    with pytest.raises(ValueError):
        s.add(m, parent_name='asfd')  # no node has this name
    with pytest.raises(TypeError):
        s.add(None)

    s.clear()
    # Test bounds
    m1 = Mesh.from_trimesh(trimesh.creation.box())
    m2 = Mesh.from_trimesh(trimesh.creation.box())
    m3 = Mesh.from_trimesh(trimesh.creation.box())
    n1 = Node(mesh=m1)
    n2 = Node(mesh=m2, translation=[1.0, 0.0, 0.0])
    n3 = Node(mesh=m3, translation=[0.5, 0.0, 1.0])
    s.add_node(n1)
    s.add_node(n2)
    s.add_node(n3)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [1.5, 0.5, 1.5]])
    s.clear()
    s.add_node(n1)
    s.add_node(n2, parent_node=n1)
    s.add_node(n3, parent_node=n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.0, 0.5, 1.5]])
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n3, tf)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.5, 1.5, 1.5]])
    s.remove_node(n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
    s.clear()
    assert np.allclose(s.bounds, 0.0)
Example #18
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 3
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-3, 3, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Example #19
    # Drill trimesh
    drill_trimesh = trimesh.load('./models/drill.obj')
    drill_trimesh.visual.vertex_colors = np.array((1, 0, 0, 1))
    drill_trimesh.visual.face_colors = np.array((1, 0, 0, 1))
    drill_mesh = Mesh.from_trimesh(drill_trimesh)

    #drill_pose = np.eye(4)
    #drill_pose[0,3] = 0.1
    #drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2])

    #==============================================================================
    # Camera creation
    #==============================================================================

    cam = PerspectiveCamera(yfov=(np.pi / 3.0),
                            aspectRatio=1.0 * 640 / 480,
                            znear=0.1,
                            zfar=6)
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    proj = cam.get_projection_matrix()
    mvp = proj @ np.linalg.inv(cam_pose)
    inv_mvp = np.linalg.inv(mvp)
    #==============================================================================
    # Scene creation
    #==============================================================================

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    cam_node = scene.add(cam, pose=cam_pose)
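
With mvp and inv_mvp in hand, projecting a world point to pixel coordinates is a perspective divide plus a viewport transform. A sketch (the 640x480 viewport matches the aspect ratio used above; the helper itself is not in the original source):

import numpy as np

def project_to_pixels(point_world, mvp, width=640, height=480):
    clip = mvp @ np.append(point_world, 1.0)    # world -> clip space
    ndc = clip[:3] / clip[3]                    # perspective divide
    px = (ndc[0] * 0.5 + 0.5) * width           # NDC [-1, 1] -> pixels
    py = (1.0 - (ndc[1] * 0.5 + 0.5)) * height  # flip Y into image coords
    return px, py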
Example #20
def main():

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    os.makedirs(args.output_directory, exist_ok=True)

    with ShardedRecordWriter(args.output_directory + '/{:04d}.tfrecords',
                             args.num_scenes_per_file) as writer:

        for scene_index in tqdm(range(args.total_scenes)):
            full_scene, normals_scene, masks_scene = build_scene(
                floor_textures,
                wall_textures,
                fix_light_position=args.fix_light_position)
            object_nodes, object_mask_nodes = place_objects(
                full_scene,
                masks_scene,
                colors,
                objects,
                max_num_objects=args.max_num_objects,
                min_num_objects=args.min_num_objects,
                discrete_position=args.discrete_position,
                rotate_object=args.rotate_object)
            object_velocities = np.random.uniform(
                -1., 1., [len(object_nodes), 3]) * [1., 0., 1.]
            camera_distance = np.random.uniform(3., 4.8)
            camera = PerspectiveCamera(yfov=math.pi / 4)
            camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
            full_scene.add_node(camera_node)
            normals_scene.add_node(camera_node)
            masks_scene.add_node(camera_node)
            initial_yaw = np.random.uniform(-np.pi, np.pi)
            delta_yaw = np.random.normal(
                0.3, 0.05) * (np.random.randint(2) * 2 - 1.)
            pitch = 0.  # np.random.normal(0., 0.1) - 0.03
            all_frames = []
            all_depths = []
            all_masks = []
            all_normals = []
            all_bboxes = []
            all_camera_positions = []
            all_camera_yaws = []
            all_camera_pitches = []
            all_camera_matrices = []
            for observation_index in range(args.num_observations_per_scene):

                yaw = initial_yaw + delta_yaw * observation_index

                camera_xz = camera_distance * np.array(
                    (math.sin(yaw), math.cos(yaw)))

                camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
                camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
                camera_node.translation = camera_position

                # Image and depths (latter are linear float32 depths in world space, with zero for sky)
                flags = RenderFlags.NONE if args.no_shadows else RenderFlags.SHADOWS_DIRECTIONAL
                if args.anti_aliasing:
                    flags |= RenderFlags.ANTI_ALIASING
                image, depths = renderer.render(full_scene, flags=flags)

                # Background (wall/floor) normals in view space
                normals_world = renderer.render(normals_scene,
                                                flags=RenderFlags.NONE)[0]
                normals_world = np.where(
                    np.sum(normals_world, axis=2, keepdims=True) == 0, 0.,
                    (normals_world.astype(np.float32) / 255. - 0.5) *
                    2)  # this has zeros for the sky
                normals_view = np.einsum(
                    'ij,yxj->yxi', np.linalg.inv(camera_node.matrix[:3, :3]),
                    normals_world)

                # Instance segmentation masks
                masks_image = renderer.render(masks_scene,
                                              flags=RenderFlags.NONE)[0]

                # Instance 3D bboxes in view space (axis-aligned)
                def get_mesh_node_bbox(node):
                    object_to_view_matrix = np.dot(
                        np.linalg.inv(camera_node.matrix),
                        full_scene.get_pose(node))
                    assert len(node.mesh.primitives) == 1
                    vertices_object = np.concatenate([
                        node.mesh.primitives[0].positions,
                        np.ones_like(node.mesh.primitives[0].positions[:, :1])
                    ],
                                                     axis=1)
                    vertices_view = np.einsum('ij,vj->vi',
                                              object_to_view_matrix,
                                              vertices_object)[:, :3]
                    return np.min(vertices_view, axis=0), np.max(vertices_view,
                                                                 axis=0)

                object_bboxes_view = [
                    get_mesh_node_bbox(object_parent.children[0])
                    for object_parent in object_nodes
                ]

                # tostring() is deprecated in NumPy; tobytes() is the same call
                all_frames.append(
                    cv2.imencode('.jpg', image[..., ::-1])[1].tobytes())
                all_masks.append(
                    cv2.imencode('.png', masks_image[..., ::-1])[1].tobytes())
                all_depths.append(depths)
                all_normals.append(normals_view)
                all_bboxes.append(object_bboxes_view)
                all_camera_positions.append(camera_position)
                all_camera_yaws.append(yaw)
                all_camera_pitches.append(pitch)
                all_camera_matrices.append(camera_node.matrix)

                if args.visualize:
                    plt.clf()
                    plt.imshow(image)
                    plt.pause(1e-10)

                if args.moving_objects:
                    for object_node, object_velocity in zip(
                            object_nodes, object_velocities):
                        new_translation = object_node.translation + object_velocity
                        new_translation = np.clip(new_translation, -3., 3.)
                        object_node.translation = new_translation

            all_bboxes = np.asarray(
                all_bboxes)  # :: frame, obj, min/max, x/y/z
            all_bboxes = np.concatenate([
                all_bboxes,
                np.zeros([
                    all_bboxes.shape[0],
                    args.max_num_objects - all_bboxes.shape[1], 2, 3
                ])
            ],
                                        axis=1)

            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'frames':
                    tf.train.Feature(bytes_list=tf.train.BytesList(
                        value=all_frames)),
                    'masks':
                    tf.train.Feature(bytes_list=tf.train.BytesList(
                        value=all_masks)),
                    'depths':
                    float32_feature(all_depths),
                    'normals':
                    float32_feature(all_normals),
                    'bboxes':
                    float32_feature(all_bboxes),
                    'camera_positions':
                    float32_feature(all_camera_positions),
                    'camera_yaws':
                    float32_feature(all_camera_yaws),
                    'camera_pitches':
                    float32_feature(all_camera_pitches),
                    'camera_matrices':
                    float32_feature(all_camera_matrices),
                }))
            writer.write(example.SerializeToString())

    renderer.delete()
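
Reading these shards back mirrors the feature names written above. A parsing sketch; it assumes float32_feature flattens its input (so shapes such as the 4x4 camera matrices must be restored by the caller) and that frames/masks are per-frame JPEG/PNG bytes, as in the writer:

import tensorflow as tf

feature_spec = {
    'frames': tf.io.VarLenFeature(tf.string),
    'masks': tf.io.VarLenFeature(tf.string),
    'depths': tf.io.VarLenFeature(tf.float32),
    'normals': tf.io.VarLenFeature(tf.float32),
    'bboxes': tf.io.VarLenFeature(tf.float32),
    'camera_positions': tf.io.VarLenFeature(tf.float32),
    'camera_yaws': tf.io.VarLenFeature(tf.float32),
    'camera_pitches': tf.io.VarLenFeature(tf.float32),
    'camera_matrices': tf.io.VarLenFeature(tf.float32),
}

def parse_scene(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_spec)
    frames = tf.map_fn(
        lambda b: tf.io.decode_jpeg(b, channels=3),
        tf.sparse.to_dense(parsed['frames']),
        fn_output_signature=tf.uint8)
    return frames, parsed

# Hypothetical directory; matches the '{:04d}.tfrecords' shard pattern above.
dataset = tf.data.TFRecordDataset(
    tf.io.gfile.glob('output_dir/*.tfrecords')).map(parse_scene)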
Example #21
def main():
    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    floor_textures = [
        "../textures/lg_floor_d.tga",
        "../textures/lg_style_01_floor_blue_d.tga",
        "../textures/lg_style_01_floor_orange_bright_d.tga",
    ]

    wall_textures = [
        "../textures/lg_style_01_wall_cerise_d.tga",
        "../textures/lg_style_01_wall_green_bright_d.tga",
        "../textures/lg_style_01_wall_red_bright_d.tga",
        "../textures/lg_style_02_wall_yellow_d.tga",
        "../textures/lg_style_03_wall_orange_bright_d.tga",
    ]

    objects = [
        pyrender.objects.Capsule,
        pyrender.objects.Cylinder,
        pyrender.objects.Icosahedron,
        pyrender.objects.Box,
        pyrender.objects.Sphere,
    ]

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    fig = plt.figure(figsize=(6, 3))
    plt.tight_layout()
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  objects,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # ms per frame (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
Example #22
# Add lights for walls
for wall_light_pose in wall_light_poses:
    scene.add(spot_l_sides, pose=wall_light_pose, name='wall_light')

#==============================================================================
# 3D to 2D mapping
#==============================================================================

# camera properties
yfov = (np.pi / 3.0)
height = 600
width = 600
fov = 1.0 / (np.tan(yfov / 2.0))
aspect_ratio = width / height
zfar = 10

# camera
cam = PerspectiveCamera(yfov=yfov, aspectRatio=aspect_ratio, zfar=zfar)

# distance to near and far clipping planes
far = cam.zfar
near = cam.znear

# Specify parameters of camera of scene that will be previewed
# alpha, beta, distance, force_height, force = 0, np.pi/4, 0.1, 0.35, False
alpha, beta, distance, force_height, force = np.pi / 6, np.pi / 3, 0.3, 0, False
new_campose, labels = generateCameraPoseFromSpherical(distance=distance,
                                                      alpha=alpha,
                                                      beta=beta,
                                                      force_t_z=force_height)
cam_node = scene.add(cam, pose=new_campose)

# There are different definitions of the clip matrix based on if
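
There are different clip-matrix definitions depending on whether zfar is finite, and both forms appear in Example #15's assertions: the third row changes between the finite case and zfar=None. A small sketch of just that branch (an illustration, not from the original source):

import numpy as np

def third_row(znear, zfar=None):
    # Matches the matrices asserted in Example #15: finite far plane
    # vs. pyrender's infinite-far-plane form when zfar is None.
    if zfar is None:
        return np.array([0.0, 0.0, -1.0, -2.0 * znear])
    return np.array([0.0, 0.0, (zfar + znear) / (znear - zfar),
                     (2.0 * zfar * znear) / (znear - zfar)])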
Example #23
def main():
    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    fig = plt.figure(figsize=(6, 3))
    plt.tight_layout()
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_dice(scene,
               mnist_images,
               discrete_position=args.discrete_position,
               rotate_dice=args.rotate_dice)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # ms per frame (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "mnist_dice"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_dice:
        filename += "_rotate_dice"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
Example #24
def test_offscreen_renderer(tmpdir):

    # Fuze trimesh
    fuze_trimesh = trimesh.load('examples/models/fuze.obj')
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load('examples/models/drill.obj')
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0, 3] = 0.1
    drill_pose[2, 3] = -np.min(drill_trimesh.vertices[:, 2])

    # Wood trimesh
    wood_trimesh = trimesh.load('examples/models/wood.obj')
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh
    bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array([
        [1.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, -1.0, -0.16],
        [0.0, 1.0, 0.0, 0.13],
        [0.0, 0.0, 0.0, 1.0],
    ])

    boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced
    poses = np.tile(np.eye(4), (2, 1, 1))
    poses[0, :3, 3] = np.array([-0.1, -0.10, 0.05])
    poses[1, :3, 3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3),
                       intensity=10.0,
                       innerConeAngle=np.pi / 16,
                       outerConeAngle=np.pi / 6)

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    fuze_node = Node(mesh=fuze_mesh,
                     translation=np.array(
                         [0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]))
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh)
    scene.add_node(boxf_node)

    _ = scene.add(drill_mesh, pose=drill_pose)
    _ = scene.add(bottle_mesh, pose=bottle_pose)
    _ = scene.add(wood_mesh)
    _ = scene.add(direc_l, pose=cam_pose)
    _ = scene.add(spot_l, pose=cam_pose)
    _ = scene.add(points_mesh)

    _ = scene.add(cam, pose=cam_pose)

    r = OffscreenRenderer(viewport_width=640, viewport_height=480)
    color, depth = r.render(scene)

    assert color.shape == (480, 640, 3)
    assert depth.shape == (480, 640)
    assert np.max(depth.data) > 0.05
    assert np.count_nonzero(depth.data) > (0.2 * depth.size)
    r.delete()