Code example #1
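These snippets are excerpts from larger dataset-generation scripts, so names such as args, Archiver, SceneData, build_scene, place_objects, and the texture/object lists are defined elsewhere in their projects. A typical import prologue for them would look something like the following (an assumption, not the original file header):

import colorsys
import math
import os

import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

from pyrender import (Node, OffscreenRenderer, OrthographicCamera,
                      PerspectiveCamera, RenderFlags)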
def main():
    # Create the output directory if it does not already exist
    os.makedirs(args.output_directory, exist_ok=True)

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # colorsys.hsv_to_rgb takes (hue, saturation, value)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 3
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-3, 3, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
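Examples #1, #7, #8, and #10 lean on two helpers, compute_yaw_and_pitch and genearte_camera_quaternion (sic), defined elsewhere in their project (which also appears to use a pyrender fork providing RenderFlags.ANTI_ALIASING; upstream pyrender has no such flag). A minimal reimplementation consistent with how they are called, offered as a sketch rather than the original code:

import math
import numpy as np

def compute_yaw_and_pitch(direction):
    # Yaw around the world y axis (0 when looking along +z), pitch from the
    # horizontal plane (negative when the direction points upward).
    x, y, z = direction
    norm = np.linalg.norm(direction)
    yaw = math.atan2(x, z) % (2.0 * math.pi)
    pitch = -math.asin(y / norm)
    return yaw, pitch

def genearte_camera_quaternion(yaw, pitch):
    # (sic: the project spells it "genearte".) Compose pitch (around x) and
    # yaw (around y) into one quaternion in the (x, y, z, w) order that
    # pyrender's Node.rotation expects.
    def axis_angle(axis, angle):
        s = math.sin(angle / 2.0)
        return np.array([axis[0] * s, axis[1] * s, axis[2] * s,
                         math.cos(angle / 2.0)])

    def multiply(a, b):
        # Hamilton product for (x, y, z, w) quaternions
        ax, ay, az, aw = a
        bx, by, bz, bw = b
        return np.array([
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by - ax * bz + ay * bw + az * bx,
            aw * bz + ax * by - ay * bx + az * bw,
            aw * bw - ax * bx - ay * by - az * bz,
        ])

    q = multiply(axis_angle([0, 1, 0], yaw), axis_angle([1, 0, 0], pitch))
    return q / np.linalg.norm(q)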
Code example #2
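This example is the body of an animation loop; the setup it assumes is not shown. A plausible prologue (all concrete values are guesses; parametric_surface is the original project's own module) might be:

import time

import cv2
import numpy as np
import pyrender
import trimesh
from pyrender import Node, OffscreenRenderer, PerspectiveCamera, Scene

cam = PerspectiveCamera(yfov=np.pi / 3.0)
cam_pose = np.array([
    [1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 3.5],  # pull the camera back along +z
    [0.0, 0.0, 0.0, 1.0],
])
off_screen_renderer = OffscreenRenderer(viewport_width=640, viewport_height=480)
R, r, delta_r = 1.0, 0.25, 0.001  # torus radii; r breathes by delta_r per frame
frame, t = 0, 0
start_time = time.time()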
    while True:  # runs until interrupted; the delete() below is never reached as written
        r += delta_r
        frame += 1
        t += 50
        if t % 1000 == 0:
            delta_r *= -1

        s = parametric_surface.doughnut(R, r, [50, 20])

        doughnut_trimesh = trimesh.Trimesh(
            vertices=s.flat_vertices,
            faces=s.flat_triangular_mesh_indices,
        )
        # for facet in doughnut_trimesh.facets:
        #     doughnut_trimesh.visual.face_colors[facet] = trimesh.visual.random_color()
        mesh = pyrender.Mesh.from_trimesh(doughnut_trimesh, smooth=False)
        mesh_node = Node(mesh=mesh, translation=np.array([0.0, 0.0, 0.0]))

        scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]),
                      bg_color=[0.0, 0.0, 0.0])
        cam_node = scene.add(cam, pose=cam_pose)
        scene.add_node(mesh_node)
        # v = Viewer(scene)
        color, depth = off_screen_renderer.render(scene)
        cv2.imshow('f', color[..., ::-1])  # pyrender returns RGB; OpenCV expects BGR
        cv2.waitKey(1)
        end_time = time.time()
        print(frame / (end_time - start_time))  # average FPS since start

    off_screen_renderer.delete()
Code example #3
    def render_camera_image(self):
        """Render a depth image of the current scene from the scene camera."""
        renderer = OffscreenRenderer(self.camera.width, self.camera.height)
        # With RenderFlags.DEPTH_ONLY, render() returns only the depth buffer
        depth = renderer.render(self._scene, flags=RenderFlags.DEPTH_ONLY)
        renderer.delete()
        return depth
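Note that this method creates and destroys an OffscreenRenderer (and with it a GL context) on every call, which is expensive. A variant that caches the renderer on the instance (a sketch under the same assumed attributes, not the original class's code):

    def render_camera_image(self):
        """Render a depth image, reusing one OffscreenRenderer across calls."""
        if getattr(self, '_renderer', None) is None:
            self._renderer = OffscreenRenderer(self.camera.width,
                                               self.camera.height)
        return self._renderer.render(self._scene, flags=RenderFlags.DEPTH_ONLY)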
Code example #4
#==============================================================================
# Using the viewer with a default camera
#==============================================================================

# v = Viewer(scene, shadows=True)

#==============================================================================
# Using the viewer with a pre-specified camera
#==============================================================================
cam_node = scene.add(cam, pose=cam_pose)
# v = Viewer(scene, central_node=drill_node)

#==============================================================================
# Rendering offscreen from that camera
#==============================================================================

r = OffscreenRenderer(viewport_width=640*2, viewport_height=480*2)
color, depth = r.render(scene)
r.delete()

#import matplotlib.pyplot as plt
#plt.figure()
#plt.imshow(color)
#plt.show()

import imageio
imageio.imwrite('test_rgb.png', color)
# Normalize depth to [0, 1] and clip, so values outside the 1st-99th percentile
# range do not wrap around when cast to uint8
min_val, max_val = np.percentile(depth[depth > 0], (1, 99))
depth_vis = np.clip((depth - min_val) / (max_val - min_val), 0.0, 1.0)
imageio.imwrite('test_depth.png', np.uint8(depth_vis * 255.0))
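If this script runs on a headless machine, pyrender must be told which GL platform to use before the OffscreenRenderer is created; both backends below are documented pyrender options:

import os
os.environ['PYOPENGL_PLATFORM'] = 'egl'  # GPU-accelerated; use 'osmesa' for pure CPU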

Code example #5
def test_offscreen_renderer(tmpdir):

    # Fuze trimesh
    fuze_trimesh = trimesh.load('examples/models/fuze.obj')
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load('examples/models/drill.obj')
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0, 3] = 0.1
    drill_pose[2, 3] = -np.min(drill_trimesh.vertices[:, 2])

    # Wood trimesh
    wood_trimesh = trimesh.load('examples/models/wood.obj')
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh
    bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array([
        [1.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, -1.0, -0.16],
        [0.0, 1.0, 0.0, 0.13],
        [0.0, 0.0, 0.0, 1.0],
    ])

    boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxv_vertex_colors = np.random.uniform(size=boxv_trimesh.vertices.shape)
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced
    poses = np.tile(np.eye(4), (2, 1, 1))
    poses[0, :3, 3] = np.array([-0.1, -0.10, 0.05])
    poses[1, :3, 3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3),
                       intensity=10.0,
                       innerConeAngle=np.pi / 16,
                       outerConeAngle=np.pi / 6)

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    fuze_node = Node(mesh=fuze_mesh,
                     translation=np.array(
                         [0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]))
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh)
    scene.add_node(boxf_node)

    _ = scene.add(drill_mesh, pose=drill_pose)
    _ = scene.add(bottle_mesh, pose=bottle_pose)
    _ = scene.add(wood_mesh)
    _ = scene.add(direc_l, pose=cam_pose)
    _ = scene.add(spot_l, pose=cam_pose)
    _ = scene.add(points_mesh)

    _ = scene.add(cam, pose=cam_pose)

    r = OffscreenRenderer(viewport_width=640, viewport_height=480)
    color, depth = r.render(scene)

    assert color.shape == (480, 640, 3)
    assert depth.shape == (480, 640)
    assert np.max(depth) > 0.05
    assert np.count_nonzero(depth) > (0.2 * depth.size)
    r.delete()
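This function appears to be adapted from pyrender's own test suite; inside a checkout of the pyrender repository it can be run with, e.g., "pytest -q -k test_offscreen_renderer" (the examples/models assets it loads ship with that repository).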
Code example #6
def dump_rendered_scene(input_path, output_path, cam_pose, width, height,
                        focal):
    #==============================================================================
    # Mesh creation
    #==============================================================================

    #------------------------------------------------------------------------------
    # Creating textured meshes from trimeshes
    #------------------------------------------------------------------------------
    object_trimesh = trimesh.load(input_path)
    # https://trimsh.org/trimesh.html#trimesh.PointCloud.bounds
    print("Object extents ", object_trimesh.bounds)
    print("Input path ", input_path)

    #==============================================================================
    # Camera creation
    #==============================================================================
    # `focal` is passed straight through as the vertical field of view in radians
    cam_angle = focal
    cam = PerspectiveCamera(yfov=cam_angle)
    # cam_pose = np.array([
    #     [0.0,  -np.sqrt(2)/2, np.sqrt(2)/2, 0.5],
    #     [1.0, 0.0,           0.0,           0.0],
    #     [0.0,  np.sqrt(2)/2,  np.sqrt(2)/2, 0.4],
    #     [0.0,  0.0,           0.0,          1.0]
    # ])

    #==============================================================================
    # Scene creation
    #==============================================================================

    # scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    scene = Scene.from_trimesh_scene(object_trimesh,
                                     bg_color=np.array([0.0, 0.0, 0.0, 1.0]),
                                     ambient_light=np.array(
                                         [1.0, 1.0, 1.0, 1.0]))
    #==============================================================================
    # Rendering offscreen from that camera
    #==============================================================================

    cam_node = scene.add(cam, pose=cam_pose)
    r = OffscreenRenderer(viewport_width=width, viewport_height=height)

    # flags = RenderFlags.RGBA  # unused: RGBA output would need mode 'RGBA' below
    # color, depth = r.render(scene, flags=flags)
    color, depth = r.render(scene)
    r.delete()

    depth_value = depth.copy()
    img_output = color.copy()
    # depth_value[depth_value <= 0.0001] = 1.5
    check_output = np.sum(color, axis=-1)
    print(color.shape, depth_value.shape, np.min(color), np.max(color),
          np.min(depth_value), np.max(depth_value), check_output.shape)
    print(color[check_output == 0].shape)
    # for i in range(width):
    # 	for j in range(height):
    # 		if(np.sum(color[j,i,:])==0):
    # 			img_output[j,i,0] = 255 - img_output[j,i,0]
    # 			img_output[j,i,1] = 255 - img_output[j,i,1]
    # 			img_output[j,i,2] = 255 - img_output[j,i,2]

    # import matplotlib.pyplot as plt
    # plt.figure(figsize=(20,20))
    # plt.imshow(color)

    img = Image.fromarray(img_output, 'RGB')
    img.save(output_path)

    return cam_angle
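Since the parameter named focal is consumed directly as yfov, the caller must already pass an angle in radians. If what you have is instead a focal length in pixels, the standard pinhole conversion (not part of the original code) is:

import math
yfov = 2.0 * math.atan(0.5 * image_height_px / focal_length_px)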
Code example #7
def main():
    # Create the output directory if it does not already exist
    os.makedirs(args.output_directory, exist_ok=True)

    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)

        camera_distance = 4
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)

            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
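The camera sampling here normalizes a 2-D Gaussian draw to radius camera_distance, which places cameras uniformly around a ring; a quick check of that claim (illustrative, not from the original script):

import numpy as np
samples = np.random.normal(size=(100000, 2))
samples /= np.linalg.norm(samples, axis=1, keepdims=True)
angles = np.arctan2(samples[:, 1], samples[:, 0])
hist, _ = np.histogram(angles, bins=8, range=(-np.pi, np.pi))
print(hist / len(angles))  # every bin is close to 1/8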
Code example #8
def main():
    # Create the output directory if it does not already exist
    os.makedirs(args.output_directory, exist_ok=True)

    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # colorsys.hsv_to_rgb takes (hue, saturation, value)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        color_candidates.append((red, green, blue))

    scene, cube_nodes = build_scene(args.num_cubes, color_candidates)
    camera = OrthographicCamera(xmag=0.9, ymag=0.9)
    camera_node = Node(camera=camera)
    scene.add_node(camera_node)
    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):

        camera_distance = 2
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

        # Change cube color and position
        update_cube_color_and_position(cube_nodes, color_candidates)

        # Transfer the changes to the vertex buffers on the GPU
        # ("udpate" is the helper's spelling in the source project)
        udpate_vertex_buffer(cube_nodes)

    renderer.delete()
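udpate_vertex_buffer is defined elsewhere in the project. Since pyrender caches vertex data on the GPU, any such helper has to invalidate the cached buffers after the cube attributes are mutated; a hypothetical sketch of that idea (pyrender's Primitive.delete() frees the GPU-side buffers, forcing a re-upload on the next render):

def udpate_vertex_buffer(cube_nodes):
    # Drop each primitive's GPU-side buffers so the next render re-uploads them
    for node in cube_nodes:
        for primitive in node.mesh.primitives:
            primitive.delete()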
Code example #9
File: rooms_ring_camera.py  Project: pmh47/o3v
def main():

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # colorsys.hsv_to_rgb takes (hue, saturation, value)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    os.makedirs(args.output_directory, exist_ok=True)

    with ShardedRecordWriter(args.output_directory + '/{:04d}.tfrecords',
                             args.num_scenes_per_file) as writer:

        for scene_index in tqdm(range(args.total_scenes)):
            full_scene, normals_scene, masks_scene = build_scene(
                floor_textures,
                wall_textures,
                fix_light_position=args.fix_light_position)
            object_nodes, object_mask_nodes = place_objects(
                full_scene,
                masks_scene,
                colors,
                objects,
                max_num_objects=args.max_num_objects,
                min_num_objects=args.min_num_objects,
                discrete_position=args.discrete_position,
                rotate_object=args.rotate_object)
            object_velocities = np.random.uniform(
                -1., 1., [len(object_nodes), 3]) * [1., 0., 1.]
            camera_distance = np.random.uniform(3., 4.8)
            camera = PerspectiveCamera(yfov=math.pi / 4)
            camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
            full_scene.add_node(camera_node)
            normals_scene.add_node(camera_node)
            masks_scene.add_node(camera_node)
            initial_yaw = np.random.uniform(-np.pi, np.pi)
            delta_yaw = np.random.normal(
                0.3, 0.05) * (np.random.randint(2) * 2 - 1.)
            pitch = 0.  # np.random.normal(0., 0.1) - 0.03
            all_frames = []
            all_depths = []
            all_masks = []
            all_normals = []
            all_bboxes = []
            all_camera_positions = []
            all_camera_yaws = []
            all_camera_pitches = []
            all_camera_matrices = []
            for observation_index in range(args.num_observations_per_scene):

                yaw = initial_yaw + delta_yaw * observation_index

                camera_xz = camera_distance * np.array(
                    (math.sin(yaw), math.cos(yaw)))

                camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
                camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
                camera_node.translation = camera_position

                # Image and depths (latter are linear float32 depths in world space, with zero for sky)
                flags = RenderFlags.NONE if args.no_shadows else RenderFlags.SHADOWS_DIRECTIONAL
                if args.anti_aliasing:
                    flags |= RenderFlags.ANTI_ALIASING
                image, depths = renderer.render(full_scene, flags=flags)

                # Background (wall/floor) normals in view space
                normals_world = renderer.render(normals_scene,
                                                flags=RenderFlags.NONE)[0]
                normals_world = np.where(
                    np.sum(normals_world, axis=2, keepdims=True) == 0, 0.,
                    (normals_world.astype(np.float32) / 255. - 0.5) *
                    2)  # this has zeros for the sky
                normals_view = np.einsum(
                    'ij,yxj->yxi', np.linalg.inv(camera_node.matrix[:3, :3]),
                    normals_world)

                # Instance segmentation masks
                masks_image = renderer.render(masks_scene,
                                              flags=RenderFlags.NONE)[0]

                # Instance 3D bboxes in view space (axis-aligned)
                def get_mesh_node_bbox(node):
                    # Transform the node's vertices into view space, then take
                    # the axis-aligned min/max corners
                    object_to_view_matrix = np.dot(
                        np.linalg.inv(camera_node.matrix),
                        full_scene.get_pose(node))
                    assert len(node.mesh.primitives) == 1
                    positions = node.mesh.primitives[0].positions
                    vertices_object = np.concatenate(
                        [positions, np.ones_like(positions[:, :1])], axis=1)
                    vertices_view = np.einsum(
                        'ij,vj->vi', object_to_view_matrix,
                        vertices_object)[:, :3]
                    return (np.min(vertices_view, axis=0),
                            np.max(vertices_view, axis=0))

                object_bboxes_view = [
                    get_mesh_node_bbox(object_parent.children[0])
                    for object_parent in object_nodes
                ]

                all_frames.append(
                    cv2.imencode('.jpg', image[..., ::-1])[1].tobytes())
                all_masks.append(
                    cv2.imencode('.png', masks_image[..., ::-1])[1].tobytes())
                all_depths.append(depths)
                all_normals.append(normals_view)
                all_bboxes.append(object_bboxes_view)
                all_camera_positions.append(camera_position)
                all_camera_yaws.append(yaw)
                all_camera_pitches.append(pitch)
                all_camera_matrices.append(camera_node.matrix)

                if args.visualize:
                    plt.clf()
                    plt.imshow(image)
                    plt.pause(1e-10)

                if args.moving_objects:
                    for object_node, object_velocity in zip(
                            object_nodes, object_velocities):
                        new_translation = object_node.translation + object_velocity
                        new_translation = np.clip(new_translation, -3., 3.)
                        object_node.translation = new_translation

            all_bboxes = np.asarray(all_bboxes)  # :: frame, obj, min/max, x/y/z
            # Pad with zero boxes so every scene carries max_num_objects entries
            padding = np.zeros([
                all_bboxes.shape[0],
                args.max_num_objects - all_bboxes.shape[1], 2, 3
            ])
            all_bboxes = np.concatenate([all_bboxes, padding], axis=1)

            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'frames':
                    tf.train.Feature(bytes_list=tf.train.BytesList(
                        value=all_frames)),
                    'masks':
                    tf.train.Feature(bytes_list=tf.train.BytesList(
                        value=all_masks)),
                    'depths':
                    float32_feature(all_depths),
                    'normals':
                    float32_feature(all_normals),
                    'bboxes':
                    float32_feature(all_bboxes),
                    'camera_positions':
                    float32_feature(all_camera_positions),
                    'camera_yaws':
                    float32_feature(all_camera_yaws),
                    'camera_pitches':
                    float32_feature(all_camera_pitches),
                    'camera_matrices':
                    float32_feature(all_camera_matrices),
                }))
            writer.write(example.SerializeToString())

    renderer.delete()
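float32_feature is not shown; the standard TFRecord idiom it presumably wraps (an assumption, consistent with how it is called on arrays of varying rank) is:

import numpy as np
import tensorflow as tf

def float32_feature(values):
    # Flatten any array-like input into a 1-D float32 feature
    values = np.asarray(values, dtype=np.float32).reshape(-1)
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))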
Code example #10
def main():
    # Create the output directory if it does not already exist
    os.makedirs(args.output_directory, exist_ok=True)

    assert args.max_num_cubes <= 10

    counts = np.array(combinations[:args.max_num_cubes])
    probabilities = counts / np.sum(counts)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    # Resume after the last .h5 file a previous run already wrote
    if os.path.isdir(args.output_directory):
        for name in os.listdir(args.output_directory):
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number or number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # colorsys.hsv_to_rgb takes (hue, saturation, value)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        color_candidates.append((red, green, blue))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):
        scene, cube_nodes = build_scene(args.max_num_cubes, color_candidates,
                                        probabilities)
        camera = OrthographicCamera(xmag=0.9, ymag=0.9)
        camera_node = Node(camera=camera)
        scene.add_node(camera_node)

        camera_distance = 2
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
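To make the resume logic above concrete with illustrative numbers: with initial_file_number=1, num_scenes_per_file=100, and total_scenes=1000, if files 1.h5 through 4.h5 already exist, the directory scan advances initial_file_number to 5, and total_scenes_to_render becomes 1000 - 100 * (5 - 1) = 600, so the run picks up exactly where the previous one stopped.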