def main():
    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # HSV "value" (brightness); colorsys expects (h, s, v)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        color_candidates.append((red, green, blue))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    rad_step = math.pi / 18
    total_frames = int(math.pi * 2 / rad_step)
    camera_distance = 2

    fig = plt.figure(figsize=(3, 3))
    ims = []

    for num_cubes in range(1, 8):
        scene = build_scene(num_cubes, color_candidates)[0]
        camera = OrthographicCamera(xmag=0.9, ymag=0.9)
        camera_node = Node(camera=camera)
        scene.add_node(camera_node)

        current_rad = 0

        for _ in range(total_frames):
            camera_position = np.array(
                (math.sin(current_rad), math.sin(math.pi / 6),
                 math.cos(current_rad)))
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            im = plt.imshow(image, interpolation="none", animated=True)
            ims.append([im])

            current_rad += rad_step

    renderer.delete()

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # interval is in milliseconds (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    ani.save("shepard_metzler.gif", writer="imagemagick")
Example #2
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # HSV "value" (brightness); colorsys expects (h, s, v)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        color_candidates.append((red, green, blue))

    scene, cube_nodes = build_scene(args.num_cubes, color_candidates)
    camera = OrthographicCamera(xmag=0.9, ymag=0.9)
    camera_node = Node(camera=camera)
    scene.add_node(camera_node)
    renderer = OffscreenRenderer(
        viewport_width=args.image_size, viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):

        camera_distance = 2
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

        # Change cube color and position
        update_cube_color_and_position(cube_nodes, color_candidates)

        # Transfer changes to the vertex buffer on gpu
        udpate_vertex_buffer(cube_nodes)

    renderer.delete()
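For reference, the quantities passed to scene_data.add above fully determine an observation's viewpoint. One plausible downstream packing is a 7-dimensional vector per observation; this is an assumption about the consumer, not code from the script.

import math
import numpy as np

# Hypothetical viewpoint packing: position on the viewing sphere plus
# cos/sin-encoded yaw and pitch, matching the values stored via scene_data.add.
camera_distance = 2
camera_position = np.random.normal(size=3)
camera_position = camera_distance * camera_position / np.linalg.norm(camera_position)
yaw, pitch = compute_yaw_and_pitch(camera_position)  # helper sketched earlier
viewpoint = np.concatenate([
    camera_position,                      # (x, y, z)
    [math.cos(yaw), math.sin(yaw)],       # heading
    [math.cos(pitch), math.sin(pitch)],   # elevation
])
assert viewpoint.shape == (7,)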
def test_scenes():

    # Basics
    s = Scene()
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))
    assert len(s.nodes) == 0
    assert s.name is None
    s.name = 'asdf'
    s.bg_color = None
    s.ambient_light = None
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))

    assert s.nodes == set()
    assert s.cameras == set()
    assert s.lights == set()
    assert s.point_lights == set()
    assert s.spot_lights == set()
    assert s.directional_lights == set()
    assert s.meshes == set()
    assert s.camera_nodes == set()
    assert s.light_nodes == set()
    assert s.point_light_nodes == set()
    assert s.spot_light_nodes == set()
    assert s.directional_light_nodes == set()
    assert s.mesh_nodes == set()
    assert s.main_camera_node is None
    assert np.all(s.bounds == 0)
    assert np.all(s.centroid == 0)
    assert np.all(s.extents == 0)
    assert np.all(s.scale == 0)

    # From trimesh scene
    tms = trimesh.load('tests/data/WaterBottle.glb')
    s = Scene.from_trimesh_scene(tms)
    assert len(s.meshes) == 1
    assert len(s.mesh_nodes) == 1

    # Test bg color formatting
    s = Scene(bg_color=[0, 1.0, 0])
    assert np.allclose(s.bg_color, np.array([0.0, 1.0, 0.0, 1.0]))

    # Test constructor for nodes
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2, n3]
    s = Scene(nodes=nodes)
    n1.children.append(n2)
    s = Scene(nodes=nodes)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)
    n3.children = []
    n2.children.append(n3)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)

    # Test node accessors
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2]
    s = Scene(nodes=nodes)
    assert s.has_node(n1)
    assert s.has_node(n2)
    assert not s.has_node(n3)

    # Test node poses
    for n in nodes:
        assert np.allclose(s.get_pose(n), np.eye(4))
    with pytest.raises(ValueError):
        s.get_pose(n3)
    with pytest.raises(ValueError):
        s.set_pose(n3, np.eye(4))
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), np.eye(4))

    nodes = [n1, n2, n3]
    tf2 = np.eye(4)
    tf2[:3, :3] = np.diag([-1, -1, 1])
    n1.children.append(n2)
    n1.matrix = tf
    n2.matrix = tf2
    s = Scene(nodes=nodes)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))
    assert np.allclose(s.get_pose(n3), np.eye(4))

    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    s = Scene()
    s.add_node(n1)
    with pytest.raises(ValueError):
        s.add_node(n2)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf)
    s.set_pose(n2, tf2)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))

    # Test node removal
    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    n2.children.append(n3)
    s = Scene(nodes=[n1, n2, n3])
    s.remove_node(n2)
    assert len(s.nodes) == 1
    assert n1 in s.nodes
    assert len(n1.children) == 0
    assert len(n2.children) == 1
    s.add_node(n2, parent_node=n1)
    assert len(n1.children) == 1
    n1.matrix = tf
    n3.matrix = tf2
    assert np.allclose(s.get_pose(n3), tf.dot(tf2))

    # Now test ADD function
    s = Scene()
    m = Mesh([], name='m')
    cp = PerspectiveCamera(yfov=2.0)
    co = OrthographicCamera(xmag=1.0, ymag=1.0)
    dl = DirectionalLight()
    pl = PointLight()
    sl = SpotLight()

    n1 = s.add(m, name='mn')
    assert n1.mesh == m
    assert len(s.nodes) == 1
    assert len(s.mesh_nodes) == 1
    assert n1 in s.mesh_nodes
    assert len(s.meshes) == 1
    assert m in s.meshes
    assert len(s.get_nodes(node=n2)) == 0
    n2 = s.add(m, pose=tf)
    assert len(s.nodes) == len(s.mesh_nodes) == 2
    assert len(s.meshes) == 1
    assert len(s.get_nodes(node=n1)) == 1
    assert len(s.get_nodes(node=n1, name='mn')) == 1
    assert len(s.get_nodes(name='mn')) == 1
    assert len(s.get_nodes(obj=m)) == 2
    assert len(s.get_nodes(obj=m, obj_name='m')) == 2
    assert len(s.get_nodes(obj=co)) == 0
    nsl = s.add(sl, name='sln')
    npl = s.add(pl, parent_name='sln')
    assert nsl.children[0] == npl
    ndl = s.add(dl, parent_node=npl)
    assert npl.children[0] == ndl
    nco = s.add(co)
    ncp = s.add(cp)

    assert len(s.light_nodes) == len(s.lights) == 3
    assert len(s.point_light_nodes) == len(s.point_lights) == 1
    assert npl in s.point_light_nodes
    assert len(s.spot_light_nodes) == len(s.spot_lights) == 1
    assert nsl in s.spot_light_nodes
    assert len(s.directional_light_nodes) == len(s.directional_lights) == 1
    assert ndl in s.directional_light_nodes
    assert len(s.cameras) == len(s.camera_nodes) == 2
    assert s.main_camera_node == nco
    s.main_camera_node = ncp
    s.remove_node(ncp)
    assert len(s.cameras) == len(s.camera_nodes) == 1
    assert s.main_camera_node == nco
    s.remove_node(n2)
    assert len(s.meshes) == 1
    s.remove_node(n1)
    assert len(s.meshes) == 0
    s.remove_node(nsl)
    assert len(s.lights) == 0
    s.remove_node(nco)
    assert s.main_camera_node is None

    s.add_node(n1)
    s.clear()
    assert len(s.nodes) == 0

    # Trigger final errors
    with pytest.raises(ValueError):
        s.main_camera_node = None
    with pytest.raises(ValueError):
        s.main_camera_node = ncp
    with pytest.raises(ValueError):
        s.add(m, parent_node=n1)
    with pytest.raises(ValueError):
        s.add(m, name='asdf')
        s.add(m, name='asdf')
        s.add(m, parent_name='asdf')
    with pytest.raises(ValueError):
        s.add(m, parent_name='asfd')
    with pytest.raises(TypeError):
        s.add(None)

    s.clear()
    # Test bounds
    m1 = Mesh.from_trimesh(trimesh.creation.box())
    m2 = Mesh.from_trimesh(trimesh.creation.box())
    m3 = Mesh.from_trimesh(trimesh.creation.box())
    n1 = Node(mesh=m1)
    n2 = Node(mesh=m2, translation=[1.0, 0.0, 0.0])
    n3 = Node(mesh=m3, translation=[0.5, 0.0, 1.0])
    s.add_node(n1)
    s.add_node(n2)
    s.add_node(n3)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [1.5, 0.5, 1.5]])
    s.clear()
    s.add_node(n1)
    s.add_node(n2, parent_node=n1)
    s.add_node(n3, parent_node=n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.0, 0.5, 1.5]])
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n3, tf)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.5, 1.5, 1.5]])
    s.remove_node(n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
    s.clear()
    assert np.allclose(s.bounds, 0.0)
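For context, the Scene API exercised by this test reduces to the following offscreen-rendering flow; the mesh, light, and viewport choices here are arbitrary placeholders.

import numpy as np
import trimesh
import pyrender

# Build a one-mesh scene with a camera and a light, then render it offscreen.
tm = trimesh.creation.box()
mesh = pyrender.Mesh.from_trimesh(tm)
scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=[0.3, 0.3, 0.3])
scene.add(mesh)

camera_pose = np.eye(4)
camera_pose[2, 3] = 3.0  # pull the camera back along +z so the box is in view
scene.add(pyrender.OrthographicCamera(xmag=1.0, ymag=1.0), pose=camera_pose)
scene.add(pyrender.DirectionalLight(intensity=3.0), pose=camera_pose)

renderer = pyrender.OffscreenRenderer(viewport_width=128, viewport_height=128)
color, depth = renderer.render(scene)  # color: (128, 128, 3) uint8, depth: (128, 128) float
renderer.delete()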
Example #4
def main():
    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # HSV "value" (brightness); colorsys expects (h, s, v)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append(np.array((red, green, blue, 1)))

    floor_textures = [
        "../textures/lg_floor_d.tga",
        "../textures/lg_style_01_floor_blue_d.tga",
        "../textures/lg_style_01_floor_orange_bright_d.tga",
    ]

    wall_textures = [
        "../textures/lg_style_01_wall_cerise_d.tga",
        "../textures/lg_style_01_wall_green_bright_d.tga",
        "../textures/lg_style_01_wall_red_bright_d.tga",
        "../textures/lg_style_02_wall_yellow_d.tga",
        "../textures/lg_style_03_wall_orange_bright_d.tga",
    ]

    objects = [
        pyrender.objects.Capsule,
        pyrender.objects.Cylinder,
        pyrender.objects.Icosahedron,
        pyrender.objects.Box,
        pyrender.objects.Sphere,
    ]

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    plt.tight_layout()  # call after the figure and axes exist
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  objects,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # interval is in milliseconds (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
def main():
    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    plt.tight_layout()  # call after the figure and axes exist
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_dice(scene,
               mnist_images,
               discrete_position=args.discrete_position,
               rotate_dice=args.rotate_dice)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # interval is in milliseconds (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "mnist_dice"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_dice:
        filename += "_rotate_dice"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
Example #6
def test_orthographic_camera():
    xm = 1.0
    ym = 2.0
    n = 0.05
    f = 100.0

    with pytest.raises(TypeError):
        c = OrthographicCamera()

    c = OrthographicCamera(xmag=xm, ymag=ym)

    assert c.xmag == xm
    assert c.ymag == ym
    assert c.znear == 0.05
    assert c.zfar == 100.0
    assert c.name is None

    with pytest.raises(TypeError):
        c.ymag = None

    with pytest.raises(ValueError):
        c.ymag = 0.0

    with pytest.raises(ValueError):
        c.ymag = -1.0

    with pytest.raises(TypeError):
        c.xmag = None

    with pytest.raises(ValueError):
        c.xmag = 0.0

    with pytest.raises(ValueError):
        c.xmag = -1.0

    with pytest.raises(TypeError):
        c.znear = None

    with pytest.raises(ValueError):
        c.znear = 0.0

    with pytest.raises(ValueError):
        c.znear = -1.0

    with pytest.raises(ValueError):
        c.zfar = 0.01

    assert np.allclose(
        c.get_projection_matrix(),
        np.array([[1.0 / xm, 0, 0, 0], [0, 1.0 / ym, 0, 0],
                  [0, 0, 2.0 / (n - f), (f + n) / (n - f)], [0, 0, 0, 1.0]]))
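As a sanity check on the matrix asserted above: an orthographic projection with magnifications (xmag, ymag) and clip range [znear, zfar] should map the box [-xmag, xmag] x [-ymag, ymag] in x/y, and depths between znear and zfar in front of the camera (which looks down -z), onto the NDC cube [-1, 1]^3. A small numpy verification, independent of pyrender:

import numpy as np

xm, ym, n, f = 1.0, 2.0, 0.05, 100.0
P = np.array([[1.0 / xm, 0, 0, 0],
              [0, 1.0 / ym, 0, 0],
              [0, 0, 2.0 / (n - f), (f + n) / (n - f)],
              [0, 0, 0, 1.0]])

near_corner = np.array([xm, ym, -n, 1.0])   # right/top corner on the near plane
far_corner = np.array([-xm, -ym, -f, 1.0])  # left/bottom corner on the far plane
assert np.allclose(P @ near_corner, [1.0, 1.0, -1.0, 1.0])
assert np.allclose(P @ far_corner, [-1.0, -1.0, 1.0, 1.0])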