Example #1
class PixelMaskRenderer():
    """Render-ready scene composed of a single object and a single moving camera.

    # Arguments
        path_OBJ: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: List of floats indicating [min_distance, max_distance]
        light: List of floats indicating [min_light, max_light]
        top_only: Boolean. If True, images are only taken from the top.
        roll: Float, to sample [-roll, roll] rolls of the Z OpenGL camera axis.
        shift: Float, to sample [-shift, shift] to move in X, Y OpenGL axes.
    """
    def __init__(self, path_OBJ, viewport_size=(128, 128), y_fov=3.14159 / 4.0,
                 distance=[0.3, 0.5], light=[0.5, 30], top_only=False,
                 roll=None, shift=None):
        self.distance, self.roll, self.shift = distance, roll, shift
        self.light_intensity, self.top_only = light, top_only
        self._build_scene(path_OBJ, viewport_size, light, y_fov)
        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])
        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT
        self.epsilon = 0.01

    def _build_scene(self, path, size, light, y_fov):
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self.scene.add(
            DirectionalLight([1.0, 1.0, 1.0], np.mean(light)))
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*size)))
        self.pixel_mesh = self.scene.add(color_object(path))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path), smooth=True))
        self.world_origin = self.mesh.mesh.centroid

    def _sample_parameters(self):
        distance = sample_uniformly(self.distance)
        camera_origin = sample_point_in_sphere(distance, self.top_only)
        camera_origin = random_perturbation(camera_origin, self.epsilon)
        light_intensity = sample_uniformly(self.light_intensity)
        return camera_origin, light_intensity

    def render(self):
        camera_origin, intensity = self._sample_parameters()
        camera_to_world, world_to_camera = compute_modelview_matrices(
            camera_origin, self.world_origin, self.roll, self.shift)
        self.light.light.intensity = intensity
        self.scene.set_pose(self.camera, camera_to_world)
        self.scene.set_pose(self.light, camera_to_world)
        self.pixel_mesh.mesh.is_visible = False
        image, depth = self.renderer.render(self.scene, self.flags_RGBA)
        self.pixel_mesh.mesh.is_visible = True
        image, alpha = split_alpha_channel(image)
        self.mesh.mesh.is_visible = False
        RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
        self.mesh.mesh.is_visible = True
        return image, alpha, RGB_mask
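
A minimal usage sketch for PixelMaskRenderer, assuming the class above and the helper functions it calls (color_object, sample_uniformly, sample_point_in_sphere, random_perturbation, compute_modelview_matrices, split_alpha_channel) are in scope; 'path/to/object.obj' is a placeholder path, not a real asset.

# Usage sketch: the OBJ path below is a placeholder.
renderer = PixelMaskRenderer('path/to/object.obj',
                             viewport_size=(128, 128),
                             distance=[0.3, 0.5],
                             light=[0.5, 30])
# Every call samples a new camera pose and light intensity.
image, alpha, RGB_mask = renderer.render()
print(image.shape, alpha.shape, RGB_mask.shape)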
Example #2
class DictionaryView():
    """Render-ready scene composed of a single object and a single moving camera.

    # Arguments
        filepath: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: Float. Distance from the camera to the object's centroid.
        top_only: Boolean. If True, images are only taken from the top.
        light: Float. Intensity of the directional light.
        theta_steps: Int. Number of steps used to sample the polar angle.
        phi_steps: Int. Number of steps used to sample the azimuthal angle.
    """
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.,
                 distance=0.30,
                 top_only=False,
                 light=5.0,
                 theta_steps=10,
                 phi_steps=10):
        self.scene = Scene(bg_color=[0, 0, 0])
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*viewport_size)))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(filepath), smooth=True))
        self.world_origin = self.mesh.mesh.centroid
        self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
        self.distance = distance
        # 0.1 values are to avoid gimbal lock
        theta_max = np.pi / 2.0 if top_only else np.pi
        self.thetas = np.linspace(0.1, theta_max - 0.1, theta_steps)
        self.phis = np.linspace(0.1, 2 * np.pi - 0.1, phi_steps)
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA

    def render(self):
        dictionary_data = []
        for theta_arg, theta in enumerate(self.thetas):
            for phi_arg, phi in enumerate(self.phis):
                x = self.distance * np.sin(theta) * np.cos(phi)
                y = self.distance * np.sin(theta) * np.sin(phi)
                z = self.distance * np.cos(theta)
                matrices = compute_modelview_matrices(np.array([x, z, y]),
                                                      self.world_origin)
                camera_to_world, world_to_camera = matrices
                self.scene.set_pose(self.camera, camera_to_world)
                self.scene.set_pose(self.light, camera_to_world)
                camera_to_world = camera_to_world.flatten()
                world_to_camera = world_to_camera.flatten()
                image, depth = self.renderer.render(self.scene,
                                                    flags=self.RGBA)
                image, alpha = split_alpha_channel(image)
                matrices = np.vstack([world_to_camera, camera_to_world])
                sample = {
                    'image': image,
                    'alpha': alpha,
                    'depth': depth,
                    'matrices': matrices
                }
                dictionary_data.append(sample)
        return dictionary_data
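
A short usage sketch for DictionaryView; the OBJ path is a placeholder and the shared helpers (compute_modelview_matrices, split_alpha_channel) are assumed to be in scope.

# Usage sketch: renders a 5 x 5 grid of viewpoints around the object.
view = DictionaryView('path/to/object.obj', distance=0.30,
                      theta_steps=5, phi_steps=5)
samples = view.render()            # list with theta_steps * phi_steps entries
sample = samples[0]
print(len(samples))                # 25
print(sample['image'].shape, sample['depth'].shape)
print(sample['matrices'].shape)    # (2, 16): world_to_camera and camera_to_world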
Example #3
def test_scenes():

    # Basics
    s = Scene()
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))
    assert len(s.nodes) == 0
    assert s.name is None
    s.name = 'asdf'
    s.bg_color = None
    s.ambient_light = None
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))

    assert s.nodes == set()
    assert s.cameras == set()
    assert s.lights == set()
    assert s.point_lights == set()
    assert s.spot_lights == set()
    assert s.directional_lights == set()
    assert s.meshes == set()
    assert s.camera_nodes == set()
    assert s.light_nodes == set()
    assert s.point_light_nodes == set()
    assert s.spot_light_nodes == set()
    assert s.directional_light_nodes == set()
    assert s.mesh_nodes == set()
    assert s.main_camera_node is None
    assert np.all(s.bounds == 0)
    assert np.all(s.centroid == 0)
    assert np.all(s.extents == 0)
    assert np.all(s.scale == 0)

    # From trimesh scene
    tms = trimesh.load('tests/data/WaterBottle.glb')
    s = Scene.from_trimesh_scene(tms)
    assert len(s.meshes) == 1
    assert len(s.mesh_nodes) == 1

    # Test bg color formatting
    s = Scene(bg_color=[0, 1.0, 0])
    assert np.allclose(s.bg_color, np.array([0.0, 1.0, 0.0, 1.0]))

    # Test constructor for nodes
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2, n3]
    s = Scene(nodes=nodes)
    n1.children.append(n2)
    s = Scene(nodes=nodes)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)
    n3.children = []
    n2.children.append(n3)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)

    # Test node accessors
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2]
    s = Scene(nodes=nodes)
    assert s.has_node(n1)
    assert s.has_node(n2)
    assert not s.has_node(n3)

    # Test node poses
    for n in nodes:
        assert np.allclose(s.get_pose(n), np.eye(4))
    with pytest.raises(ValueError):
        s.get_pose(n3)
    with pytest.raises(ValueError):
        s.set_pose(n3, np.eye(4))
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), np.eye(4))

    nodes = [n1, n2, n3]
    tf2 = np.eye(4)
    tf2[:3, :3] = np.diag([-1, -1, 1])
    n1.children.append(n2)
    n1.matrix = tf
    n2.matrix = tf2
    s = Scene(nodes=nodes)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))
    assert np.allclose(s.get_pose(n3), np.eye(4))

    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    s = Scene()
    s.add_node(n1)
    with pytest.raises(ValueError):
        s.add_node(n2)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf)
    s.set_pose(n2, tf2)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))

    # Test node removal
    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    n2.children.append(n3)
    s = Scene(nodes=[n1, n2, n3])
    s.remove_node(n2)
    assert len(s.nodes) == 1
    assert n1 in s.nodes
    assert len(n1.children) == 0
    assert len(n2.children) == 1
    s.add_node(n2, parent_node=n1)
    assert len(n1.children) == 1
    n1.matrix = tf
    n3.matrix = tf2
    assert np.allclose(s.get_pose(n3), tf.dot(tf2))

    # Now test ADD function
    s = Scene()
    m = Mesh([], name='m')
    cp = PerspectiveCamera(yfov=2.0)
    co = OrthographicCamera(xmag=1.0, ymag=1.0)
    dl = DirectionalLight()
    pl = PointLight()
    sl = SpotLight()

    n1 = s.add(m, name='mn')
    assert n1.mesh == m
    assert len(s.nodes) == 1
    assert len(s.mesh_nodes) == 1
    assert n1 in s.mesh_nodes
    assert len(s.meshes) == 1
    assert m in s.meshes
    assert len(s.get_nodes(node=n2)) == 0
    n2 = s.add(m, pose=tf)
    assert len(s.nodes) == len(s.mesh_nodes) == 2
    assert len(s.meshes) == 1
    assert len(s.get_nodes(node=n1)) == 1
    assert len(s.get_nodes(node=n1, name='mn')) == 1
    assert len(s.get_nodes(name='mn')) == 1
    assert len(s.get_nodes(obj=m)) == 2
    assert len(s.get_nodes(obj=m, obj_name='m')) == 2
    assert len(s.get_nodes(obj=co)) == 0
    nsl = s.add(sl, name='sln')
    npl = s.add(pl, parent_name='sln')
    assert nsl.children[0] == npl
    ndl = s.add(dl, parent_node=npl)
    assert npl.children[0] == ndl
    nco = s.add(co)
    ncp = s.add(cp)

    assert len(s.light_nodes) == len(s.lights) == 3
    assert len(s.point_light_nodes) == len(s.point_lights) == 1
    assert npl in s.point_light_nodes
    assert len(s.spot_light_nodes) == len(s.spot_lights) == 1
    assert nsl in s.spot_light_nodes
    assert len(s.directional_light_nodes) == len(s.directional_lights) == 1
    assert ndl in s.directional_light_nodes
    assert len(s.cameras) == len(s.camera_nodes) == 2
    assert s.main_camera_node == nco
    s.main_camera_node = ncp
    s.remove_node(ncp)
    assert len(s.cameras) == len(s.camera_nodes) == 1
    assert s.main_camera_node == nco
    s.remove_node(n2)
    assert len(s.meshes) == 1
    s.remove_node(n1)
    assert len(s.meshes) == 0
    s.remove_node(nsl)
    assert len(s.lights) == 0
    s.remove_node(nco)
    assert s.main_camera_node is None

    s.add_node(n1)
    s.clear()
    assert len(s.nodes) == 0

    # Trigger final errors
    with pytest.raises(ValueError):
        s.main_camera_node = None
    with pytest.raises(ValueError):
        s.main_camera_node = ncp
    with pytest.raises(ValueError):
        s.add(m, parent_node=n1)
    with pytest.raises(ValueError):
        s.add(m, name='asdf')
        s.add(m, name='asdf')
        s.add(m, parent_name='asdf')
    with pytest.raises(ValueError):
        s.add(m, parent_name='asfd')
    with pytest.raises(TypeError):
        s.add(None)

    s.clear()
    # Test bounds
    m1 = Mesh.from_trimesh(trimesh.creation.box())
    m2 = Mesh.from_trimesh(trimesh.creation.box())
    m3 = Mesh.from_trimesh(trimesh.creation.box())
    n1 = Node(mesh=m1)
    n2 = Node(mesh=m2, translation=[1.0, 0.0, 0.0])
    n3 = Node(mesh=m3, translation=[0.5, 0.0, 1.0])
    s.add_node(n1)
    s.add_node(n2)
    s.add_node(n3)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [1.5, 0.5, 1.5]])
    s.clear()
    s.add_node(n1)
    s.add_node(n2, parent_node=n1)
    s.add_node(n3, parent_node=n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.0, 0.5, 1.5]])
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n3, tf)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.5, 1.5, 1.5]])
    s.remove_node(n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
    s.clear()
    assert np.allclose(s.bounds, 0.0)
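
test_scenes above is a pytest-style unit test of the pyrender Scene API. A plausible import header and invocation, assuming the test asset 'tests/data/WaterBottle.glb' is available:

import numpy as np
import pytest
import trimesh
from pyrender import (Scene, Node, Mesh, PerspectiveCamera, OrthographicCamera,
                      DirectionalLight, PointLight, SpotLight)

# Either run it through pytest, e.g. `pytest -q test_scenes.py`,
# or call it directly once the imports above are in place.
test_scenes()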
Example #4
class CanonicalScene():
    def __init__(self, path_OBJ, camera_pose, min_corner, max_corner,
                 symmetric_transforms, viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0, light_intensity=[0.5, 30]):
        self.light_intensity = light_intensity
        self.symmetric_transforms = symmetric_transforms
        self.min_corner, self.max_corner = min_corner, max_corner
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self._build_light(light_intensity, camera_pose)
        self.camera = self._build_camera(y_fov, viewport_size, camera_pose)
        self.pixel_mesh = self.scene.add(color_object(path_OBJ))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path_OBJ), smooth=True))

        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])

        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT

    def _build_light(self, light, pose):
        directional_light = DirectionalLight([1.0, 1.0, 1.0], np.mean(light))
        directional_light = self.scene.add(directional_light, pose=pose)
        return directional_light

    def _build_camera(self, y_fov, viewport_size, pose):
        aspect_ratio = np.divide(*viewport_size)
        camera = PerspectiveCamera(y_fov, aspectRatio=aspect_ratio)
        camera = self.scene.add(camera, pose=pose)
        return camera

    def _sample_parameters(self, min_corner, max_corner):
        mesh_transform = sample_affine_transform(min_corner, max_corner)
        light_intensity = sample_uniformly(self.light_intensity)
        return mesh_transform, light_intensity

    def render(self):
        mesh_transform, light_intensity = self._sample_parameters(
            self.min_corner, self.max_corner)
        mesh_rotation = mesh_transform[0:3, 0:3]
        canonical_rotation = calculate_canonical_rotation(
            mesh_rotation, self.symmetric_transforms)
        # mesh_rotation[0:3, 0:3] = canonical_rotation
        canonical_rotation = np.dot(mesh_rotation, canonical_rotation)
        mesh_rotation[0:3, 0:3] = canonical_rotation
        self.scene.set_pose(self.mesh, mesh_transform)
        self.scene.set_pose(self.pixel_mesh, mesh_transform)
        self.light.light.intensity = light_intensity

        self.pixel_mesh.mesh.is_visible = False
        image, depth = self.renderer.render(self.scene, self.flags_RGBA)
        self.pixel_mesh.mesh.is_visible = True
        image, alpha = split_alpha_channel(image)
        self.mesh.mesh.is_visible = False
        RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
        self.mesh.mesh.is_visible = True
        return image, alpha, RGB_mask

    def render_symmetries(self):
        images, alphas, RGB_masks = [], [], []
        for rotation in self.symmetric_transforms:
            symmetric_transform = to_affine_matrix(rotation, np.zeros(3))
            self.scene.set_pose(self.mesh, symmetric_transform)
            self.scene.set_pose(self.pixel_mesh, symmetric_transform)
            self.pixel_mesh.mesh.is_visible = False
            image, depth = self.renderer.render(self.scene, self.flags_RGBA)
            self.pixel_mesh.mesh.is_visible = True
            image, alpha = split_alpha_channel(image)
            self.mesh.mesh.is_visible = False
            RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
            self.mesh.mesh.is_visible = True
            images.append(image)
            alphas.append(alpha)
            RGB_masks.append(RGB_mask[..., 0:3])
        images = np.concatenate(images, axis=1)
        RGB_masks = np.concatenate(RGB_masks, axis=1)
        images = np.concatenate([images, RGB_masks], axis=0)
        return images
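
A heavily hedged usage sketch for CanonicalScene: the OBJ path, camera pose, bounding corners, and symmetry list below are illustrative values, and the helpers the class relies on (color_object, sample_affine_transform, sample_uniformly, calculate_canonical_rotation, to_affine_matrix) are assumed to be in scope.

# Usage sketch with illustrative parameters (placeholder path, made-up corners).
camera_pose = np.eye(4)
camera_pose[2, 3] = 0.5                       # camera 0.5 units along +Z
symmetric_transforms = [np.eye(3),
                        np.diag([-1.0, -1.0, 1.0])]  # e.g. a 180-degree symmetry
scene = CanonicalScene('path/to/object.obj', camera_pose,
                       min_corner=[-0.05, -0.05, -0.05],
                       max_corner=[0.05, 0.05, 0.05],
                       symmetric_transforms=symmetric_transforms)
image, alpha, RGB_mask = scene.render()
symmetry_grid = scene.render_symmetries()     # all symmetric poses, tiled into one image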
Example #5
class DualView():
    """Scene that renders a single object from two different locations.

    # Arguments
        filepath: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: Float. Max distance from the camera to the origin.
        light: Float representing the light intensity.
        top_only: Boolean. If True, images are only taken from the top.
        scale: Float, factor to apply to translation vector.
        roll: Float, to sample [-roll, roll] rolls of the Z OpenGL camera axis.
        shift: Float, to sample [-shift, shift] to move in X, Y OpenGL axes.
    """
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0,
                 distance=0.3,
                 light=5.0,
                 top_only=True,
                 scale=10.0,
                 roll=None,
                 shift=None):

        self._build_scene(filepath, viewport_size, light, y_fov)
        self.distance, self.roll = distance, roll
        self.top_only, self.shift, self.scale = top_only, shift, scale
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA
        self.epsilon = 0.01

    def _build_scene(self, path, size, light, y_fov):
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*size)))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path), smooth=True))
        self.world_origin = self.mesh.mesh.centroid

    def _sample_camera_origin(self):
        distance = sample_uniformly(self.distance)
        camera_origin = sample_point_in_sphere(distance, self.top_only)
        camera_origin = random_perturbation(camera_origin, self.epsilon)
        return camera_origin

    def _change_scene(self):
        camera_origin = self._sample_camera_origin()
        camera_to_world, world_to_camera = compute_modelview_matrices(
            camera_origin, self.world_origin, self.roll, self.shift)
        self.scene.set_pose(self.camera, camera_to_world)
        self.scene.set_pose(self.light, camera_to_world)
        return camera_to_world, world_to_camera

    def render(self):
        A_to_world, world_to_A = self._change_scene()
        image_A, depth_A = self.renderer.render(self.scene, flags=self.RGBA)
        B_to_world, world_to_B = self._change_scene()
        image_B, depth_B = self.renderer.render(self.scene, flags=self.RGBA)
        image_A, alpha_A = split_alpha_channel(image_A)
        image_B, alpha_B = split_alpha_channel(image_B)
        world_to_A = scale_translation(world_to_A, self.scale)
        world_to_B = scale_translation(world_to_B, self.scale)
        A_to_world = scale_translation(A_to_world, self.scale)
        B_to_world = scale_translation(B_to_world, self.scale)
        matrices = np.vstack([
            world_to_A.flatten(),
            world_to_B.flatten(),
            A_to_world.flatten(),
            B_to_world.flatten()
        ])
        return {
            'image_A': image_A,
            'alpha_A': alpha_A,
            'depth_A': depth_A,
            'image_B': image_B,
            'alpha_B': alpha_B,
            'depth_B': depth_B,
            'matrices': matrices
        }
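
A minimal usage sketch for DualView; the OBJ path is a placeholder and the shared helpers (sample_uniformly, sample_point_in_sphere, random_perturbation, compute_modelview_matrices, split_alpha_channel, scale_translation) are assumed to be in scope.

# Usage sketch: two renders of the same object from independently sampled poses.
dual_view = DualView('path/to/object.obj', distance=0.3, light=5.0)
sample = dual_view.render()
print(sample['image_A'].shape, sample['image_B'].shape)
print(sample['matrices'].shape)   # (4, 16): world_to_A, world_to_B, A_to_world, B_to_world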