Example #1
def render_big_gallery(results_dir,
                       nb=30,
                       pts_colors=[0.5, 0.5, 0.5],
                       draw_text=False):
    '''
    pts_colors: RGB color applied to every point, e.g. [0.5, 0.5, 0.5]
    Returns a numpy array containing one big gallery image (all renders stacked vertically).
    '''

    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames = get_all_filnames(results_dir, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for input_pf in input_ply_filenames:

        input_pc = read_ply_xyz(input_pf)

        colors = np.array(pts_colors)
        colors = np.tile(colors, (input_pc.shape[0], 1))

        input_pc_node = add_point_cloud_mesh_to_scene(input_pc, scene, pc_pose,
                                                      colors)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node)

        if draw_text:
            im_here = Image.fromarray(rendered_color)
            d = ImageDraw.Draw(im_here)
            fnt = ImageFont.truetype(
                font='/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
                size=100)
            d.text((0, 0),
                   input_pf.split('/')[-1],
                   fill=(0, 0, 0, 255),
                   font=fnt)
            rendered_color = np.array(im_here)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
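
A minimal usage sketch, assuming the module-level constants (YFOV, CAM_POSE, POINT_LIGHT_INTENSITY, POINT_SIZE, PC_POSE) and helpers (get_all_filnames, read_ply_xyz, add_point_cloud_mesh_to_scene) referenced above are defined in the same module; 'results/plys' is a placeholder directory:

gallery = render_big_gallery('results/plys', nb=10, pts_colors=[0.2, 0.2, 0.8])
Image.fromarray(gallery).save('gallery.png')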
Example #2
def quick_color_visualize():
    scene = Scene(bg_color=[0, 0, 0])
    root = os.path.expanduser('~')
    mesh_path = '.keras/paz/datasets/ycb_models/035_power_drill/textured.obj'
    path = os.path.join(root, mesh_path)
    mesh = color_object(path)
    scene.add(mesh)
    Viewer(scene, use_raymond_lighting=True, flags=RenderFlags.FLAT)
Example #3
class PixelMaskRenderer():
    """Render-ready scene composed of a single object and a single moving camera.

    # Arguments
        path_OBJ: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: List of floats indicating [max_distance, min_distance]
        light: List of floats indicating [max_light, min_light]
        top_only: Boolean. If True, images are only taken from the top.
        roll: Float, to sample [-roll, roll] rolls of the Z OpenGL camera axis.
        shift: Float, to sample [-shift, shift] to move in X, Y OpenGL axes.
    """
    def __init__(self, path_OBJ, viewport_size=(128, 128), y_fov=3.14159 / 4.0,
                 distance=[0.3, 0.5], light=[0.5, 30], top_only=False,
                 roll=None, shift=None):
        self.distance, self.roll, self.shift = distance, roll, shift
        self.light_intensity, self.top_only = light, top_only
        self._build_scene(path_OBJ, viewport_size, light, y_fov)
        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])
        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT
        self.epsilon = 0.01

    def _build_scene(self, path, size, light, y_fov):
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self.scene.add(
            DirectionalLight([1.0, 1.0, 1.0], np.mean(light)))
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*size)))
        self.pixel_mesh = self.scene.add(color_object(path))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path), smooth=True))
        self.world_origin = self.mesh.mesh.centroid

    def _sample_parameters(self):
        distance = sample_uniformly(self.distance)
        camera_origin = sample_point_in_sphere(distance, self.top_only)
        camera_origin = random_perturbation(camera_origin, self.epsilon)
        light_intensity = sample_uniformly(self.light_intensity)
        return camera_origin, light_intensity

    def render(self):
        camera_origin, intensity = self._sample_parameters()
        camera_to_world, world_to_camera = compute_modelview_matrices(
            camera_origin, self.world_origin, self.roll, self.shift)
        self.light.light.intensity = intensity
        self.scene.set_pose(self.camera, camera_to_world)
        self.scene.set_pose(self.light, camera_to_world)
        self.pixel_mesh.mesh.is_visible = False
        image, depth = self.renderer.render(self.scene, self.flags_RGBA)
        self.pixel_mesh.mesh.is_visible = True
        image, alpha = split_alpha_channel(image)
        self.mesh.mesh.is_visible = False
        RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
        self.mesh.mesh.is_visible = True
        return image, alpha, RGB_mask
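
A minimal usage sketch, with 'textured.obj' as a placeholder path; the helpers used by the class (color_object, sample_uniformly, sample_point_in_sphere, random_perturbation, compute_modelview_matrices, split_alpha_channel) are assumed to come from the same module:

renderer = PixelMaskRenderer('textured.obj', viewport_size=(128, 128), top_only=True)
image, alpha, RGB_mask = renderer.render()  # RGB render, alpha mask, flat color-coded mask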
Example #4
def render_big_gallery_overlay(dir_1,
                               dir_2,
                               pts_color_1=[0.5, 0.5, 0.5],
                               pts_color_2=[0.5, 0.5, 0.5],
                               nb=30):
    '''
    Overlays the point clouds from dir_1 and dir_2 in each render.
    Returns a numpy array containing one big gallery image (all renders stacked vertically).
    '''
    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames_1 = get_all_filnames(dir_1, nb)
    input_ply_filenames_2 = get_all_filnames(dir_2, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for idx, input_pf in enumerate(input_ply_filenames_1):

        input_pc_1 = read_ply_xyz(input_pf)
        input_pc_2 = read_ply_xyz(input_ply_filenames_2[idx])

        color_1 = np.array(pts_color_1)
        color_1 = np.tile(color_1, (input_pc_1.shape[0], 1))

        color_2 = np.array(pts_color_2)
        color_2 = np.tile(color_2, (input_pc_2.shape[0], 1))

        input_pc_node_1 = add_point_cloud_mesh_to_scene(
            input_pc_1, scene, pc_pose, color_1)
        input_pc_node_2 = add_point_cloud_mesh_to_scene(
            input_pc_2, scene, pc_pose, color_2)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node_1)
        scene.remove_node(input_pc_node_2)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
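
An analogous usage sketch for the overlay variant, with both result directories as placeholders:

overlay = render_big_gallery_overlay('results/ground_truth', 'results/predictions',
                                     pts_color_1=[0.8, 0.2, 0.2],
                                     pts_color_2=[0.2, 0.2, 0.8], nb=10)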
Example #5
    def _reset_scene(self, scale_factor=1.0):
        """Resets the scene.

        Parameters
        ----------
        scale_factor : float
            optional scale factor to apply to the image dimensions
        """
        # delete scene
        if self._scene is not None:
            self._scene.clear()
            del self._scene

        # create scene
        scene = Scene()

        # setup camera
        camera = IntrinsicsCamera(
            self.camera.intrinsics.fx,
            self.camera.intrinsics.fy,
            self.camera.intrinsics.cx,
            self.camera.intrinsics.cy,
        )
        pose_m = self.camera.pose.matrix.copy()
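        # Flip the Y and Z camera axes: this likely converts an OpenCV-style camera
        # frame (z forward, y down) into the OpenGL frame pyrender expects
        # (z backward, y up).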
        pose_m[:, 1:3] *= -1.0
        scene.add(camera, pose=pose_m, name=self.camera.frame)
        scene.main_camera_node = next(
            iter(scene.get_nodes(name=self.camera.frame))
        )

        material = MetallicRoughnessMaterial(
            baseColorFactor=np.array([1, 1, 1, 1.0]),
            metallicFactor=0.2,
            roughnessFactor=0.8,
        )

        # add workspace objects
        for obj_key in self.state.workspace_keys:
            obj_state = self.state[obj_key]
            obj_mesh = Mesh.from_trimesh(obj_state.mesh, material=material)
            T_obj_world = obj_state.pose.matrix
            scene.add(obj_mesh, pose=T_obj_world, name=obj_key)

        # add scene objects
        for obj_key in self.state.obj_keys:
            obj_state = self.state[obj_key]
            obj_mesh = Mesh.from_trimesh(obj_state.mesh, material=material)
            T_obj_world = obj_state.pose.matrix
            scene.add(obj_mesh, pose=T_obj_world, name=obj_key)

        # add light (for color rendering)
        light = DirectionalLight(color=np.ones(3), intensity=1.0)
        scene.add(light, pose=np.eye(4))
        ray_light_nodes = self._create_raymond_lights()
        for rln in ray_light_nodes:
            scene.add_node(rln)

        self._scene = scene
Example #6
def render_sensor(
    point_set,
    render_sensor_path="/Users/macbookpro15/Documents/mujoco_hand_exps/data/sensor_render"
):
    """
    pointset: it is collectiono of sensor points for all timesteps
    """
    # first take one of the point, subtract the center from it which
    # I know is the 0-th position out of the 220 points
    # form the mesh from this
    if not os.path.exists(render_sensor_path):
        os.makedirs(render_sensor_path)
    time_steps = len(point_set)
    for t in range(time_steps):
        sensor = trimesh.load_mesh(
            f'../data/mesh_dir/mesh_{t}_out/mc_mesh_out.ply')
        sensor_mesh = Mesh.from_trimesh(sensor)
        # Light for the scene
        direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
        spot_l = SpotLight(color=np.ones(3),
                           intensity=10.0,
                           innerConeAngle=np.pi / 16,
                           outerConeAngle=np.pi / 6)
        point_l = PointLight(color=np.ones(3), intensity=10.0)

        # add camera to the scene
        cam = PerspectiveCamera(yfov=(np.pi / 3.0))
        cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                             [0.0, np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

        # create the scene
        scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
        point_mesh_node = scene.add(sensor_mesh)
        direc_l_node = scene.add(direc_l, pose=cam_pose)
        spot_l_node = scene.add(spot_l, pose=cam_pose)
        cam_node = scene.add(cam, pose=cam_pose)
        print('rendering the scene offline')
        r = OffscreenRenderer(viewport_width=640, viewport_height=480)
        color, depth = r.render(scene)
        r.delete()

        plt.figure()
        plt.imshow(color)
        plt.savefig(f'{render_sensor_path}/img_{t}.jpg')
        plt.close()
Example #7
    def _reset_scene(self, scale_factor=1.0):
        """ Resets the scene.

        Parameters
        ----------
        scale_factor : float
            optional scale factor to apply to the image dimensions
        """
        # delete scene
        if self._scene is not None:
            self._scene.clear()
            del self._scene

        # create scene
        scene = Scene()

        # setup camera
        camera = IntrinsicsCamera(self.camera.intrinsics.fx, self.camera.intrinsics.fy, 
                                  self.camera.intrinsics.cx, self.camera.intrinsics.cy)
        pose_m = self.camera.pose.matrix.copy()
        pose_m[:, 1:3] *= -1.0
        scene.add(camera, pose=pose_m, name=self.camera.frame)
        scene.main_camera_node = next(iter(scene.get_nodes(name=self.camera.frame)))

        # add workspace objects
        for obj_key in self.state.workspace_keys:
            obj_state = self.state[obj_key]
            obj_mesh = Mesh.from_trimesh(obj_state.mesh)
            T_obj_world = obj_state.pose.matrix
            scene.add(obj_mesh, pose=T_obj_world, name=obj_key)

        # add scene objects
        for obj_key in self.state.obj_keys:
            obj_state = self.state[obj_key]
            obj_mesh = Mesh.from_trimesh(obj_state.mesh)
            T_obj_world = obj_state.pose.matrix
            scene.add(obj_mesh, pose=T_obj_world, name=obj_key)

        self._scene = scene
Example #8
def render_mesh(mesh, h=256, w=256):
    """https://pyrender.readthedocs.io/en/latest/examples/quickstart.html"""
    mesh = pyrender.Mesh.from_trimesh(mesh.trimesh())
    scene = Scene()
    scene.add(mesh)

    # z-axis away from the scene, x-axis right, y-axis up
    pose = np.eye(4)
    pose[2, 3] = 250

    # add camera
    camera = PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    scene.add(camera, pose=pose)

    # add light
    # light = DirectionalLight(color=np.ones(3), intensity=5.0)
    light = PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
    scene.add(light, pose=pose)

    r = OffscreenRenderer(viewport_width=w, viewport_height=h)
    color, depth = r.render(scene)
    r.delete()
    return color
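
A usage sketch for render_mesh: it expects an object exposing a trimesh() accessor, so a small hypothetical adapter wraps a plain trimesh box here (sized and placed to be visible from the camera 250 units up the z-axis):

class _TrimeshAdapter:
    # Hypothetical wrapper providing the .trimesh() accessor render_mesh expects.
    def __init__(self, tm):
        self._tm = tm

    def trimesh(self):
        return self._tm

box = trimesh.creation.box(extents=[100.0, 100.0, 100.0])
color = render_mesh(_TrimeshAdapter(box), h=256, w=256)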
Example #9
def test_offscreen_renderer(tmpdir):

    # Fuze trimesh
    fuze_trimesh = trimesh.load('examples/models/fuze.obj')
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load('examples/models/drill.obj')
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0, 3] = 0.1
    drill_pose[2, 3] = -np.min(drill_trimesh.vertices[:, 2])

    # Wood trimesh
    wood_trimesh = trimesh.load('examples/models/wood.obj')
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh
    bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array([
        [1.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, -1.0, -0.16],
        [0.0, 1.0, 0.0, 0.13],
        [0.0, 0.0, 0.0, 1.0],
    ])

    boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced
    poses = np.tile(np.eye(4), (2, 1, 1))
    poses[0, :3, 3] = np.array([-0.1, -0.10, 0.05])
    poses[1, :3, 3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3),
                       intensity=10.0,
                       innerConeAngle=np.pi / 16,
                       outerConeAngle=np.pi / 6)

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    fuze_node = Node(mesh=fuze_mesh,
                     translation=np.array(
                         [0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]))
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh)
    scene.add_node(boxf_node)

    _ = scene.add(drill_mesh, pose=drill_pose)
    _ = scene.add(bottle_mesh, pose=bottle_pose)
    _ = scene.add(wood_mesh)
    _ = scene.add(direc_l, pose=cam_pose)
    _ = scene.add(spot_l, pose=cam_pose)
    _ = scene.add(points_mesh)

    _ = scene.add(cam, pose=cam_pose)

    r = OffscreenRenderer(viewport_width=640, viewport_height=480)
    color, depth = r.render(scene)

    assert color.shape == (480, 640, 3)
    assert depth.shape == (480, 640)
    assert np.max(depth.data) > 0.05
    assert np.count_nonzero(depth.data) > (0.2 * depth.size)
    r.delete()
Example #10
def main(mesh_path):
    # rendering conf
    ambient_light = 0.8
    directional_light = 1.0
    img_res = 512
    cam_f = 500
    cam_c = img_res / 2.0

    scene = Scene(ambient_light=np.array(
        [ambient_light, ambient_light, ambient_light, 1.0]))

    mesh_v, mesh_vc, mesh_f = load_ply_data(mesh_path)
    mesh_ = trimesh.Trimesh(vertices=mesh_v,
                            faces=mesh_f,
                            vertex_colors=mesh_vc)
    points_mesh = Mesh.from_trimesh(mesh_, smooth=True, material=None)
    mesh_node = scene.add(points_mesh)

    cam = IntrinsicsCamera(fx=cam_f, fy=cam_f, cx=cam_c, cy=cam_c)
    cam_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                         [0.0, 0.0, 1.0, 2.0], [0.0, 0.0, 0.0, 1.0]])

    direc_l = DirectionalLight(color=np.ones(3), intensity=directional_light)
    light_node_1 = scene.add(direc_l,
                             pose=np.matmul(rotationy(30), rotationx(45)))
    direc_l = DirectionalLight(color=np.ones(3), intensity=directional_light)
    light_node_2 = scene.add(direc_l,
                             pose=np.matmul(rotationy(-30), rotationx(45)))
    direc_l = DirectionalLight(color=np.ones(3), intensity=directional_light)
    light_node_3 = scene.add(direc_l,
                             pose=np.matmul(rotationy(-180), rotationx(45)))
    direc_l = DirectionalLight(color=np.ones(3),
                               intensity=(directional_light - 0.5))
    light_node_4 = scene.add(direc_l,
                             pose=np.matmul(rotationy(0), rotationx(-10)))

    ################
    # rendering
    cam_node = scene.add(cam, pose=cam_pose)
    render_flags = {
        'flip_wireframe': False,
        'all_wireframe': False,
        'all_solid': False,
        'shadows': True,
        'vertex_normals': False,
        'face_normals': False,
        'cull_faces': True,
        'point_size': 1.0,
    }
    viewer_flags = {
        'mouse_pressed': False,
        'rotate': False,
        'rotate_rate': np.pi / 6.0,
        'rotate_axis': np.array([0.0, 1.0, 0.0]),
        'view_center': np.array([0.0, 0.0, 0.0]),
        'record': False,
        'use_raymond_lighting': False,
        'use_direct_lighting': False,
        'lighting_intensity': 3.0,
        'use_perspective_cam': True,
        'window_title': 'DIT',
        'refresh_rate': 25.0,
        'fullscreen': False,
        'show_world_axis': False,
        'show_mesh_axes': False,
        'caption': None,
        'save_one_frame': False,
    }
    v = Viewer(scene,
               viewport_size=(512, 512),
               render_flags=render_flags,
               viewer_flags=viewer_flags,
               run_in_thread=False)
    v.close()
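
The two dictionaries above spell out every render and viewer flag explicitly; pyrender merges whatever you pass with its defaults, so only the values you actually want to change need to be listed. A trimmed-down sketch of the same call (keeping the shadow and window settings) could look like:

v = Viewer(scene,
           viewport_size=(512, 512),
           render_flags={'shadows': True},
           viewer_flags={'window_title': 'DIT', 'refresh_rate': 25.0},
           run_in_thread=False)
v.close()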
Example #11
# Flower pose
flower_pose = get_flower_pose(flower_type, file_name)

#==============================================================================
# Scene creation
#==============================================================================

scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))

#==============================================================================
# Adding objects to the scene
#==============================================================================

# Flower
flower_mesh = mesh_list.pop(index_1)    
flower_node = scene.add(flower_mesh, pose=flower_pose, name='flower')

# Center (aka stigma)
if flower_type in no_center_flowers:
    center_mesh = flower_mesh
else:
    center_mesh = mesh_list.pop(index_2)
    center_node = scene.add(center_mesh, pose=flower_pose, name='center')

# Stem (often not used)
stem_mesh = mesh_list.pop(index_3)
stem_node = scene.add(stem_mesh, pose=flower_pose, name='stem')

# The other sub-meshes of the flowers
for i in range(len(mesh_list)):
    scene.add(mesh_list[i], pose=flower_pose, name='flower'+str(i))
Example #12
def test_scenes():

    # Basics
    s = Scene()
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))
    assert len(s.nodes) == 0
    assert s.name is None
    s.name = 'asdf'
    s.bg_color = None
    s.ambient_light = None
    assert np.allclose(s.bg_color, np.ones(4))
    assert np.allclose(s.ambient_light, np.zeros(3))

    assert s.nodes == set()
    assert s.cameras == set()
    assert s.lights == set()
    assert s.point_lights == set()
    assert s.spot_lights == set()
    assert s.directional_lights == set()
    assert s.meshes == set()
    assert s.camera_nodes == set()
    assert s.light_nodes == set()
    assert s.point_light_nodes == set()
    assert s.spot_light_nodes == set()
    assert s.directional_light_nodes == set()
    assert s.mesh_nodes == set()
    assert s.main_camera_node is None
    assert np.all(s.bounds == 0)
    assert np.all(s.centroid == 0)
    assert np.all(s.extents == 0)
    assert np.all(s.scale == 0)

    # From trimesh scene
    tms = trimesh.load('tests/data/WaterBottle.glb')
    s = Scene.from_trimesh_scene(tms)
    assert len(s.meshes) == 1
    assert len(s.mesh_nodes) == 1

    # Test bg color formatting
    s = Scene(bg_color=[0, 1.0, 0])
    assert np.allclose(s.bg_color, np.array([0.0, 1.0, 0.0, 1.0]))

    # Test constructor for nodes
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2, n3]
    s = Scene(nodes=nodes)
    n1.children.append(n2)
    s = Scene(nodes=nodes)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)
    n3.children = []
    n2.children.append(n3)
    n3.children.append(n2)
    with pytest.raises(ValueError):
        s = Scene(nodes=nodes)

    # Test node accessors
    n1 = Node()
    n2 = Node()
    n3 = Node()
    nodes = [n1, n2]
    s = Scene(nodes=nodes)
    assert s.has_node(n1)
    assert s.has_node(n2)
    assert not s.has_node(n3)

    # Test node poses
    for n in nodes:
        assert np.allclose(s.get_pose(n), np.eye(4))
    with pytest.raises(ValueError):
        s.get_pose(n3)
    with pytest.raises(ValueError):
        s.set_pose(n3, np.eye(4))
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), np.eye(4))

    nodes = [n1, n2, n3]
    tf2 = np.eye(4)
    tf2[:3, :3] = np.diag([-1, -1, 1])
    n1.children.append(n2)
    n1.matrix = tf
    n2.matrix = tf2
    s = Scene(nodes=nodes)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))
    assert np.allclose(s.get_pose(n3), np.eye(4))

    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    s = Scene()
    s.add_node(n1)
    with pytest.raises(ValueError):
        s.add_node(n2)
    s.set_pose(n1, tf)
    assert np.allclose(s.get_pose(n1), tf)
    assert np.allclose(s.get_pose(n2), tf)
    s.set_pose(n2, tf2)
    assert np.allclose(s.get_pose(n2), tf.dot(tf2))

    # Test node removal
    n1 = Node()
    n2 = Node()
    n3 = Node()
    n1.children.append(n2)
    n2.children.append(n3)
    s = Scene(nodes=[n1, n2, n3])
    s.remove_node(n2)
    assert len(s.nodes) == 1
    assert n1 in s.nodes
    assert len(n1.children) == 0
    assert len(n2.children) == 1
    s.add_node(n2, parent_node=n1)
    assert len(n1.children) == 1
    n1.matrix = tf
    n3.matrix = tf2
    assert np.allclose(s.get_pose(n3), tf.dot(tf2))

    # Now test ADD function
    s = Scene()
    m = Mesh([], name='m')
    cp = PerspectiveCamera(yfov=2.0)
    co = OrthographicCamera(xmag=1.0, ymag=1.0)
    dl = DirectionalLight()
    pl = PointLight()
    sl = SpotLight()

    n1 = s.add(m, name='mn')
    assert n1.mesh == m
    assert len(s.nodes) == 1
    assert len(s.mesh_nodes) == 1
    assert n1 in s.mesh_nodes
    assert len(s.meshes) == 1
    assert m in s.meshes
    assert len(s.get_nodes(node=n2)) == 0
    n2 = s.add(m, pose=tf)
    assert len(s.nodes) == len(s.mesh_nodes) == 2
    assert len(s.meshes) == 1
    assert len(s.get_nodes(node=n1)) == 1
    assert len(s.get_nodes(node=n1, name='mn')) == 1
    assert len(s.get_nodes(name='mn')) == 1
    assert len(s.get_nodes(obj=m)) == 2
    assert len(s.get_nodes(obj=m, obj_name='m')) == 2
    assert len(s.get_nodes(obj=co)) == 0
    nsl = s.add(sl, name='sln')
    npl = s.add(pl, parent_name='sln')
    assert nsl.children[0] == npl
    ndl = s.add(dl, parent_node=npl)
    assert npl.children[0] == ndl
    nco = s.add(co)
    ncp = s.add(cp)

    assert len(s.light_nodes) == len(s.lights) == 3
    assert len(s.point_light_nodes) == len(s.point_lights) == 1
    assert npl in s.point_light_nodes
    assert len(s.spot_light_nodes) == len(s.spot_lights) == 1
    assert nsl in s.spot_light_nodes
    assert len(s.directional_light_nodes) == len(s.directional_lights) == 1
    assert ndl in s.directional_light_nodes
    assert len(s.cameras) == len(s.camera_nodes) == 2
    assert s.main_camera_node == nco
    s.main_camera_node = ncp
    s.remove_node(ncp)
    assert len(s.cameras) == len(s.camera_nodes) == 1
    assert s.main_camera_node == nco
    s.remove_node(n2)
    assert len(s.meshes) == 1
    s.remove_node(n1)
    assert len(s.meshes) == 0
    s.remove_node(nsl)
    assert len(s.lights) == 0
    s.remove_node(nco)
    assert s.main_camera_node is None

    s.add_node(n1)
    s.clear()
    assert len(s.nodes) == 0

    # Trigger final errors
    with pytest.raises(ValueError):
        s.main_camera_node = None
    with pytest.raises(ValueError):
        s.main_camera_node = ncp
    with pytest.raises(ValueError):
        s.add(m, parent_node=n1)
    with pytest.raises(ValueError):
        s.add(m, name='asdf')
        s.add(m, name='asdf')
        s.add(m, parent_name='asdf')
    with pytest.raises(ValueError):
        s.add(m, parent_name='asfd')
    with pytest.raises(TypeError):
        s.add(None)

    s.clear()
    # Test bounds
    m1 = Mesh.from_trimesh(trimesh.creation.box())
    m2 = Mesh.from_trimesh(trimesh.creation.box())
    m3 = Mesh.from_trimesh(trimesh.creation.box())
    n1 = Node(mesh=m1)
    n2 = Node(mesh=m2, translation=[1.0, 0.0, 0.0])
    n3 = Node(mesh=m3, translation=[0.5, 0.0, 1.0])
    s.add_node(n1)
    s.add_node(n2)
    s.add_node(n3)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [1.5, 0.5, 1.5]])
    s.clear()
    s.add_node(n1)
    s.add_node(n2, parent_node=n1)
    s.add_node(n3, parent_node=n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.0, 0.5, 1.5]])
    tf = np.eye(4)
    tf[:3, 3] = np.ones(3)
    s.set_pose(n3, tf)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [2.5, 1.5, 1.5]])
    s.remove_node(n2)
    assert np.allclose(s.bounds, [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
    s.clear()
    assert np.allclose(s.bounds, 0.0)
Example #13
class DualView():
    """Scene that renders a single object from two different locations.

    # Arguments
        filepath: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: Float. Max distance from the camera to the origin.
        light: Integer representing the light intensity.
        top_only: Boolean. If True, images are only taken from the top.
        scale: Float, factor to apply to translation vector.
        roll: Float, to sample [-roll, roll] rolls of the Z OpenGL camera axis.
        shift: Float, to sample [-shift, shift] to move in X, Y OpenGL axes.
    """
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0,
                 distance=0.3,
                 light=5.0,
                 top_only=True,
                 scale=10.0,
                 roll=None,
                 shift=None):

        self._build_scene(filepath, viewport_size, light, y_fov)
        self.distance, self.roll = distance, roll
        self.top_only, self.shift, self.scale = top_only, shift, scale
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA
        self.epsilon = 0.01

    def _build_scene(self, path, size, light, y_fov):
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*size)))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path), smooth=True))
        self.world_origin = self.mesh.mesh.centroid

    def _sample_camera_origin(self):
        distance = sample_uniformly(self.distance)
        camera_origin = sample_point_in_sphere(distance, self.top_only)
        camera_origin = random_perturbation(camera_origin, self.epsilon)
        return camera_origin

    def _change_scene(self):
        camera_origin = self._sample_camera_origin()
        camera_to_world, world_to_camera = compute_modelview_matrices(
            camera_origin, self.world_origin, self.roll, self.shift)
        self.scene.set_pose(self.camera, camera_to_world)
        self.scene.set_pose(self.light, camera_to_world)
        return camera_to_world, world_to_camera

    def render(self):
        A_to_world, world_to_A = self._change_scene()
        image_A, depth_A = self.renderer.render(self.scene, flags=self.RGBA)
        B_to_world, world_to_B = self._change_scene()
        image_B, depth_B = self.renderer.render(self.scene, flags=self.RGBA)
        image_A, alpha_A = split_alpha_channel(image_A)
        image_B, alpha_B = split_alpha_channel(image_B)
        world_to_A = scale_translation(world_to_A, self.scale)
        world_to_B = scale_translation(world_to_B, self.scale)
        A_to_world = scale_translation(A_to_world, self.scale)
        B_to_world = scale_translation(B_to_world, self.scale)
        matrices = np.vstack([
            world_to_A.flatten(),
            world_to_B.flatten(),
            A_to_world.flatten(),
            B_to_world.flatten()
        ])
        return {
            'image_A': image_A,
            'alpha_A': alpha_A,
            'depth_A': depth_A,
            'image_B': image_B,
            'alpha_B': alpha_B,
            'depth_B': depth_B,
            'matrices': matrices
        }
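
A minimal usage sketch, with 'textured.obj' as a placeholder path; the helpers used by the class (sample_uniformly, sample_point_in_sphere, random_perturbation, compute_modelview_matrices, split_alpha_channel, scale_translation) are assumed to come from the same module:

dual_view = DualView('textured.obj', viewport_size=(128, 128), distance=0.3)
sample = dual_view.render()
print(sample['image_A'].shape, sample['matrices'].shape)  # two views plus four flattened 4x4 matrices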
Example #14
                            znear=0.1,
                            zfar=6)
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    proj = cam.get_projection_matrix()
    mvp = proj @ np.linalg.inv(cam_pose)
    inv_mvp = np.linalg.inv(mvp)
    #==============================================================================
    # Scene creation
    #==============================================================================

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    cam_node = scene.add(cam, pose=cam_pose)

    scene.main_camera_node = cam_node

    #==============================================================================
    drill_node = scene.add(drill_mesh)
    r = OffscreenRenderer(viewport_width=640, viewport_height=480)

    rf = pyrender.RenderFlags.NONE
    rf |= pyrender.RenderFlags.USE_RAW_DEPTH
    color, raw_depth = r.render(scene, flags=rf)
    r.delete()

    # unproject to get point cloud
    pcd = unproj(inv_mvp, raw_depth)
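
The unproj helper itself is not shown. A possible sketch, assuming raw_depth holds the raw [0, 1] depth buffer requested via USE_RAW_DEPTH (with 1.0 marking the background) and inv_mvp is the inverse model-view-projection matrix computed above:

def unproj_sketch(inv_mvp, raw_depth):
    # Lift every foreground depth-buffer pixel back to a world-space point.
    height, width = raw_depth.shape
    v, u = np.nonzero(raw_depth < 1.0)        # 1.0 is the far plane / background
    x_ndc = 2.0 * (u + 0.5) / width - 1.0
    y_ndc = 1.0 - 2.0 * (v + 0.5) / height    # image rows grow downward, NDC y grows upward
    z_ndc = 2.0 * raw_depth[v, u] - 1.0
    clip = np.stack([x_ndc, y_ndc, z_ndc, np.ones_like(x_ndc)])
    world = inv_mvp @ clip                    # unproject through the inverse MVP
    return (world[:3] / world[3]).T           # N x 3 point cloud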
Example #15
        yield from traverse_dynamic_dataset(root_dir)

if __name__ == "__main__":
    # Light creation
    direc_l = DirectionalLight(color=np.ones(3), intensity=5.0)
    # spot_l = SpotLight(color=np.ones(3), intensity=10.0, innerConeAngle=np.pi/16, outerConeAngle=np.pi/6)
    point_l = PointLight(color=np.ones(3), intensity=10.0)

    # Camera creation
    cam = PerspectiveCamera(yfov=(np.pi / 3.0))

    # Scene creation
    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))

    # Adding objects to the scene
    cam_node = scene.add(cam)
    direc_l_node = scene.add(direc_l, parent_node=cam_node)
    # spot_l_node = scene.add(spot_l, parent_node=cam_node)
    point_l_node = scene.add(point_l, parent_node=cam_node)

    r = OffscreenRenderer(viewport_width=g_single_viewport_size[0], viewport_height=g_single_viewport_size[1])

    # Start logging flag
    write_log("Start logging at %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    write_log("Dataset path: %s" % os.path.abspath(g_dataset_dir))
    write_log("Logging path: %s" % os.path.abspath(g_log_filename))
    write_log("Dataset kind: %s" % g_dataset_kind)

    for meshes, save_paths in traverse_dataset(g_dataset_dir, g_dataset_kind):

        for filename, component_list in g_vis_list.items():
Example #16
    # ------------------------------------------------------------------------------
    # By manually creating nodes
    # ------------------------------------------------------------------------------
    fuze_node = Node(mesh=fuze_mesh,
                     translation=np.array(
                         [0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]))
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh, translation=np.array([-0.1, -0.10, 0.05]))
    scene.add_node(boxf_node)

    # ------------------------------------------------------------------------------
    # By using the add() utility function
    # ------------------------------------------------------------------------------
    drill_node = scene.add(drill_mesh, pose=drill_pose)
    bottle_node = scene.add(bottle_mesh, pose=bottle_pose)
    wood_node = scene.add(wood_mesh)
    direc_l_node = scene.add(direc_l, pose=cam_pose)
    spot_l_node = scene.add(spot_l, pose=cam_pose)

    # ==============================================================================
    # Using the viewer with a default camera
    # ==============================================================================

    # needs a screen
    # v = Viewer(scene, shadows=True)

    # ==============================================================================
    # Using the viewer with a pre-specified camera
    # ==============================================================================
Example #17
    vec = np.array([0, point_size_ndc])
    vertices[0, 0:2] = vec
    vertices[1, 0:2] = _rot_mat(np.radians(120)) @ vec
    vertices[2, 0:2] = _rot_mat(np.radians(-120)) @ vec
    mat = MetallicRoughnessMaterial(doubleSided=True,
                                    baseColorFactor=[1.0, 0, 0, 1])

    instances = [np.eye(4) for i in range(pts_2d.shape[0])]
    for i in range(pts_2d.shape[0]):
        instances[i][0, 3] = (2 * pts_2d[i][0] - width) / height
        instances[i][1, 3] = pts_2d[i][1] / height * 2 - 1
        instances[i][2, 3] = -1
    prim = Primitive(positions=vertices,
                     mode=GLTF.TRIANGLES,
                     material=mat,
                     poses=instances)
    return Mesh(primitives=[prim])


pts_mesh = create_2d_point_mesh(640,
                                480, [[0, 0], [320, 240], [640, 480]],
                                point_size_px=15)

duck_node = scene.add(duckmesh)
dn = scene.add_hud_node(Node(name="hud", mesh=pts_mesh))

Viewer(scene,
       run_in_thread=False,
       viewer_flags={"use_direct_lighting": True},
       viewport_size=(640, 480))
Example #18
class CanonicalScene():
    def __init__(self, path_OBJ, camera_pose, min_corner, max_corner,
                 symmetric_transforms, viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0, light_intensity=[0.5, 30]):
        self.light_intensity = light_intensity
        self.symmetric_transforms = symmetric_transforms
        self.min_corner, self.max_corner = min_corner, max_corner
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self._build_light(light_intensity, camera_pose)
        self.camera = self._build_camera(y_fov, viewport_size, camera_pose)
        self.pixel_mesh = self.scene.add(color_object(path_OBJ))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path_OBJ), smooth=True))

        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])

        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT

    def _build_light(self, light, pose):
        directional_light = DirectionalLight([1.0, 1.0, 1.0], np.mean(light))
        directional_light = self.scene.add(directional_light, pose=pose)
        return directional_light

    def _build_camera(self, y_fov, viewport_size, pose):
        aspect_ratio = np.divide(*viewport_size)
        camera = PerspectiveCamera(y_fov, aspectRatio=aspect_ratio)
        camera = self.scene.add(camera, pose=pose)
        return camera

    def _sample_parameters(self, min_corner, max_corner):
        mesh_transform = sample_affine_transform(min_corner, max_corner)
        light_intensity = sample_uniformly(self.light_intensity)
        return mesh_transform, light_intensity

    def render(self):
        mesh_transform, light_intensity = self._sample_parameters(
            self.min_corner, self.max_corner)
        mesh_rotation = mesh_transform[0:3, 0:3]
        canonical_rotation = calculate_canonical_rotation(
            mesh_rotation, self.symmetric_transforms)
        # mesh_rotation[0:3, 0:3] = canonical_rotation
        canonical_rotation = np.dot(mesh_rotation, canonical_rotation)
        mesh_rotation[0:3, 0:3] = canonical_rotation
        self.scene.set_pose(self.mesh, mesh_transform)
        self.scene.set_pose(self.pixel_mesh, mesh_transform)
        self.light.light.intensity = light_intensity

        self.pixel_mesh.mesh.is_visible = False
        image, depth = self.renderer.render(self.scene, self.flags_RGBA)
        self.pixel_mesh.mesh.is_visible = True
        image, alpha = split_alpha_channel(image)
        self.mesh.mesh.is_visible = False
        RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
        self.mesh.mesh.is_visible = True
        return image, alpha, RGB_mask

    def render_symmetries(self):
        images, alphas, RGB_masks = [], [], []
        for rotation in self.symmetric_transforms:
            symmetric_transform = to_affine_matrix(rotation, np.zeros(3))
            self.scene.set_pose(self.mesh, symmetric_transform)
            self.scene.set_pose(self.pixel_mesh, symmetric_transform)
            self.pixel_mesh.mesh.is_visible = False
            image, depth = self.renderer.render(self.scene, self.flags_RGBA)
            self.pixel_mesh.mesh.is_visible = True
            image, alpha = split_alpha_channel(image)
            self.mesh.mesh.is_visible = False
            RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
            self.mesh.mesh.is_visible = True
            images.append(image)
            alphas.append(alpha)
            RGB_masks.append(RGB_mask[..., 0:3])
        images = np.concatenate(images, axis=1)
        RGB_masks = np.concatenate(RGB_masks, axis=1)
        images = np.concatenate([images, RGB_masks], axis=0)
        return images
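
A minimal usage sketch, with every value a placeholder: camera_pose is a camera-to-world matrix, the corner vectors presumably bound the poses sampled by sample_affine_transform, and symmetric_transforms lists the object's symmetry rotations (here identity plus a 180-degree rotation about Z):

camera_pose = np.eye(4)
camera_pose[2, 3] = 0.45  # camera 0.45 m in front of the object
symmetries = [np.eye(3), np.diag([-1.0, -1.0, 1.0])]
canonical_scene = CanonicalScene('textured.obj', camera_pose,
                                 min_corner=[-0.05, -0.05, -0.05],
                                 max_corner=[0.05, 0.05, 0.05],
                                 symmetric_transforms=symmetries)
image, alpha, RGB_mask = canonical_scene.render()
symmetry_grid = canonical_scene.render_symmetries()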
Example #19
# We don't need to add the flower in this script

# Flower
flower_mesh = mesh_list.pop(index_1)
# flower_node = scene.add(flower_mesh, pose=flower_pose, name='flower')

# # Center (aka stigma)
center_mesh = mesh_list.pop(index_2)
# center_node = scene.add(center_mesh, pose=flower_pose, name='center')

# # The other sub-meshes of the flowers
# for i in range(len(mesh_list)):
#     scene.add(mesh_list[i], pose=flower_pose, name='flower'+str(i))

# Base square, inf square and walls
base_node = scene.add(base_mesh, pose=base_pose)
inf_base_node = scene.add(inf_base_mesh, pose=inf_base_pose)
for pose in wall_poses:
    scene.add(wall_mesh, pose=pose, name='wall')

center_point = ((flower_pose @ np.concatenate(
    (center_mesh.centroid, np.array([1]))).T).T)[:-1]
center_point = center_point.astype('float32')

# print(((flower_pose @  np.concatenate(center_mesh.centroid, np.array([[1]])).T).T))
# ball_trimesh = trimesh.creation.icosphere(subdivisions=3)
# ball_mesh = Mesh.from_trimesh(ball_trimesh)
# scene.add(ball_mesh, pose=spatial_transform_Matrix(scale=0.005, t_x=center_point[0], t_y=center_point[1], t_z=center_point[2]))

#==============================================================================
# Adding lights
Example #20
from pyrender import Mesh, Scene, Viewer
from io import BytesIO
import numpy as np
import trimesh
import requests
import time
duck_source = "https://github.com/KhronosGroup/glTF-Sample-Models/raw/master/2.0/Duck/glTF-Binary/Duck.glb"

duck = trimesh.load(BytesIO(requests.get(duck_source).content),
                    file_type='glb')
duckmesh = Mesh.from_trimesh(list(duck.geometry.values())[0])
scene = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]))
scene.add(duckmesh)

orig_positions = duckmesh.primitives[0].positions
vwr = Viewer(scene,
             run_in_thread=True,
             render_flags={
                 "cull_faces": False,
                 "all_wireframe": False
             },
             use_raymond_lighting=True,
             bg_color=[255, 0, 255])

while vwr.is_active:
    with vwr.render_lock:
        duckmesh.primitives[0].positions = orig_positions + np.random.randn(
            *orig_positions.shape) * 2
    time.sleep(0.1)
Example #21
    while True:
        r += delta_r
        frame += 1
        t += 50
        if t % 1000 == 0:
            delta_r *= -1

        s = parametric_surface.doughnut(R, r, [50, 20])

        doughnut_trimesh = trimesh.Trimesh(
            vertices=s.flat_vertices,
            faces=s.flat_triangular_mesh_indices,
        )
        # for facet in doughnut_trimesh.facets:
        #     doughnut_trimesh.visual.face_colors[facet] = trimesh.visual.random_color()
        mesh = pyrender.Mesh.from_trimesh(doughnut_trimesh, smooth=False)
        mesh_node = Node(mesh=mesh, translation=np.array([0.0, 0.0, 0.0]))

        scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]),
                      bg_color=[0.0, 0.0, 0.0])
        cam_node = scene.add(cam, pose=cam_pose)
        scene.add_node(mesh_node)
        # v = Viewer(scene)
        color, depth = off_screen_renderer.render(scene)
        cv2.imshow('f', color)
        cv2.waitKey(1)
        end_time = time.time()
        print(frame / (end_time - start_time))

    off_screen_renderer.delete()
Example #22
class HVTPClient:
    def __init__(self):
        """
        """

        self.scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
        self.scene.add(PointLight(color=[0.5, 0.2, 0.3], intensity=2.0))
        self.scene.add(
            SpotLight(color=[0.1, 0.6, 0.3],
                      intensity=2.0,
                      innerConeAngle=0.05,
                      outerConeAngle=0.5))
        self.scene.add(
            DirectionalLight(color=[0.33, 0.33, 0.33], intensity=2.0))

        self.root_node = None
        # self.scene.add_node(self.root_node)

    def create(self, ip, port):
        """
        """

        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect((ip, port))
        self.client_socket.setblocking(True)

    def destroy(self):
        """
        """

        self.client_socket.shutdown(socket.SHUT_RDWR)
        self.client_socket.close()

    def run(self):
        """
        """
        self.viewer = Viewer(self.scene,
                             use_raymond_lighting=False,
                             cull_faces=False,
                             run_in_thread=True)

        threading.Thread(target=self.accept_messages).start()

    def accept_messages(self):
        """
        Polls our socket for changes that have been sent by the server.
        """

        print("Starting message acceptor")

        while True:
            try:
                packet_header = self.client_socket.recv(
                    HVTPConstants.HVTP_HEADER_SIZE_IN_BYTES)

                print("Received!")

                if len(packet_header) == 0:
                    print("Connection closed by server")
                    sys.exit()

                # Read the header

                i = 0
                offs = 4

                hvtp_magic = packet_header[(i *
                                            offs):(offs + i * offs)].decode(
                                                HVTPConstants.HVTP_ENCODING)
                i += 1
                hvtp_version = int.from_bytes(
                    packet_header[(i * offs):(offs + i * offs)],
                    byteorder='big',
                    signed=False)
                i += 1
                hvtp_length = int.from_bytes(
                    packet_header[(i * offs):(offs + i * offs)],
                    byteorder='big',
                    signed=False)
                i += 1
                hvtp_type = packet_header[(i * offs):(offs + i * offs)].decode(
                    HVTPConstants.HVTP_ENCODING)

                print(hvtp_magic)
                print(hvtp_version)
                print(hvtp_length)
                print(hvtp_type)

                # Read the rest of the packet

                payload = self.client_socket.recv(hvtp_length)

                loaded_payload = trimesh.load(BytesIO(payload),
                                              file_type='glb')

                mesh = Mesh.from_trimesh(list(loaded_payload.dump()))

                # Add to the scene

                self.add_to_scene(mesh)

            except IOError as e:
                # This is normal on non-blocking connections: when there is no incoming
                # data, an error is raised. Some operating systems report it as EAGAIN
                # and some as EWOULDBLOCK, so we check for both; either one simply means
                # no incoming data yet, so continue as normal. Any other error code means
                # something actually went wrong.

                if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                    print('Reading error: {}'.format(str(e)))
                    sys.exit()

            except Exception as e:
                # Any other exception - something happened, exit
                print('Reading error: {}'.format(str(e)))
                sys.exit()

    def add_to_scene(self, mesh):
        """
        """

        # Grab the viewer mutex

        self.viewer.render_lock.acquire()

        # Remove anything before and insert the mesh into the scene-graph

        if self.root_node is not None:
            self.scene.remove_node(self.root_node)

        self.root_node = self.scene.add(mesh)

        # Release the viewer mutex

        self.viewer.render_lock.release()
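
A minimal usage sketch, assuming a compatible HVTP server is listening on the placeholder address 127.0.0.1:8088:

client = HVTPClient()
client.create('127.0.0.1', 8088)
client.run()  # opens the viewer and starts the message-acceptor thread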
Example #23
flower_pose = get_flower_pose(flower_type)

#==============================================================================
# Scene creation
#==============================================================================

scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))

#==============================================================================
# Adding objects to the scene
#==============================================================================

# Flower
flower_mesh = mesh_list.pop(index_1)
flower_node = scene.add(flower_mesh, pose=flower_pose, name='flower')

# Center (aka stigma)
center_mesh = mesh_list.pop(index_2)
center_node = scene.add(center_mesh, pose=flower_pose, name='center')

# The other sub-meshes of the flowers
for i in range(len(mesh_list)):
    scene.add(mesh_list[i], pose=flower_pose, name='flower' + str(i))

# Base square, inf square and walls
base_node = scene.add(base_mesh, pose=base_pose)
inf_base_node = scene.add(inf_base_mesh, pose=inf_base_pose)
for pose in wall_poses:
    scene.add(wall_mesh, pose=pose, name='wall')
Example #24
class DictionaryView():
    """Render-ready scene composed of a single object and a single moving camera.

    # Arguments
        filepath: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: List of floats indicating [max_distance, min_distance]
        top_only: Boolean. If True, images are only taken from the top.
        light: List of floats indicating [max_light, min_light]
    """
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.,
                 distance=0.30,
                 top_only=False,
                 light=5.0,
                 theta_steps=10,
                 phi_steps=10):
        self.scene = Scene(bg_color=[0, 0, 0])
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*viewport_size)))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(filepath), smooth=True))
        self.world_origin = self.mesh.mesh.centroid
        self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
        self.distance = distance
        # 0.1 values are to avoid gimbal lock
        theta_max = np.pi / 2.0 if top_only else np.pi
        self.thetas = np.linspace(0.1, theta_max - 0.1, theta_steps)
        self.phis = np.linspace(0.1, 2 * np.pi - 0.1, phi_steps)
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA

    def render(self):
        dictionary_data = []
        for theta_arg, theta in enumerate(self.thetas):
            for phi_arg, phi in enumerate(self.phis):
                x = self.distance * np.sin(theta) * np.cos(phi)
                y = self.distance * np.sin(theta) * np.sin(phi)
                z = self.distance * np.cos(theta)
                matrices = compute_modelview_matrices(np.array([x, z, y]),
                                                      self.world_origin)
                camera_to_world, world_to_camera = matrices
                self.scene.set_pose(self.camera, camera_to_world)
                self.scene.set_pose(self.light, camera_to_world)
                camera_to_world = camera_to_world.flatten()
                world_to_camera = world_to_camera.flatten()
                image, depth = self.renderer.render(self.scene,
                                                    flags=self.RGBA)
                image, alpha = split_alpha_channel(image)
                matrices = np.vstack([world_to_camera, camera_to_world])
                sample = {
                    'image': image,
                    'alpha': alpha,
                    'depth': depth,
                    'matrices': matrices
                }
                dictionary_data.append(sample)
        return dictionary_data
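
A minimal usage sketch, with 'textured.obj' as a placeholder path; compute_modelview_matrices and split_alpha_channel are assumed to come from the same module:

view = DictionaryView('textured.obj', distance=0.3, theta_steps=4, phi_steps=4)
samples = view.render()  # one sample per (theta, phi) pair, 16 in total
print(len(samples), samples[0]['image'].shape)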
Example #25

def location(x, y, z):
    return np.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y],
                     [0.0, 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])


if __name__ == "__main__":

    scene = Scene(ambient_light=np.array([0.1, 0.1, 0.1, 1.0]))

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                         [0.0, 0.0, 1.0, 2.0], [0.0, 0.0, 0.0, 1.0]])
    points_mesh = Mesh.from_points(np.array([[0.0, 0.0, 0.0]]))
    human_node = scene.add(points_mesh)
    plane_size = 0.7
    plane_points = np.array([[plane_size, -0.65, plane_size],
                             [plane_size, -0.65, -plane_size],
                             [-plane_size, -0.65, plane_size],
                             [-plane_size, -0.65, -plane_size]])
    plane_faces = np.array([[0, 1, 2], [2, 1, 3]])
    plane_colors = np.ones((4, 3), dtype=np.float32) / 3.0
    plane_mesh = trimesh.Trimesh(vertices=plane_points,
                                 faces=plane_faces,
                                 vertex_colors=plane_colors)
    plane_mesh = Mesh.from_trimesh(plane_mesh, smooth=False)
    plane_node = scene.add(plane_mesh)
    direc_l = DirectionalLight(color=np.ones(3), intensity=3.0)
    light_node1 = scene.add(direc_l,
                            pose=np.matmul(rotationx(20), rotationy(60)))