Code Example #1
    def render_segmentation_images(self):
        """Renders segmentation masks (modal and amodal) for each object in the state.
        """

        full_depth = self.observation        
        modal_data = np.zeros((full_depth.shape[0], full_depth.shape[1], len(self.obj_keys)), dtype=np.uint8)
        amodal_data = np.zeros((full_depth.shape[0], full_depth.shape[1], len(self.obj_keys)), dtype=np.uint8)
        renderer = OffscreenRenderer(self.camera.width, self.camera.height)
        flags = RenderFlags.DEPTH_ONLY

        # Hide all meshes
        obj_mesh_nodes = [next(iter(self._scene.get_nodes(name=k))) for k in self.obj_keys]
        for mn in self._scene.mesh_nodes:
            mn.mesh.is_visible = False

        for i, node in enumerate(obj_mesh_nodes):
            node.mesh.is_visible = True

            depth = renderer.render(self._scene, flags=flags)
            amodal_mask = depth > 0.0
            modal_mask = np.logical_and(
                (np.abs(depth - full_depth) < 1e-6), full_depth > 0.0
            )
            amodal_data[amodal_mask,i] = np.iinfo(np.uint8).max
            modal_data[modal_mask,i] = np.iinfo(np.uint8).max
            node.mesh.is_visible = False

        renderer.delete()
        
        # Show all meshes
        for mn in self._scene.mesh_nodes:
            mn.mesh.is_visible = True

        return amodal_data, modal_data
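
The modal/amodal distinction above reduces to a per-pixel depth comparison: the amodal mask covers everywhere the isolated object projects, while the modal mask keeps only the pixels where the object is the nearest surface in the full scene. A minimal numpy sketch with made-up depth values (all names here are illustrative):

import numpy as np

full_depth = np.array([[1.0, 2.0, 0.0],
                       [1.5, 0.0, 3.0]])  # depth of the complete scene
obj_depth = np.array([[1.0, 2.5, 0.0],
                      [1.5, 0.0, 3.0]])   # depth of one object rendered alone

amodal = obj_depth > 0.0  # full silhouette, occluded or not
modal = np.logical_and(np.abs(obj_depth - full_depth) < 1e-6, full_depth > 0.0)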
Code Example #2
 def render_camera_image(self, color=True):
     """Render the camera image for the current scene."""
     renderer = OffscreenRenderer(self.camera.width, self.camera.height)
     flags = RenderFlags.NONE if color else RenderFlags.DEPTH_ONLY
     image = renderer.render(self._scene, flags=flags)
     renderer.delete()
     return image
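
Most of these examples build an OffscreenRenderer, render once, and call delete() by hand. A small context-manager wrapper (a sketch, not part of pyrender) guarantees the GL context is freed even when rendering raises:

from contextlib import contextmanager
from pyrender import OffscreenRenderer

@contextmanager
def offscreen_renderer(width, height, point_size=1.0):
    renderer = OffscreenRenderer(width, height, point_size=point_size)
    try:
        yield renderer
    finally:
        renderer.delete()  # always release the OpenGL context

# Usage, assuming a pyrender Scene named scene already exists:
# with offscreen_renderer(640, 480) as r:
#     color, depth = r.render(scene)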
Code Example #3
def render_big_gallery(results_dir,
                       nb=30,
                       pts_colors=[0.5, 0.5, 0.5],
                       draw_text=False):
    '''
    pts_colors: RGB color applied to every point, e.g. [0.5, 0.5, 0.5]
    return: np array of one big image (all renders stacked vertically)
    '''

    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames = get_all_filnames(results_dir, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for _, input_pf in enumerate(input_ply_filenames):

        input_pc = read_ply_xyz(input_pf)

        colors = np.array(pts_colors)
        colors = np.tile(colors, (input_pc.shape[0], 1))

        input_pc_node = add_point_cloud_mesh_to_scene(input_pc, scene, pc_pose,
                                                      colors)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node)

        if draw_text:
            im_here = Image.fromarray(rendered_color)
            d = ImageDraw.Draw(im_here)
            fnt = ImageFont.truetype(
                font='/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
                size=100)
            d.text((0, 0),
                   input_pf.split('/')[-1],
                   fill=(0, 0, 0, 255),
                   font=fnt)
            rendered_color = np.array(im_here)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
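
add_point_cloud_mesh_to_scene is project-specific and not shown; a plausible implementation, assuming it wraps the points in a pyrender point-primitive mesh at the given pose, would be:

from pyrender import Mesh

def add_point_cloud_mesh_to_scene(points, scene, pose, colors):
    # Wrap the (N, 3) point array in a point-primitive mesh and add it.
    point_mesh = Mesh.from_points(points, colors=colors)
    return scene.add(point_mesh, pose=pose)  # returns the node for later removal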
Code Example #4
 def __init__(self, render_scene=None, viewport_size=None) -> None:
     viewport_width, viewport_height = viewport_size if viewport_size is not None else (
         640, 480)
     PyRenderBase.__init__(self, render_scene=render_scene)
     OffscreenRenderer.__init__(self,
                                viewport_width=viewport_width,
                                viewport_height=viewport_height,
                                point_size=1.0)
Code Example #5
def main():
    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_candidates.append((red, green, blue))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    rad_step = math.pi / 18
    total_frames = int(math.pi * 2 / rad_step)
    camera_distance = 2

    fig = plt.figure(figsize=(3, 3))
    ims = []

    for num_cubes in range(1, 8):
        scene = build_scene(num_cubes, color_candidates)[0]
        camera = OrthographicCamera(xmag=0.9, ymag=0.9)
        camera_node = Node(camera=camera)
        scene.add_node(camera_node)

        current_rad = 0

        for _ in range(total_frames):
            camera_position = np.array(
                (math.sin(current_rad), math.sin(math.pi / 6),
                 math.cos(current_rad)))
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            im = plt.imshow(image, interpolation="none", animated=True)
            ims.append([im])

            current_rad += rad_step

    renderer.delete()

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1 / 24,
                                    blit=True,
                                    repeat_delay=0)
    ani.save("shepard_metzler.gif", writer="imagemagick")
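
compute_yaw_and_pitch comes from the surrounding project; one plausible implementation, assuming yaw rotates about the y (up) axis and pitch is the elevation of the position vector (the real helper's sign conventions may differ):

import math
import numpy as np

def compute_yaw_and_pitch(position):
    x, y, z = position
    yaw = math.atan2(x, z)  # angle in the x-z ground plane
    pitch = -math.asin(y / np.linalg.norm(position))  # elevation angle
    return yaw, pitch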
Code Example #6
File: pixel_mask_renderer.py  Project: oarriaga/paz
class PixelMaskRenderer():
    """Render-ready scene composed of a single object and a single moving camera.

    # Arguments
        path_OBJ: String containing the path to an OBJ file.
        viewport_size: List, specifying [W, H] of the rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: List of floats indicating [min_distance, max_distance]
        light: List of floats indicating [min_light, max_light]
        top_only: Boolean. If True, images are only taken from the top.
        roll: Float, to sample [-roll, roll] rolls of the Z OpenGL camera axis.
        shift: Float, to sample [-shift, shift] to move in X, Y OpenGL axes.
    """
    def __init__(self, path_OBJ, viewport_size=(128, 128), y_fov=3.14159 / 4.0,
                 distance=[0.3, 0.5], light=[0.5, 30], top_only=False,
                 roll=None, shift=None):
        self.distance, self.roll, self.shift = distance, roll, shift
        self.light_intensity, self.top_only = light, top_only
        self._build_scene(path_OBJ, viewport_size, light, y_fov)
        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])
        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT
        self.epsilon = 0.01

    def _build_scene(self, path, size, light, y_fov):
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self.scene.add(
            DirectionalLight([1.0, 1.0, 1.0], np.mean(light)))
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*size)))
        self.pixel_mesh = self.scene.add(color_object(path))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path), smooth=True))
        self.world_origin = self.mesh.mesh.centroid

    def _sample_parameters(self):
        distance = sample_uniformly(self.distance)
        camera_origin = sample_point_in_sphere(distance, self.top_only)
        camera_origin = random_perturbation(camera_origin, self.epsilon)
        light_intensity = sample_uniformly(self.light_intensity)
        return camera_origin, light_intensity

    def render(self):
        camera_origin, intensity = self._sample_parameters()
        camera_to_world, world_to_camera = compute_modelview_matrices(
            camera_origin, self.world_origin, self.roll, self.shift)
        self.light.light.intensity = intensity
        self.scene.set_pose(self.camera, camera_to_world)
        self.scene.set_pose(self.light, camera_to_world)
        self.pixel_mesh.mesh.is_visible = False
        image, depth = self.renderer.render(self.scene, self.flags_RGBA)
        self.pixel_mesh.mesh.is_visible = True
        image, alpha = split_alpha_channel(image)
        self.mesh.mesh.is_visible = False
        RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
        self.mesh.mesh.is_visible = True
        return image, alpha, RGB_mask
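
Assuming the paz helpers it imports are available, usage would look roughly like this (the OBJ path and loop count are placeholders):

renderer = PixelMaskRenderer('path/to/model.obj', viewport_size=(128, 128))
for _ in range(4):
    # Each call resamples the camera position and light intensity.
    image, alpha, RGB_mask = renderer.render()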
Code Example #7
def render_big_gallery_overlay(dir_1,
                               dir_2,
                               pts_color_1=[0.5, 0.5, 0.5],
                               pts_color_2=[0.5, 0.5, 0.5],
                               nb=30):
    '''
    return np array of a big image
    '''
    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames_1 = get_all_filnames(dir_1, nb)
    input_ply_filenames_2 = get_all_filnames(dir_2, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for idx, input_pf in enumerate(input_ply_filenames_1):

        input_pc_1 = read_ply_xyz(input_pf)
        input_pc_2 = read_ply_xyz(input_ply_filenames_2[idx])

        color_1 = np.array(pts_color_1)
        color_1 = np.tile(color_1, (input_pc_1.shape[0], 1))

        color_2 = np.array(pts_color_2)
        color_2 = np.tile(color_2, (input_pc_2.shape[0], 1))

        input_pc_node_1 = add_point_cloud_mesh_to_scene(
            input_pc_1, scene, pc_pose, color_1)
        input_pc_node_2 = add_point_cloud_mesh_to_scene(
            input_pc_2, scene, pc_pose, color_2)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node_1)
        scene.remove_node(input_pc_node_2)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
Code Example #8
File: scene.py  Project: zuoguoqing/paz
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0,
                 distance=0.3,
                 light=5.0,
                 top_only=True,
                 scale=10.0,
                 roll=None,
                 shift=None):

        self._build_scene(filepath, viewport_size, light, y_fov)
        self.distance, self.roll = distance, roll
        self.top_only, self.shift, self.scale = top_only, shift, scale
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA
        self.epsilon = 0.01
Code Example #9
def render_sensor(
    point_set,
    render_sensor_path="/Users/macbookpro15/Documents/mujoco_hand_exps/data/sensor_render"
):
    """
    point_set: collection of sensor points for all timesteps
    """
    # First take one of the points and subtract the center from it
    # (the center is the 0-th of the 220 points), then form the mesh from this
    if not os.path.exists(render_sensor_path):
        os.makedirs(render_sensor_path)
    time_steps = len(point_set)
    for t in range(time_steps):
        sensor = trimesh.load_mesh(
            f'../data/mesh_dir/mesh_{t}_out/mc_mesh_out.ply')
        sensor_mesh = Mesh.from_trimesh(sensor)
        # Light for the scene
        direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
        spot_l = SpotLight(color=np.ones(3),
                           intensity=10.0,
                           innerConeAngle=np.pi / 16,
                           outerConeAngle=np.pi / 6)
        point_l = PointLight(color=np.ones(3), intensity=10.0)

        # add camera to the scene
        cam = PerspectiveCamera(yfov=(np.pi / 3.0))
        cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                             [0.0, np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

        # create the scene
        scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
        point_mesh_node = scene.add(sensor_mesh)
        direc_l_node = scene.add(direc_l, pose=cam_pose)
        spot_l_node = scene.add(spot_l, pose=cam_pose)
        cam_node = scene.add(cam, pose=cam_pose)
        print('rendering the scene offline')
        r = OffscreenRenderer(viewport_width=640, viewport_height=480)
        color, depth = r.render(scene)
        r.delete()

        plt.figure()
        plt.imshow(color)
        plt.savefig(f'{render_sensor_path}/img_{t}.jpg')
Code Example #10
def render_mesh(mesh, h=256, w=256):
    """https://pyrender.readthedocs.io/en/latest/examples/quickstart.html"""
    mesh = pyrender.Mesh.from_trimesh(mesh.trimesh())
    scene = Scene()
    scene.add(mesh)

    # z-axis away from the scene, x-axis right, y-axis up
    pose = np.eye(4)
    pose[2, 3] = 250

    # add camera
    camera = PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    scene.add(camera, pose=pose)

    # add light
    # light = DirectionalLight(color=np.ones(3), intensity=5.0)
    light = PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
    scene.add(light, pose=pose)

    # OffscreenRenderer takes (viewport_width, viewport_height)
    r = OffscreenRenderer(w, h)
    color, depth = r.render(scene)
    r.delete()  # free the GL context
    return color
Code Example #11
File: scenes.py  Project: zuoguoqing/paz
 def __init__(self,
              filepath,
              viewport_size=(128, 128),
              y_fov=3.14159 / 4.,
              distance=0.30,
              top_only=False,
              light=5.0,
              theta_steps=10,
              phi_steps=10):
     self.scene = Scene(bg_color=[0, 0, 0])
     self.camera = self.scene.add(
         PerspectiveCamera(y_fov, aspectRatio=np.divide(*viewport_size)))
     self.mesh = self.scene.add(
         Mesh.from_trimesh(trimesh.load(filepath), smooth=True))
     self.world_origin = self.mesh.mesh.centroid
     self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
     self.distance = distance
     # 0.1 values are to avoid gimbal lock
     theta_max = np.pi / 2.0 if top_only else np.pi
     self.thetas = np.linspace(0.1, theta_max - 0.1, theta_steps)
     self.phis = np.linspace(0.1, 2 * np.pi - 0.1, phi_steps)
     self.renderer = OffscreenRenderer(*viewport_size)
     self.RGBA = RenderFlags.RGBA
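
The thetas/phis grids define viewpoints on a sphere around the object; the spherical-to-Cartesian conversion such a grid typically feeds into looks like this (illustrative; the paz scene class has its own camera-placement code):

import numpy as np

def spherical_to_cartesian(distance, theta, phi):
    x = distance * np.sin(theta) * np.cos(phi)
    y = distance * np.sin(theta) * np.sin(phi)
    z = distance * np.cos(theta)
    return np.array([x, y, z])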
Code Example #12
    spot_l_node = scene.add(spot_l, pose=cam_pose)

    # ==============================================================================
    # Using the viewer with a default camera
    # ==============================================================================

    # needs a screen
    # v = Viewer(scene, shadows=True)

    # ==============================================================================
    # Using the viewer with a pre-specified camera
    # ==============================================================================

    # needs a screen
    # cam_node = scene.add(cam, pose=cam_pose)
    # v = Viewer(scene, central_node=drill_node)

    # ==============================================================================
    # Rendering offscreen from that camera
    # ==============================================================================

    cam_node = scene.add(cam, pose=cam_pose)
    r = OffscreenRenderer(viewport_width=640 * 2, viewport_height=480 * 2)
    color, depth = r.render(scene)
    r.delete()

    import matplotlib.pyplot as plt

    plt.figure(figsize=(20, 20))
    plt.imshow(color)
    plt.show()
Code Example #13
class CanonicalScene():
    def __init__(self, path_OBJ, camera_pose, min_corner, max_corner,
                 symmetric_transforms, viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0, light_intensity=[0.5, 30]):
        self.light_intensity = light_intensity
        self.symmetric_transforms = symmetric_transforms
        self.min_corner, self.max_corner = min_corner, max_corner
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self._build_light(light_intensity, camera_pose)
        self.camera = self._build_camera(y_fov, viewport_size, camera_pose)
        self.pixel_mesh = self.scene.add(color_object(path_OBJ))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path_OBJ), smooth=True))

        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])

        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT

    def _build_light(self, light, pose):
        directional_light = DirectionalLight([1.0, 1.0, 1.0], np.mean(light))
        directional_light = self.scene.add(directional_light, pose=pose)
        return directional_light

    def _build_camera(self, y_fov, viewport_size, pose):
        aspect_ratio = np.divide(*viewport_size)
        camera = PerspectiveCamera(y_fov, aspectRatio=aspect_ratio)
        camera = self.scene.add(camera, pose=pose)
        return camera

    def _sample_parameters(self, min_corner, max_corner):
        mesh_transform = sample_affine_transform(min_corner, max_corner)
        light_intensity = sample_uniformly(self.light_intensity)
        return mesh_transform, light_intensity

    def render(self):
        mesh_transform, light_intensity = self._sample_parameters(
            self.min_corner, self.max_corner)
        mesh_rotation = mesh_transform[0:3, 0:3]
        canonical_rotation = calculate_canonical_rotation(
            mesh_rotation, self.symmetric_transforms)
        # mesh_rotation[0:3, 0:3] = canonical_rotation
        canonical_rotation = np.dot(mesh_rotation, canonical_rotation)
        mesh_rotation[0:3, 0:3] = canonical_rotation
        self.scene.set_pose(self.mesh, mesh_transform)
        self.scene.set_pose(self.pixel_mesh, mesh_transform)
        self.light.light.intensity = light_intensity

        self.pixel_mesh.mesh.is_visible = False
        image, depth = self.renderer.render(self.scene, self.flags_RGBA)
        self.pixel_mesh.mesh.is_visible = True
        image, alpha = split_alpha_channel(image)
        self.mesh.mesh.is_visible = False
        RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
        self.mesh.mesh.is_visible = True
        return image, alpha, RGB_mask

    def render_symmetries(self):
        images, alphas, RGB_masks = [], [], []
        for rotation in self.symmetric_transforms:
            symmetric_transform = to_affine_matrix(rotation, np.zeros(3))
            self.scene.set_pose(self.mesh, symmetric_transform)
            self.scene.set_pose(self.pixel_mesh, symmetric_transform)
            self.pixel_mesh.mesh.is_visible = False
            image, depth = self.renderer.render(self.scene, self.flags_RGBA)
            self.pixel_mesh.mesh.is_visible = True
            image, alpha = split_alpha_channel(image)
            self.mesh.mesh.is_visible = False
            RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
            self.mesh.mesh.is_visible = True
            images.append(image)
            alphas.append(alpha)
            RGB_masks.append(RGB_mask[..., 0:3])
        images = np.concatenate(images, axis=1)
        RGB_masks = np.concatenate(RGB_masks, axis=1)
        images = np.concatenate([images, RGB_masks], axis=0)
        return images
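
to_affine_matrix is not shown here; a plausible version, assuming it packs a 3x3 rotation and a translation vector into a 4x4 homogeneous transform (the real paz helper may differ):

import numpy as np

def to_affine_matrix(rotation, translation):
    affine = np.eye(4)
    affine[0:3, 0:3] = rotation   # 3x3 rotation block
    affine[0:3, 3] = translation  # translation column
    return affine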
Code Example #14
class Ui_MainWindow(QtWidgets.QMainWindow, Ui_MainWindow_Base):
    def __init__(self, args):
        super(self.__class__, self).__init__()

        self.save_params_path = args.save_model_params if args.save_model_params is not None\
            else args.fit_result

        # Result retrieve
        with open(args.fit_result, 'rb') as f:
            fitting_results = pickle.load(f, encoding='latin1')
        self.cam_rot = fitting_results['camera_rotation'][0]
        self.cam_trans = fitting_results['camera_translation'][0]

        # Background image setup
        self.base_img = load_image(
            args.image_path) if args.image_path else None
        if self.base_img is not None:
            self.canvas_size = np.array(self.base_img.shape[:2][::-1],
                                        dtype=np.float32)
            if self.canvas_size[1] > MAX_CANVAS_HEIGHT:
                self.canvas_size *= (MAX_CANVAS_HEIGHT /
                                     float(self.canvas_size[1]))
            if self.canvas_size[0] > MAX_CANVAS_WIDTH:
                self.canvas_size *= (MAX_CANVAS_WIDTH /
                                     float(self.canvas_size[0]))
            self.canvas_size = tuple(self.canvas_size.astype(int))
        else:
            self.canvas_size = None

        # Model setup
        self.model = self._init_model(fitting_results, args.gender)

        # Scene setup
        self.scene = Scene(bg_color=[1.0, 1.0, 1.0])
        self.material = MetallicRoughnessMaterial(metallicFactor=0.0,
                                                  alphaMode='BLEND',
                                                  baseColorFactor=(5.0, 5.0,
                                                                   5.0, 1.0))
        im_size = self.canvas_size if self.canvas_size is not None else (
            MAX_CANVAS_WIDTH, MAX_CANVAS_HEIGHT)
        # self.camera = PerspectiveCamera(yfov=np.pi/3.0,
        #                                 aspectRatio=float(self.canvas_size[0])/self.canvas_size[1])
        self.camera = IntrinsicsCamera(fx=5000.,
                                       fy=5000.,
                                       cx=im_size[0] / 2,
                                       cy=im_size[1] / 2)
        # self.camera = CustomIntrinsicsCamera(fx=5000., fy=5000.,
        #                                cx=im_size[0]/2, cy=im_size[1]/2)

        self.camera_params = {
            't': self.cam_trans,
            'rt': cv2.Rodrigues(self.cam_rot)[0],
            'c': np.array(im_size).astype(float) / 2,
            'f': np.array((im_size[0], ) * 2) * 1.0,
            'k': np.zeros(5),
            'frustum': {
                'near': self.camera.znear,
                'far': self.camera.zfar,
                'width': im_size[0],
                'height': im_size[1]
            }
        }
        camera_pos = np.eye(4)
        # print(self.cam_trans)
        # print(self.camera.get_projection_matrix(im_size[0], im_size[1]))
        # print(self.camera.cx, self.camera.cy)
        camera_pos[:3, :3] = self.cam_rot
        self.cam_trans[0] *= -1
        camera_pos[:3, 3] = self.cam_trans
        slight = SpotLight(color=[1.0, 1.0, 1.0], intensity=10.0)
        slight_pos = np.eye(4)
        slight_pos[:3, 3] = np.array([0, 0, 5])
        self.scene.add(self.camera, pose=camera_pos)
        self.scene.add(slight, pose=slight_pos)
        self.renderer = OffscreenRenderer(viewport_width=im_size[0],
                                          viewport_height=im_size[1],
                                          point_size=1.0)

        # Rest setup
        self.setupUi(self)
        self._moving = False
        self._rotating = False
        self._mouse_begin_pos = None
        self._loaded_gender = None
        self._update_canvas = False

        self.camera_widget = Ui_CameraWidget(self.camera,
                                             self.camera_params['frustum'],
                                             self.draw, self.camera_params)
        self._bind_actions()
        self._update_canvas = True

    def _init_model(self, params, gender):
        model = SMPLX(DATA_FOLDER,
                      gender=gender,
                      use_pca=False,
                      body_pose=params['body_pose'],
                      betas=params['betas'],
                      left_hand_pose=params['left_hand_pose'],
                      right_hand_pose=params['right_hand_pose'],
                      leye_pose=params['leye_pose'],
                      reye_pose=params['reye_pose'],
                      expression=params['expression'])

        return model

    def _bind_actions(self):
        self.btn_camera.clicked.connect(lambda: self._show_camera_widget())

        for i, val in enumerate(self.model.betas[0]):
            self.__dict__['shape_{}'.format(i)].setProperty(
                'value', int(10 * val + 50))

        for key, shape in self._shapes():
            shape.valueChanged[int].connect(
                lambda val, k=key: self._update_shape(k, val))
        self.set_body_layout()

        self.pos_0.valueChanged[float].connect(
            lambda val: self._update_position(0, val))
        self.pos_1.valueChanged[float].connect(
            lambda val: self._update_position(1, val))
        self.pos_2.valueChanged[float].connect(
            lambda val: self._update_position(2, val))

        self.reset_pose.clicked.connect(self._reset_pose)
        self.reset_shape.clicked.connect(self._reset_shape)
        self.reset_postion.clicked.connect(self._reset_position)

        self.to_body.clicked.connect(self.set_body_layout)
        self.to_left_hand.clicked.connect(self.set_left_layout)
        self.to_right_hand.clicked.connect(self.set_right_layout)
        self.save_params.clicked.connect(self._save_params)

        self.canvas.wheelEvent = self._zoom
        self.canvas.mousePressEvent = self._mouse_begin
        self.canvas.mouseMoveEvent = self._move
        self.canvas.mouseReleaseEvent = self._mouse_end

        self.view_joints.triggered.connect(self.draw)
        self.view_joint_ids.triggered.connect(self.draw)
        self.view_bones.triggered.connect(self.draw)

        self.pos_0.setValue(self.model.transl[0][0])
        self.pos_1.setValue(self.model.transl[0][1])
        self.pos_2.setValue(self.model.transl[0][2])

    def set_body_layout(self):
        for key, pose in self._poses():
            try:
                pose.valueChanged[int].disconnect()
            except Exception as e:
                pass

        for i, val in enumerate(self.model.global_orient.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i)].setProperty('value', value)

        for i, val in enumerate(self.model.body_pose.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i + 3)].setProperty('value', value)

        for i, val in enumerate(self.model.jaw_pose.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i + 66)].setProperty('value', value)

        for key, pose in self._poses():
            pose.valueChanged[int].connect(
                lambda val, k=key: self._update_pose_body(k, val))

        self.label_3.setText(_translate("MainWindow", "Global Orientation"))
        self.label_4.setText(_translate("MainWindow", "Hip L"))
        self.label_5.setText(_translate("MainWindow", "Hip R"))
        self.label_6.setText(_translate("MainWindow", "Lower Spine"))
        self.label_7.setText(_translate("MainWindow", "Knee L"))
        self.label_8.setText(_translate("MainWindow", "Knee R"))
        self.label_9.setText(_translate("MainWindow", "Mid Spine"))
        self.label_10.setText(_translate("MainWindow", "Ankle L"))
        self.label_11.setText(_translate("MainWindow", "Ankle R"))
        self.label_12.setText(_translate("MainWindow", "Upper Spine"))
        self.label_13.setText(_translate("MainWindow", "Toes L"))
        self.label_14.setText(_translate("MainWindow", "Toes R"))
        self.label_15.setText(_translate("MainWindow", "Neck"))
        self.label_16.setText(_translate("MainWindow", "Clavicle L"))
        self.label_17.setText(_translate("MainWindow", "Clavicle R"))
        self.label_18.setText(_translate("MainWindow", "Head"))
        self.label_19.setText(_translate("MainWindow", "Shoulder L"))
        self.label_20.setText(_translate("MainWindow", "Shoulder R"))
        self.label_21.setText(_translate("MainWindow", "Elbow L"))
        self.label_22.setText(_translate("MainWindow", "Elbow R"))
        self.label_23.setText(_translate("MainWindow", "Wrist L"))
        self.label_24.setText(_translate("MainWindow", "Wrist R"))
        self.label_25.setText(_translate("MainWindow", "Jaw"))
        self.label_26.setText(_translate("MainWindow", "-"))

    def set_left_layout(self):
        for key, pose in self._poses():
            try:
                pose.valueChanged[int].disconnect()
            except Exception as e:
                pass
        for i, val in enumerate(self.model.left_hand_pose.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i)].setProperty('value', value)

        for i, val in enumerate(self.model.leye_pose.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i + 45)].setProperty('value', value)

        for key, pose in self._poses():
            pose.valueChanged[int].connect(
                lambda val, k=key: self._update_pose_left(k, val))

        self.label_3.setText(_translate("MainWindow", "Index1 L"))
        self.label_4.setText(_translate("MainWindow", "Index2 L"))
        self.label_5.setText(_translate("MainWindow", "Index3 L"))
        self.label_6.setText(_translate("MainWindow", "Middle1 L"))
        self.label_7.setText(_translate("MainWindow", "Middle2 L"))
        self.label_8.setText(_translate("MainWindow", "Middle3 L"))
        self.label_9.setText(_translate("MainWindow", "Little1 L"))
        self.label_10.setText(_translate("MainWindow", "Little2 L"))
        self.label_11.setText(_translate("MainWindow", "Little3 L"))
        self.label_12.setText(_translate("MainWindow", "Ring1 L"))
        self.label_13.setText(_translate("MainWindow", "Ring2 L"))
        self.label_14.setText(_translate("MainWindow", "Ring3 L"))
        self.label_15.setText(_translate("MainWindow", "Thumb1 L"))
        self.label_16.setText(_translate("MainWindow", "Thumb2 L"))
        self.label_17.setText(_translate("MainWindow", "Thumb3 L"))
        self.label_18.setText(_translate("MainWindow", "Eye Left"))
        self.label_19.setText(_translate("MainWindow", "-"))
        self.label_20.setText(_translate("MainWindow", "-"))
        self.label_21.setText(_translate("MainWindow", "-"))
        self.label_22.setText(_translate("MainWindow", "-"))
        self.label_23.setText(_translate("MainWindow", "-"))
        self.label_24.setText(_translate("MainWindow", "-"))
        self.label_25.setText(_translate("MainWindow", "-"))
        self.label_26.setText(_translate("MainWindow", "-"))

    def set_right_layout(self):
        for key, pose in self._poses():
            try:
                pose.valueChanged[int].disconnect()
            except Exception as e:
                pass
        for i, val in enumerate(
                self.model.right_hand_pose.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i)].setProperty('value', value)

        for i, val in enumerate(self.model.reye_pose.detach().numpy()[0]):
            value = int(50 * val / np.pi + 50)
            self.__dict__['pose_{}'.format(i + 45)].setProperty('value', value)

        for key, pose in self._poses():
            pose.valueChanged[int].connect(
                lambda val, k=key: self._update_pose_right(k, val))

        self.label_3.setText(_translate("MainWindow", "Index1 R"))
        self.label_4.setText(_translate("MainWindow", "Index2 R"))
        self.label_5.setText(_translate("MainWindow", "Index3 R"))
        self.label_6.setText(_translate("MainWindow", "Middle1 R"))
        self.label_7.setText(_translate("MainWindow", "Middle2 R"))
        self.label_8.setText(_translate("MainWindow", "Middle3 R"))
        self.label_9.setText(_translate("MainWindow", "Little1 R"))
        self.label_10.setText(_translate("MainWindow", "Little2 R"))
        self.label_11.setText(_translate("MainWindow", "Little3 R"))
        self.label_12.setText(_translate("MainWindow", "Ring1 R"))
        self.label_13.setText(_translate("MainWindow", "Ring2 R"))
        self.label_14.setText(_translate("MainWindow", "Ring3 R"))
        self.label_15.setText(_translate("MainWindow", "Thumb1 R"))
        self.label_16.setText(_translate("MainWindow", "Thumb2 R"))
        self.label_17.setText(_translate("MainWindow", "Thumb3 R"))
        self.label_18.setText(_translate("MainWindow", "Eye Right"))
        self.label_19.setText(_translate("MainWindow", "-"))
        self.label_20.setText(_translate("MainWindow", "-"))
        self.label_21.setText(_translate("MainWindow", "-"))
        self.label_22.setText(_translate("MainWindow", "-"))
        self.label_23.setText(_translate("MainWindow", "-"))
        self.label_24.setText(_translate("MainWindow", "-"))
        self.label_25.setText(_translate("MainWindow", "-"))
        self.label_26.setText(_translate("MainWindow", "-"))

    def _save_params(self):
        params = {
            'camera_rotation': np.expand_dims(self.cam_rot, axis=0),
            'camera_translation': np.expand_dims(self.cam_trans, axis=0),
            'body_pose': self.model.body_pose.detach().numpy(),
            'left_hand_pose': self.model.left_hand_pose.detach().numpy(),
            'right_hand_pose': self.model.right_hand_pose.detach().numpy(),
            'jaw_pose': self.model.jaw_pose.detach().numpy(),
            'reye_pose': self.model.reye_pose.detach().numpy(),
            'leye_pose': self.model.leye_pose.detach().numpy(),
            'expression': self.model.expression.detach().numpy(),
            'global_orient': self.model.global_orient.detach().numpy(),
            'betas': self.model.betas.detach().numpy()
        }
        with open(self.save_params_path, 'wb') as f:
            pickle.dump(params, f, protocol=2)

    def showEvent(self, event):
        self._init_camera()
        super(self.__class__, self).showEvent(event)

    def resizeEvent(self, event):
        self._init_camera()
        super(self.__class__, self).resizeEvent(event)

    def closeEvent(self, event):
        self.camera_widget.close()
        super(self.__class__, self).closeEvent(event)

    def draw(self):
        if self._update_canvas:
            img = self._overlay_image()
            if self.canvas_size is not None:
                img = cv2.resize(img, self.canvas_size)
            self.canvas.setScaledContents(False)
            self.canvas.setPixmap(self._to_pixmap(img))

    def _init_camera(self):
        self.draw()

    def _overlay_image(self):
        for node in self.scene.get_nodes():
            if node.name == 'body_mesh':
                self.scene.remove_node(node)
                break
        rectified_orient = self.model.global_orient.detach().clone()
        rectified_orient[0][0] += np.pi
        model_obj = self.model(global_orient=rectified_orient)
        out_mesh = Trimesh(model_obj.vertices.detach().numpy().squeeze(),
                           self.model.faces.squeeze())
        rot = trimesh.transformations.rotation_matrix(np.radians(180),
                                                      [1, 0, 0])
        out_mesh.apply_transform(rot)
        mesh = Mesh.from_trimesh(out_mesh, material=self.material)

        self.scene.add(mesh, name='body_mesh')
        rendered, _ = self.renderer.render(self.scene)

        if self.base_img is not None:
            img = self.base_img.copy()
        else:
            img = np.uint8(rendered)
        img_mask = (rendered != 255.0).sum(axis=2) == 3

        img[img_mask] = rendered[img_mask].astype(np.uint8)
        img = np.concatenate(
            (img, img_mask[:, :, np.newaxis].astype(np.uint8)), axis=2)
        return img

    def _zoom(self, event):
        delta = -event.angleDelta().y() / 1200.0
        self.camera_widget.pos_2.setValue(self.camera_widget.pos_2.value() +
                                          delta)

    def _mouse_begin(self, event):
        if event.button() == 1:
            self._moving = True
        elif event.button() == 2:
            self._rotating = True
        self._mouse_begin_pos = event.pos()

    def _mouse_end(self, event):
        self._moving = False
        self._rotating = False

    def _move(self, event):
        if self._moving:
            delta = event.pos() - self._mouse_begin_pos
            self.camera_widget.pos_0.setValue(
                self.camera_widget.pos_0.value() + delta.x() / 500.)
            self.camera_widget.pos_1.setValue(
                self.camera_widget.pos_1.value() + delta.y() / 500.)
            self._mouse_begin_pos = event.pos()
        elif self._rotating:
            delta = event.pos() - self._mouse_begin_pos
            self.camera_widget.rot_0.setValue(
                self.camera_widget.rot_0.value() + delta.y() / 300.)
            self.camera_widget.rot_1.setValue(
                self.camera_widget.rot_1.value() - delta.x() / 300.)
            self._mouse_begin_pos = event.pos()

    def _show_camera_widget(self):
        self.camera_widget.show()
        self.camera_widget.raise_()

    def _update_shape(self, id, val):
        val = (val - 50) / 50.0 * 5.0
        self.model.betas[0][id] = val
        self.draw()

    def _reset_shape(self):
        self._update_canvas = False
        for key, shape in self._shapes():
            # value = self.model_params['betas'][key] / 5.0 * 50.0 + 50
            shape.setValue(50)
        self._update_canvas = True
        self.draw()

    def _update_pose_body(self, id, val):
        val = (val - 50) / 50.0 * np.pi

        if id < 3:
            self.model.global_orient[0, id] = val
        elif id > 65:
            if id < 69:
                self.model.jaw_pose[0, id - 66] = val
        else:
            self.model.body_pose[0, id - 3] = val
        self.draw()

    def _update_pose_left(self, id, val):
        val = (val - 50) / 50.0 * np.pi

        if id < 45:
            self.model.left_hand_pose[0][id] = val
        elif id > 44 and id < 48:
            self.model.leye_pose[0][id - 45] = val
        self.draw()

    def _update_pose_right(self, id, val):
        val = (val - 50) / 50.0 * np.pi

        if id < 45:
            self.model.right_hand_pose[0][id] = val
        elif id > 44 and id < 48:
            self.model.reye_pose[0][id - 45] = val
        self.draw()

    def _reset_pose(self):
        self._update_canvas = False
        for key, pose in self._poses():
            pose.setValue(50)
        self._update_canvas = True
        self.draw()

    def _update_position(self, id, val):
        self.model.transl[0][id] = val
        self.draw()

    def _reset_position(self):
        self._update_canvas = False
        self.pos_0.setValue(self.model.transl[0][0])
        self.pos_1.setValue(self.model.transl[0][1])
        self.pos_2.setValue(self.model.transl[0][2])
        self._update_canvas = True
        self.draw()

    def _poses(self):
        return enumerate(
            [self.__dict__['pose_{}'.format(i)] for i in range(72)])

    def _shapes(self):
        return enumerate(
            [self.__dict__['shape_{}'.format(i)] for i in range(10)])

    @staticmethod
    def _to_pixmap(im):
        if im.dtype == np.float32 or im.dtype == np.float64:
            im = np.uint8(im * 255)

        if len(im.shape) < 3 or im.shape[-1] == 1:
            im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
        else:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        qimg = QtGui.QImage(im, im.shape[1], im.shape[0], im.strides[0],
                            QtGui.QImage.Format_RGB888)

        return QtGui.QPixmap(qimg)
Code Example #15
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < args.initial_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 4.5
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
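
The file-scanning logic above lets an interrupted run resume: it finds the highest existing archive number in range and skips the scenes those files already hold. A worked example with hypothetical values:

# initial_file_number = 1, total_scenes = 1000, num_scenes_per_file = 100
# last_file_number = 1 + 1000 // 100 - 1 = 10
# existing files 1.h5 .. 4.h5  ->  initial_file_number becomes 5
# total_scenes_to_render = 1000 - 100 * (5 - 1) = 600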
Code Example #16
File: example.py  Project: KailinLi/pyrender
# ==============================================================================
# Using the viewer with a default camera
# ==============================================================================

v = Viewer(scene, shadows=True)

# ==============================================================================
# Using the viewer with a pre-specified camera
# ==============================================================================
cam_node = scene.add(cam, pose=cam_pose)
v = Viewer(scene, central_node=drill_node)

# ==============================================================================
# Rendering offscreen from that camera
# ==============================================================================

r = OffscreenRenderer(viewport_width=640 * 2, viewport_height=480 * 2)
color, depth = r.render(scene)

import matplotlib.pyplot as plt

plt.figure()
plt.imshow(color)
plt.show()

# ==============================================================================
# Segmask rendering
# ==============================================================================

nm = {node: 20 * (i + 1) for i, node in enumerate(scene.mesh_nodes)}
seg = r.render(scene, RenderFlags.SEG, nm)[0]
plt.figure()
plt.imshow(seg)
plt.show()
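
The third argument to r.render is the seg node map assigning each mesh node a color value; a boolean mask for a single node can then be recovered by an equality test (a sketch, assuming scalar map values as above):

node = next(iter(scene.mesh_nodes))
mask = seg[..., 0] == nm[node]  # pixels belonging to that node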
Code Example #17
def main():
    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    plt.tight_layout()
    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_dice(scene,
               mnist_images,
               discrete_position=args.discrete_position,
               rotate_dice=args.rotate_dice)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1 / 24,
                                    blit=True,
                                    repeat_delay=0)
    filename = "mnist_dice"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_dice:
        filename += "_rotate_dice"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
Code Example #18
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 3
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-3, 3, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING  # flag from the modified pyrender these scripts appear to use
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
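The helpers compute_yaw_and_pitch and genearte_camera_quaternion (the misspelling comes from the source) are defined outside these snippets. A minimal sketch of what they plausibly compute, assuming a y-up world and pyrender's (x, y, z, w) quaternion order for Node.rotation; the exact axis and sign conventions are assumptions:

import math
import numpy as np

def compute_yaw_and_pitch(direction):
    # Yaw around the y (up) axis, pitch above the horizontal plane.
    norm = np.linalg.norm(direction)
    yaw = math.atan2(direction[0], direction[2])
    pitch = math.asin(direction[1] / norm)
    return yaw, pitch

def genearte_camera_quaternion(yaw, pitch):
    # Compose pitch (about x) and yaw (about y) into one quaternion.
    qy = np.array([0.0, math.sin(yaw / 2), 0.0, math.cos(yaw / 2)])
    qx = np.array([math.sin(pitch / 2), 0.0, 0.0, math.cos(pitch / 2)])
    x1, y1, z1, w1 = qy
    x2, y2, z2, w2 = qx
    return np.array([  # Hamilton product qy * qx in (x, y, z, w) order
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
    ])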
コード例 #22
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    # Resume support: advance past archive files that already exist
    if os.path.isdir(args.output_directory):
        for name in os.listdir(args.output_directory):
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render
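    # Worked example of the arithmetic above: with initial_file_number=1,
    # total_scenes=2000 and num_scenes_per_file=500, last_file_number is 4;
    # if 1.h5 and 2.h5 already exist, rendering resumes at file number 3,
    # with 2000 - 500 * 2 = 1000 scenes left to render.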

    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # colorsys.hsv_to_rgb takes (hue, saturation, value)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        color_candidates.append((red, green, blue))

    scene, cube_nodes = build_scene(args.num_cubes, color_candidates)
    camera = OrthographicCamera(xmag=0.9, ymag=0.9)
    camera_node = Node(camera=camera)
    scene.add_node(camera_node)
    renderer = OffscreenRenderer(
        viewport_width=args.image_size, viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):

        camera_distance = 2
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

        # Change cube color and position
        update_cube_color_and_position(cube_nodes, color_candidates)

        # Transfer the changes to the vertex buffer on the GPU
        udpate_vertex_buffer(cube_nodes)

    renderer.delete()
コード例 #23
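(Fragment) The lines below are the tail of a Sutherland–Hodgman polygon-clipping loop; inside, computeIntersection, s, cp1 and cp2 are defined in the part of the function that is not shown.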
            if inside(e):
                if not inside(s):
                    outputList.append(computeIntersection())
                outputList.append(e)
            elif inside(s):
                outputList.append(computeIntersection())
            s = e
        cp1 = cp2
    return (outputList)


# View the scene
view_render_flags = {'cull_faces': False}
v = Viewer(scene,
           viewport_size=(width, height),
           render_flags=view_render_flags)
r = OffscreenRenderer(viewport_width=width, viewport_height=height)
color, depth = r.render(scene, flags=RenderFlags.SKIP_CULL_FACES)
# color = cvtColor(color, COLOR_RGB2BGR)


def draw_point(img, imgpts):
    # `circle` is assumed to be OpenCV's cv2.circle, which expects
    # integer pixel coordinates for the center point
    for point in imgpts:
        img = circle(img, tuple(int(c) for c in point), 4, (255, 0, 0), 2)
    return img


# draw the computed (x,y) coords on the image
color = draw_point(color, points)
plt.imshow(color)
plt.show()
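The points drawn above are computed earlier in the example. For reference, a minimal sketch of projecting 3D world points to pixel coordinates with a pyrender camera; project_to_pixels and its arguments are hypothetical names, not part of the original script:

import numpy as np

def project_to_pixels(points_3d, camera, cam_pose, width, height):
    # World -> clip space via the camera's view and projection matrices,
    # then a viewport transform from NDC to pixel coordinates.
    view = np.linalg.inv(cam_pose)
    proj = camera.get_projection_matrix(width=width, height=height)
    pts = np.hstack([points_3d, np.ones((len(points_3d), 1))])
    clip = (proj @ view @ pts.T).T
    ndc = clip[:, :3] / clip[:, 3:4]      # perspective divide
    x = (ndc[:, 0] + 1) * 0.5 * width     # NDC [-1, 1] -> pixels
    y = (1 - ndc[:, 1]) * 0.5 * height    # flip y for image coordinates
    return np.stack([x, y], axis=1)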
コード例 #24
#==============================================================================
# Attempt to convert 3D to 2D position
#==============================================================================

# View the scene
view_render_flags = {'cull_faces': False}
v = Viewer(scene, render_flags=view_render_flags)

#==============================================================================
# Rendering offscreen from that camera
#==============================================================================

dimensions = 600
# dimensions = 1280
r = OffscreenRenderer(viewport_width=dimensions, viewport_height=dimensions)

# Generate dataset ?
question_string = "Generate chess board dataset? (select no because it's already there) [y/n]: "
choice = input(question_string)
accepted_inputs = ['y', 'n']
while choice not in accepted_inputs:
    choice = input(question_string)

# Parameters
# alpha = 0
# beta = np.pi/8
iterations = 20
# distance = 0.7
# force_height = 0.0
mode = CameraPose.SIDE
コード例 #25
File: unproj_example.py  Project: yushiangw/pyrender
    proj = cam.get_projection_matrix()
    mvp = proj @ np.linalg.inv(cam_pose)
    inv_mvp = np.linalg.inv(mvp)
    #==============================================================================
    # Scene creation
    #==============================================================================

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    cam_node = scene.add(cam, pose=cam_pose)

    scene.main_camera_node = cam_node

    #==============================================================================
    drill_node = scene.add(drill_mesh)
    r = OffscreenRenderer(viewport_width=640, viewport_height=480)

    rf = pyrender.RenderFlags.NONE
    rf |= pyrender.RenderFlags.USE_RAW_DEPTH  # flag specific to this pyrender fork
    color, raw_depth = r.render(scene, flags=rf)
    r.delete()

    # unproject to get point cloud
    pcd = unproj(inv_mvp, raw_depth)

    #==============================================================================
    #------------------------------------------------------------------------------
    # Creating meshes from point clouds
    #------------------------------------------------------------------------------
    points_mesh = Mesh.from_points(pcd, colors=np.array((0.0, 1.0, 0.0, 1.0)))
    pcd_node = scene.add(points_mesh)
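unproj is defined elsewhere in this example. A minimal sketch of what a depth unprojection under the same inverse-MVP convention could look like; the raw-depth encoding (values in [0, 1], far plane stored as 1.0) is an assumption about this fork:

import numpy as np

def unproj(inv_mvp, depth):
    # Map every foreground pixel from normalized device coordinates
    # back to world space with the inverse MVP matrix.
    h, w = depth.shape
    ys, xs = np.nonzero(depth < 1.0)      # assume far-plane pixels store 1.0
    x_ndc = (xs + 0.5) / w * 2.0 - 1.0
    y_ndc = 1.0 - (ys + 0.5) / h * 2.0    # image y runs downward
    z_ndc = depth[ys, xs] * 2.0 - 1.0     # assume raw depth lies in [0, 1]
    clip = np.stack([x_ndc, y_ndc, z_ndc, np.ones_like(x_ndc)], axis=1)
    world = clip @ inv_mvp.T
    return world[:, :3] / world[:, 3:4]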
コード例 #26
    def render_camera_image(self):
        """Render the depth image for the current scene."""
        renderer = OffscreenRenderer(self.camera.width, self.camera.height)
        image = renderer.render(self._scene, flags=RenderFlags.DEPTH_ONLY)
        renderer.delete()
        return image
コード例 #27
File: scenes.py  Project: zuoguoqing/paz
class DictionaryView():
    """Render-ready scene composed of a single object and a single moving camera.

    # Arguments
        filepath: String containing the path to an OBJ file.
        viewport_size: List, specifying [H, W] of rendered image.
        y_fov: Float indicating the vertical field of view in radians.
        distance: Float indicating the camera's distance from the world origin.
        top_only: Boolean. If True, images are only taken from the top.
        light: Float indicating the intensity of the directional light.
        theta_steps: Int, number of samples of the polar angle.
        phi_steps: Int, number of samples of the azimuthal angle.
    """
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.,
                 distance=0.30,
                 top_only=False,
                 light=5.0,
                 theta_steps=10,
                 phi_steps=10):
        self.scene = Scene(bg_color=[0, 0, 0])
        self.camera = self.scene.add(
            PerspectiveCamera(y_fov, aspectRatio=np.divide(*viewport_size)))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(filepath), smooth=True))
        self.world_origin = self.mesh.mesh.centroid
        self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
        self.distance = distance
        # 0.1 values are to avoid gimbal lock
        theta_max = np.pi / 2.0 if top_only else np.pi
        self.thetas = np.linspace(0.1, theta_max - 0.1, theta_steps)
        self.phis = np.linspace(0.1, 2 * np.pi - 0.1, phi_steps)
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA

    def render(self):
        dictionary_data = []
        for theta_arg, theta in enumerate(self.thetas):
            for phi_arg, phi in enumerate(self.phis):
                x = self.distance * np.sin(theta) * np.cos(phi)
                y = self.distance * np.sin(theta) * np.sin(phi)
                z = self.distance * np.cos(theta)
                matrices = compute_modelview_matrices(np.array([x, z, y]),
                                                      self.world_origin)
                camera_to_world, world_to_camera = matrices
                self.scene.set_pose(self.camera, camera_to_world)
                self.scene.set_pose(self.light, camera_to_world)
                camera_to_world = camera_to_world.flatten()
                world_to_camera = world_to_camera.flatten()
                image, depth = self.renderer.render(self.scene,
                                                    flags=self.RGBA)
                image, alpha = split_alpha_channel(image)
                matrices = np.vstack([world_to_camera, camera_to_world])
                sample = {
                    'image': image,
                    'alpha': alpha,
                    'depth': depth,
                    'matrices': matrices
                }
                dictionary_data.append(sample)
        return dictionary_data
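A usage sketch; the OBJ path is a hypothetical placeholder:

view = DictionaryView('model.obj', viewport_size=(128, 128),
                      distance=0.30, top_only=False, light=5.0)
samples = view.render()  # theta_steps * phi_steps dicts with image/alpha/depth/matrices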
コード例 #28
def test_offscreen_renderer(tmpdir):

    # Fuze trimesh
    fuze_trimesh = trimesh.load('examples/models/fuze.obj')
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load('examples/models/drill.obj')
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0, 3] = 0.1
    drill_pose[2, 3] = -np.min(drill_trimesh.vertices[:, 2])

    # Wood trimesh
    wood_trimesh = trimesh.load('examples/models/wood.obj')
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh
    bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array([
        [1.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, -1.0, -0.16],
        [0.0, 1.0, 0.0, 0.13],
        [0.0, 0.0, 0.0, 1.0],
    ])

    boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced
    poses = np.tile(np.eye(4), (2, 1, 1))
    poses[0, :3, 3] = np.array([-0.1, -0.10, 0.05])
    poses[1, :3, 3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3),
                       intensity=10.0,
                       innerConeAngle=np.pi / 16,
                       outerConeAngle=np.pi / 6)

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([
        [0.0, -np.sqrt(2) / 2, np.sqrt(2) / 2, 0.5],
        [1.0, 0.0, 0.0, 0.0],
        [0.0, np.sqrt(2) / 2, np.sqrt(2) / 2, 0.4],
        [0.0, 0.0, 0.0, 1.0],
    ])

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    fuze_node = Node(mesh=fuze_mesh,
                     translation=np.array(
                         [0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]))
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh)
    scene.add_node(boxf_node)

    _ = scene.add(drill_mesh, pose=drill_pose)
    _ = scene.add(bottle_mesh, pose=bottle_pose)
    _ = scene.add(wood_mesh)
    _ = scene.add(direc_l, pose=cam_pose)
    _ = scene.add(spot_l, pose=cam_pose)
    _ = scene.add(points_mesh)

    _ = scene.add(cam, pose=cam_pose)

    r = OffscreenRenderer(viewport_width=640, viewport_height=480)
    color, depth = r.render(scene)

    assert color.shape == (480, 640, 3)
    assert depth.shape == (480, 640)
    assert np.max(depth.data) > 0.05
    assert np.count_nonzero(depth.data) > (0.2 * depth.size)
    r.delete()
コード例 #29
# point_l_node = scene.add(point_l_cam, pose=new_campose)

#==============================================================================
# Use viewer to display scene
#==============================================================================

# View the scene
view_render_flags = {'cull_faces': False, 'vertex_normals': False}
v = Viewer(scene, render_flags=view_render_flags, viewport_size=(IMAGE_WIDTH, IMAGE_HEIGHT))

#==============================================================================
# Prepare for dataset generation
#==============================================================================

flags = RenderFlags.SKIP_CULL_FACES
r = OffscreenRenderer(viewport_width=IMAGE_WIDTH, viewport_height=IMAGE_HEIGHT)
# color, _ = r.render(scene, flags=flags)

# Generate dataset ?
choice = input("Generate dataset? [y/n]: ")
accepted_inputs = ['y', 'n']
while choice not in accepted_inputs:
    choice = input("Generate dataset? [y/n]: ")

iterations = 5
mode = CameraPose.NONE
csvfile = './outputs/flowers_dataset.csv'
csvMode = 'w' # 'a' => append / 'w' => write (overwrites existing csv file)
meshes = [(flower_mesh, 'flower'), (stem_mesh, 'stem'), (center_mesh, 'center')]
skip_default_view = False
salt = '001'
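The generation loop itself is not shown. A minimal sketch of how the CSV might be opened with these parameters; the column names are hypothetical:

import csv

with open(csvfile, csvMode, newline='') as f:
    writer = csv.writer(f)
    if csvMode == 'w':
        writer.writerow(['image', 'mesh_name', 'x', 'y'])  # hypothetical header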
コード例 #30
def main():
    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1  # colorsys.hsv_to_rgb takes (hue, saturation, value)
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append(np.array((red, green, blue, 1)))

    floor_textures = [
        "../textures/lg_floor_d.tga",
        "../textures/lg_style_01_floor_blue_d.tga",
        "../textures/lg_style_01_floor_orange_bright_d.tga",
    ]

    wall_textures = [
        "../textures/lg_style_01_wall_cerise_d.tga",
        "../textures/lg_style_01_wall_green_bright_d.tga",
        "../textures/lg_style_01_wall_red_bright_d.tga",
        "../textures/lg_style_02_wall_yellow_d.tga",
        "../textures/lg_style_03_wall_orange_bright_d.tga",
    ]

    objects = [
        pyrender.objects.Capsule,
        pyrender.objects.Cylinder,
        pyrender.objects.Icosahedron,
        pyrender.objects.Box,
        pyrender.objects.Sphere,
    ]

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    fig.tight_layout()  # call after the axes exist so the layout actually adjusts
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  objects,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # milliseconds per frame (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
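If ImageMagick is not available, matplotlib's built-in Pillow writer can save the GIF instead:

ani.save(filename, writer="pillow")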