Example #1
    def render_segmentation_images(self):
        """Renders segmentation masks (modal and amodal) for each object in the state.
        """

        full_depth = self.observation        
        modal_data = np.zeros((full_depth.shape[0], full_depth.shape[1], len(self.obj_keys)), dtype=np.uint8)
        amodal_data = np.zeros((full_depth.shape[0], full_depth.shape[1], len(self.obj_keys)), dtype=np.uint8)
        renderer = OffscreenRenderer(self.camera.width, self.camera.height)
        flags = RenderFlags.DEPTH_ONLY

        # Hide all meshes
        obj_mesh_nodes = [next(iter(self._scene.get_nodes(name=k))) for k in self.obj_keys]
        for mn in self._scene.mesh_nodes:
            mn.mesh.is_visible = False

        for i, node in enumerate(obj_mesh_nodes):
            node.mesh.is_visible = True

            depth = renderer.render(self._scene, flags=flags)
            amodal_mask = depth > 0.0
            modal_mask = np.logical_and(
                (np.abs(depth - full_depth) < 1e-6), full_depth > 0.0
            )
            amodal_data[amodal_mask, i] = np.iinfo(np.uint8).max
            modal_data[modal_mask, i] = np.iinfo(np.uint8).max
            node.mesh.is_visible = False

        renderer.delete()
        
        # Show all meshes
        for mn in self._scene.mesh_nodes:
            mn.mesh.is_visible = True

        return amodal_data, modal_data
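A hypothetical usage sketch for the returned stacks (`env` stands in for an instance of the surrounding class, and `imageio` is an assumed dependency): write each object's amodal/modal mask pair to disk.

import imageio

amodal, modal = env.render_segmentation_images()
for i in range(amodal.shape[-1]):
    imageio.imwrite(f'amodal_{i:03d}.png', amodal[..., i])
    imageio.imwrite(f'modal_{i:03d}.png', modal[..., i])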
Example #2
 def render_camera_image(self, color=True):
     """Render the camera image for the current scene."""
     renderer = OffscreenRenderer(self.camera.width, self.camera.height)
     flags = RenderFlags.NONE if color else RenderFlags.DEPTH_ONLY
     image = renderer.render(self._scene, flags=flags)
     renderer.delete()
     return image
Example #3
def render_big_gallery(results_dir,
                       nb=30,
                       pts_colors=[0.5, 0.5, 0.5],
                       draw_text=False):
    '''
    pts_colors: RGB triplet in [0, 1] applied to every point
    returns an np array of one big (vertically stacked) image
    '''

    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames = get_all_filnames(results_dir, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for input_pf in input_ply_filenames:

        input_pc = read_ply_xyz(input_pf)

        colors = np.array(pts_colors)
        colors = np.tile(colors, (input_pc.shape[0], 1))

        input_pc_node = add_point_cloud_mesh_to_scene(input_pc, scene, pc_pose,
                                                      colors)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node)

        if draw_text:
            im_here = Image.fromarray(rendered_color)
            d = ImageDraw.Draw(im_here)
            fnt = ImageFont.truetype(
                font='/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
                size=100)
            d.text((0, 0),
                   input_pf.split('/')[-1],
                   fill=(0, 0, 0, 255),
                   font=fnt)
            rendered_color = np.array(im_here)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
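A hypothetical call, assuming `results_dir` points at a directory of .ply files; the returned array can be saved with the PIL Image class already used above.

gallery = render_big_gallery('results/', nb=10, pts_colors=[0.2, 0.4, 1.0], draw_text=True)
Image.fromarray(gallery).save('gallery.png')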
Example #4
def main():
    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_candidates.append((red, green, blue))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    rad_step = math.pi / 18
    total_frames = int(math.pi * 2 / rad_step)
    camera_distance = 2

    fig = plt.figure(figsize=(3, 3))
    ims = []

    for num_cubes in range(1, 8):
        scene = build_scene(num_cubes, color_candidates)[0]
        camera = OrthographicCamera(xmag=0.9, ymag=0.9)
        camera_node = Node(camera=camera)
        scene.add_node(camera_node)

        current_rad = 0

        for _ in range(total_frames):
            camera_position = np.array(
                (math.sin(current_rad), math.sin(math.pi / 6),
                 math.cos(current_rad)))
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            im = plt.imshow(image, interpolation="none", animated=True)
            ims.append([im])

            current_rad += rad_step

    renderer.delete()

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # interval is in milliseconds
                                    blit=True,
                                    repeat_delay=0)
    ani.save("shepard_metzler.gif", writer="imagemagick")
Example #5
 def __init__(self, path_OBJ, viewport_size=(128, 128), y_fov=3.14159 / 4.0,
              distance=[0.3, 0.5], light=[0.5, 30], top_only=False,
              roll=None, shift=None):
     self.distance, self.roll, self.shift = distance, roll, shift
     self.light_intensity, self.top_only = light, top_only
     self._build_scene(path_OBJ, viewport_size, light, y_fov)
     self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])
     self.flags_RGBA = RenderFlags.RGBA
     self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT
     self.epsilon = 0.01
Example #6
def render_big_gallery_overlay(dir_1,
                               dir_2,
                               pts_color_1=[0.5, 0.5, 0.5],
                               pts_color_2=[0.5, 0.5, 0.5],
                               nb=30):
    '''
    return np array of a big image
    '''
    cam = PerspectiveCamera(yfov=(YFOV))
    cam_pose = CAM_POSE

    point_l = PointLight(color=np.ones(3), intensity=POINT_LIGHT_INTENSITY)
    scene = Scene(bg_color=np.array([1, 1, 1, 0]))

    # cam and light
    _ = scene.add(cam, pose=cam_pose)
    _ = scene.add(point_l, pose=cam_pose)

    input_ply_filenames_1 = get_all_filnames(dir_1, nb)
    input_ply_filenames_2 = get_all_filnames(dir_2, nb)

    r = OffscreenRenderer(viewport_width=640 * 2,
                          viewport_height=480 * 2,
                          point_size=POINT_SIZE)
    pc_pose = PC_POSE

    images = []
    for idx, input_pf in enumerate(input_ply_filenames_1):

        input_pc_1 = read_ply_xyz(input_pf)
        input_pc_2 = read_ply_xyz(input_ply_filenames_2[idx])

        color_1 = np.array(pts_color_1)
        color_1 = np.tile(color_1, (input_pc_1.shape[0], 1))

        color_2 = np.array(pts_color_2)
        color_2 = np.tile(color_2, (input_pc_2.shape[0], 1))

        input_pc_node_1 = add_point_cloud_mesh_to_scene(
            input_pc_1, scene, pc_pose, color_1)
        input_pc_node_2 = add_point_cloud_mesh_to_scene(
            input_pc_2, scene, pc_pose, color_2)

        rendered_color, _ = r.render(scene)

        scene.remove_node(input_pc_node_1)
        scene.remove_node(input_pc_node_2)

        images.append(rendered_color)

    big_gallery = np.concatenate(images, axis=0)

    r.delete()

    return big_gallery
Example #7
    def __init__(self, path_OBJ, camera_pose, min_corner, max_corner,
                 symmetric_transforms, viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0, light_intensity=[0.5, 30]):
        self.light_intensity = light_intensity
        self.symmetric_transforms = symmetric_transforms
        self.min_corner, self.max_corner = min_corner, max_corner
        self.scene = Scene(bg_color=[0, 0, 0, 0])
        self.light = self._build_light(light_intensity, camera_pose)
        self.camera = self._build_camera(y_fov, viewport_size, camera_pose)
        self.pixel_mesh = self.scene.add(color_object(path_OBJ))
        self.mesh = self.scene.add(
            Mesh.from_trimesh(trimesh.load(path_OBJ), smooth=True))

        self.renderer = OffscreenRenderer(viewport_size[0], viewport_size[1])

        self.flags_RGBA = RenderFlags.RGBA
        self.flags_FLAT = RenderFlags.RGBA | RenderFlags.FLAT
Example #8
def render_sensor(
    point_set,
    render_sensor_path="/Users/macbookpro15/Documents/mujoco_hand_exps/data/sensor_render"
):
    """
    pointset: it is collectiono of sensor points for all timesteps
    """
    # first take one of the point, subtract the center from it which
    # I know is the 0-th position out of the 220 points
    # form the mesh from this
    if not os.path.exists(render_sensor_path):
        os.makedirs(render_sensor_path)
    time_steps = len(point_set)
    for t in range(time_steps):
        sensor = trimesh.load_mesh(
            f'../data/mesh_dir/mesh_{t}_out/mc_mesh_out.ply')
        sensor_mesh = Mesh.from_trimesh(sensor)
        # Light for the scene
        direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
        spot_l = SpotLight(color=np.ones(3),
                           intensity=10.0,
                           innerConeAngle=np.pi / 16,
                           outerConeAngle=np.pi / 6)
        point_l = PointLight(color=np.ones(3), intensity=10.0)

        # add camera to the scene
        cam = PerspectiveCamera(yfov=(np.pi / 3.0))
        cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                             [0.0, np.sqrt(2) / 2,
                              np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

        # create the scene
        scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
        point_mesh_node = scene.add(sensor_mesh)
        direc_l_node = scene.add(direc_l, pose=cam_pose)
        spot_l_node = scene.add(spot_l, pose=cam_pose)
        cam_node = scene.add(cam, pose=cam_pose)
        print('rendering the scene offline')
        r = OffscreenRenderer(viewport_width=640, viewport_height=480)
        color, depth = r.render(scene)
        r.delete()

        plt.figure()
        plt.imshow(color)
        plt.savefig(f'{render_sensor_path}/img_{t}.jpg')
        plt.close()  # avoid accumulating one open figure per timestep
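Every OffscreenRenderer owns a GL context, so constructing and deleting one per timestep (as above) is costly. A sketch of the same loop reusing a single renderer, keeping the imports and `cam_pose` from the snippet above:

r = OffscreenRenderer(viewport_width=640, viewport_height=480)
for t in range(time_steps):
    sensor = trimesh.load_mesh(f'../data/mesh_dir/mesh_{t}_out/mc_mesh_out.ply')
    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    scene.add(Mesh.from_trimesh(sensor))
    scene.add(PerspectiveCamera(yfov=np.pi / 3.0), pose=cam_pose)
    scene.add(PointLight(color=np.ones(3), intensity=10.0), pose=cam_pose)
    color, _ = r.render(scene)
    plt.imsave(f'{render_sensor_path}/img_{t}.jpg', color)
r.delete()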
Example #9
File: scene.py Project: zuoguoqing/paz
    def __init__(self,
                 filepath,
                 viewport_size=(128, 128),
                 y_fov=3.14159 / 4.0,
                 distance=0.3,
                 light=5.0,
                 top_only=True,
                 scale=10.0,
                 roll=None,
                 shift=None):

        self._build_scene(filepath, viewport_size, light, y_fov)
        self.distance, self.roll = distance, roll
        self.top_only, self.shift, self.scale = top_only, shift, scale
        self.renderer = OffscreenRenderer(*viewport_size)
        self.RGBA = RenderFlags.RGBA
        self.epsilon = 0.01
Example #10
def render_mesh(mesh, h=256, w=256):
    """https://pyrender.readthedocs.io/en/latest/examples/quickstart.html"""
    mesh = pyrender.Mesh.from_trimesh(mesh.trimesh())
    scene = Scene()
    scene.add(mesh)

    # z-axis away from the scene, x-axis right, y-axis up
    pose = np.eye(4)
    pose[2, 3] = 250

    # add camera
    camera = PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    scene.add(camera, pose=pose)

    # add light
    # light = DirectionalLight(color=np.ones(3), intensity=5.0)
    light = PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
    scene.add(light, pose=pose)

    r = OffscreenRenderer(w, h)  # signature is (viewport_width, viewport_height)
    color, depth = r.render(scene)
    r.delete()  # release the GL context before returning
    return color
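A hypothetical caller: `mesh` here can be any object whose `.trimesh()` method returns a `trimesh.Trimesh`, for instance a thin wrapper around a box primitive sized to sit inside the camera's view at z = 250.

import trimesh

class BoxWrapper:  # hypothetical stand-in for the expected mesh interface
    def trimesh(self):
        return trimesh.creation.box(extents=(100.0, 100.0, 100.0))

color = render_mesh(BoxWrapper(), h=256, w=256)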
Example #11
File: scenes.py Project: zuoguoqing/paz
 def __init__(self,
              filepath,
              viewport_size=(128, 128),
              y_fov=3.14159 / 4.,
              distance=0.30,
              top_only=False,
              light=5.0,
              theta_steps=10,
              phi_steps=10):
     self.scene = Scene(bg_color=[0, 0, 0])
     self.camera = self.scene.add(
         PerspectiveCamera(y_fov, aspectRatio=np.divide(*viewport_size)))
     self.mesh = self.scene.add(
         Mesh.from_trimesh(trimesh.load(filepath), smooth=True))
     self.world_origin = self.mesh.mesh.centroid
     self.light = self.scene.add(DirectionalLight([1.0, 1.0, 1.0], light))
     self.distance = distance
     # 0.1 values are to avoid gimbal lock
     theta_max = np.pi / 2.0 if top_only else np.pi
     self.thetas = np.linspace(0.1, theta_max - 0.1, theta_steps)
     self.phis = np.linspace(0.1, 2 * np.pi - 0.1, phi_steps)
     self.renderer = OffscreenRenderer(*viewport_size)
     self.RGBA = RenderFlags.RGBA
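The class precomputes `thetas`/`phis` but the pose construction is not shown; one plausible way to turn a spherical sample into an OpenGL camera pose around `world_origin` (a generic look-at, not necessarily paz's exact implementation):

import numpy as np

def spherical_to_pose(theta, phi, distance, world_origin):
    # Position on a sphere of radius `distance`; theta is measured from +z,
    # so the 0.1 offsets above also keep this away from the poles.
    position = world_origin + distance * np.array([
        np.sin(theta) * np.cos(phi),
        np.sin(theta) * np.sin(phi),
        np.cos(theta)])
    forward = world_origin - position
    forward = forward / np.linalg.norm(forward)
    right = np.cross(forward, np.array([0.0, 0.0, 1.0]))
    right = right / np.linalg.norm(right)
    up = np.cross(right, forward)
    pose = np.eye(4)
    pose[:3, 0], pose[:3, 1], pose[:3, 2] = right, up, -forward  # camera looks down -z
    pose[:3, 3] = position
    return pose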
Example #12
    def __init__(self, args):
        super(self.__class__, self).__init__()

        self.save_params_path = args.save_model_params if args.save_model_params is not None\
            else args.fit_result

        # Result retrieve
        with open(args.fit_result, 'rb') as f:
            fitting_results = pickle.load(f, encoding='latin1')
        self.cam_rot = fitting_results['camera_rotation'][0]
        self.cam_trans = fitting_results['camera_translation'][0]

        # Background image setup
        self.base_img = load_image(
            args.image_path) if args.image_path else None
        if self.base_img is not None:
            self.canvas_size = np.array(self.base_img.shape[:2][::-1],
                                        dtype=np.float32)
            if self.canvas_size[1] > MAX_CANVAS_HEIGHT:
                self.canvas_size *= (MAX_CANVAS_HEIGHT /
                                     float(self.canvas_size[1]))
            if self.canvas_size[0] > MAX_CANVAS_WIDTH:
                self.canvas_size *= (MAX_CANVAS_WIDTH /
                                     float(self.canvas_size[0]))
            self.canvas_size = tuple(self.canvas_size.astype(int))
        else:
            self.canvas_size = None

        # Model setup
        self.model = self._init_model(fitting_results, args.gender)

        # Scene setup
        self.scene = Scene(bg_color=[1.0, 1.0, 1.0])
        self.material = MetallicRoughnessMaterial(metallicFactor=0.0,
                                                  alphaMode='BLEND',
                                                  baseColorFactor=(5.0, 5.0,
                                                                   5.0, 1.0))
        im_size = self.canvas_size if self.canvas_size is not None else (
            MAX_CANVAS_WIDTH, MAX_CANVAS_HEIGHT)
        # self.camera = PerspectiveCamera(yfov=np.pi/3.0,
        #                                 aspectRatio=float(self.canvas_size[0])/self.canvas_size[1])
        self.camera = IntrinsicsCamera(fx=5000.,
                                       fy=5000.,
                                       cx=im_size[0] / 2,
                                       cy=im_size[1] / 2)
        # self.camera = CustomIntrinsicsCamera(fx=5000., fy=5000.,
        #                                cx=im_size[0]/2, cy=im_size[1]/2)

        self.camera_params = {
            't': self.cam_trans,
            'rt': cv2.Rodrigues(self.cam_rot)[0],
            'c': np.array(im_size).astype(float) / 2,
            'f': np.array((im_size[0], ) * 2) * 1.0,
            'k': np.zeros(5),
            'frustum': {
                'near': self.camera.znear,
                'far': self.camera.zfar,
                'width': im_size[0],
                'height': im_size[1]
            }
        }
        camera_pos = np.eye(4)
        # print(self.cam_trans)
        # print(self.camera.get_projection_matrix(im_size[0], im_size[1]))
        # print(self.camera.cx, self.camera.cy)
        camera_pos[:3, :3] = self.cam_rot
        self.cam_trans[0] *= -1
        camera_pos[:3, 3] = self.cam_trans
        slight = SpotLight(color=[1.0, 1.0, 1.0], intensity=10.0)
        slight_pos = np.eye(4)
        slight_pos[:3, 3] = np.array([0, 0, 5])
        self.scene.add(self.camera, pose=camera_pos)
        self.scene.add(slight, pose=slight_pos)
        self.renderer = OffscreenRenderer(viewport_width=im_size[0],
                                          viewport_height=im_size[1],
                                          point_size=1.0)

        # Rest setup
        self.setupUi(self)
        self._moving = False
        self._rotating = False
        self._mouse_begin_pos = None
        self._loaded_gender = None
        self._update_canvas = False

        self.camera_widget = Ui_CameraWidget(self.camera,
                                             self.camera_params['frustum'],
                                             self.draw, self.camera_params)
        self._bind_actions()
        self._update_canvas = True
Example #13
    spot_l_node = scene.add(spot_l, pose=cam_pose)

    # ==============================================================================
    # Using the viewer with a default camera
    # ==============================================================================

    # needs a screen
    # v = Viewer(scene, shadows=True)

    # ==============================================================================
    # Using the viewer with a pre-specified camera
    # ==============================================================================

    # needs a screen
    # cam_node = scene.add(cam, pose=cam_pose)
    # v = Viewer(scene, central_node=drill_node)

    # ==============================================================================
    # Rendering offscreen from that camera
    # ==============================================================================

    cam_node = scene.add(cam, pose=cam_pose)
    r = OffscreenRenderer(viewport_width=640 * 2, viewport_height=480 * 2)
    color, depth = r.render(scene)
    r.delete()

    import matplotlib.pyplot as plt

    plt.figure(figsize=(20, 20))
    plt.imshow(color)
    plt.show()
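The "needs a screen" caveats above apply only to Viewer; OffscreenRenderer itself can run headless if PyOpenGL is pointed at EGL or OSMesa before pyrender is imported:

import os
os.environ['PYOPENGL_PLATFORM'] = 'egl'  # or 'osmesa'
import pyrender  # must come after the environment variable is set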
Example #14
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)

        camera_distance = 4
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)

            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Example #15
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        # Resume after existing shards: advance past every .h5 file already rendered
        for name in os.listdir(args.output_directory):
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            # initial_file_number starts at args.initial_file_number, so this
            # check also rejects files below the requested starting number
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 4.5
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Example #16
# cam_pose = perturb_rotation(cam_pose, xy_angle_perturb_level, z_angle_perturb_level)
# cam_pose = adjust_zyx_angles(cam_pose, [-1.244, 0.316, -0.025])
cam = get_pyrender_cam((fx, fy, cx, cy), cam_pose, scene)

print('fx, fy, cx, cy: {}, {}, {}, {}'.format(fx, fy, cx, cy))
K = np.array([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]])
np.savetxt(os.path.join(out_dir, 'left_K.txt'), K, delimiter=',')
R, tvec = from_pyrender_pose(cam_pose)
R, tvec = opengl_to_opencv(R, tvec)
np.savetxt(os.path.join(out_dir, 'left_Rt.txt'),
           np.hstack((R, tvec)),
           delimiter=',')

cam_node = scene.add(cam, pose=cam_pose)
flags = RenderFlags.VERTEX_NORMALS | RenderFlags.SHADOWS_DIRECTIONAL
r = OffscreenRenderer(viewport_width=img_width, viewport_height=img_height)
color, depth = r.render(scene, flags=flags)
# color, depth = r.render(scene)
r.delete()
scene.remove_node(cam_node)  # remove camera from the scene

imageio.imwrite(os.path.join(out_dir, 'color_left.png'), color)

h5f = h5py.File(os.path.join(out_dir, 'depth_left.h5'), 'w')
h5f.create_dataset('data', data=depth)
h5f.close()

min_val, max_val = np.percentile(depth[depth > 0], (1, 99))
depth_tmp = np.clip(depth, min_val, max_val)
depth_tmp = (depth_tmp - min_val) / (max_val - min_val)
imageio.imwrite(os.path.join(out_dir, 'depth_left.png'),
                (depth_tmp * 255).astype(np.uint8))  # normalized depth as 8-bit
Example #17
def dump_rendered_scene(input_path, output_path, cam_pose, width, height,
                        focal):
    #==============================================================================
    # Mesh creation
    #==============================================================================

    #------------------------------------------------------------------------------
    # Creating textured meshes from trimeshes
    #------------------------------------------------------------------------------
    object_trimesh = trimesh.load(input_path)
    # https://trimsh.org/trimesh.html#trimesh.PointCloud.bounds
    print("Object extents ", object_trimesh.bounds)
    print("Input path ", input_path)

    #==============================================================================
    # Camera creation
    #==============================================================================
    cam_angle = focal  # `focal` is used directly as the vertical field of view (radians)
    cam = PerspectiveCamera(yfov=cam_angle)
    # cam_pose = np.array([
    #     [0.0,  -np.sqrt(2)/2, np.sqrt(2)/2, 0.5],
    #     [1.0, 0.0,           0.0,           0.0],
    #     [0.0,  np.sqrt(2)/2,  np.sqrt(2)/2, 0.4],
    #     [0.0,  0.0,           0.0,          1.0]
    # ])

    #==============================================================================
    # Scene creation
    #==============================================================================

    # scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
    scene = Scene.from_trimesh_scene(object_trimesh,
                                     bg_color=np.array([0.0, 0.0, 0.0, 1.0]),
                                     ambient_light=np.array(
                                         [1.0, 1.0, 1.0, 1.0]))
    #==============================================================================
    # Rendering offscreen from that camera
    #==============================================================================

    cam_node = scene.add(cam, pose=cam_pose)
    r = OffscreenRenderer(viewport_width=width, viewport_height=height)

    flags = RenderFlags.RGBA
    # color, depth = r.render(scene, flags=flags)
    color, depth = r.render(scene)
    r.delete()

    depth_value = depth.copy()
    img_output = color.copy()
    # depth_value[depth_value <= 0.0001] = 1.5
    check_output = np.sum(color, axis=-1)
    print(color.shape, depth_value.shape, np.min(color), np.max(color),
          np.min(depth_value), np.max(depth_value), check_output.shape)
    print(color[check_output == 0].shape)
    # for i in range(width):
    # 	for j in range(height):
    # 		if(np.sum(color[j,i,:])==0):
    # 			img_output[j,i,0] = 255 - img_output[j,i,0]
    # 			img_output[j,i,1] = 255 - img_output[j,i,1]
    # 			img_output[j,i,2] = 255 - img_output[j,i,2]

    # import matplotlib.pyplot as plt
    # plt.figure(figsize=(20,20))
    # plt.imshow(color)

    img = Image.fromarray(img_output, 'RGB')
    img.save(output_path)

    return cam_angle
Example #18
# point_l_node = scene.add(point_l_cam, pose=new_campose)

#==============================================================================
# Use viewer to display scene
#==============================================================================

# View the scene
view_render_flags = {'cull_faces': False, 'vertex_normals': False}
v = Viewer(scene, render_flags=view_render_flags, viewport_size=(IMAGE_WIDTH, IMAGE_HEIGHT))

#==============================================================================
# Prepare for dataset generation
#==============================================================================

flags = RenderFlags.SKIP_CULL_FACES
r = OffscreenRenderer(viewport_width=IMAGE_WIDTH, viewport_height=IMAGE_HEIGHT)
# color, _ = r.render(scene, flags=flags)

# Generate dataset ?
choice = input("Generate dataset? [y/n]: ")
accepted_inputs = ['y', 'n']
while choice not in accepted_inputs:
    choice = input("Generate dataset? [y/n]: ")

iterations = 5
mode = CameraPose.NONE
csvfile = './outputs/flowers_dataset.csv'
csvMode = 'w' # 'a' => append / 'w' => write (overwrites existing csv file)
meshes = [(flower_mesh, 'flower'), (stem_mesh, 'stem'), (center_mesh, 'center')]
skip_default_view = False
salt = '001'
Example #19
def main():
    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    plt.tight_layout()
    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_dice(scene,
               mnist_images,
               discrete_position=args.discrete_position,
               rotate_dice=args.rotate_dice)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # interval is in milliseconds
                                    blit=True,
                                    repeat_delay=0)
    filename = "mnist_dice"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_dice:
        filename += "_rotate_dice"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
Example #20
def main():
    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    floor_textures = [
        "../textures/lg_floor_d.tga",
        "../textures/lg_style_01_floor_blue_d.tga",
        "../textures/lg_style_01_floor_orange_bright_d.tga",
    ]

    wall_textures = [
        "../textures/lg_style_01_wall_cerise_d.tga",
        "../textures/lg_style_01_wall_green_bright_d.tga",
        "../textures/lg_style_01_wall_red_bright_d.tga",
        "../textures/lg_style_02_wall_yellow_d.tga",
        "../textures/lg_style_03_wall_orange_bright_d.tga",
    ]

    objects = [
        pyrender.objects.Capsule,
        pyrender.objects.Cylinder,
        pyrender.objects.Icosahedron,
        pyrender.objects.Box,
        pyrender.objects.Sphere,
    ]

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    plt.tight_layout()
    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  objects,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(
                math.pi / 6), math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # interval is in milliseconds
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
Example #21
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 3
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-3, 3, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Example #22
#==============================================================================
# Attempt to convert 3D to 2D position
#==============================================================================

# View the scene
view_render_flags = {'cull_faces': False}
v = Viewer(scene, render_flags=view_render_flags)

#==============================================================================
# Rendering offscreen from that camera
#==============================================================================

dimensions = 600
# dimensions = 1280
r = OffscreenRenderer(viewport_width=dimensions, viewport_height=dimensions)

# Generate dataset ?
question_string = "Generate chess board dataset? (select no because it's already there) [y/n]: "
choice = input(question_string)
accepted_inputs = ['y', 'n']
while choice not in accepted_inputs:
    choice = input(question_string)

# Parameters
# alpha = 0
# beta = np.pi/8
iterations = 20
# distance = 0.7
# force_height = 0.0
mode = CameraPose.SIDE
Example #23
def main():

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    os.makedirs(args.output_directory, exist_ok=True)

    with ShardedRecordWriter(args.output_directory + '/{:04d}.tfrecords',
                             args.num_scenes_per_file) as writer:

        for scene_index in tqdm(range(args.total_scenes)):
            full_scene, normals_scene, masks_scene = build_scene(
                floor_textures,
                wall_textures,
                fix_light_position=args.fix_light_position)
            object_nodes, object_mask_nodes = place_objects(
                full_scene,
                masks_scene,
                colors,
                objects,
                max_num_objects=args.max_num_objects,
                min_num_objects=args.min_num_objects,
                discrete_position=args.discrete_position,
                rotate_object=args.rotate_object)
            object_velocities = np.random.uniform(
                -1., 1., [len(object_nodes), 3]) * [1., 0., 1.]
            camera_distance = np.random.uniform(3., 4.8)
            camera = PerspectiveCamera(yfov=math.pi / 4)
            camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
            full_scene.add_node(camera_node)
            normals_scene.add_node(camera_node)
            masks_scene.add_node(camera_node)
            initial_yaw = np.random.uniform(-np.pi, np.pi)
            delta_yaw = np.random.normal(
                0.3, 0.05) * (np.random.randint(2) * 2 - 1.)
            pitch = 0.  # np.random.normal(0., 0.1) - 0.03
            all_frames = []
            all_depths = []
            all_masks = []
            all_normals = []
            all_bboxes = []
            all_camera_positions = []
            all_camera_yaws = []
            all_camera_pitches = []
            all_camera_matrices = []
            for observation_index in range(args.num_observations_per_scene):

                yaw = initial_yaw + delta_yaw * observation_index

                camera_xz = camera_distance * np.array(
                    (math.sin(yaw), math.cos(yaw)))

                camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
                camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
                camera_node.translation = camera_position

                # Image and depths (latter are linear float32 depths in world space, with zero for sky)
                flags = RenderFlags.NONE if args.no_shadows else RenderFlags.SHADOWS_DIRECTIONAL
                if args.anti_aliasing:
                    flags |= RenderFlags.ANTI_ALIASING
                image, depths = renderer.render(full_scene, flags=flags)

                # Background (wall/floor) normals in view space
                normals_world = renderer.render(normals_scene,
                                                flags=RenderFlags.NONE)[0]
                normals_world = np.where(
                    np.sum(normals_world, axis=2, keepdims=True) == 0, 0.,
                    (normals_world.astype(np.float32) / 255. - 0.5) *
                    2)  # this has zeros for the sky
                normals_view = np.einsum(
                    'ij,yxj->yxi', np.linalg.inv(camera_node.matrix[:3, :3]),
                    normals_world)

                # Instance segmentation masks
                masks_image = renderer.render(masks_scene,
                                              flags=RenderFlags.NONE)[0]

                # Instance 3D bboxes in view space (axis-aligned)
                def get_mesh_node_bbox(node):
                    object_to_view_matrix = np.dot(
                        np.linalg.inv(camera_node.matrix),
                        full_scene.get_pose(node))
                    assert len(node.mesh.primitives) == 1
                    vertices_object = np.concatenate([
                        node.mesh.primitives[0].positions,
                        np.ones_like(node.mesh.primitives[0].positions[:, :1])
                    ],
                                                     axis=1)
                    vertices_view = np.einsum('ij,vj->vi',
                                              object_to_view_matrix,
                                              vertices_object)[:, :3]
                    return np.min(vertices_view, axis=0), np.max(vertices_view,
                                                                 axis=0)

                object_bboxes_view = [
                    get_mesh_node_bbox(object_parent.children[0])
                    for object_parent in object_nodes
                ]

                all_frames.append(
                    cv2.imencode('.jpg', image[..., ::-1])[1].tobytes())
                all_masks.append(
                    cv2.imencode('.png', masks_image[..., ::-1])[1].tobytes())
                all_depths.append(depths)
                all_normals.append(normals_view)
                all_bboxes.append(object_bboxes_view)
                all_camera_positions.append(camera_position)
                all_camera_yaws.append(yaw)
                all_camera_pitches.append(pitch)
                all_camera_matrices.append(camera_node.matrix)

                if args.visualize:
                    plt.clf()
                    plt.imshow(image)
                    plt.pause(1e-10)

                if args.moving_objects:
                    for object_node, object_velocity in zip(
                            object_nodes, object_velocities):
                        new_translation = object_node.translation + object_velocity
                        new_translation = np.clip(new_translation, -3., 3.)
                        object_node.translation = new_translation

            all_bboxes = np.asarray(
                all_bboxes)  # :: frame, obj, min/max, x/y/z
            all_bboxes = np.concatenate([
                all_bboxes,
                np.zeros([
                    all_bboxes.shape[0],
                    args.max_num_objects - all_bboxes.shape[1], 2, 3
                ])
            ],
                                        axis=1)

            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'frames':
                    tf.train.Feature(bytes_list=tf.train.BytesList(
                        value=all_frames)),
                    'masks':
                    tf.train.Feature(bytes_list=tf.train.BytesList(
                        value=all_masks)),
                    'depths':
                    float32_feature(all_depths),
                    'normals':
                    float32_feature(all_normals),
                    'bboxes':
                    float32_feature(all_bboxes),
                    'camera_positions':
                    float32_feature(all_camera_positions),
                    'camera_yaws':
                    float32_feature(all_camera_yaws),
                    'camera_pitches':
                    float32_feature(all_camera_pitches),
                    'camera_matrices':
                    float32_feature(all_camera_matrices),
                }))
            writer.write(example.SerializeToString())

    renderer.delete()
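A hypothetical reader for the shards written above (the feature keys match the `tf.train.Features` dict; `depths` and the other float features come back flattened, so shapes must be restored by the caller):

import tensorflow as tf

def parse_example(serialized):
    return tf.io.parse_single_example(serialized, {
        'frames': tf.io.VarLenFeature(tf.string),
        'depths': tf.io.VarLenFeature(tf.float32),
        'camera_yaws': tf.io.VarLenFeature(tf.float32),
    })

dataset = tf.data.TFRecordDataset(['0000.tfrecords']).map(parse_example)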
Example #24
            # fragment: the tail of a polygon-clipping helper (Sutherland-Hodgman style)
            if inside(e):
                if not inside(s):
                    outputList.append(computeIntersection())
                outputList.append(e)
            elif inside(s):
                outputList.append(computeIntersection())
            s = e
        cp1 = cp2
    return (outputList)


# View the scene
view_render_flags = {'cull_faces': False}
v = Viewer(scene,
           viewport_size=(width, height),
           render_flags=view_render_flags)
r = OffscreenRenderer(viewport_width=width, viewport_height=height)
color, depth = r.render(scene, flags=RenderFlags.SKIP_CULL_FACES)
# color = cvtColor(color, COLOR_RGB2BGR)


def draw_point(img, imgpts):
    for point in imgpts:
        img = circle(img, tuple(point), 4, (255, 0, 0), 2)
    return img


# draw the computed (x,y) coords on the image
color = draw_point(color, points)
plt.imshow(color)
plt.show()
Example #25
    # spot_l = SpotLight(color=np.ones(3), intensity=10.0, innerConeAngle=np.pi/16, outerConeAngle=np.pi/6)
    point_l = PointLight(color=np.ones(3), intensity=10.0)

    # Camera creation
    cam = PerspectiveCamera(yfov=(np.pi / 3.0))

    # Scene creation
    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))

    # Adding objects to the scene
    cam_node = scene.add(cam)
    direc_l_node = scene.add(direc_l, parent_node=cam_node)
    # spot_l_node = scene.add(spot_l, parent_node=cam_node)
    point_l_node = scene.add(point_l, parent_node=cam_node)

    r = OffscreenRenderer(viewport_width=g_single_viewport_size[0], viewport_height=g_single_viewport_size[1])

    # Start logging flag
    write_log("Start logging at %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    write_log("Dataset path: %s" % os.path.abspath(g_dataset_dir))
    write_log("Logging path: %s" % os.path.abspath(g_log_filename))
    write_log("Dataset kind: %s" % g_dataset_kind)

    for meshes, save_paths in traverse_dataset(g_dataset_dir, g_dataset_kind):

        for filename, component_list in g_vis_list.items():

            write_log("Start rendering image: %s" % filename)

            image_mat = None
            for components in component_list:
Example #26
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        # Resume after existing shards: advance past every .h5 file already rendered
        for name in os.listdir(args.output_directory):
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            # initial_file_number starts at args.initial_file_number, so this
            # check also rejects files below the requested starting number
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_candidates.append((red, green, blue))

    scene, cube_nodes = build_scene(args.num_cubes, color_candidates)
    camera = OrthographicCamera(xmag=0.9, ymag=0.9)
    camera_node = Node(camera=camera)
    scene.add_node(camera_node)
    renderer = OffscreenRenderer(
        viewport_width=args.image_size, viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):

        camera_distance = 2
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

        # Change cube color and position
        update_cube_color_and_position(cube_nodes, color_candidates)

        # Transfer changes to the vertex buffer on gpu
        udpate_vertex_buffer(cube_nodes)

    renderer.delete()
Example #27
 def render_camera_image(self):
     """ Render the camera image for the current scene. """
     renderer = OffscreenRenderer(self.camera.width, self.camera.height)
     image = renderer.render(self._scene, flags=RenderFlags.DEPTH_ONLY)
     renderer.delete()
     return image
Example #28
def test_offscreen_renderer(tmpdir):

    # Fuze trimesh
    fuze_trimesh = trimesh.load('examples/models/fuze.obj')
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load('examples/models/drill.obj')
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0, 3] = 0.1
    drill_pose[2, 3] = -np.min(drill_trimesh.vertices[:, 2])

    # Wood trimesh
    wood_trimesh = trimesh.load('examples/models/wood.obj')
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh
    bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array([
        [1.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, -1.0, -0.16],
        [0.0, 1.0, 0.0, 0.13],
        [0.0, 0.0, 0.0, 1.0],
    ])

    boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced
    poses = np.tile(np.eye(4), (2, 1, 1))
    poses[0, :3, 3] = np.array([-0.1, -0.10, 0.05])
    poses[1, :3, 3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(color=np.ones(3),
                       intensity=10.0,
                       innerConeAngle=np.pi / 16,
                       outerConeAngle=np.pi / 6)

    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array([[0.0, -np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.5], [1.0, 0.0, 0.0, 0.0],
                         [0.0, np.sqrt(2) / 2,
                          np.sqrt(2) / 2, 0.4], [0.0, 0.0, 0.0, 1.0]])

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    fuze_node = Node(mesh=fuze_mesh,
                     translation=np.array(
                         [0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]))
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh)
    scene.add_node(boxf_node)

    _ = scene.add(drill_mesh, pose=drill_pose)
    _ = scene.add(bottle_mesh, pose=bottle_pose)
    _ = scene.add(wood_mesh)
    _ = scene.add(direc_l, pose=cam_pose)
    _ = scene.add(spot_l, pose=cam_pose)
    _ = scene.add(points_mesh)

    _ = scene.add(cam, pose=cam_pose)

    r = OffscreenRenderer(viewport_width=640, viewport_height=480)
    color, depth = r.render(scene)

    assert color.shape == (480, 640, 3)
    assert depth.shape == (480, 640)
    assert np.max(depth.data) > 0.05
    assert np.count_nonzero(depth.data) > (0.2 * depth.size)
    r.delete()