Example #1
    def _get_frame_camera(save_world2cam,
                          depth_scale=1.0,
                          unit_scaling=1000.,
                          destination_frame=["X", "-Y", "-Z"]):
        """ Returns camera parameters for the active camera.
        
        :return: dict containing info for scene_camera.json 
        """

        cam_K = WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam_K', destination_frame)

        frame_camera_dict = {
            'cam_K': cam_K[0] + cam_K[1] + cam_K[2],
            'depth_scale': depth_scale
        }

        if save_world2cam:
            H_c2w_opencv = Matrix(
                WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam2world_matrix',
                                                destination_frame))

            H_w2c_opencv = H_c2w_opencv.inverted()
            R_w2c_opencv = H_w2c_opencv.to_quaternion().to_matrix()
            t_w2c_opencv = H_w2c_opencv.to_translation() * unit_scaling

            frame_camera_dict['cam_R_w2c'] = list(R_w2c_opencv[0]) + list(
                R_w2c_opencv[1]) + list(R_w2c_opencv[2])
            frame_camera_dict['cam_t_w2c'] = list(t_w2c_opencv)

        return frame_camera_dict
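
For context, a minimal sketch of the cam_K flattening used above; the intrinsic values are made up for illustration:

    # Hypothetical 3x3 intrinsic matrix K, row-major: cam_K[0] + cam_K[1] + cam_K[2]
    # concatenates its rows into the flat 9-element list stored in scene_camera.json.
    fx, fy, cx, cy = 500.0, 500.0, 320.0, 240.0
    cam_K = [[fx, 0.0, cx],
             [0.0, fy, cy],
             [0.0, 0.0, 1.0]]
    flat_K = cam_K[0] + cam_K[1] + cam_K[2]
    assert flat_K == [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]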
Example #2
    def _get_frame_gt(dataset_objects: List[bpy.types.Mesh],
                      unit_scaling: float,
                      ignore_dist_thres: float,
                      destination_frame: List[str] = ["X", "-Y", "-Z"]):
        """ Returns GT pose annotations between active camera and objects.
        :param dataset_objects: Save annotations for these objects.
        :param unit_scaling: 1000. for outputting poses in mm
        :param ignore_dist_thres: Distance between camera and object above which the object is ignored (mostly due to failed physics).
        :param destination_frame: Transform poses from Blender internal coordinates to OpenCV coordinates
        :return: A list of GT camera-object pose annotations for scene_gt.json
        """

        H_c2w_opencv = Matrix(
            WriterUtility.get_cam_attribute(
                bpy.context.scene.camera,
                'cam2world_matrix',
                local_frame_change=destination_frame))

        frame_gt = []
        for obj in dataset_objects:

            H_m2w = Matrix(
                WriterUtility.get_common_attribute(obj, 'matrix_world'))

            cam_H_m2c = H_c2w_opencv.inverted() @ H_m2w
            cam_R_m2c = cam_H_m2c.to_quaternion().to_matrix()
            cam_t_m2c = cam_H_m2c.to_translation()

            # keep only objects within ignore_dist_thres of the camera
            # (objects that fell through the plane end up far away)
            if np.linalg.norm(list(cam_t_m2c)) <= ignore_dist_thres:
                cam_t_m2c = list(cam_t_m2c * unit_scaling)
                frame_gt.append({
                    'cam_R_m2c':
                    list(cam_R_m2c[0]) + list(cam_R_m2c[1]) +
                    list(cam_R_m2c[2]),
                    'cam_t_m2c':
                    cam_t_m2c,
                    'obj_id':
                    obj["category_id"]
                })
            else:
                print('Ignored object', obj["category_id"], 'because either')
                print('(1) it is further away than "ignore_dist_thres" =',
                      ignore_dist_thres,
                      '(e.g. it fell through a plane during the physics '
                      'simulation), or')
                print('(2) the object pose was not given in meters.')

        return frame_gt
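
A toy sketch of the pose composition above, using mathutils as the snippet does (values are illustrative): the model-to-camera transform is the inverted camera pose applied to the model's world pose.

    from mathutils import Matrix

    # Camera 2 m up the world Z axis; object at the origin.
    H_c2w = Matrix.Translation((0.0, 0.0, 2.0))
    H_m2w = Matrix.Identity(4)
    cam_H_m2c = H_c2w.inverted() @ H_m2w
    # In this toy frame the object lies at -2 m along the camera's Z axis.
    assert cam_H_m2c.to_translation()[2] == -2.0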
Example #3
    def _get_frame_gt(dataset_objects,
                      unit_scaling,
                      ignore_dist_thres,
                      destination_frame=["X", "-Y", "-Z"]):
        """ Returns GT pose annotations between active camera and objects.
        
        :return: A list of GT annotations.
        """

        H_c2w_opencv = Matrix(
            WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                            'cam2world_matrix',
                                            destination_frame))

        frame_gt = []
        for obj in dataset_objects:

            H_m2w = Matrix(
                WriterUtility.get_common_attribute(obj, 'matrix_world'))

            cam_H_m2c = H_c2w_opencv.inverted() @ H_m2w
            cam_R_m2c = cam_H_m2c.to_quaternion().to_matrix()
            cam_t_m2c = cam_H_m2c.to_translation()

            # keep only objects within ignore_dist_thres of the camera
            # (objects that fell through the plane end up far away)
            if np.linalg.norm(list(cam_t_m2c)) <= ignore_dist_thres:
                cam_t_m2c = list(cam_t_m2c * unit_scaling)
                frame_gt.append({
                    'cam_R_m2c':
                    list(cam_R_m2c[0]) + list(cam_R_m2c[1]) +
                    list(cam_R_m2c[2]),
                    'cam_t_m2c':
                    cam_t_m2c,
                    'obj_id':
                    obj["category_id"]
                })
            else:
                print('Ignored object', obj["category_id"], 'because either')
                print('(1) it is further away than "ignore_dist_thres" =',
                      ignore_dist_thres,
                      '(e.g. it fell through a plane during the physics '
                      'simulation), or')
                print('(2) the object pose was not given in meters.')

        return frame_gt
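
A quick numeric check of the unit handling in the snippet; unit_scaling is a plain multiplier applied to the translation:

    from mathutils import Vector

    # 0.25 m in camera coordinates becomes 250 mm when unit_scaling = 1000.
    cam_t_m2c = Vector((0.0, 0.0, 0.25))
    unit_scaling = 1000.
    assert list(cam_t_m2c * unit_scaling) == [0.0, 0.0, 250.0]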
Example #4
    def _get_frame_camera(save_world2cam,
                          depth_scale=1.0,
                          unit_scaling=1000.,
                          destination_frame=["X", "-Y", "-Z"]):
        """ Returns camera parameters for the active camera.
        :param save_world2cam: If true, world to camera transformations "cam_R_w2c", "cam_t_w2c" are saved in scene_camera.json
        :param depth_scale: Multiply the uint16 output depth image with this factor to get depth in mm.
        :param unit_scaling: 1000. for outputting poses in mm
        :param destination_frame: Transform poses from Blender internal coordinates to OpenCV coordinates
        :return: dict containing info for scene_camera.json 
        """

        cam_K = WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam_K')

        frame_camera_dict = {
            'cam_K': cam_K[0] + cam_K[1] + cam_K[2],
            'depth_scale': depth_scale
        }

        if save_world2cam:
            H_c2w_opencv = Matrix(
                WriterUtility.get_cam_attribute(
                    bpy.context.scene.camera,
                    'cam2world_matrix',
                    local_frame_change=destination_frame))

            H_w2c_opencv = H_c2w_opencv.inverted()
            R_w2c_opencv = H_w2c_opencv.to_quaternion().to_matrix()
            t_w2c_opencv = H_w2c_opencv.to_translation() * unit_scaling

            frame_camera_dict['cam_R_w2c'] = list(R_w2c_opencv[0]) + list(
                R_w2c_opencv[1]) + list(R_w2c_opencv[2])
            frame_camera_dict['cam_t_w2c'] = list(t_w2c_opencv)

        return frame_camera_dict
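
Illustrative shape of one per-frame entry this function produces (placeholder numbers, identity rotation):

    frame_camera_dict = {
        'cam_K': [500.0, 0.0, 320.0, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0],
        'depth_scale': 1.0,
        # present only when save_world2cam is True; row-major 3x3 rotation
        'cam_R_w2c': [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
        # translation in mm when unit_scaling = 1000.
        'cam_t_w2c': [0.0, 0.0, 0.0],
    }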
Example #5
    def _load_and_postprocess(self, file_path, key, version="1.0.0"):
        """
        Loads an image and post-processes it.

        :param file_path: Image path. Type: string.
        :param key: The image's key with regards to the hdf5 file. Type: string.
        :param version: The version number of the original data. Type: string. Default: 1.0.0.
        :return: The post-processed image that was loaded using the file path.
        """
        data = WriterUtility.load_output_file(Utility.resolve_path(file_path),
                                              self.write_alpha_channel)
        data, new_key, new_version = self._apply_postprocessing(
            key, data, version)
        print("Key: " + key + " - shape: " + str(data.shape) + " - dtype: " +
              str(data.dtype) + " - path: " + file_path)
        return data, new_key, new_version
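
A hedged usage sketch; `writer` stands for an instance of the surrounding writer class and the file path is a made-up rendered output:

    # load a rendered color image and run the writer's postprocessing on it
    data, new_key, new_version = writer._load_and_postprocess(
        "/tmp/blender_proc/rgb_0000.png", key="colors", version="2.0.0")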
Example #6
    def _write_camera(camera_path, depth_scale=0.1):
        """ Writes camera.json into dataset_dir.
        """

        cam_K = WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam_K')
        camera = {
            'cx': cam_K[0][2],
            'cy': cam_K[1][2],
            'depth_scale': depth_scale,
            'fx': cam_K[0][0],
            'fy': cam_K[1][1],
            'height': bpy.context.scene.render.resolution_y,
            'width': bpy.context.scene.render.resolution_x
        }

        BopWriterUtility._save_json(camera_path, camera)
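
The resulting camera.json is a flat dict of global intrinsics (placeholder values; width and height come from the render settings):

    camera = {
        'cx': 320.0, 'cy': 240.0,
        'depth_scale': 0.1,
        'fx': 500.0, 'fy': 500.0,
        'height': 480, 'width': 640
    }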
Example #7
    def render(output_dir: Union[str, None] = None, file_prefix: str = "rgb_", output_key: str = "colors",
               load_keys: Set = None, return_data: bool = True) -> Dict[str, List[np.ndarray]]:
        """ Render all frames.

        This will go through all frames from scene.frame_start to scene.frame_end and render each of them.

        :param output_dir: The directory to write files to, if this is None the temporary directory is used. \
                           The temporary directory is usually in the shared memory (only true for linux).
        :param file_prefix: The prefix to use for writing the images.
        :param output_key: The key to use for registering the output.
        :param load_keys: Set of output keys to load when available
        :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
        :return: dict of lists of raw renderer output. Keys can be 'distance', 'colors', 'normals'
        """
        if output_dir is None:
            output_dir = Utility.get_temporary_directory()
        if load_keys is None:
            load_keys = {'colors', 'distance', 'normals'}

        if output_key is not None:
            Utility.add_output_entry({
                "key": output_key,
                "path": os.path.join(output_dir, file_prefix) + "%04d" +
                        RendererUtility.map_file_format_to_file_ending(bpy.context.scene.render.image_settings.file_format),
                "version": "2.0.0"
            })
            load_keys.add(output_key)

        bpy.context.scene.render.filepath = os.path.join(output_dir, file_prefix)

        # Skip if there is nothing to render
        if bpy.context.scene.frame_end != bpy.context.scene.frame_start:
            if len(get_all_blender_mesh_objects()) == 0:
                raise Exception("There are no mesh-objects to render, "
                                "please load an object before invoking the renderer.")
            # As frame_end points to the next free frame, decrease it by one, as
            # blender will render all frames in [frame_start, frame_end]
            bpy.context.scene.frame_end -= 1
            bpy.ops.render.render(animation=True, write_still=True)
            # Revert changes
            bpy.context.scene.frame_end += 1
        
        return WriterUtility.load_registered_outputs(load_keys) if return_data else {}
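
A hedged usage sketch of the entry point above, keeping all defaults; "colors" is the default output_key:

    # render all registered camera poses and inspect the returned arrays
    data = RendererUtility.render()
    for i, img in enumerate(data.get("colors", [])):
        print("frame", i, img.shape, img.dtype)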
Example #8
    def _write_camera(camera_path: str, depth_scale: float = 1.0):
        """ Writes camera.json into dataset_dir.
        :param camera_path: Path to camera.json
        :param depth_scale: Multiply the uint16 output depth image with this factor to get depth in mm.
        """

        cam_K = WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam_K')
        camera = {
            'cx': cam_K[0][2],
            'cy': cam_K[1][2],
            'depth_scale': depth_scale,
            'fx': cam_K[0][0],
            'fy': cam_K[1][1],
            'height': bpy.context.scene.render.resolution_y,
            'width': bpy.context.scene.render.resolution_x
        }

        BopWriterUtility._save_json(camera_path, camera)
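
The depth_scale trade-off can be verified with simple arithmetic: depth is stored as a 16-bit PNG (0-65535), so the maximum representable depth is 65535 * depth_scale millimeters.

    # depth_scale trades depth resolution against maximum range
    for depth_scale in (1.0, 0.1):
        max_depth_m = 65535 * depth_scale / 1000.0
        print(f"depth_scale={depth_scale}: max depth {max_depth_m:.2f} m "
              f"at {depth_scale} mm resolution")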
Example #9
    height = np.random.uniform(0.5, 2)
    location, _ = point_sampler.sample(height)
    # Sample rotation (fix around X and Y axis)
    euler_rotation = np.random.uniform([1.2217, 0, 0],
                                       [1.2217, 0, 6.283185307])
    cam2world_matrix = MathUtility.build_transformation_mat(
        location, euler_rotation)

    # Check that obstacles are at least 1 meter away from the camera and that the view is interesting enough
    if CameraValidation.perform_obstacle_in_view_check(
            cam2world_matrix, {"min": 1.0}, bvh_tree
    ) and CameraValidation.scene_coverage_score(cam2world_matrix) > 0.4:
        CameraUtility.add_camera_pose(cam2world_matrix)
        poses += 1
    tries += 1

# activate normal and distance rendering
RendererUtility.enable_normals_output()
RendererUtility.enable_distance_output()
MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=True)

# render the whole pipeline
data = RendererUtility.render()

data.update(
    SegMapRendererUtility.render(Utility.get_temporary_directory(),
                                 Utility.get_temporary_directory(), "class"))

# write the data to a .hdf5 container
WriterUtility.save_to_hdf5(args.output_dir, data)
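
The snippet above begins inside a camera rejection-sampling loop; a plausible enclosing structure (hedged: `point_sampler` and the pose/try budgets are assumptions not shown in the original) would be:

    poses, tries = 0, 0
    while tries < 10000 and poses < 5:
        # sample height and location, build cam2world_matrix, and keep the
        # pose only if it passes the obstacle and coverage checks (as above)
        ...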
Example #10
    def render(output_dir: str,
               temp_dir: str,
               get_forward_flow: bool,
               get_backward_flow: bool,
               blender_image_coordinate_style: bool = False,
               forward_flow_output_file_prefix: str = "forward_flow_",
               forward_flow_output_key: str = "forward_flow",
               backward_flow_output_file_prefix: str = "backward_flow_",
               backward_flow_output_key: str = "backward_flow",
               return_data: bool = True) -> Dict[str, List[np.ndarray]]:
        """ Renders the optical flow (forward and backward) for all frames.

        :param output_dir: The directory to write images to.
        :param temp_dir: The directory to write intermediate data to.
        :param get_forward_flow: Whether to render forward optical flow.
        :param get_backward_flow: Whether to render backward optical flow.
        :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left (blender default; True) or top left (standard convention; False).
        :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
        :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
        :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
        :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
        :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
        :return: dict of lists of raw renderer outputs. Keys can be 'forward_flow', 'backward_flow'
        """
        if get_forward_flow is False and get_backward_flow is False:
            raise Exception(
                "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
            )

        with Utility.UndoAfterExecution():
            RendererUtility.init()
            RendererUtility.set_samples(1)
            RendererUtility.set_adaptive_sampling(0)
            RendererUtility.set_denoiser(None)
            RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

            FlowRendererUtility._output_vector_field(get_forward_flow,
                                                     get_backward_flow,
                                                     output_dir)

            # only need to render once; both fwd and bwd flow will be saved
            temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
            temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
            RendererUtility.render(temp_dir, "bwd_flow_", None)

            # After rendering: convert to optical flow or calculate hsv visualization, if desired
            for frame in range(bpy.context.scene.frame_start,
                               bpy.context.scene.frame_end):
                # temporarily save respective vector fields
                if get_forward_flow:
                    file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                    fwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)

                    if not blender_image_coordinate_style:
                        fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(
                        output_dir,
                        forward_flow_output_file_prefix) + '%04d' % frame
                    forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                    np.save(fname + '.npy', forward_flow[:, :, :2])

                if get_backward_flow:
                    file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                    bwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)

                    if not blender_image_coordinate_style:
                        bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(
                        output_dir,
                        backward_flow_output_file_prefix) + '%04d' % frame
                    np.save(fname + '.npy', bwd_flow_field[:, :, :2])

        load_keys = set()
        # register desired outputs
        if get_forward_flow:
            Utility.register_output(output_dir,
                                    forward_flow_output_file_prefix,
                                    forward_flow_output_key, '.npy', '2.0.0')
            load_keys.add(forward_flow_output_key)
        if get_backward_flow:
            Utility.register_output(output_dir,
                                    backward_flow_output_file_prefix,
                                    backward_flow_output_key, '.npy', '2.0.0')
            load_keys.add(backward_flow_output_key)

        return WriterUtility.load_registered_outputs(
            load_keys) if return_data else {}
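
A hedged sketch of reading one saved flow frame back from disk; it assumes the default forward_flow_output_file_prefix, frame 0, and `output_dir` as passed to render:

    import os
    import numpy as np

    flow = np.load(os.path.join(output_dir, "forward_flow_0000.npy"))
    print(flow.shape)  # (H, W, 2): per-pixel (dx, dy) pointing at the next frame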
Example #11
    def _write_frames(chunks_dir,
                      dataset_objects,
                      depth_scale: float = 1.0,
                      frames_per_chunk: int = 1000,
                      m2mm: bool = True,
                      ignore_dist_thres: float = 100.,
                      save_world2cam: bool = True):
        """ Writes images, GT annotations and camera info.
        """

        # Format of the depth images.
        depth_ext = '.png'

        rgb_tpath = os.path.join(chunks_dir, '{chunk_id:06d}', 'rgb',
                                 '{im_id:06d}' + '{im_type}')
        depth_tpath = os.path.join(chunks_dir, '{chunk_id:06d}', 'depth',
                                   '{im_id:06d}' + depth_ext)
        chunk_camera_tpath = os.path.join(chunks_dir, '{chunk_id:06d}',
                                          'scene_camera.json')
        chunk_gt_tpath = os.path.join(chunks_dir, '{chunk_id:06d}',
                                      'scene_gt.json')

        # Paths to the already existing chunk folders (such folders may exist
        # when appending to an existing dataset).
        chunk_dirs = sorted(glob.glob(os.path.join(chunks_dir, '*')))
        chunk_dirs = [d for d in chunk_dirs if os.path.isdir(d)]

        # Get ID's of the last already existing chunk and frame.
        curr_chunk_id = 0
        curr_frame_id = 0
        if len(chunk_dirs):
            last_chunk_dir = sorted(chunk_dirs)[-1]
            last_chunk_gt_fpath = os.path.join(last_chunk_dir, 'scene_gt.json')
            chunk_gt = BopWriterUtility._load_json(last_chunk_gt_fpath,
                                                   keys_to_int=True)

            # Last chunk and frame ID's.
            last_chunk_id = int(os.path.basename(last_chunk_dir))
            last_frame_id = int(sorted(chunk_gt.keys())[-1])

            # Current chunk and frame ID's.
            curr_chunk_id = last_chunk_id
            curr_frame_id = last_frame_id + 1
            if curr_frame_id % frames_per_chunk == 0:
                curr_chunk_id += 1
                curr_frame_id = 0

        # Initialize structures for the GT annotations and camera info.
        chunk_gt = {}
        chunk_camera = {}
        if curr_frame_id != 0:
            # Load GT and camera info of the chunk we are appending to.
            chunk_gt = BopWriterUtility._load_json(
                chunk_gt_tpath.format(chunk_id=curr_chunk_id),
                keys_to_int=True)
            chunk_camera = BopWriterUtility._load_json(
                chunk_camera_tpath.format(chunk_id=curr_chunk_id),
                keys_to_int=True)

        # Go through all frames.
        num_new_frames = bpy.context.scene.frame_end - bpy.context.scene.frame_start
        for frame_id in range(bpy.context.scene.frame_start,
                              bpy.context.scene.frame_end):
            # Activate frame.
            bpy.context.scene.frame_set(frame_id)

            # Reset data structures and prepare folders for a new chunk.
            if curr_frame_id == 0:
                chunk_gt = {}
                chunk_camera = {}
                os.makedirs(
                    os.path.dirname(
                        rgb_tpath.format(chunk_id=curr_chunk_id,
                                         im_id=0,
                                         im_type='PNG')))
                os.makedirs(
                    os.path.dirname(
                        depth_tpath.format(chunk_id=curr_chunk_id, im_id=0)))

            # Get GT annotations and camera info for the current frame.

            # Output translation gt in m or mm
            unit_scaling = 1000. if m2mm else 1.

            chunk_gt[curr_frame_id] = BopWriterUtility._get_frame_gt(
                dataset_objects, unit_scaling, ignore_dist_thres)
            chunk_camera[curr_frame_id] = BopWriterUtility._get_frame_camera(
                save_world2cam, depth_scale, unit_scaling)

            # Copy the resulting RGB image.
            rgb_output = Utility.find_registered_output_by_key("colors")
            if rgb_output is None:
                raise Exception("RGB image has not been rendered.")
            image_type = '.png' if rgb_output['path'].endswith(
                'png') else '.jpg'
            rgb_fpath = rgb_tpath.format(chunk_id=curr_chunk_id,
                                         im_id=curr_frame_id,
                                         im_type=image_type)
            shutil.copyfile(rgb_output['path'] % frame_id, rgb_fpath)

            # Load the resulting dist image.
            dist_output = Utility.find_registered_output_by_key("distance")
            if dist_output is None:
                raise Exception("Distance image has not been rendered.")
            distance = WriterUtility.load_output_file(
                Utility.resolve_path(dist_output['path'] % frame_id))
            depth = PostProcessingUtility.dist2depth(distance)

            # Scale the depth to retain a higher precision (the depth is saved
            # as a 16-bit PNG image with range 0-65535).
            depth_mm = 1000.0 * depth  # [m] -> [mm]
            depth_mm_scaled = depth_mm / float(depth_scale)

            # Save the scaled depth image.
            depth_fpath = depth_tpath.format(chunk_id=curr_chunk_id,
                                             im_id=curr_frame_id)
            BopWriterUtility._save_depth(depth_fpath, depth_mm_scaled)

            # Save the chunk info if we are at the end of a chunk or at the last
            # new frame (frame_id runs from frame_start up to frame_end - 1).
            if ((curr_frame_id + 1) % frames_per_chunk == 0) or\
                  (frame_id == bpy.context.scene.frame_end - 1):

                # Save GT annotations.
                BopWriterUtility._save_json(
                    chunk_gt_tpath.format(chunk_id=curr_chunk_id), chunk_gt)

                # Save camera info.
                BopWriterUtility._save_json(
                    chunk_camera_tpath.format(chunk_id=curr_chunk_id),
                    chunk_camera)

                # Update ID's.
                curr_chunk_id += 1
                curr_frame_id = 0
            else:
                curr_frame_id += 1
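
A toy check of the chunk bookkeeping above: appending right after the last frame of a full chunk rolls over to a fresh chunk.

    frames_per_chunk = 1000
    last_chunk_id, last_frame_id = 2, 999  # hypothetical existing dataset
    curr_chunk_id, curr_frame_id = last_chunk_id, last_frame_id + 1
    if curr_frame_id % frames_per_chunk == 0:
        curr_chunk_id += 1
        curr_frame_id = 0
    assert (curr_chunk_id, curr_frame_id) == (3, 0)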
Example #12
    def _write_frames(chunks_dir: str,
                      dataset_objects: list,
                      depths: List[np.ndarray] = [],
                      colors: List[np.ndarray] = [],
                      color_file_format: str = "PNG",
                      depth_scale: float = 1.0,
                      frames_per_chunk: int = 1000,
                      m2mm: bool = True,
                      ignore_dist_thres: float = 100.,
                      save_world2cam: bool = True,
                      jpg_quality: int = 95):
        """Write each frame's ground truth into chunk directory in BOP format

        :param chunks_dir: Path to the output directory of the current chunk.
        :param dataset_objects: Save annotations for these objects.
        :param depths: List of depth images in m to save
        :param colors: List of color images to save
        :param color_file_format: File type to save color images. Available: "PNG", "JPEG"
        :param jpg_quality: If color_file_format is "JPEG", save with the given quality.
        :param depth_scale: Multiply the uint16 output depth image with this factor to get depth in mm. Used to trade-off between depth accuracy 
            and maximum depth value. Default corresponds to 65.54m maximum depth and 1mm accuracy.
        :param ignore_dist_thres: Distance between camera and object above which the object is ignored (mostly due to failed physics).
        :param m2mm: Original BOP annotations and models are in mm. If true, the GT annotations are converted to mm here. This
            is needed if the BopLoader option mm2m is used.
        :param frames_per_chunk: Number of frames saved in each chunk (called scene in BOP)
        :param save_world2cam: If true, world to camera transformations "cam_R_w2c", "cam_t_w2c" are saved in scene_camera.json
        """

        # Format of the depth images.
        depth_ext = '.png'

        rgb_tpath = os.path.join(chunks_dir, '{chunk_id:06d}', 'rgb',
                                 '{im_id:06d}' + '{im_type}')
        depth_tpath = os.path.join(chunks_dir, '{chunk_id:06d}', 'depth',
                                   '{im_id:06d}' + depth_ext)
        chunk_camera_tpath = os.path.join(chunks_dir, '{chunk_id:06d}',
                                          'scene_camera.json')
        chunk_gt_tpath = os.path.join(chunks_dir, '{chunk_id:06d}',
                                      'scene_gt.json')

        # Paths to the already existing chunk folders (such folders may exist
        # when appending to an existing dataset).
        chunk_dirs = sorted(glob.glob(os.path.join(chunks_dir, '*')))
        chunk_dirs = [d for d in chunk_dirs if os.path.isdir(d)]

        # Get ID's of the last already existing chunk and frame.
        curr_chunk_id = 0
        curr_frame_id = 0
        if len(chunk_dirs):
            last_chunk_dir = sorted(chunk_dirs)[-1]
            last_chunk_gt_fpath = os.path.join(last_chunk_dir, 'scene_gt.json')
            chunk_gt = BopWriterUtility._load_json(last_chunk_gt_fpath,
                                                   keys_to_int=True)

            # Last chunk and frame ID's.
            last_chunk_id = int(os.path.basename(last_chunk_dir))
            last_frame_id = int(sorted(chunk_gt.keys())[-1])

            # Current chunk and frame ID's.
            curr_chunk_id = last_chunk_id
            curr_frame_id = last_frame_id + 1
            if curr_frame_id % frames_per_chunk == 0:
                curr_chunk_id += 1
                curr_frame_id = 0

        # Initialize structures for the GT annotations and camera info.
        chunk_gt = {}
        chunk_camera = {}
        if curr_frame_id != 0:
            # Load GT and camera info of the chunk we are appending to.
            chunk_gt = BopWriterUtility._load_json(
                chunk_gt_tpath.format(chunk_id=curr_chunk_id),
                keys_to_int=True)
            chunk_camera = BopWriterUtility._load_json(
                chunk_camera_tpath.format(chunk_id=curr_chunk_id),
                keys_to_int=True)

        # Go through all frames.
        num_new_frames = bpy.context.scene.frame_end - bpy.context.scene.frame_start

        # Empty lists mean the images are loaded from registered outputs instead.
        if (depths and len(depths) != num_new_frames) or \
                (colors and len(colors) != num_new_frames):
            raise Exception(
                "The amount of images stored in depths/colors does not correspond to the amount "
                "of images specified by frame_start to frame_end.")

        for frame_id in range(bpy.context.scene.frame_start,
                              bpy.context.scene.frame_end):
            # Activate frame.
            bpy.context.scene.frame_set(frame_id)

            # Reset data structures and prepare folders for a new chunk.
            if curr_frame_id == 0:
                chunk_gt = {}
                chunk_camera = {}
                os.makedirs(
                    os.path.dirname(
                        rgb_tpath.format(chunk_id=curr_chunk_id,
                                         im_id=0,
                                         im_type='PNG')))
                os.makedirs(
                    os.path.dirname(
                        depth_tpath.format(chunk_id=curr_chunk_id, im_id=0)))

            # Get GT annotations and camera info for the current frame.

            # Output translation gt in m or mm
            unit_scaling = 1000. if m2mm else 1.

            chunk_gt[curr_frame_id] = BopWriterUtility._get_frame_gt(
                dataset_objects, unit_scaling, ignore_dist_thres)
            chunk_camera[curr_frame_id] = BopWriterUtility._get_frame_camera(
                save_world2cam, depth_scale, unit_scaling)

            if colors:
                # index relative to frame_start, as frame_id need not start at 0
                color_rgb = colors[frame_id - bpy.context.scene.frame_start]
                color_bgr = color_rgb[..., ::-1].copy()
                if color_file_format == 'PNG':
                    rgb_fpath = rgb_tpath.format(chunk_id=curr_chunk_id,
                                                 im_id=curr_frame_id,
                                                 im_type='.png')
                    cv2.imwrite(rgb_fpath, color_bgr)
                elif color_file_format == 'JPEG':
                    rgb_fpath = rgb_tpath.format(chunk_id=curr_chunk_id,
                                                 im_id=curr_frame_id,
                                                 im_type='.jpg')
                    cv2.imwrite(rgb_fpath, color_bgr,
                                [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality])
            else:
                rgb_output = Utility.find_registered_output_by_key("colors")
                if rgb_output is None:
                    raise Exception("RGB image has not been rendered.")
                color_ext = '.png' if rgb_output['path'].endswith(
                    'png') else '.jpg'
                # Copy the resulting RGB image.
                rgb_fpath = rgb_tpath.format(chunk_id=curr_chunk_id,
                                             im_id=curr_frame_id,
                                             im_type=color_ext)
                shutil.copyfile(rgb_output['path'] % frame_id, rgb_fpath)

            if depths:
                # index relative to frame_start, as frame_id need not start at 0
                depth = depths[frame_id - bpy.context.scene.frame_start]
            else:
                # Load the resulting dist image.
                dist_output = Utility.find_registered_output_by_key("distance")
                if dist_output is None:
                    raise Exception("Distance image has not been rendered.")
                distance = WriterUtility.load_output_file(Utility.resolve_path(
                    dist_output['path'] % frame_id),
                                                          remove=False)
                depth = PostProcessingUtility.dist2depth(distance)

            # Scale the depth to retain a higher precision (the depth is saved
            # as a 16-bit PNG image with range 0-65535).
            depth_mm = 1000.0 * depth  # [m] -> [mm]
            depth_mm_scaled = depth_mm / float(depth_scale)

            # Save the scaled depth image.
            depth_fpath = depth_tpath.format(chunk_id=curr_chunk_id,
                                             im_id=curr_frame_id)
            BopWriterUtility._save_depth(depth_fpath, depth_mm_scaled)

            # Save the chunk info if we are at the end of a chunk or at the last
            # new frame (frame_id runs from frame_start up to frame_end - 1).
            if ((curr_frame_id + 1) % frames_per_chunk == 0) or\
                  (frame_id == bpy.context.scene.frame_end - 1):

                # Save GT annotations.
                BopWriterUtility._save_json(
                    chunk_gt_tpath.format(chunk_id=curr_chunk_id), chunk_gt)

                # Save camera info.
                BopWriterUtility._save_json(
                    chunk_camera_tpath.format(chunk_id=curr_chunk_id),
                    chunk_camera)

                # Update ID's.
                curr_chunk_id += 1
                curr_frame_id = 0
            else:
                curr_frame_id += 1
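
A hedged usage sketch; `dataset_objects` and the rendered `colors`/`depths` lists (depths in meters) are assumed to come from the surrounding pipeline, and the output path is made up:

    BopWriterUtility._write_frames(
        chunks_dir="/tmp/bop_output/train_pbr",  # hypothetical output dir
        dataset_objects=dataset_objects,
        depths=depths,
        colors=colors,
        color_file_format="JPEG",
        jpg_quality=95,
        depth_scale=0.1)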
Example #13
    def render(output_dir: str, temp_dir: str, used_attributes: Union[str, List[str]],
               used_default_values: Union[Dict[str, str]] = None, file_prefix: str = "segmap_",
               output_key: str = "segmap", segcolormap_output_file_prefix: str = "class_inst_col_map",
               segcolormap_output_key: str = "segcolormap", use_alpha_channel: bool = False,
               render_colorspace_size_per_dimension: int = 2048, return_data: bool = True) -> Dict[str, List[np.ndarray]]:
        """ Renders segmentation maps for all frames

        :param output_dir: The directory to write images to.
        :param temp_dir: The directory to write intermediate data to.
        :param used_attributes: The attributes to be used for color mapping.
        :param used_default_values: The default values used for the keys used in used_attributes.
        :param file_prefix: The prefix to use for writing the images.
        :param output_key: The key to use for registering the output.
        :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
        :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
        :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
        :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of \
                                                     integers which can be precisely stored is [-2048, 2048]. As \
                                                     blender does not allow negative values for colors, we use \
                                                     [0, 2048] ** 3 as our color space which allows ~8 billion \
                                                     different colors/objects. This should be enough.
        :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
        :return: dict of lists of segmaps and (for instance segmentation) segcolormaps
        """
        with Utility.UndoAfterExecution():
            RendererUtility.init()
            RendererUtility.set_samples(1)
            RendererUtility.set_adaptive_sampling(0)
            RendererUtility.set_denoiser(None)
            RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

            # Get objects with meshes (i.e. not lights or cameras)
            objs_with_mats = get_all_blender_mesh_objects()

            colors, num_splits_per_dimension, used_objects = \
                SegMapRendererUtility._colorize_objects_for_instance_segmentation(objs_with_mats,
                                                                                  use_alpha_channel,
                                                                                  render_colorspace_size_per_dimension)

            bpy.context.scene.cycles.filter_width = 0.0

            if use_alpha_channel:
                MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=False)

            # Determine path for temporary and for final output
            temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
            final_segmentation_file_path = os.path.join(output_dir, file_prefix)

            RendererUtility.set_output_format("OPEN_EXR", 16)
            RendererUtility.render(temp_dir, "seg_", None)

            # Find optimal dtype of output based on max index
            for dtype in [np.uint8, np.uint16, np.uint32]:
                optimal_dtype = dtype
                if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                    break
            if used_default_values is None:
                used_default_values = {}
            elif 'class' in used_default_values:
                used_default_values['cp_category_id'] = used_default_values['class']

            if isinstance(used_attributes, str):
                # only one result is requested
                result_channels = 1
                used_attributes = [used_attributes]
            elif isinstance(used_attributes, list):
                result_channels = len(used_attributes)
            else:
                raise Exception("The type of this is not supported here: {}".format(used_attributes))

            save_in_csv_attributes = {}
            # define these up front for the case where no frames are rendered
            there_was_an_instance_rendering = False
            list_of_used_attributes = []

            # Check if stereo is enabled
            if bpy.context.scene.render.use_multiview:
                suffixes = ["_L", "_R"]
            else:
                suffixes = [""]

            # After rendering
            for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):  # for each rendered frame
                for suffix in suffixes:
                    file_path = temporary_segmentation_file_path + ("%04d" % frame) + suffix + ".exr"
                    segmentation = load_image(file_path)
                    print(file_path, segmentation.shape)

                    segmap = Utility.map_back_from_equally_spaced_equidistant_values(segmentation,
                                                                                     num_splits_per_dimension,
                                                                                     render_colorspace_size_per_dimension)
                    segmap = segmap.astype(optimal_dtype)

                    used_object_ids = np.unique(segmap)
                    max_id = np.max(used_object_ids)
                    if max_id >= len(used_objects):
                        raise Exception("There are more object colors than there are objects")
                    combined_result_map = []
                    there_was_an_instance_rendering = False
                    list_of_used_attributes = []
                    used_channels = []
                    for channel_id in range(result_channels):
                        resulting_map = np.empty((segmap.shape[0], segmap.shape[1]))
                        was_used = False
                        current_attribute = used_attributes[channel_id]
                        org_attribute = current_attribute

                        # if the class is used the category_id attribute is evaluated
                        if current_attribute == "class":
                            current_attribute = "cp_category_id"
                        # in the instance case the resulting ids are directly used
                        if current_attribute == "instance":
                            there_was_an_instance_rendering = True
                            resulting_map = segmap
                            was_used = True
                            # a non default value was also used
                            non_default_value_was_used = True
                        else:
                            if current_attribute != "cp_category_id":
                                list_of_used_attributes.append(current_attribute)
                            # for the current attribute remove cp_ and _csv, if present
                            used_attribute = current_attribute
                            if used_attribute.startswith("cp_"):
                                used_attribute = used_attribute[len("cp_"):]
                            # check if a default value was specified
                            default_value_set = False
                            if current_attribute in used_default_values or used_attribute in used_default_values:
                                default_value_set = True
                                if current_attribute in used_default_values:
                                    default_value = used_default_values[current_attribute]
                                elif used_attribute in used_default_values:
                                    default_value = used_default_values[used_attribute]
                            last_state_save_in_csv = None
                            # track whether any non-default value was used, so attributes
                            # that only ever received the default can be detected
                            non_default_value_was_used = False
                            # iterate over all object ids
                            for object_id in used_object_ids:
                                is_default_value = False
                                # get the corresponding object via the id
                                current_obj = used_objects[object_id]
                                # if the current obj has an attribute with that name -> get it
                                if hasattr(current_obj, used_attribute):
                                    used_value = getattr(current_obj, used_attribute)
                                # if the current object has a custom property with that name -> get it
                                elif current_attribute.startswith("cp_") and used_attribute in current_obj:
                                    used_value = current_obj[used_attribute]
                                elif current_attribute.startswith("cf_"):
                                    if current_attribute == "cf_basename":
                                        used_value = current_obj.name
                                        if "." in used_value:
                                            used_value = used_value[:used_value.rfind(".")]
                                elif default_value_set:
                                    # if none of the above applies use the default value
                                    used_value = default_value
                                    is_default_value = True
                                else:
                                    # the requested current_attribute is neither an attribute
                                    # nor a custom property, and no default value is stored,
                                    # so an exception is raised
                                    raise Exception("The obj: {} does not have the "
                                                    "attribute: {}, stripped: {}. Maybe try a default "
                                                    "value.".format(current_obj.name, current_attribute, used_attribute))

                                # check if the value should be saved as an image or in the csv file
                                save_in_csv = False
                                try:
                                    resulting_map[segmap == object_id] = used_value
                                    was_used = True
                                    if not is_default_value:
                                        non_default_value_was_used = True
                                    # save everything which is not instance also in the .csv
                                    if current_attribute != "instance":
                                        save_in_csv = True
                                except ValueError:
                                    save_in_csv = True

                                if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                    raise Exception("During creating the mapping, the saving to an image or a csv file "
                                                    "switched, this might indicated that the used default value, does "
                                                    "not have the same type as the returned value, "
                                                    "for: {}".format(current_attribute))
                                last_state_save_in_csv = save_in_csv
                                if save_in_csv:
                                    if object_id in save_in_csv_attributes:
                                        save_in_csv_attributes[object_id][used_attribute] = used_value
                                    else:
                                        save_in_csv_attributes[object_id] = {used_attribute: used_value}
                        if was_used and non_default_value_was_used:
                            used_channels.append(org_attribute)
                            combined_result_map.append(resulting_map)

                    fname = final_segmentation_file_path + ("%04d" % frame) + suffix
                    # combine all resulting images to one image
                    resulting_map = np.stack(combined_result_map, axis=2)
                    # remove the unneeded third dimension
                    if resulting_map.shape[2] == 1:
                        resulting_map = resulting_map[:, :, 0]
                    np.save(fname, resulting_map)

            if not there_was_an_instance_rendering:
                if len(list_of_used_attributes) > 0:
                    raise Exception("There were attributes specified in the may_by, which could not be saved as "
                                    "there was no \"instance\" may_by key used. This is true for this/these "
                                    "keys: {}".format(", ".join(list_of_used_attributes)))
                # if there was no instance rendering no .csv file is generated!
                # delete all saved infos about .csv
                save_in_csv_attributes = {}

            # write color mappings to file
            if save_in_csv_attributes:
                csv_file_path = os.path.join(output_dir, segcolormap_output_file_prefix + ".csv")
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # get from the first element the used field names
                    fieldnames = ["idx"]
                    # get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in used_channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # save for each object all values in one row
                    for obj_idx, object_element in save_in_csv_attributes.items():
                        object_element["idx"] = obj_idx
                        for i, channel_name in enumerate(used_channels):
                            object_element["channel_{}".format(channel_name)] = i
                        writer.writerow(object_element)

        Utility.register_output(output_dir, file_prefix, output_key, ".npy", "2.0.0")
        load_keys = {output_key}
        if save_in_csv_attributes:
            Utility.register_output(output_dir,
                                    segcolormap_output_file_prefix,
                                    segcolormap_output_key,
                                    ".csv",
                                    "2.0.0",
                                    unique_for_camposes=False)
            load_keys.add(segcolormap_output_key)
        
        return WriterUtility.load_registered_outputs(load_keys) if return_data else {}
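
A hedged sketch of inspecting the written outputs; it assumes the default prefixes, frame 0, and `output_dir` as passed to render:

    import csv
    import os
    import numpy as np

    segmap = np.load(os.path.join(output_dir, "segmap_0000.npy"))
    print(segmap.shape, segmap.dtype)
    # the csv maps instance ids to the attribute values saved per object
    with open(os.path.join(output_dir, "class_inst_col_map.csv")) as f:
        for row in csv.DictReader(f):
            print(row)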
Example #14
    def run(self):
        if self._avoid_output:
            print("Avoid output is on, no output produced!")
            return

        if self._append_to_existing_output:
            frame_offset = 0
            # Look for hdf5 file with highest index
            for path in os.listdir(self._output_dir):
                if path.endswith(".hdf5"):
                    index = path[:-len(".hdf5")]
                    if index.isdigit():
                        frame_offset = max(frame_offset, int(index) + 1)
        else:
            frame_offset = 0

        # Go through all frames
        for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):

            # Create output hdf5 file
            hdf5_path = os.path.join(self._output_dir, str(frame + frame_offset) + ".hdf5")
            with h5py.File(hdf5_path, "w") as f:

                if not GlobalStorage.is_in_storage("output"):
                    print("No output was designed in prior models!")
                    return
                # Go through all the output types
                print("Merging data for frame " + str(frame) + " into " + hdf5_path)

                for output_type in GlobalStorage.get("output"):
                    # Build path (path attribute is format string)
                    file_path = output_type["path"]
                    if '%' in file_path:
                        file_path = file_path % frame

                    # Check if file exists
                    if not os.path.exists(file_path):
                        # If not try stereo suffixes
                        path_l, path_r = WriterUtility._get_stereo_path_pair(file_path)
                        if not os.path.exists(path_l) or not os.path.exists(path_r):
                            raise Exception("File not found: " + file_path)
                        else:
                            use_stereo = True
                    else:
                        use_stereo = False

                    if use_stereo:
                        path_l, path_r = WriterUtility._get_stereo_path_pair(file_path)

                        img_l, new_key, new_version = self._load_and_postprocess(path_l, output_type["key"],
                                                                                   output_type["version"])
                        img_r, new_key, new_version = self._load_and_postprocess(path_r, output_type["key"],
                                                                                   output_type["version"])

                        if self.config.get_bool("stereo_separate_keys", False):
                            WriterUtility._write_to_hdf_file(f, new_key + "_0", img_l)
                            WriterUtility._write_to_hdf_file(f, new_key + "_1", img_r)
                        else:
                            data = np.array([img_l, img_r])
                            WriterUtility._write_to_hdf_file(f, new_key, data)

                    else:
                        data, new_key, new_version = self._load_and_postprocess(file_path, output_type["key"],
                                                                                output_type["version"])

                        WriterUtility._write_to_hdf_file(f, new_key, data)

                    WriterUtility._write_to_hdf_file(f, new_key + "_version", np.string_([new_version]))

                blender_proc_version = Utility.get_current_version()
                if blender_proc_version:
                    WriterUtility._write_to_hdf_file(f, "blender_proc_version", np.string_(blender_proc_version))