Example #1
    def __init__(self,
                 config_path,
                 args,
                 working_dir,
                 temp_dir,
                 should_perform_clean_up=True,
                 avoid_rendering=False):
        """
        Inits the pipeline by calling the constructors of all modules mentioned in the config.

        :param config_path: path to the config
        :param args: arguments which were provided to the run.py and are specified in the config file
        :param working_dir: the current working dir, usually the place where the run.py sits
        :param temp_dir: the directory in which to put temporary files during the execution
        :param should_perform_clean_up: if the generated temp files should be deleted at the end
        :param avoid_rendering: if this is True, no renderers are executed (except the RgbRenderer, \
                                where only the rendering call to blender is avoided); this makes it \
                                possible to debug properly
        """
        Utility.working_dir = working_dir

        # Clean up example scene or scene created by last run when debugging pipeline inside blender
        if should_perform_clean_up:
            self._cleanup()

        config_parser = ConfigParser(silent=True)
        config = config_parser.parse(Utility.resolve_path(config_path), args)

        if avoid_rendering:
            GlobalStorage.add_to_config_before_init("avoid_rendering", True)

        Utility.temp_dir = Utility.resolve_path(temp_dir)
        os.makedirs(Utility.temp_dir, exist_ok=True)

        self.modules = Utility.initialize_modules(config["modules"])
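For orientation, a minimal usage sketch of this constructor; the class name `Pipeline`, the import path, and the config path are assumptions made for illustration and are not shown in the snippet above.

# Hypothetical usage sketch (class name, import path and config path are assumed):
import os
from src.main.Pipeline import Pipeline  # assumed module path

pipeline = Pipeline(config_path="examples/basic/config.yaml",  # assumed example config
                    args=[],
                    working_dir=os.getcwd(),
                    temp_dir="/tmp/blenderproc_debug",
                    should_perform_clean_up=True,
                    avoid_rendering=True)  # skip rendering to debug module setup only
print([type(module).__name__ for module in pipeline.modules])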
Example #2
    def __init__(self,
                 config_path,
                 args,
                 working_dir,
                 temp_dir,
                 avoid_output=False):
        """
        Inits the pipeline by calling the constructors of all modules mentioned in the config.

        :param config_path: path to the config
        :param args: arguments which were provided to the run.py and are specified in the config file
        :param working_dir: the current working dir, usually the place where the run.py sits
        :param temp_dir: the directory in which to put temporary files during the execution
        :param avoid_output: if this is True, all modules (renderers and writers) skip producing output; \
                             this makes it possible to debug properly.
        """
        Utility.working_dir = working_dir

        config_parser = ConfigParser(silent=True)
        config = config_parser.parse(Utility.resolve_path(config_path), args)

        # Setup pip packages specified in config
        SetupUtility.setup_pip(config["setup"]["pip"] if "pip" in
                               config["setup"] else [])

        if avoid_output:
            GlobalStorage.add_to_config_before_init("avoid_output", True)

        Utility.temp_dir = Utility.resolve_path(temp_dir)
        os.makedirs(Utility.temp_dir, exist_ok=True)

        self.modules = Utility.initialize_modules(config["modules"])
Example #3
    def run(self):
        """ Writes coco annotations in the following steps:
        1. Locate the seg images
        2. Locate the rgb maps
        3. Locate the seg maps
        4. Read color mappings
        5. For each frame write the coco annotation
        """
        if self._avoid_output:
            print("Avoid output is on, no output produced!")
            return

        # Check if a label mapping is registered which could be used for naming the categories.
        if GlobalStorage.is_in_storage("label_mapping"):
            label_mapping = GlobalStorage.get("label_mapping")
        else:
            label_mapping = None

        CocoWriterUtility.write(self._determine_output_dir(False),
                                mask_encoding_format=self.mask_encoding_format,
                                supercategory=self._supercategory,
                                append_to_existing_output=self._append_to_existing_output,
                                segmap_output_key=self.segmap_output_key,
                                segcolormap_output_key=self.segcolormap_output_key,
                                rgb_output_key=self.rgb_output_key,
                                label_mapping=label_mapping)
Example #4
    def _write_distance_to_file(self):
        """ Configures the renderer, s.t. the z-values computed for the next rendering are directly written to file. """

        # z-buffer/mist settings
        distance_start = self.config.get_float("distance_start", 0.1)
        distance_range = self.config.get_float("distance_range", 25.0)
        GlobalStorage.add("renderer_distance_end",
                          distance_start + distance_range)

        bpy.context.scene.render.use_compositing = True
        bpy.context.scene.use_nodes = True

        tree = bpy.context.scene.node_tree
        links = tree.links
        # Use existing render layer
        render_layer_node = Utility.get_the_one_node_with_type(
            tree.nodes, 'CompositorNodeRLayers')

        # use either mist rendering or the z-buffer:
        # mist interpolates over the samples taken per pixel,
        # while the z-buffer only returns the closest object per pixel
        use_mist_as_distance = self.config.get_bool("use_mist_distance", True)
        if use_mist_as_distance:
            bpy.context.scene.world.mist_settings.start = distance_start
            bpy.context.scene.world.mist_settings.depth = distance_range
            bpy.context.scene.world.mist_settings.falloff = self.config.get_string(
                "distance_falloff", "LINEAR")

            bpy.context.view_layer.use_pass_mist = True  # Enable distance pass
            # Create a mapper node to map from 0-1 to SI units
            mapper_node = tree.nodes.new("CompositorNodeMapRange")
            links.new(render_layer_node.outputs["Mist"],
                      mapper_node.inputs['Value'])
            # map the values 0-1 to range distance_start to distance_range
            mapper_node.inputs['To Min'].default_value = distance_start
            mapper_node.inputs[
                'To Max'].default_value = distance_start + distance_range
            final_output = mapper_node.outputs['Value']
        else:
            bpy.context.view_layer.use_pass_z = True
            # add min and max nodes to perform the clipping to the desired range
            min_node = tree.nodes.new("CompositorNodeMath")
            min_node.operation = "MINIMUM"
            min_node.inputs[1].default_value = distance_start + distance_range
            links.new(render_layer_node.outputs["Depth"], min_node.inputs[0])
            max_node = tree.nodes.new("CompositorNodeMath")
            max_node.operation = "MAXIMUM"
            max_node.inputs[1].default_value = distance_start
            links.new(min_node.outputs["Value"], max_node.inputs[0])
            final_output = max_node.outputs["Value"]

        output_file = tree.nodes.new("CompositorNodeOutputFile")
        output_file.base_path = self._determine_output_dir()
        output_file.format.file_format = "OPEN_EXR"
        output_file.file_slots.values()[0].path = self.config.get_string(
            "distance_output_file_prefix", "distance_")

        # Feed the Z-Buffer or Mist output of the render layer to the input of the file IO layer
        links.new(final_output, output_file.inputs['Image'])
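The map-range node above converts the normalized mist value into a metric distance. A minimal plain-Python sketch of that mapping (not Blender API), which can help when sanity-checking the written .exr values:

# Equivalent of the CompositorNodeMapRange configuration above:
# a mist value in [0, 1] is mapped to [distance_start, distance_start + distance_range].
def mist_to_distance(mist, distance_start=0.1, distance_range=25.0):
    return distance_start + mist * distance_range

print(mist_to_distance(0.5))  # 0.1 + 0.5 * 25.0 = 12.6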
Example #5
    def __init__(self, config):
        Module.__init__(self, config)

        # setting up the GlobalStorage
        global_config = Config(self.config.get_raw_dict("global", {}))
        GlobalStorage.init_global(global_config)

        # call the init again to make sure all values from the global config were read correctly, too
        self._default_init()
Example #6
    def enable_distance_output(output_dir: Union[str, None] = None, file_prefix: str = "distance_",
                               output_key: str = "distance", distance_start: float = 0.1, distance_range: float = 25.0,
                               distance_falloff: str = "LINEAR"):
        """ Enables writing distance images.

        Distance images will be written in the form of .exr files during the next rendering.

        :param output_dir: The directory to write files to; if this is None, the temporary directory is used.
        :param file_prefix: The prefix to use for writing the files.
        :param output_key: The key to use for registering the distance output.
        :param distance_start: Starting distance of the distance measurement, measured from the camera.
        :param distance_range: Total range in which the distance is measured: \
                               distance_end = distance_start + distance_range.
        :param distance_falloff: Type of transition used to fade distance. Available: [LINEAR, QUADRATIC, INVERSE_QUADRATIC]
        """
        if output_dir is None:
            output_dir = Utility.get_temporary_directory()

        bpy.context.scene.render.use_compositing = True
        bpy.context.scene.use_nodes = True
        GlobalStorage.add("renderer_distance_end", distance_start + distance_range)

        tree = bpy.context.scene.node_tree
        links = tree.links
        # Use existing render layer
        render_layer_node = Utility.get_the_one_node_with_type(tree.nodes, 'CompositorNodeRLayers')

        # Set mist pass limits
        bpy.context.scene.world.mist_settings.start = distance_start
        bpy.context.scene.world.mist_settings.depth = distance_range
        bpy.context.scene.world.mist_settings.falloff = distance_falloff

        bpy.context.view_layer.use_pass_mist = True  # Enable distance pass
        # Create a mapper node to map from 0-1 to SI units
        mapper_node = tree.nodes.new("CompositorNodeMapRange")
        links.new(render_layer_node.outputs["Mist"], mapper_node.inputs['Value'])
        # map the values 0-1 to range distance_start to distance_range
        mapper_node.inputs['To Min'].default_value = distance_start
        mapper_node.inputs['To Max'].default_value = distance_start + distance_range
        final_output = mapper_node.outputs['Value']

        # Build output node
        output_file = tree.nodes.new("CompositorNodeOutputFile")
        output_file.base_path = output_dir
        output_file.format.file_format = "OPEN_EXR"
        output_file.file_slots.values()[0].path = file_prefix

        # Feed the Z-Buffer or Mist output of the render layer to the input of the file IO layer
        links.new(final_output, output_file.inputs['Image'])

        Utility.add_output_entry({
            "key": output_key,
            "path": os.path.join(output_dir, file_prefix) + "%04d" + ".exr",
            "version": "2.0.0",
            "trim_redundant_channels": True
        })
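A usage sketch of the function above; the output directory is an arbitrary example value and the enclosing class (presumably a renderer utility) is not shown in the snippet.

# Sketch: enable metric distance output before triggering the next rendering.
enable_distance_output(output_dir="/tmp/distance_out",  # assumed example directory
                       distance_start=0.1,
                       distance_range=25.0,
                       distance_falloff="LINEAR")
# Frames are then written as distance_%04d.exr files and registered under the output key "distance".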
Example #7
    def get_registered_outputs() -> List[Dict[str, Any]]:
        """ Returns a list of outputs which were registered.

        :return: A list of dicts containing all information registered for the outputs. 
        """
        outputs = []
        if GlobalStorage.is_in_storage("output"):
            outputs = GlobalStorage.get("output")
        
        return outputs
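A small usage sketch of the helper above; it assumes the function is reachable as shown and that each entry carries the "key" and "path" fields registered by `add_output_entry` in the other snippets.

# Sketch: list all registered outputs and their file path patterns.
for entry in get_registered_outputs():
    print(entry["key"], "->", entry["path"])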
Example #8
    def run(self):
        label_mapping = LabelIdMapping.from_csv(
            Utility.resolve_path(
                os.path.join('resources', 'id_mappings', 'nyu_idset.csv')))
        # Add label mapping to global storage, s.t. it could be used for naming semantic segmentations.
        GlobalStorage.set("label_mapping", label_mapping)

        loaded_objects = SuncgLoader.load(self.house_path, label_mapping,
                                          self.suncg_dir)
        self._set_properties(loaded_objects)
Example #9
    def find_registered_output_by_key(key):
        """ Returns the output which was registered with the given key.

        :param key: The output key to look for.
        :return: The dict containing all information registered for that output. If no output with the given key exists, None is returned.
        """
        if GlobalStorage.is_in_storage("output"):
            for output in GlobalStorage.get("output"):
                if output["key"] == key:
                    return output

        return None
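A usage sketch of the lookup above, using the "distance" key registered by `enable_distance_output` in a previous example; `Utility.find_registered_output_by_key` is how the other snippets call it.

# Sketch: find where the distance maps were registered (key "distance" as above).
distance_output = Utility.find_registered_output_by_key("distance")
if distance_output is not None:
    print("Distance files follow the pattern:", distance_output["path"])
else:
    print("No distance output has been registered yet.")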
Example #10
    def __init__(self, config):
        Module.__init__(self, config)

        # Clean up example scene or scene created by last run when debugging pipeline inside blender
        Initializer.cleanup()

        # setting up the GlobalStorage
        global_config = Config(self.config.get_raw_dict("global", {}))
        GlobalStorage.init_global(global_config)

        # call the init again to make sure all values from the global config were read correctly, too
        self._default_init()
Example #11
    def run(self):
        if len(self._obj_ids) > 0:
            bpy.context.scene.world.light_settings.use_ambient_occlusion = True  # turn AO on
            # bpy.context.scene.world.light_settings.ao_factor = 0.5  # set it to 0.5
            used_shapenet_objs = [_ for _ in self._obj_list if _['obj_id'] in self._obj_ids]
        else:
            used_shapenet_objs = random.choices(self._obj_list, k = self._num_objects)
        
        for i, selected_obj_info in enumerate(used_shapenet_objs):
            selected_obj_path = os.path.join(
                self._shapenet_path, 
                selected_obj_info['shapenet_synset_id'], 
                selected_obj_info['shapenet_obj_id'], 
                "models", "model_normalized.obj"
            )

            loaded_obj = Utility.import_objects(selected_obj_path)
            obj_id_output = selected_obj_info['obj_id'] + OBJECT_ID_OFFSET

            for obj in loaded_obj:
                obj_name = "obj_%06d" % obj_id_output
                obj.name = obj_name
                obj['category_id'] = obj_id_output
                obj.scale = (self._object_scale, self._object_scale, self._object_scale)

                print("len(obj.material_slots)", len(obj.material_slots))
                cc_asset_names = selected_obj_info['cc_asset_names']
                for j in range(len(obj.material_slots)):
                    if j >= len(cc_asset_names):
                        break
                    mat = obj.material_slots[j]
                    mat_folder_name = cc_asset_names[j]
                    cc_mat = self._load_cc_mat(mat_folder_name)
                    mat.material = cc_mat

                # Remap the object uv coordinates
                bpy.context.view_layer.objects.active = obj
                bpy.ops.object.editmode_toggle()
                bpy.ops.uv.sphere_project()
                bpy.ops.object.editmode_toggle()

                # # Save the object meshes to a .obj file if not already done so.
                # save_obj_path = os.path.join(self._output_dir, "objects", "%s.obj" % i)
                # if not os.path.exists(os.path.dirname(save_obj_path)):
                #     os.makedirs(os.path.dirname(save_obj_path))
                # bpy.ops.export_scene.obj(filepath=save_obj_path, use_selection=True)

                obj_diameter = (obj.dimensions[0]**2 + obj.dimensions[1]**2 + obj.dimensions[2]**2) ** (0.5)
                GlobalStorage.set("obj_diamater", obj_diameter)
                
            self._set_properties(loaded_obj)
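The stored value is the length of the object's bounding-box diagonal. A minimal sketch of that computation in isolation (illustrative names only; note that the storage key is spelled "obj_diamater" consistently across these snippets):

from math import sqrt

def bounding_box_diagonal(dimensions):
    # sqrt(dx**2 + dy**2 + dz**2), as computed from obj.dimensions above
    dx, dy, dz = dimensions
    return sqrt(dx ** 2 + dy ** 2 + dz ** 2)

print(bounding_box_diagonal((1.0, 2.0, 2.0)))  # -> 3.0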
Example #12
    def run(self):
        label_mapping = LabelIdMapping.from_csv(self.mapping_file)
        # Add label mapping to global storage, s.t. it could be used for naming semantic segmentations.
        GlobalStorage.set("label_mapping", label_mapping)

        loaded_objects = Front3DLoader.load(
            json_path=self.config.get_string("json_path"),
            future_model_path=self.config.get_string("3D_future_model_path"),
            front_3D_texture_path=self.config.get_string(
                "3D_front_texture_path"),
            label_mapping=label_mapping,
            ceiling_light_strength=self.config.get_float(
                "ceiling_light_strength", 0.8),
            lamp_light_strength=self.config.get_float("lamp_light_strength",
                                                      7.0))
        self._set_properties(loaded_objects)
Example #13
    def _get_value(self,
                   name,
                   block=None,
                   allow_invoke_provider=False,
                   global_check=True):
        """ Returns the value of the parameter with the given name inside the given block.

        Basically just a recursive dict lookup, making sure the parameter exists, otherwise an error is thrown.

        :param name: The name of the parameter. "/" can be used to access nested parameters (e.g. "render/iterations" results in ["render"]["iterations"]).
        :param block: A dict containing the configuration. If None, the whole data of this config object will be used.
        :param allow_invoke_provider: If set to True, then a provider is automatically invoked if the parameter value is a dict.
        :param global_check: If True, the global config is checked as a fallback when the parameter is not found in this config.
        :return: The value of the parameter.
        """
        if block is None:
            block = self.data

        if "/" in name:
            delimiter_pos = name.find("/")
            block_name = name[:delimiter_pos]
            if block_name in block and type(block[block_name]) is dict:
                return self._get_value(name[delimiter_pos + 1:],
                                       block[block_name],
                                       allow_invoke_provider)
            else:
                raise NotFoundError("No such configuration block '" +
                                    block_name + "'!")
        else:
            if name in block:

                # Check for whether a provider should be invoked
                if allow_invoke_provider and type(block[name]) is dict:
                    block[
                        name] = Utility.Utility.build_provider_based_on_config(
                            block[name])

                # If the parameter is set to a provider object, call the provider to return the parameter value
                if isinstance(block[name], Provider):
                    return block[name].run()
                else:
                    return block[name]
            elif global_check and GlobalStorage.has_param(name):
                # this might also throw a NotFoundError
                return GlobalStorage.get_global_config()._get_value(
                    name, None, allow_invoke_provider, global_check=False)
            else:
                raise NotFoundError("No such configuration '" + name + "'!")
Example #14
    def run(self):
        """
        Run the module, loads all the objects and set the properties correctly (including the category_id)
        """
        label_mapping = LabelIdMapping.from_csv(
            Utility.resolve_path(
                os.path.join('resources', 'id_mappings', 'nyu_idset.csv')))
        # Add label mapping to global storage, s.t. it could be used for naming semantic segmentations.
        GlobalStorage.set("label_mapping", label_mapping)
        # load the objects (use_image_search=False is used, as some image names have a "/" prefix,
        # which would lead blender to search the whole root directory recursively!)
        loaded_objects = SceneNetLoader.load(
            file_path=self._file_path,
            texture_folder=self._texture_folder,
            label_mapping=label_mapping,
            unknown_texture_folder=self._unknown_texture_folder)

        # add custom properties
        self._set_properties(loaded_objects)
Example #15
    def _write_depth_to_file(self):
        """ Configures the renderer, s.t. the z-values computed for the next rendering are directly written to file. """

        # Mist settings
        depth_start = self.config.get_float("depth_start", 0.1)
        depth_range = self.config.get_float("depth_range", 25.0)
        GlobalStorage.add("renderer_depth_end", depth_start + depth_range)
        bpy.context.scene.world.mist_settings.start = depth_start
        bpy.context.scene.world.mist_settings.depth = depth_range
        bpy.context.scene.world.mist_settings.falloff = self.config.get_string(
            "depth_falloff", "LINEAR")

        bpy.context.scene.render.use_compositing = True
        bpy.context.scene.use_nodes = True
        bpy.context.view_layer.use_pass_mist = True  # Enable depth pass

        tree = bpy.context.scene.node_tree
        links = tree.links

        # Use existing render layer
        render_layer_node = tree.nodes.get('Render Layers')
        # Create a mapper node to map from 0-1 to SI units
        mapper_node = tree.nodes.new("CompositorNodeMapRange")

        links.new(render_layer_node.outputs["Mist"],
                  mapper_node.inputs['Value'])
        # map the values 0-1 to range depth_start to depth_range
        mapper_node.inputs['To Min'].default_value = depth_start
        mapper_node.inputs['To Max'].default_value = depth_start + depth_range

        output_file = tree.nodes.new("CompositorNodeOutputFile")
        output_file.base_path = self._determine_output_dir()
        output_file.format.file_format = "OPEN_EXR"
        output_file.file_slots.values()[0].path = self.config.get_string(
            "depth_output_file_prefix", "depth_")

        # Feed the Mist output of the render layer to the input of the file IO layer
        links.new(mapper_node.outputs['Value'], output_file.inputs['Image'])
Example #16
    def add_output_entry(output):
        """ Registers the given output in the scene's custom properties

        :param output: A dict containing key and path of the new output type.
        """
        if GlobalStorage.is_in_storage("output"):
            if not Utility.output_already_registered(output, GlobalStorage.get("output")): # E.g. multiple camera samplers
                GlobalStorage.get("output").append(output)
        else:
            GlobalStorage.set("output", [output])
Example #17
    def run(self):
        """
        :return: Sampled value. Type: Mathutils Vector
        """

        # Radius of the sphere.
        radius = self.config.get_float(
            "radius", GlobalStorage.get("obj_diamater")) * 1.25

        position = mathutils.Vector()
        current = self._count // self._repeat
        # print("UniformSphere current:", current)
        for i in range(3):
            position[i] = self._total_points[current,
                                             i] * radius + self._center[i]
        self._count += 1

        return position
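A NumPy sketch of the sampling rule above: precomputed unit-sphere points are scaled by the radius and shifted to the center. How `_total_points` is precomputed is not shown, so random unit-sphere points are used here as a stand-in.

import numpy as np

unit_points = np.random.randn(100, 3)
unit_points /= np.linalg.norm(unit_points, axis=1, keepdims=True)  # points on the unit sphere

radius = 1.25 * 2.0          # e.g. 1.25 * stored object diameter ("obj_diamater")
center = np.zeros(3)
positions = unit_points * radius + center  # same rule as position[i] above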
Example #18
    def run(self):
        if self._avoid_rendering:
            print("Avoid rendering is on, no output produced!")
            return

        if self.config.get_bool("append_to_existing_output", False):
            frame_offset = 0
            # Look for hdf5 file with highest index
            for path in os.listdir(self._determine_output_dir(False)):
                if path.endswith(".hdf5"):
                    index = path[:-len(".hdf5")]
                    if index.isdigit():
                        frame_offset = max(frame_offset, int(index) + 1)
        else:
            frame_offset = 0

        # Go through all frames
        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):

            # Create output hdf5 file
            hdf5_path = os.path.join(self._determine_output_dir(False),
                                     str(frame + frame_offset) + ".hdf5")
            with h5py.File(hdf5_path, "w") as f:

                if not GlobalStorage.is_in_storage("output"):
                    print("No output was designed in prior models!")
                    return
                # Go through all the output types
                print("Merging data for frame " + str(frame) + " into " +
                      hdf5_path)

                for output_type in GlobalStorage.get("output"):
                    # Build path (path attribute is format string)
                    file_path = output_type["path"]
                    if '%' in file_path:
                        file_path = file_path % frame

                    # Check if file exists
                    if not os.path.exists(file_path):
                        # If not try stereo suffixes
                        path_l, path_r = self._get_stereo_path_pair(file_path)
                        if not os.path.exists(path_l) or not os.path.exists(
                                path_r):
                            raise Exception("File not found: " + file_path)
                        else:
                            use_stereo = True
                    else:
                        use_stereo = False

                    if use_stereo:
                        path_l, path_r = self._get_stereo_path_pair(file_path)

                        img_l, new_key, new_version = self._load_and_postprocess(
                            path_l, output_type["key"], output_type["version"])
                        img_r, new_key, new_version = self._load_and_postprocess(
                            path_r, output_type["key"], output_type["version"])

                        if self.config.get_bool("stereo_separate_keys", False):
                            self._write_to_hdf_file(f, new_key + "_0", img_l)
                            self._write_to_hdf_file(f, new_key + "_1", img_r)
                        else:
                            data = np.array([img_l, img_r])
                            self._write_to_hdf_file(f, new_key, data)

                    else:
                        data, new_key, new_version = self._load_and_postprocess(
                            file_path, output_type["key"],
                            output_type["version"])

                        self._write_to_hdf_file(f, new_key, data)

                    self._write_to_hdf_file(f, new_key + "_version",
                                            np.string_([new_version]))
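A read-back sketch for the .hdf5 files produced above, using only standard h5py calls; the file path is an assumed example.

import h5py
import numpy as np

# Sketch: inspect one of the written frames (path is an example).
with h5py.File("output/0.hdf5", "r") as f:
    for key in f.keys():
        data = np.array(f[key])
        print(key, data.dtype, data.shape)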
Example #19
    def run(self):
        """ Does the stereo global matching in the following steps:
        1. Collect the camera object and its state.
        2. For each frame, load the left and right images and call the `sgm()` method.
        3. Write the results to a numpy file.
        """
        if self._avoid_output:
            print("Avoid output is on, no output produced!")
            return

        if GlobalStorage.is_in_storage("renderer_distance_end"):
            self.depth_max = GlobalStorage.get("renderer_distance_end")
        else:
            raise RuntimeError(
                "A distance rendering has to be executed before this module is executed, "
                "else the `renderer_distance_end` is not set!")

        self.rgb_output_path = Utility.find_registered_output_by_key(
            self.rgb_output_key)["path"]

        # Collect camera and camera object
        cam_ob = bpy.context.scene.camera
        cam = cam_ob.data

        self.width = bpy.context.scene.render.resolution_x
        self.height = bpy.context.scene.render.resolution_y
        print('Resolution: {}, {}'.format(self.width, self.height))

        self.baseline = cam.stereo.interocular_distance
        if not self.baseline:
            raise Exception(
                "Stereo parameters are not set. Make sure to enable RGB stereo rendering before this module."
            )

        if self.config.get_bool("infer_focal_length_from_fov", False):
            fov = cam.angle_x if cam.angle_x else cam.angle
            if not fov:
                raise Exception("Could not obtain field of view angle")
            self.focal_length = float(
                (1.0 / tan(fov / 2.0)) * (float(self.width) / 2.0))
        else:
            self.focal_length = self.config.get_float("focal_length", 0.0)
            if self.focal_length == 0.0:
                raise Exception(
                    "Focal length set to 0. This is either intentional or because no value was set by the user. Either way, this needs to be corrected by setting a value > 0 or enabling 'infer_focal_length_from_fov'."
                )

        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):
            path_split = self.rgb_output_path.split(".")
            path_l = "{}_L.{}".format(path_split[0], path_split[1])
            path_r = "{}_R.{}".format(path_split[0], path_split[1])

            imgL = load_image(path_l % frame)
            imgR = load_image(path_r % frame)

            depth, disparity = self.sgm(imgL, imgR)

            np.save(
                os.path.join(self.output_dir, "stereo-depth_%04d") % frame,
                depth)

            if self.config.get_bool("output_disparity", False):
                np.save(
                    os.path.join(self.output_dir, "disparity_%04d") % frame,
                    disparity)
        Utility.register_output(self._determine_output_dir(), "stereo-depth_",
                                "stereo-depth", ".npy", "1.0.0")
        if self.config.get_bool("output_disparity", False):
            Utility.register_output(self._determine_output_dir(), "disparity_",
                                    "disparity", ".npy", "1.0.0")
Example #20
    def enable_distance_output(output_dir,
                               file_prefix="distance_",
                               output_key="distance",
                               use_mist_as_distance=True,
                               distance_start=0.1,
                               distance_range=25.0,
                               distance_falloff="LINEAR"):
        """ Enables writing distance images.

        Distance images will be written in the form of .exr files during the next rendering.

        :param output_dir: The directory to write files to.
        :param file_prefix: The prefix to use for writing the files.
        :param output_key: The key to use for registering the distance output.
        :param use_mist_as_distance: If True, the distance is sampled over several iterations, which is useful for motion blur or soft edges; if this is turned off, only one sample is taken to determine the depth. Default: True.
        :param distance_start: Starting distance of the distance measurement, measured from the camera.
        :param distance_range: Total distance in which the distance is measured. distance_end = distance_start + distance_range.
        :param distance_falloff: Type of transition used to fade distance. Available: [LINEAR, QUADRATIC, INVERSE_QUADRATIC]
        """
        bpy.context.scene.render.use_compositing = True
        bpy.context.scene.use_nodes = True
        GlobalStorage.add("renderer_distance_end",
                          distance_start + distance_range)

        tree = bpy.context.scene.node_tree
        links = tree.links
        # Use existing render layer
        render_layer_node = Utility.get_the_one_node_with_type(
            tree.nodes, 'CompositorNodeRLayers')

        # use either mist rendering or the z-buffer:
        # mist interpolates over the samples taken per pixel,
        # while the z-buffer only returns the closest object per pixel
        if use_mist_as_distance:
            bpy.context.scene.world.mist_settings.start = distance_start
            bpy.context.scene.world.mist_settings.depth = distance_range
            bpy.context.scene.world.mist_settings.falloff = distance_falloff

            bpy.context.view_layer.use_pass_mist = True  # Enable distance pass
            # Create a mapper node to map from 0-1 to SI units
            mapper_node = tree.nodes.new("CompositorNodeMapRange")
            links.new(render_layer_node.outputs["Mist"],
                      mapper_node.inputs['Value'])
            # map the values 0-1 to range distance_start to distance_range
            mapper_node.inputs['To Min'].default_value = distance_start
            mapper_node.inputs[
                'To Max'].default_value = distance_start + distance_range
            final_output = mapper_node.outputs['Value']
        else:
            bpy.context.view_layer.use_pass_z = True
            # add min and max nodes to perform the clipping to the desired range
            min_node = tree.nodes.new("CompositorNodeMath")
            min_node.operation = "MINIMUM"
            min_node.inputs[1].default_value = distance_start + distance_range
            links.new(render_layer_node.outputs["Depth"], min_node.inputs[0])
            max_node = tree.nodes.new("CompositorNodeMath")
            max_node.operation = "MAXIMUM"
            max_node.inputs[1].default_value = distance_start
            links.new(min_node.outputs["Value"], max_node.inputs[0])
            final_output = max_node.outputs["Value"]

        output_file = tree.nodes.new("CompositorNodeOutputFile")
        output_file.base_path = output_dir
        output_file.format.file_format = "OPEN_EXR"
        output_file.file_slots.values()[0].path = file_prefix

        # Feed the Z-Buffer or Mist output of the render layer to the input of the file IO layer
        links.new(final_output, output_file.inputs['Image'])

        Utility.add_output_entry({
            "key": output_key,
            "path": os.path.join(output_dir, file_prefix) + "%04d" + ".exr",
            "version": "2.0.0"
        })
Example #21
    def run(self):
        """ Load BOP data """

        datasets_path = os.path.dirname(self.bop_dataset_path)
        dataset = os.path.basename(self.bop_dataset_path)

        print("bob: {}, dataset_path: {}".format(self.bop_dataset_path,
                                                 datasets_path))
        print("dataset: {}".format(dataset))

        try:
            from bop_toolkit_lib import dataset_params, inout
        except ImportError as error:
            print(
                'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!'
            )
            print('https://github.com/thodan/bop_toolkit')
            raise error

        model_p = dataset_params.get_model_params(
            datasets_path,
            dataset,
            model_type=self.model_type if self.model_type else None)
        cam_p = dataset_params.get_camera_params(
            datasets_path,
            dataset,
            cam_type=self.cam_type if self.cam_type else None)

        try:
            split_p = dataset_params.get_split_params(datasets_path,
                                                      dataset,
                                                      split=self.split)
        except ValueError:
            raise Exception(
                "Wrong path or {} split does not exist in {}.".format(
                    self.split, dataset))

        bpy.context.scene.world["category_id"] = 0
        bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
        bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

        loaded_objects = []

        # only load all/selected objects here, use other modules for setting poses
        # e.g. camera.CameraSampler / object.ObjectPoseSampler
        if self.scene_id == -1:

            # TLESS exception because images are cropped
            if self.bop_dataset_name in ['tless']:
                cam_p['K'][0, 2] = split_p['im_size'][0] / 2
                cam_p['K'][1, 2] = split_p['im_size'][1] / 2

            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'],
                                                       split_p['im_size'][0],
                                                       split_p['im_size'][1])

            obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids']
            # if sampling is enabled
            if self.sample_objects:
                loaded_ids = {}
                loaded_amount = 0
                if self.obj_instances_limit != -1 and len(
                        obj_ids
                ) * self.obj_instances_limit < self.num_of_objs_to_sample:
                    raise RuntimeError(
                        "{}'s {} split contains {} objects, {} object where requested to sample with "
                        "an instances limit of {}. Raise the limit amount or decrease the requested "
                        "amount of objects.".format(self.bop_dataset_path,
                                                    self.split, len(obj_ids),
                                                    self.num_of_objs_to_sample,
                                                    self.obj_instances_limit))
                while loaded_amount != self.num_of_objs_to_sample:
                    random_id = choice(obj_ids)
                    if random_id not in loaded_ids.keys():
                        loaded_ids.update({random_id: 0})
                    # if there is no limit or if there is one, but it is not reached for this particular object
                    if self.obj_instances_limit == -1 or loaded_ids[
                            random_id] < self.obj_instances_limit:
                        cur_obj = self._load_mesh(random_id,
                                                  model_p,
                                                  scale=self.scale)
                        loaded_ids[random_id] += 1
                        loaded_amount += 1
                        loaded_objects.append(cur_obj)
                    else:
                        print(
                            "ID {} was loaded {} times with limit of {}. Total loaded amount {} while {} are "
                            "being requested".format(
                                random_id, loaded_ids[random_id],
                                self.obj_instances_limit, loaded_amount,
                                self.num_of_objs_to_sample))
            else:
                for obj_id in obj_ids:
                    cur_obj = self._load_mesh(obj_id,
                                              model_p,
                                              scale=self.scale)
                    loaded_objects.append(cur_obj)
            self._set_properties(loaded_objects)

            if self._render_grid:
                # Record the object diameter for future use
                obj_diameter = (cur_obj.dimensions[0]**2 +
                                cur_obj.dimensions[1]**2 +
                                cur_obj.dimensions[2]**2)**(0.5)
                GlobalStorage.set("obj_diamater",
                                  obj_diameter * self.scale * 1.2)
                bpy.context.scene.world.light_settings.use_ambient_occlusion = True  # turn AO on

        # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
        else:
            sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(
                **{'scene_id': self.scene_id}))
            sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(
                **{'scene_id': self.scene_id}))
            for i, (cam_id, insts) in enumerate(sc_gt.items()):
                cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics(
                    sc_camera, cam_id, insts, self.scale)

                if i == 0:
                    # define world = first camera
                    cam_H_m2w_ref = cam_H_m2c_ref.copy()

                    cur_objs = []
                    # load scene objects and set their poses
                    for inst in insts:
                        cur_objs.append(
                            self._load_mesh(inst['obj_id'],
                                            model_p,
                                            scale=self.scale))
                        self.set_object_pose(cur_objs[-1], inst, self.scale)

                cam_H_c2w = self._compute_camera_to_world_trafo(
                    cam_H_m2w_ref, cam_H_m2c_ref)
                # set camera intrinsics
                CameraUtility.set_intrinsics_from_K_matrix(
                    cam_K, split_p['im_size'][0], split_p['im_size'][1])

                # set camera extrinsics as next frame
                frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

                # Add key frame for camera shift, as it changes from frame to frame in the tless replication
                cam = bpy.context.scene.camera.data
                cam.keyframe_insert(data_path='shift_x', frame=frame_id)
                cam.keyframe_insert(data_path='shift_y', frame=frame_id)

                # Copy object poses to key frame (to be sure)
                for cur_obj in cur_objs:
                    self._insert_key_frames(cur_obj, frame_id)