Ejemplo n.º 1
0
    def set_default_parameters() -> None:
        """ Loads and sets default parameters defined in DefaultConfig.py

        Configures the camera (intrinsics and stereo settings) and the renderer
        (samples, tiling, CPU threads, denoiser, subdivision simplification and
        light bounces) from the values stored in DefaultConfig.
        """
        # Set default intrinsics; "FOV" selects the field-of-view based
        # parameterization of the intrinsics.
        CameraUtility.set_intrinsics_from_blender_params(
            DefaultConfig.fov, DefaultConfig.resolution_x,
            DefaultConfig.resolution_y, DefaultConfig.clip_start,
            DefaultConfig.clip_end, DefaultConfig.pixel_aspect_x,
            DefaultConfig.pixel_aspect_y, DefaultConfig.shift_x,
            DefaultConfig.shift_y, "FOV")
        # Default stereo camera configuration (convergence mode/distance, baseline)
        CameraUtility.set_stereo_parameters(
            DefaultConfig.stereo_convergence_mode,
            DefaultConfig.stereo_convergence_distance,
            DefaultConfig.stereo_interocular_distance)

        # Init renderer
        RendererUtility.init()
        RendererUtility.set_samples(DefaultConfig.samples)
        # Enable the blender auto-tile-size addon before switching auto tiling on
        addon_utils.enable("render_auto_tile_size")
        RendererUtility.toggle_auto_tile_size(True)

        # Set number of cpu cores used for rendering (1 thread is always used for coordination => 1
        # cpu thread means GPU-only rendering)
        RendererUtility.set_cpu_threads(1)
        RendererUtility.set_denoiser(DefaultConfig.denoiser)

        # Limit subdivision levels during rendering to the configured default
        RendererUtility.set_simplify_subdivision_render(
            DefaultConfig.simplify_subdivision_render)

        # Default number of light bounces per bounce type
        RendererUtility.set_light_bounces(
            DefaultConfig.diffuse_bounces, DefaultConfig.glossy_bounces,
            DefaultConfig.ao_bounces_render, DefaultConfig.max_bounces,
            DefaultConfig.transmission_bounces,
            DefaultConfig.transparency_bounces, DefaultConfig.volume_bounces)
Ejemplo n.º 2
0
    def _configure_renderer(self,
                            default_samples=256,
                            use_denoiser=False,
                            default_denoiser="Intel"):
        """
        Applies all render settings that can be overridden via the module config.

        :param default_samples: Default number of samples to render for each pixel
        :param use_denoiser: If true, a denoiser is used, only use this on color information
        :param default_denoiser: Either "Intel" or "Blender", "Intel" performs much better in most cases
        """
        RendererUtility.init()
        RendererUtility.set_samples(self.config.get_int("samples", default_samples))

        if self.config.has_param("use_adaptive_sampling"):
            RendererUtility.set_adaptive_sampling(self.config.get_float("use_adaptive_sampling"))

        # Either let blender determine the tile size automatically or use the
        # explicitly configured tile dimensions.
        auto_tile = self.config.get_bool("auto_tile_size", True)
        RendererUtility.toggle_auto_tile_size(auto_tile)
        if not auto_tile:
            RendererUtility.set_tile_size(self.config.get_int("tile_x"),
                                          self.config.get_int("tile_y"))

        # Set number of cpu cores used for rendering (1 thread is always used for coordination => 1
        # cpu thread means GPU-only rendering)
        RendererUtility.set_cpu_threads(self.config.get_int("cpu_threads", 1))

        render_settings = bpy.context.scene.render
        print('Resolution: {}, {}'.format(render_settings.resolution_x,
                                          render_settings.resolution_y))

        # The configured denoiser is only looked up when denoising is requested at all
        denoiser = self.config.get_string("denoiser", default_denoiser) if use_denoiser else None
        RendererUtility.set_denoiser(denoiser)

        RendererUtility.set_simplify_subdivision_render(
            self.config.get_int("simplify_subdivision_render", 3))

        # Light bounce limits per bounce type, each overridable via the config
        RendererUtility.set_light_bounces(self.config.get_int("diffuse_bounces", 3),
                                          self.config.get_int("glossy_bounces", 0),
                                          self.config.get_int("ao_bounces_render", 3),
                                          self.config.get_int("max_bounces", 3),
                                          self.config.get_int("transmission_bounces", 0),
                                          self.config.get_int("transparency_bounces", 8),
                                          self.config.get_int("volume_bounces", 0))

        RendererUtility.toggle_stereo(self.config.get_bool("stereo", False))

        self._use_alpha_channel = self.config.get_bool('use_alpha', False)
Ejemplo n.º 3
0
    # Sample a random rotation: first component in [1.0, 1.4217], second fixed
    # to 0, third in [0, 2*pi) (6.283185307 == 2*pi) — presumably Euler angles
    # with a free yaw; TODO(review) confirm the convention used by MathUtility.
    rotation = np.random.uniform([1.0, 0, 0], [1.4217, 0, 6.283185307])
    # NOTE(review): `location` is assumed to be sampled earlier in this loop
    # body (not visible in this chunk).
    cam2world_matrix = MathUtility.build_transformation_mat(location, rotation)

    # Check that obstacles are at least 1.2 meters away from the camera and make sure the view is interesting enough
    if CameraValidation.perform_obstacle_in_view_check(cam2world_matrix, {"min": 1.2}, bvh_tree) and \
            CameraValidation.scene_coverage_score(cam2world_matrix) > 0.4 and \
            floor.position_is_above_object(location):
        # Persist camera pose
        CameraUtility.add_camera_pose(cam2world_matrix)
        poses += 1
    tries += 1

# Produce a distance map in addition to the color pass
RendererUtility.enable_distance_output()
# Number of samples per pixel used for the color rendering
RendererUtility.set_samples(350)
RendererUtility.set_light_bounces(diffuse_bounces=200,
                                  glossy_bounces=200,
                                  max_bounces=200,
                                  transmission_bounces=200,
                                  transparent_max_bounces=200)
# Run the full render pipeline
data = RendererUtility.render()

# Convert the rendered distance image into a depth image and drop the
# now-redundant distance entry in one step
data["depth"] = PostProcessingUtility.dist2depth(data.pop("distance"))

# Persist all rendered outputs into a single .hdf5 container
WriterUtility.save_to_hdf5(args.output_dir, data)
    def render(output_dir,
               temp_dir,
               get_forward_flow,
               get_backward_flow,
               blender_image_coordinate_style=False,
               forward_flow_output_file_prefix="forward_flow_",
               forward_flow_output_key="forward_flow",
               backward_flow_output_file_prefix="backward_flow_",
               backward_flow_output_key="backward_flow"):
        """ Renders the optical flow (forward and backward) for all frames.

        :param output_dir: The directory to write images to.
        :param temp_dir: The directory to write intermediate data to.
        :param get_forward_flow: Whether to render forward optical flow.
        :param get_backward_flow: Whether to render backward optical flow.
        :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left (blender default; True) or top left (standard convention; False).
        :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
        :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
        :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
        :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
        """
        if get_forward_flow is False and get_backward_flow is False:
            raise Exception(
                "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
            )

        with Utility.UndoAfterExecution():
            # Flow rendering needs no light-transport quality: a single sample,
            # no adaptive sampling, no denoiser and minimal light bounces.
            RendererUtility.init()
            RendererUtility.set_samples(1)
            RendererUtility.set_adaptive_sampling(0)
            RendererUtility.set_denoiser(None)
            RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

            FlowRendererUtility._output_vector_field(get_forward_flow,
                                                     get_backward_flow,
                                                     output_dir)

            # only need to render once; both fwd and bwd flow will be saved
            # NOTE(review): the render call below uses the "bwd_flow_" prefix,
            # while _output_vector_field presumably sets up the outputs for both
            # directions, so fwd files are expected under fwd_flow_* as well —
            # verify against FlowRendererUtility._output_vector_field.
            temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
            temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
            RendererUtility.render(temp_dir, "bwd_flow_", None)

            # After rendering: convert to optical flow or calculate hsv visualization, if desired
            for frame in range(bpy.context.scene.frame_start,
                               bpy.context.scene.frame_end):
                # temporarily save respective vector fields
                if get_forward_flow:
                    file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                    fwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)

                    # Flip the y component to convert from blender's bottom-left
                    # origin to the standard top-left image convention.
                    if not blender_image_coordinate_style:
                        fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(
                        output_dir,
                        forward_flow_output_file_prefix) + '%04d' % frame
                    forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                    # Only the first two channels (x/y displacement) are stored
                    np.save(fname + '.npy', forward_flow[:, :, :2])

                if get_backward_flow:
                    file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                    bwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)

                    # Same y-flip as above for the backward direction
                    if not blender_image_coordinate_style:
                        bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(
                        output_dir,
                        backward_flow_output_file_prefix) + '%04d' % frame
                    np.save(fname + '.npy', bwd_flow_field[:, :, :2])

        # register desired outputs
        if get_forward_flow:
            Utility.register_output(output_dir,
                                    forward_flow_output_file_prefix,
                                    forward_flow_output_key, '.npy', '2.0.0')
        if get_backward_flow:
            Utility.register_output(output_dir,
                                    backward_flow_output_file_prefix,
                                    backward_flow_output_key, '.npy', '2.0.0')
Ejemplo n.º 5
0
                    help="Path to where the data should be saved")
args = parser.parse_args()

# Fail fast if either required input folder is missing
if not (os.path.exists(args.front) and os.path.exists(args.future_folder)):
    raise Exception("One of the two folders does not exist!")

Initializer.init()

# Resolve and load the csv that maps Front3D label names to ids
mapping_file = Utility.resolve_path(
    os.path.join("resources", "front_3D", "3D_front_mapping.csv"))
mapping = LabelIdMapping.from_csv(mapping_file)

# Allow plenty of light bounces so light propagates through the whole house
RendererUtility.set_light_bounces(max_bounces=200,
                                  diffuse_bounces=200,
                                  glossy_bounces=200,
                                  ao_bounces_render=200,
                                  transmission_bounces=200,
                                  transparent_max_bounces=200,
                                  volume_bounces=0)

# Load all objects of the Front3D house
loaded_objects = Front3DLoader.load(json_path=args.front,
                                    future_model_path=args.future_folder,
                                    front_3D_texture_path=args.front_3D_texture_path,
                                    label_mapping=mapping,
                                    ceiling_light_strength=0.8,
                                    lamp_light_strength=0.8)

# Sampler for locations inside the loaded front3D house
point_sampler = Front3DPointInRoomSampler(loaded_objects)
Ejemplo n.º 6
0
    def render(output_dir,
               temp_dir,
               used_attributes,
               used_default_values=None,
               file_prefix="segmap_",
               output_key="segmap",
               segcolormap_output_file_prefix="class_inst_col_map",
               segcolormap_output_key="segcolormap",
               use_alpha_channel=False,
               render_colorspace_size_per_dimension=2048):
        """ Renders segmentation maps for all frames.

        :param output_dir: The directory to write images to.
        :param temp_dir: The directory to write intermediate data to.
        :param used_attributes: The attributes to be used for color mapping.
        :param used_default_values: The default values used for the keys used in used_attributes. Defaults to an
                                    empty dict; any given dict is copied, so the caller's dict is never modified.
        :param file_prefix: The prefix to use for writing the images.
        :param output_key: The key to use for registering the output.
        :param segcolormap_output_file_prefix: The prefix to use for writing the segmation-color map csv.
        :param segcolormap_output_key: The key to use for registering the segmation-color map output.
        :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
        :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of integers which can be precisely stored is [-2048, 2048]. As blender does not allow negative values for colors, we use [0, 2048] ** 3 as our color space which allows ~8 billion different colors/objects. This should be enough.
        """
        # Bugfix: this parameter used to be a mutable default argument ({}) that was
        # mutated below (the 'cp_category_id' alias is added in place), leaking state
        # across calls. Work on a fresh copy instead.
        used_default_values = dict(used_default_values) if used_default_values is not None else {}

        with Utility.UndoAfterExecution():
            # Segmentation needs no light-transport quality: one sample, no
            # adaptive sampling, no denoiser, minimal light bounces.
            RendererUtility.init()
            RendererUtility.set_samples(1)
            RendererUtility.set_adaptive_sampling(0)
            RendererUtility.set_denoiser(None)
            RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

            # Get objects with meshes (i.e. not lights or cameras)
            objs_with_mats = get_all_blender_mesh_objects()

            # Assign each object a unique color so instances can be recovered
            # from the rendered image afterwards.
            colors, num_splits_per_dimension, used_objects = SegMapRendererUtility._colorize_objects_for_instance_segmentation(
                objs_with_mats, use_alpha_channel,
                render_colorspace_size_per_dimension)

            # Disable pixel filtering so colors are not blended at object borders
            bpy.context.scene.cycles.filter_width = 0.0

            if use_alpha_channel:
                MaterialLoaderUtility.add_alpha_channel_to_textures(
                    blurry_edges=False)

            # Determine path for temporary and for final output
            temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
            final_segmentation_file_path = os.path.join(
                output_dir, file_prefix)

            # Render to EXR so the encoded object colors survive losslessly
            RendererUtility.set_output_format("OPEN_EXR", 16)
            RendererUtility.render(temp_dir, "seg_", None)

            # Find the smallest unsigned integer dtype that can hold the highest
            # object index (falls back to uint32 if even uint16 is too small).
            for dtype in [np.uint8, np.uint16, np.uint32]:
                optimal_dtype = dtype
                if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                    break

            # 'class' is an alias for the 'cp_category_id' custom property, so
            # mirror a default given for 'class' onto the real attribute name.
            if 'class' in used_default_values:
                used_default_values['cp_category_id'] = used_default_values[
                    'class']

            if isinstance(used_attributes, str):
                # only one result is requested
                result_channels = 1
                used_attributes = [used_attributes]
            elif isinstance(used_attributes, list):
                result_channels = len(used_attributes)
            else:
                raise Exception(
                    "The type of this is not supported here: {}".format(
                        used_attributes))

            # Values that cannot be written into the image are collected here
            # and written to a csv file at the end, keyed by object id.
            save_in_csv_attributes = {}
            # define them for the avoid rendering case
            there_was_an_instance_rendering = False
            list_of_used_attributes = []

            # Check if stereo is enabled
            if bpy.context.scene.render.use_multiview:
                suffixes = ["_L", "_R"]
            else:
                suffixes = [""]

            # After rendering
            for frame in range(
                    bpy.context.scene.frame_start,
                    bpy.context.scene.frame_end):  # for each rendered frame
                for suffix in suffixes:
                    file_path = temporary_segmentation_file_path + (
                        "%04d" % frame) + suffix + ".exr"
                    segmentation = load_image(file_path)
                    print(file_path, segmentation.shape)

                    # Decode the rendered colors back into object indices
                    segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                        segmentation, num_splits_per_dimension,
                        render_colorspace_size_per_dimension)
                    segmap = segmap.astype(optimal_dtype)

                    used_object_ids = np.unique(segmap)
                    max_id = np.max(used_object_ids)
                    if max_id >= len(used_objects):
                        raise Exception(
                            "There are more object colors than there are objects"
                        )
                    combined_result_map = []
                    there_was_an_instance_rendering = False
                    list_of_used_attributes = []
                    used_channels = []
                    for channel_id in range(result_channels):
                        # One output channel per requested attribute
                        resulting_map = np.empty(
                            (segmap.shape[0], segmap.shape[1]))
                        was_used = False
                        current_attribute = used_attributes[channel_id]
                        org_attribute = current_attribute

                        # if the class is used the category_id attribute is evaluated
                        if current_attribute == "class":
                            current_attribute = "cp_category_id"
                        # in the instance case the resulting ids are directly used
                        if current_attribute == "instance":
                            there_was_an_instance_rendering = True
                            resulting_map = segmap
                            was_used = True
                            # a non default value was also used
                            non_default_value_was_used = True
                        else:
                            if current_attribute != "cp_category_id":
                                list_of_used_attributes.append(
                                    current_attribute)
                            # for the current attribute remove cp_ and _csv, if present
                            used_attribute = current_attribute
                            if used_attribute.startswith("cp_"):
                                used_attribute = used_attribute[len("cp_"):]
                            # check if a default value was specified, under
                            # either the raw or the stripped attribute name
                            default_value_set = False
                            if current_attribute in used_default_values or used_attribute in used_default_values:
                                default_value_set = True
                                if current_attribute in used_default_values:
                                    default_value = used_default_values[
                                        current_attribute]
                                elif used_attribute in used_default_values:
                                    default_value = used_default_values[
                                        used_attribute]
                            last_state_save_in_csv = None
                            # this avoids that for certain attributes only the default value is written
                            non_default_value_was_used = False
                            # iterate over all object ids
                            for object_id in used_object_ids:
                                is_default_value = False
                                # get the corresponding object via the id
                                current_obj = used_objects[object_id]
                                # if the current obj has a attribute with that name -> get it
                                if hasattr(current_obj, used_attribute):
                                    used_value = getattr(
                                        current_obj, used_attribute)
                                # if the current object has a custom property with that name -> get it
                                elif current_attribute.startswith(
                                        "cp_"
                                ) and used_attribute in current_obj:
                                    used_value = current_obj[used_attribute]
                                elif current_attribute.startswith("cf_"):
                                    # "custom function" attributes; currently only
                                    # cf_basename (object name without the .NNN suffix)
                                    if current_attribute == "cf_basename":
                                        used_value = current_obj.name
                                        if "." in used_value:
                                            used_value = used_value[:used_value
                                                                    .rfind("."
                                                                           )]
                                elif default_value_set:
                                    # if none of the above applies use the default value
                                    used_value = default_value
                                    is_default_value = True
                                else:
                                    # if the requested current_attribute is not a custom property or a attribute
                                    # or there is a default value stored
                                    # it throws an exception
                                    raise Exception(
                                        "The obj: {} does not have the "
                                        "attribute: {}, striped: {}. Maybe try a default "
                                        "value.".format(
                                            current_obj.name,
                                            current_attribute, used_attribute))

                                # check if the value should be saved as an image or in the csv file
                                save_in_csv = False
                                try:
                                    # Non-numeric values raise ValueError here and
                                    # are routed into the csv instead of the image.
                                    resulting_map[segmap ==
                                                  object_id] = used_value
                                    was_used = True
                                    if not is_default_value:
                                        non_default_value_was_used = True
                                    # save everything which is not instance also in the .csv
                                    if current_attribute != "instance":
                                        save_in_csv = True
                                except ValueError:
                                    save_in_csv = True

                                if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                    raise Exception(
                                        "During creating the mapping, the saving to an image or a csv file "
                                        "switched, this might indicated that the used default value, does "
                                        "not have the same type as the returned value, "
                                        "for: {}".format(current_attribute))
                                last_state_save_in_csv = save_in_csv
                                if save_in_csv:
                                    if object_id in save_in_csv_attributes:
                                        save_in_csv_attributes[object_id][
                                            used_attribute] = used_value
                                    else:
                                        save_in_csv_attributes[object_id] = {
                                            used_attribute: used_value
                                        }
                        # only keep channels that received at least one real
                        # (non-default) numeric value
                        if was_used and non_default_value_was_used:
                            used_channels.append(org_attribute)
                            combined_result_map.append(resulting_map)

                    fname = final_segmentation_file_path + ("%04d" %
                                                            frame) + suffix
                    # combine all resulting images to one image
                    resulting_map = np.stack(combined_result_map, axis=2)
                    # remove the unneeded third dimension
                    if resulting_map.shape[2] == 1:
                        resulting_map = resulting_map[:, :, 0]
                    np.save(fname, resulting_map)

            if not there_was_an_instance_rendering:
                if len(list_of_used_attributes) > 0:
                    raise Exception(
                        "There were attributes specified in the may_by, which could not be saved as "
                        "there was no \"instance\" may_by key used. This is true for this/these "
                        "keys: {}".format(", ".join(list_of_used_attributes)))
                # if there was no instance rendering no .csv file is generated!
                # delete all saved infos about .csv
                save_in_csv_attributes = {}

            # write color mappings to file
            if save_in_csv_attributes:
                csv_file_path = os.path.join(
                    output_dir, segcolormap_output_file_prefix + ".csv")
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # get from the first element the used field names
                    fieldnames = ["idx"]
                    # get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in used_channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # save for each object all values in one row
                    for obj_idx, object_element in save_in_csv_attributes.items(
                    ):
                        object_element["idx"] = obj_idx
                        for i, channel_name in enumerate(used_channels):
                            object_element["channel_{}".format(
                                channel_name)] = i
                        writer.writerow(object_element)

        Utility.register_output(output_dir, file_prefix, output_key, ".npy",
                                "2.0.0")
        if save_in_csv_attributes:
            Utility.register_output(output_dir,
                                    segcolormap_output_file_prefix,
                                    segcolormap_output_key,
                                    ".csv",
                                    "2.0.0",
                                    unique_for_camposes=False)