Exemplo n.º 1
0
    def _load_room(node: dict, metadata: dict, material_adjustments: list,
                   transform: Matrix, house_id: str, parent: MeshObject,
                   room_per_object: dict,
                   label_mapping: LabelIdMapping) -> List[MeshObject]:
        """ Load the room specified in the given node.

        :param node: The node dict which contains information from house.json.
        :param metadata: A dict of metadata which will be written into the object's custom data.
        :param material_adjustments: Adjustments to the materials which were specified inside house.json.
        :param transform: The transformation that should be applied to the loaded objects.
        :param house_id: The id of the current house.
        :param parent: The parent object to which the room should be linked
        :param room_per_object: A dict for object -> room lookup (Will be written into)
        :param label_mapping: Mapping used to resolve category ids from label names.
        :return: The list of loaded mesh objects.
        """
        # An empty acts as the common parent for everything inside this room
        room_obj = Entity.create_empty("Room#" + node["id"])
        room_obj.set_cp("type", "Room")
        room_obj.set_cp("bbox", SuncgLoader._correct_bbox_frame(node["bbox"]))
        room_obj.set_cp("roomTypes", node["roomTypes"])
        room_obj.set_parent(parent)
        loaded_objects = [room_obj]

        # Register all contained child nodes for the object -> room lookup
        for child_id in node.get("nodeIndices", []):
            room_per_object[child_id] = room_obj

        # Floor, ceiling and walls live in separate obj files distinguished by a
        # one-letter suffix; each part can be suppressed via a hide flag in the node.
        room_parts = [("hideFloor", "Floor", "floor", "f"),
                      ("hideCeiling", "Ceiling", "ceiling", "c"),
                      ("hideWalls", "Wall", "wall", "w")]
        for hide_key, part_type, part_label, file_suffix in room_parts:
            if node.get(hide_key) == 1:
                continue
            metadata["type"] = part_type
            metadata["category_id"] = label_mapping.id_from_label(part_label)
            metadata["fine_grained_class"] = part_label
            part_path = os.path.join(SuncgLoader._suncg_dir, "room", house_id,
                                     node["modelId"] + file_suffix + ".obj")
            loaded_objects += SuncgLoader._load_obj(part_path, metadata,
                                                    material_adjustments,
                                                    transform, room_obj)

        return loaded_objects
Exemplo n.º 2
0
    def run(self):
        """ Processes defined conditions and compiles a list of objects.

        :return: List of objects that met the conditional requirement. Type: list.
        """
        raw_conditions = self.config.get_raw_dict('conditions')

        # a list of conditions is interpreted as an OR over its entries
        or_conditions = raw_conditions if isinstance(raw_conditions, list) \
            else [raw_conditions]

        candidates = EntityUtility.convert_to_entities(
            bpy.context.scene.objects)
        selection = []
        for and_condition in or_conditions:
            # every single entry is itself an AND over its sub-conditions
            matches = self.perform_and_condition_check(and_condition,
                                                       candidates)
            # keep insertion order, skip objects already selected
            for matched_obj in matches:
                if matched_obj not in selection:
                    selection.append(matched_obj)

        num_random_samples = self.config.get_int("random_samples", 0)
        index_given = self.config.has_param("index")

        # `index` and `random_samples` are mutually exclusive
        if index_given and num_random_samples:
            raise RuntimeError(
                "Please, define only one of two: `index` or `random_samples`.")
        if index_given:
            selection = [selection[self.config.get_int("index")]]
        elif num_random_samples:
            selection = sample(selection,
                               k=min(num_random_samples, len(selection)))

        if self.config.get_bool("check_empty", False) and not selection:
            raise Exception(
                f"There were no objects selected with the following "
                f"condition: \n{self._get_conditions_as_string()}")

        # Map back to blender objects for now (TODO: Remove in the future)
        return [entity.blender_obj for entity in selection]
Exemplo n.º 3
0
    def run(self):
        """ Adds specified basic empty objects to the scene and sets at least their names to the user-defined ones.
            1. Get configuration parameters' values.
            2. Add an object.
            3. Set attribute values.
        """
        for empty_spec in self.config.get_list("empties_to_add"):
            spec_conf = Config(empty_spec)
            # create the empty with the user-defined name and display type
            entity = Entity.create_empty(
                spec_conf.get_string("name"),
                spec_conf.get_string("type", "plain_axes"))
            # apply the pose attributes, falling back to identity transforms
            entity.set_location(
                spec_conf.get_vector3d("location", [0, 0, 0]))
            entity.set_rotation_euler(
                spec_conf.get_vector3d("rotation", [0, 0, 0]))
            entity.set_scale(spec_conf.get_vector3d("scale", [1, 1, 1]))
Exemplo n.º 4
0
    def merge_object_list(objects: [Entity],
                          merged_object_name: str = 'merged_object'):
        """ Generates an empty object and sets this as parent object for all objects in the list which do not already have a parent set.

        :param objects: A list of objects to be merged.
        :param merged_object_name: The name of the parent object.
        :return: The newly created empty parent object.
        """
        assert merged_object_name != "", "Parent object name cannot be empty!"

        # create new empty object which acts as parent, and link it to the collection
        parent_obj = Entity.create_empty(merged_object_name)

        for obj in objects:
            # objects with a parent are skipped, as re-parenting would break that
            # relationship; they become (grand-)children of parent_obj through
            # their own parent instead
            if obj.get_parent() is None:
                obj.set_parent(parent_obj)

        return parent_obj
Exemplo n.º 5
0
    def load(path: str,
             obj_types: Union[list, str] = ["mesh", "empty"],
             name_regrex: str = None,
             data_blocks: Union[list, str] = "objects") -> List[MeshObject]:
        """
        Loads entities (everything that can be stored in a .blend file's folders, see Blender's documentation for
        bpy.types.ID for more info) that match a name pattern from a specified .blend file's section/datablock.

        :param path: Path to a .blend file.
        :param obj_types: The type of objects to load. This parameter is only relevant when `data_blocks` is set to `"objects"`.
                          Available options are: ['mesh', 'curve', 'hair', 'armature', 'empty', 'light', 'camera']
        :param name_regrex: Regular expression representing a name pattern of entities' (everything that can be stored in a .blend
                         file's folders, see Blender's documentation for bpy.types.ID for more info) names. If None, all
                         entities of the selected data blocks are loaded.
        :param data_blocks: The datablock or a list of datablocks which should be loaded from the given .blend file.
                            Available options are: ['armatures', 'cameras', 'curves', 'hairs', 'images', 'lights', 'materials', 'meshes', 'objects', 'textures']
        :return: The list of loaded mesh objects.
        :raises Exception: If one of the requested data blocks does not exist in the .blend file.
        """
        # get a path to a .blend file
        path = Utility.resolve_path(path)
        data_blocks = BlendLoader._validate_and_standardizes_configured_list(
            data_blocks, BlendLoader.valid_datablocks, "data block")
        obj_types = BlendLoader._validate_and_standardizes_configured_list(
            obj_types, BlendLoader.valid_object_types, "object type")

        # Remember which orphans existed beforehand, so only newly added
        # orphans are purged at the end
        orphans_before = collect_all_orphan_datablocks()

        # Start importing blend file. All objects that should be imported need to be copied from "data_from" to "data_to"
        with bpy.data.libraries.load(path) as (data_from, data_to):
            for data_block in data_blocks:
                # Verify that the given data block is valid
                if not hasattr(data_from, data_block):
                    raise Exception("No such data block: " + data_block)
                # Find all entities of this data block that match the specified pattern
                data_to_entities = []
                for entity_name in getattr(data_from, data_block):
                    if not name_regrex or re.fullmatch(
                            name_regrex, entity_name) is not None:
                        data_to_entities.append(entity_name)
                # Import them
                setattr(data_to, data_block, data_to_entities)
                print("Imported " + str(len(data_to_entities)) + " " +
                      data_block)

        # Go over all imported objects again
        loaded_objects = []
        for data_block in data_blocks:
            # Some adjustments that only affect objects
            if data_block == "objects":
                for obj in getattr(data_to, data_block):
                    # Check that the object type is desired
                    if obj.type.lower() in obj_types:
                        # Link objects to the scene
                        bpy.context.collection.objects.link(obj)
                        # Wrap in the matching entity class for the caller
                        if obj.type == 'MESH':
                            loaded_objects.append(MeshObject(obj))
                        elif obj.type == 'LIGHT':
                            loaded_objects.append(Light(obj))
                        else:
                            loaded_objects.append(Entity(obj))

                        # If a camera was imported
                        if obj.type == 'CAMERA':
                            # Make it the active camera in the scene
                            bpy.context.scene.camera = obj

                            # Find the maximum frame number of its key frames.
                            # Guard against animation_data existing without an
                            # assigned action (action can be None), which would
                            # otherwise raise an AttributeError.
                            max_keyframe = -1
                            if obj.animation_data is not None \
                                    and obj.animation_data.action is not None:
                                fcurves = obj.animation_data.action.fcurves
                                for curve in fcurves:
                                    keyframe_points = curve.keyframe_points
                                    for keyframe in keyframe_points:
                                        max_keyframe = max(
                                            max_keyframe, keyframe.co[0])

                            # Set frame_end to the next free keyframe
                            bpy.context.scene.frame_end = max_keyframe + 1
                    else:
                        # Remove object again if its type is not desired
                        bpy.data.objects.remove(obj, do_unlink=True)
                print("Selected " + str(len(loaded_objects)) +
                      " of the loaded objects by type")
            else:
                loaded_objects.extend(getattr(data_to, data_block))

        # As some loaded objects were deleted again due to their type, we need also to remove the dependent datablocks that were also loaded and are now orphans
        BlendLoader._purge_added_orphans(orphans_before, data_to)
        return loaded_objects
Exemplo n.º 6
0
    def construct(used_floor_area: float,
                  interior_objects: [MeshObject],
                  materials: [Material],
                  amount_of_extrusions: int = 0,
                  fac_from_square_room: float = 0.3,
                  corridor_width: float = 0.9,
                  wall_height: float = 2.5,
                  amount_of_floor_cuts: int = 2,
                  only_use_big_edges: bool = True,
                  create_ceiling: bool = True,
                  assign_material_to_ceiling: bool = False,
                  placement_tries_per_face: int = 3,
                  amount_of_objects_per_sq_meter: float = 3.0):
        """ Construct a random room, fill it with the given interior objects and return all placed objects.

        :param used_floor_area: Floor area handed to the random room construction (presumably in square
                                meters -- confirm against `construct_random_room`).
        :param interior_objects: Objects to place inside the room; they are placed biggest (by bounding
                                 box volume) first. Objects that cannot be placed are deleted.
        :param materials: Materials to assign to floor, walls and (optionally) the ceiling.
        :param amount_of_extrusions: Number of extrusions applied to the basic rectangular room;
                                     the base rectangle itself is counted as one internally.
        :param fac_from_square_room: Forwarded to `construct_random_room` -- see there for semantics.
        :param corridor_width: Forwarded to `construct_random_room`.
        :param wall_height: Forwarded to `construct_random_room`.
        :param amount_of_floor_cuts: Forwarded to `construct_random_room`.
        :param only_use_big_edges: Forwarded to `construct_random_room`.
        :param create_ceiling: Whether a ceiling object is created; if so it is part of the result.
        :param assign_material_to_ceiling: Whether a material is also assigned to the ceiling.
        :param placement_tries_per_face: How often a placement is attempted per visited floor face.
        :param amount_of_objects_per_sq_meter: Controls the step size of the face walk and therefore
                                               the object density on the floor.
        :return: All objects that ended up in the room: wall object, optional ceiling, placed
                 interior objects and the floor object.
        """
        # internally the first basic rectangular is counted as one
        amount_of_extrusions += 1

        bvh_cache_for_intersection = {}
        placed_objects = []

        # construct a random room
        floor_obj, wall_obj, ceiling_obj = RandomRoomConstructor.construct_random_room(
            used_floor_area, amount_of_extrusions, fac_from_square_room,
            corridor_width, wall_height, amount_of_floor_cuts,
            only_use_big_edges, create_ceiling)
        placed_objects.append(wall_obj)
        if ceiling_obj is not None:
            placed_objects.append(ceiling_obj)

        # assign materials to all existing objects
        RandomRoomConstructor.assign_materials_to_floor_wall_ceiling(
            floor_obj, wall_obj, ceiling_obj, assign_material_to_ceiling,
            materials)

        # get all floor faces and save their size and bounding box for the round robin
        floor_obj.edit_mode()
        bm = floor_obj.mesh_as_bmesh()
        bm.faces.ensure_lookup_table()

        list_of_face_sizes = []
        list_of_face_bb = []
        for face in bm.faces:
            list_of_face_sizes.append(face.calc_area())
            list_of_verts = [v.co for v in face.verts]
            bb_min_point, bb_max_point = np.min(list_of_verts,
                                                axis=0), np.max(list_of_verts,
                                                                axis=0)
            list_of_face_bb.append((bb_min_point, bb_max_point))
        floor_obj.update_from_bmesh(bm)
        floor_obj.object_mode()
        bpy.ops.object.select_all(action='DESELECT')
        total_face_size = sum(list_of_face_sizes)

        # sort the objects by bounding box volume, biggest first
        interior_objects.sort(key=lambda obj: obj.get_bound_box_volume())
        interior_objects.reverse()

        list_of_deleted_objects = []

        # roughly one object is placed per `step_size` of accumulated floor area
        step_size = 1.0 / amount_of_objects_per_sq_meter * float(
            len(interior_objects))
        current_step_size_counter = random.uniform(-step_size, step_size)
        for selected_obj in interior_objects:
            current_obj = selected_obj
            is_duplicated = False

            # if the step size is bigger than the room size, certain objects need to be skipped
            if step_size > total_face_size:
                current_step_size_counter += total_face_size
                if current_step_size_counter > step_size:
                    current_step_size_counter = random.uniform(
                        -step_size, step_size)
                    continue

            # walk over all faces in a round robin fashion
            total_acc_size = 0
            # select a random start point
            current_i = random.randrange(len(list_of_face_sizes))
            current_accumulated_face_size = random.uniform(0, step_size + 1e-7)
            # check if the accumulation of all visited faces is bigger than the sum of all of them
            while total_acc_size < total_face_size:
                face_size = list_of_face_sizes[current_i]
                face_bb = list_of_face_bb[current_i]
                if face_size < step_size:
                    # face size is smaller than one step, accumulate faces until a step is full
                    current_accumulated_face_size += face_size
                    if current_accumulated_face_size > step_size:
                        for _ in range(placement_tries_per_face):
                            found_spot = RandomRoomConstructor.sample_new_object_poses_on_face(
                                current_obj, face_bb,
                                bvh_cache_for_intersection, placed_objects,
                                wall_obj)
                            if found_spot:
                                placed_objects.append(current_obj)
                                # work on a fresh copy for possible further placements
                                current_obj = current_obj.duplicate()
                                is_duplicated = True
                                break
                        current_accumulated_face_size -= step_size
                else:
                    # face size is bigger than one step, so several placements fit on this face
                    amount_of_steps = int(
                        (face_size + current_accumulated_face_size) /
                        step_size)
                    for i in range(amount_of_steps):
                        for _ in range(placement_tries_per_face):
                            found_spot = RandomRoomConstructor.sample_new_object_poses_on_face(
                                current_obj, face_bb,
                                bvh_cache_for_intersection, placed_objects,
                                wall_obj)
                            if found_spot:
                                placed_objects.append(current_obj)
                                current_obj = current_obj.duplicate()
                                is_duplicated = True
                                break
                    # left over value is used in next round
                    current_accumulated_face_size = face_size - (
                        amount_of_steps * step_size)
                current_i = (current_i + 1) % len(list_of_face_sizes)
                total_acc_size += face_size

            # remove current obj from the bvh cache
            if current_obj.get_name() in bvh_cache_for_intersection:
                del bvh_cache_for_intersection[current_obj.get_name()]
            # the last duplicate was created right after the final successful
            # placement and was never placed itself, so it has to be deleted
            if is_duplicated:
                list_of_deleted_objects.append(current_obj)

        # Add the loaded objects, which couldn't be placed
        list_of_deleted_objects.extend(
            [obj for obj in interior_objects if obj not in placed_objects])
        # Delete them all
        Entity.delete_multiple(list_of_deleted_objects)

        if floor_obj is not None:
            placed_objects.append(floor_obj)
        return placed_objects
Exemplo n.º 7
0
    def load(house_path: str,
             label_mapping: LabelIdMapping,
             suncg_dir: str = None) -> List[MeshObject]:
        """ Loads a house.json file into blender.

        - Loads all objects files specified in the house.json file.
        - Orders them hierarchically (level -> room -> object)
        - Writes metadata into the custom properties of each object

        :param house_path: The path to the house.json file which should be loaded.
        :param label_mapping: Mapping used to resolve category ids from label names.
        :param suncg_dir: The path to the suncg root directory which should be used for loading objects, rooms, textures etc.
        :return: The list of loaded mesh objects.
        """
        # If no suncg root directory has been given, determine it via the given house directory.
        if suncg_dir is None:
            suncg_dir = os.path.join(os.path.dirname(house_path), "../..")

        SuncgLoader._suncg_dir = suncg_dir
        SuncgLoader._collection_of_loaded_objs = {}
        # there are only two types of materials, textures and diffuse
        SuncgLoader._collection_of_loaded_mats = {"texture": {}, "diffuse": {}}

        with open(Utility.resolve_path(house_path), "r") as f:
            config = json.load(f)

        object_label_map, object_fine_grained_label_map, object_coarse_grained_label_map = SuncgLoader._read_model_category_mapping(
            os.path.join('resources', 'suncg', 'Better_labeling_for_NYU.csv'))

        house_id = config["id"]
        loaded_objects = []

        for level in config["levels"]:
            # Build empty level object which acts as a parent for all rooms on the level
            level_obj = Entity.create_empty("Level#" + level["id"])
            level_obj.set_cp("type", "Level")
            if "bbox" in level:
                level_obj.set_cp(
                    "bbox", SuncgLoader._correct_bbox_frame(level["bbox"]))
            else:
                print(
                    "Warning: The level with id " + level["id"] +
                    " is missing the bounding box attribute in the given house.json file!"
                )
            loaded_objects.append(level_obj)

            # Maps node index -> room object, filled by _load_room for each room on this level
            room_per_object = {}

            for node in level["nodes"]:
                # Skip invalid nodes (This is the same behavior as in the SUNCG Toolbox)
                if "valid" in node and node["valid"] == 0:
                    continue

                # Metadata is directly stored in the objects custom data
                metadata = {"type": node["type"], "is_suncg": True}

                if "modelId" in node:
                    metadata["modelId"] = node["modelId"]

                    if node["modelId"] in object_fine_grained_label_map:
                        metadata[
                            "fine_grained_class"] = object_fine_grained_label_map[
                                node["modelId"]]
                        metadata[
                            "coarse_grained_class"] = object_coarse_grained_label_map[
                                node["modelId"]]
                        metadata["category_id"] = label_mapping.id_from_label(
                            object_label_map[node["modelId"]])

                if "bbox" in node:
                    metadata["bbox"] = SuncgLoader._correct_bbox_frame(
                        node["bbox"])

                if "transform" in node:
                    # The transform is stored as a flat list of 16 values; rebuild the 4x4 matrix
                    transform = Matrix([
                        node["transform"][i * 4:(i + 1) * 4] for i in range(4)
                    ])
                    # Transpose, as given transform matrix was col-wise, but blender expects row-wise
                    transform.transpose()
                else:
                    transform = None

                if "materials" in node:
                    material_adjustments = node["materials"]
                else:
                    material_adjustments = []

                # Lookup if the object belongs to a room
                object_id = int(node["id"].split("_")[-1])
                if object_id in room_per_object:
                    parent = room_per_object[object_id]
                else:
                    parent = level_obj

                # Dispatch to the matching loader based on the node type
                if node["type"] == "Room":
                    loaded_objects += SuncgLoader._load_room(
                        node, metadata, material_adjustments, transform,
                        house_id, level_obj, room_per_object, label_mapping)
                elif node["type"] == "Ground":
                    loaded_objects += SuncgLoader._load_ground(
                        node, metadata, material_adjustments, transform,
                        house_id, parent, label_mapping)
                elif node["type"] == "Object":
                    loaded_objects += SuncgLoader._load_object(
                        node, metadata, material_adjustments, transform,
                        parent)
                elif node["type"] == "Box":
                    loaded_objects += SuncgLoader._load_box(
                        node, material_adjustments, transform, parent,
                        label_mapping)
        SuncgLoader._rename_materials()
        return loaded_objects