def load(file_path: str, texture_folder: str, unknown_texture_folder: str = "unknown") -> List[MeshObject]:
    """ Loads all SceneNet objects at the given "file_path".

    The textures for each object are sampled based on the name of the object. If the name is not represented
    in the texture folder, the unknown folder is used instead. This unknown folder does not exist after
    downloading the texture dataset, so make sure to create it and put the textures you want to use for these
    instances there.

    All objects get "category_id" set based on the data in "resources/id_mappings/nyu_idset.csv".

    Each object will have the custom property "is_scene_net_obj".

    :param file_path: The path to the .obj file from SceneNet.
    :param texture_folder: The path to the texture folder used to sample the textures.
    :param unknown_texture_folder: The path to the textures which are used if the texture type is unknown.
                                   The default path does not exist if the dataset was just downloaded; it has
                                   to be created manually.
    :return: The list of loaded mesh objects.
    """
    # Load the objects. use_image_search=False is needed because some image names have a "/" prefix,
    # which would lead Blender to search the whole root directory recursively!
    loaded_objects = ObjectLoader.load(filepath=file_path, use_image_search=False)
    loaded_objects.sort(key=lambda ele: ele.get_name())

    # sample materials for each object
    SceneNetLoader._random_sample_materials_for_each_obj(loaded_objects, texture_folder, unknown_texture_folder)

    # set the category ids for each object
    SceneNetLoader._set_category_ids(loaded_objects)

    for obj in loaded_objects:
        obj.set_cp("is_scene_net_obj", True)

    return loaded_objects
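# A minimal usage sketch (not from the source, run inside the BlenderProc environment): the import path,
# the SceneNet .obj path and the texture folder below are assumptions and have to be adapted locally.
from src.utility.loader.SceneNetLoader import SceneNetLoader  # assumed module path

objs = SceneNetLoader.load(
    file_path="resources/scenenet/SceneNetData/1Bedroom/bedroom_1.obj",  # hypothetical example path
    texture_folder="resources/scenenet/texture_library",                 # hypothetical example path
)
for obj in objs:
    print(obj.get_name(), obj.get_cp("category_id"), obj.get_cp("is_scene_net_obj"))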
def _load_obj(path: str, metadata: dict, material_adjustments: list, transform: Matrix = None,
              parent: MeshObject = None) -> List[MeshObject]:
    """ Load the wavefront object file from the given path and adjust it according to the given arguments.

    :param path: The path to the .obj file.
    :param metadata: A dict of metadata which will be written into the objects' custom data.
    :param material_adjustments: Adjustments to the materials which were specified inside house.json.
    :param transform: The transformation that should be applied to the loaded objects.
    :param parent: The parent object to which the objects should be linked.
    :return: The list of loaded mesh objects.
    """
    if not os.path.exists(path):
        print("Warning: " + path + " is missing")
        return []
    else:
        object_already_loaded = path in SuncgLoader._collection_of_loaded_objs
        loaded_objects = ObjectLoader.load(filepath=path, cached_objects=SuncgLoader._collection_of_loaded_objs)
        if object_already_loaded:
            print("Duplicate object: {}".format(path))
            for obj in loaded_objects:
                # Reset the pose to the original matrix the .obj loader produces (which is not an identity matrix)
                obj.set_local2world_mat(Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
                # remove all custom properties
                obj.clear_all_cps()

        # Go through all imported objects
        for obj in loaded_objects:
            for key in metadata.keys():
                obj.set_cp(key, metadata[key])

            SuncgLoader._transform_and_colorize_object(obj, material_adjustments, transform, parent)

        return loaded_objects
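# A small side check (not from the source): the fixed matrix used above is simply a +90 degree rotation
# around the X axis, i.e. the pose Blender's .obj importer assigns to freshly imported objects.
import math
from mathutils import Matrix

obj_loader_matrix = Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
rot_x_90 = Matrix.Rotation(math.radians(90), 4, 'X')
assert all(abs(a - b) < 1e-6 for row_a, row_b in zip(obj_loader_matrix, rot_x_90) for a, b in zip(row_a, row_b))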
def load(used_category: str, data_path: str = 'resources/pix3d') -> List[MeshObject]:
    """ Loads one random Pix3D object from the given category.

    :param used_category: The category to use, for example: 'bed'. Check the data_path/model folder for more
                          categories. Available: ['bed', 'bookcase', 'chair', 'desk', 'misc', 'sofa', 'table',
                          'tool', 'wardrobe']
    :param data_path: The path to the Pix3D folder.
    :return: The list of loaded mesh objects.
    """
    data_path = Utility.resolve_path(data_path)
    files_with_fitting_category = Pix3DLoader.get_files_with_category(used_category, data_path)

    selected_obj = random.choice(files_with_fitting_category)
    loaded_obj = ObjectLoader.load(selected_obj)

    Pix3DLoader._correct_materials(loaded_obj)

    if "void" in LabelIdMapping.label_id_map:  # Check if using an id map
        for obj in loaded_obj:
            obj.set_cp('category_id', LabelIdMapping.label_id_map["void"])

    # Remove the X-axis rotation introduced by importing .obj files.
    # The object has the same pose as before, just that the rotation_euler is now [0, 0, 0].
    for obj in loaded_obj:
        obj.remove_x_axis_rotation()

    # Move the origin of the object to the world origin and on top of the X-Y plane.
    # This makes it easier to place the objects later on; it does not change the `.location`.
    for obj in loaded_obj:
        obj.move_origin_to_bottom_mean_point()
    bpy.ops.object.select_all(action='DESELECT')

    return loaded_obj
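# A minimal usage sketch (not from the source): the import path and the local Pix3D location are assumptions.
from src.utility.loader.Pix3DLoader import Pix3DLoader  # assumed module path

chairs = Pix3DLoader.load(used_category="chair", data_path="resources/pix3d")
for obj in chairs:
    print(obj.get_name(), obj.get_cp("category_id"))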
def load(data_path: str, used_synset_id: str, used_source_id: str = "") -> List[MeshObject]:
    """ This loads an object from ShapeNet based on the given synset_id, which specifies the category of
    objects to use.

    From these objects one is randomly sampled and loaded.

    Finally, all objects are assigned a category_id corresponding to the void class, so that they do not
    trigger an exception in the SegMapRenderer.

    Todo: This is not ideal. Note: if this module is used with another loader that loads objects with a
    semantic mapping, make sure the other module is loaded first in the config file.

    :param data_path: The path to the ShapeNetCore.v2 folder.
    :param used_synset_id: The synset id, for example: '02691156'. Check the data_path folder for more ids.
    :param used_source_id: Object identifier of a particular ShapeNet object; see inside any ShapeNet category
                           for identifiers.
    :return: The list of loaded mesh objects.
    """
    data_path = Utility.resolve_path(data_path)
    taxonomy_file_path = os.path.join(data_path, "taxonomy.json")
    files_with_fitting_synset = ShapeNetLoader._get_files_with_synset(used_synset_id, used_source_id,
                                                                      taxonomy_file_path, data_path)
    selected_obj = random.choice(files_with_fitting_synset)
    loaded_obj = ObjectLoader.load(selected_obj)

    for obj in loaded_obj:
        obj.set_cp("used_synset_id", used_synset_id)
        obj.set_cp("used_source_id", pathlib.PurePath(selected_obj).parts[-3])

    ShapeNetLoader._correct_materials(loaded_obj)

    if "void" in LabelIdMapping.label_id_map:  # Check if using an id map
        for obj in loaded_obj:
            obj.set_cp("category_id", LabelIdMapping.label_id_map["void"])

    # Remove the X-axis rotation found in all ShapeNet objects; this is caused by importing .obj files.
    # The object has the same pose as before, just that the rotation_euler is now [0, 0, 0].
    for obj in loaded_obj:
        obj.persist_transformation_into_mesh(location=False, rotation=True, scale=False)

    # Move the origin of the object to the world origin and on top of the X-Y plane.
    # This makes it easier to place the objects later on; it does not change the `.location`.
    for obj in loaded_obj:
        obj.move_origin_to_bottom_mean_point()
    bpy.ops.object.select_all(action='DESELECT')

    return loaded_obj
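# A quick illustration (not from the source, assuming the usual ShapeNetCore.v2 layout) of why `parts[-3]`
# yields the source id: the selected path ends in <synset_id>/<source_id>/models/model_normalized.obj.
import pathlib

example_path = "ShapeNetCore.v2/02691156/10155655850468db78d106ce0a280f87/models/model_normalized.obj"
print(pathlib.PurePath(example_path).parts[-3])  # -> "10155655850468db78d106ce0a280f87" (hypothetical id)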
def run(self):
    if self.config.has_param('path') and self.config.has_param('paths'):
        raise Exception("Objectloader can not use path and paths in the same module!")
    if self.config.has_param('path'):
        file_path = Utility.resolve_path(self.config.get_string("path"))
        loaded_objects = ObjectLoader.load(filepath=file_path)
    elif self.config.has_param('paths'):
        file_paths = self.config.get_list('paths')
        loaded_objects = []
        # the file paths are mapped here to object names
        cache_objects = {}
        for file_path in file_paths:
            resolved_file_path = Utility.resolve_path(file_path)
            current_objects = ObjectLoader.load(filepath=resolved_file_path, cached_objects=cache_objects)
            loaded_objects.extend(current_objects)
    else:
        raise Exception("Loader module needs either a path or paths config value")

    if not loaded_objects:
        raise Exception("No objects have been loaded here, check the config.")

    # Set the add_properties of all imported objects
    self._set_properties(loaded_objects)
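# A hedged configuration sketch (not from the source): in the YAML pipeline config this module would be
# described roughly by the following structure, written here as a Python dict for illustration. The module
# name "loader.ObjectLoader", the example paths and the add_properties key are assumptions.
example_module_config = {
    "module": "loader.ObjectLoader",
    "config": {
        # either a single "path" ...
        "path": "examples/basic/scene.obj",
        # ... or a list under "paths" (never both at once):
        # "paths": ["examples/basic/scene.obj", "examples/basic/other.obj"],
        "add_properties": {"cp_category_id": 1},  # hypothetical custom property
    },
}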
def load(data_path: str, data_set_name: str, use_smooth_shading: bool = False) -> List[MeshObject]:
    """ Just imports the configured .ply file straight into Blender for the Replica case.

    :param data_path: The path to the data folder, where all rooms are saved.
    :param data_set_name: Name of the room (for example: apartment_0).
    :param use_smooth_shading: Enable smooth shading on all surfaces, instead of flat shading.
    :return: The list of loaded mesh objects.
    """
    file_path = os.path.join(data_path, data_set_name, 'mesh.ply')
    loaded_objects = ObjectLoader.load(file_path)

    if use_smooth_shading:
        for obj in loaded_objects:
            obj.set_shading_mode(True)

    return loaded_objects
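# A minimal usage sketch (not from the source): the import path and the local Replica location are assumptions.
from src.utility.loader.ReplicaLoader import ReplicaLoader  # assumed module path

room = ReplicaLoader.load(data_path="resources/replica_dataset", data_set_name="apartment_0",
                          use_smooth_shading=True)
print("Loaded", len(room), "mesh object(s) for apartment_0")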
def load(data_path: str, used_synset_id: str, used_source_id: str = "",
         move_object_origin: bool = True) -> List[MeshObject]:
    """ This loads an object from ShapeNet based on the given synset_id, which specifies the category of
    objects to use.

    From these objects one is randomly sampled and loaded.

    Todo: This is not ideal. Note: if this module is used with another loader that loads objects with a
    semantic mapping, make sure the other module is loaded first in the config file.

    :param data_path: The path to the ShapeNetCore.v2 folder.
    :param used_synset_id: The synset id, for example: '02691156'. Check the data_path folder for more ids.
    :param used_source_id: Object identifier of a particular ShapeNet object; see inside any ShapeNet category
                           for identifiers.
    :param move_object_origin: Moves the object origin to the bottom of the bounding box in Z direction and to
                               the middle of the bounding box in the X-Y plane; this does not change the
                               `.location` of the object. Default: True
    :return: The list of loaded mesh objects.
    """
    data_path = Utility.resolve_path(data_path)
    taxonomy_file_path = os.path.join(data_path, "taxonomy.json")
    files_with_fitting_synset = ShapeNetLoader._get_files_with_synset(used_synset_id, used_source_id,
                                                                      taxonomy_file_path, data_path)
    selected_obj = random.choice(files_with_fitting_synset)
    loaded_obj = ObjectLoader.load(selected_obj)

    for obj in loaded_obj:
        obj.set_cp("used_synset_id", used_synset_id)
        obj.set_cp("used_source_id", pathlib.PurePath(selected_obj).parts[-3])

    ShapeNetLoader._correct_materials(loaded_obj)

    # Remove the X-axis rotation found in all ShapeNet objects; this is caused by importing .obj files.
    # The object has the same pose as before, just that the rotation_euler is now [0, 0, 0].
    for obj in loaded_obj:
        obj.persist_transformation_into_mesh(location=False, rotation=True, scale=False)

    # check if the move_object_origin flag is set
    if move_object_origin:
        # Move the origin of the object to the world origin and on top of the X-Y plane.
        # This makes it easier to place the objects later on; it does not change the `.location`.
        for obj in loaded_obj:
            obj.move_origin_to_bottom_mean_point()
    bpy.ops.object.select_all(action='DESELECT')

    return loaded_obj
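# A minimal usage sketch (not from the source): loads one random object of synset '02691156' and keeps the
# origin where the .obj file defines it. The import path and dataset location are assumptions.
from src.utility.loader.ShapeNetLoader import ShapeNetLoader  # assumed module path

objs = ShapeNetLoader.load(data_path="resources/ShapeNetCore.v2", used_synset_id="02691156",
                           move_object_origin=False)
for obj in objs:
    print(obj.get_cp("used_synset_id"), obj.get_cp("used_source_id"))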
def load(data_path: str, used_sub_dataset_id: str, temp_dir: str, used_body_model_gender: str = None,
         used_subject_id: str = "", used_sequence_id: int = -1, used_frame_id: int = -1,
         num_betas: int = 10, num_dmpls: int = 10) -> List[MeshObject]:
    """ Uses the pose parameters to generate the mesh and loads it into the scene.

    :param data_path: The path to the AMASS dataset folder in the resources folder.
    :param used_sub_dataset_id: Identifier for the sub dataset, i.e. the dataset from which the human pose
                                object should be extracted.
                                Available: ['CMU', 'Transitions_mocap', 'MPI_Limits', 'SSM_synced',
                                'TotalCapture', 'Eyes_Japan_Dataset', 'MPI_mosh', 'MPI_HDM05', 'HumanEva',
                                'ACCAD', 'EKUT', 'SFU', 'KIT', 'H36M', 'TCD_handMocap', 'BML']
    :param temp_dir: A temp directory which is used for writing the temporary .obj file.
    :param used_body_model_gender: The model gender; the pose will be represented using a male, female or
                                   neutral body shape. Available: [male, female, neutral]. If None is given,
                                   a random one is chosen.
    :param used_subject_id: Type of motion from which the pose should be extracted; this parameter is dataset
                            dependent. If left empty, a random subject id is picked.
    :param used_sequence_id: Sequence id in the dataset; sequences are the motions recorded to represent a
                             certain action. If set to -1, a random sequence id is selected.
    :param used_frame_id: Frame id in the selected motion sequence. If none is given, a random one is picked.
    :param num_betas: Number of body parameters.
    :param num_dmpls: Number of DMPL parameters.
    :return: The list of loaded mesh objects.
    """
    if used_body_model_gender is None:
        used_body_model_gender = random.choice(["male", "female", "neutral"])

    # Get the mocap datasets currently supported by this loader
    taxonomy_file_path = os.path.join(data_path, "taxonomy.json")
    supported_mocap_datasets = AMASSLoader._get_supported_mocap_datasets(taxonomy_file_path, data_path)

    # selected_obj = self._files_with_fitting_ids
    pose_body, betas = AMASSLoader._get_pose_parameters(supported_mocap_datasets, num_betas,
                                                        used_sub_dataset_id, used_subject_id,
                                                        used_sequence_id, used_frame_id)
    # load the parametric model
    body_model, faces = AMASSLoader._load_parametric_body_model(data_path, used_body_model_gender, num_betas,
                                                                num_dmpls)
    # Generate the body representation using the SMPL model
    body_repr = body_model(pose_body=pose_body, betas=betas)
    # Generate a .obj file representing the selected pose
    generated_obj = AMASSLoader._write_body_mesh_to_obj_file(body_repr, faces, temp_dir)

    loaded_obj = ObjectLoader.load(generated_obj)

    AMASSLoader._correct_materials(loaded_obj)

    # set the shading mode explicitly to smooth
    for obj in loaded_obj:
        obj.set_shading_mode(True)

    if "void" in LabelIdMapping.label_id_map:  # Check if using an id map
        for obj in loaded_obj:
            obj.set_cp('category_id', LabelIdMapping.label_id_map["void"])

    # Remove the X-axis rotation introduced by importing .obj files.
    # The object has the same pose as before, just that the rotation_euler is now [0, 0, 0].
    for obj in loaded_obj:
        obj.remove_x_axis_rotation()

    # Move the origin of the object to the world origin and on top of the X-Y plane.
    # This makes it easier to place the objects later on; it does not change the `.location`.
    for obj in loaded_obj:
        obj.move_origin_to_bottom_mean_point()
    bpy.ops.object.select_all(action='DESELECT')

    return loaded_obj
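# A minimal usage sketch (not from the source): the import path, dataset location and temp directory are
# assumptions; the AMASS body model files have to be present locally for this to work.
from src.utility.loader.AMASSLoader import AMASSLoader  # assumed module path

body = AMASSLoader.load(
    data_path="resources/AMASS",        # hypothetical example path
    used_sub_dataset_id="CMU",
    temp_dir="/tmp/amass",              # hypothetical temp directory
    used_body_model_gender="male",
)
print("Loaded", len(body), "human mesh object(s)")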
def _load_furniture_objs(data: dict, future_model_path: str, lamp_light_strength: float,
                         mapping: dict) -> List[MeshObject]:
    """ Load all furniture objects specified in the json file.

    These objects are stored as "raw_model.obj" in the 3D_future_model_path. For lamps, the
    lamp_light_strength value can be changed via the config.

    :param data: The json data dict. Should contain "furniture".
    :param future_model_path: Path to the models used in the 3D-Front dataset.
    :param lamp_light_strength: Strength of the emission shader used in each lamp.
    :param mapping: A dict which maps the names of the objects to ids.
    :return: The list of loaded mesh objects.
    """
    # collect all loaded furniture objects
    all_objs = []
    # for each furniture element
    for ele in data["furniture"]:
        # create the paths based on the "jid"
        folder_path = os.path.join(future_model_path, ele["jid"])
        obj_file = os.path.join(folder_path, "raw_model.obj")
        # If the object exists, load it -> a lot of objects do not exist.
        # We are unsure why this is; we assume that not all objects have been made public.
        if os.path.exists(obj_file) and "7e101ef3-7722-4af8-90d5-7c562834fabd" not in obj_file:
            # load all objects from this .obj file
            objs = ObjectLoader.load(filepath=obj_file)
            # extract the name, which serves as category id
            used_obj_name = ele["category"]
            for obj in objs:
                obj.set_name(used_obj_name)
                # add some custom properties
                obj.set_cp("uid", ele["uid"])
                # This custom property determines if the object was used before;
                # it is needed to only clone the second appearance of this object.
                obj.set_cp("is_used", False)
                obj.set_cp("is_3D_future", True)
                obj.set_cp("type", "Non-Object")  # a non-object category used for the interest score
                # set the category id based on the used obj name
                obj.set_cp("category_id", mapping[used_obj_name.lower()])
                # walk over all materials
                for mat in obj.get_materials():
                    nodes = mat.node_tree.nodes
                    links = mat.node_tree.links

                    principled_node = Utility.get_nodes_with_type(nodes, "BsdfPrincipled")
                    is_lamp = "lamp" in used_obj_name.lower()
                    if len(principled_node) == 0 and is_lamp:
                        # this material has already been transformed
                        continue
                    elif len(principled_node) == 1:
                        principled_node = principled_node[0]
                    else:
                        raise Exception("The amount of principled nodes can not be more than 1, "
                                        "for obj: {}!".format(obj.get_name()))

                    # For each material, create a texture node
                    image_node = nodes.new(type='ShaderNodeTexImage')
                    # and load the texture.png
                    base_image_path = os.path.join(folder_path, "texture.png")
                    image_node.image = bpy.data.images.load(base_image_path, check_existing=True)
                    links.new(image_node.outputs['Color'], principled_node.inputs['Base Color'])

                    # if the object is a lamp, do the same as for the ceiling and add an emission shader
                    if is_lamp:
                        mix_node = nodes.new(type='ShaderNodeMixShader')
                        output = Utility.get_the_one_node_with_type(nodes, 'OutputMaterial')
                        Utility.insert_node_instead_existing_link(links, principled_node.outputs['BSDF'],
                                                                  mix_node.inputs[2], mix_node.outputs['Shader'],
                                                                  output.inputs['Surface'])

                        # The light path node returns 1 if the material is hit by a ray coming from the camera,
                        # else it returns 0. In this way the mix shader will use the principled shader for
                        # rendering the color of the lightbulb itself, while using the emission shader for
                        # lighting the scene.
                        light_path_node = nodes.new(type='ShaderNodeLightPath')
                        links.new(light_path_node.outputs['Is Camera Ray'], mix_node.inputs['Fac'])

                        emission_node = nodes.new(type='ShaderNodeEmission')
                        emission_node.inputs["Strength"].default_value = lamp_light_strength
                        links.new(image_node.outputs['Color'], emission_node.inputs['Color'])
                        links.new(emission_node.outputs["Emission"], mix_node.inputs[1])

            all_objs.extend(objs)
        elif "7e101ef3-7722-4af8-90d5-7c562834fabd" in obj_file:
            warnings.warn(f"This file {obj_file} was skipped as it can not be read by Blender.")
    return all_objs
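# A standalone sketch (not from the source) of the lightbulb material trick described in the comment above:
# a Light Path node drives a Mix Shader so that camera rays see the Principled BSDF (the visible bulb) while
# all other rays see the Emission shader that actually lights the scene. Node names assume a fresh default
# material created inside Blender.
import bpy

mat = bpy.data.materials.new(name="lightbulb_demo")
mat.use_nodes = True
nodes, links = mat.node_tree.nodes, mat.node_tree.links

principled = nodes["Principled BSDF"]
output = nodes["Material Output"]

emission = nodes.new(type='ShaderNodeEmission')
emission.inputs["Strength"].default_value = 15.0  # stand-in for lamp_light_strength

light_path = nodes.new(type='ShaderNodeLightPath')
mix = nodes.new(type='ShaderNodeMixShader')

# Fac = 1 for camera rays -> second shader input (Principled) colors the bulb itself;
# Fac = 0 for all other rays -> first shader input (Emission) lights the scene.
links.new(light_path.outputs['Is Camera Ray'], mix.inputs['Fac'])
links.new(emission.outputs['Emission'], mix.inputs[1])
links.new(principled.outputs['BSDF'], mix.inputs[2])
links.new(mix.outputs['Shader'], output.inputs['Surface'])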
from src.utility.RendererUtility import RendererUtility
from src.utility.PostProcessingUtility import PostProcessingUtility
# The remaining BlenderProc utilities used below (Initializer, ObjectLoader, Light, CameraUtility) are
# assumed to be imported from their corresponding src.utility modules as well.
from mathutils import Matrix, Vector, Euler

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('camera', help="Path to the camera file, should be examples/basic/camera_positions")
parser.add_argument('scene', help="Path to the scene.obj file, should be examples/basic/scene.obj")
parser.add_argument('output_dir', help="Path to where the final files will be saved, could be examples/basic/output")
args = parser.parse_args()

Initializer.init()

# load the objects into the scene
objs = ObjectLoader.load(args.scene)

# define a light and set its location and energy level
light = Light()
light.set_type("POINT")
light.set_location([5, -5, 5])
light.set_energy(1000)

# define the camera intrinsics
CameraUtility.set_intrinsics_from_blender_params(1, 512, 512, lens_unit="FOV")

# read the camera positions file and convert into homogeneous camera-world transformation
with open(args.camera, "r") as f:
    for line in f.readlines():
        line = [float(x) for x in line.split()]
        matrix_world = Matrix.Translation(Vector(line[:3])) @ Euler(line[3:6], 'XYZ').to_matrix().to_4x4()
def load(data_dir: str = 'resources/IKEA', obj_categories: Union[list, str] = None,
         obj_style: str = None) -> List[MeshObject]:
    """ Loads IKEA objects based on the selected type and style.

    If multiple objects match the selection, one of them is picked randomly; if the style or the category is
    None, a random one is chosen.

    :param data_dir: The directory with all the IKEA models.
    :param obj_categories: The category to use, for example: 'bookcase'. This can also be a list of elements.
                           Available: ['bed', 'bookcase', 'chair', 'desk', 'sofa', 'table', 'wardrobe']
    :param obj_style: The IKEA style to use, for example: 'hemnes'. See data_dir for other options.
    :return: The list of loaded mesh objects.
    """
    obj_dict = IKEALoader._generate_object_dict(data_dir)

    # allow a single category string as documented above
    if isinstance(obj_categories, str):
        obj_categories = [obj_categories]

    if obj_categories is not None and obj_style is not None:
        object_lst = []
        for obj_category in obj_categories:
            object_lst.extend([obj[0] for (key, obj) in obj_dict.items()
                               if obj_style in key.lower() and obj_category in key])
        if not object_lst:
            selected_obj = random.choice(obj_dict.get(random.choice(list(obj_dict.keys()))))
            warnings.warn("Could not find object of type: {}, and style: {}. Selecting random object..."
                          .format(obj_categories, obj_style), category=Warning)
        else:
            # Multiple objects with the same type and style are possible: select randomly from the list.
            selected_obj = random.choice(object_lst)
    elif obj_categories is not None:
        object_lst = []
        for obj_category in obj_categories:
            object_lst.extend(IKEALoader._get_object_by_type(obj_category, obj_dict))
        selected_obj = random.choice(object_lst)
    elif obj_style is not None:
        object_lst = IKEALoader._get_object_by_style(obj_style, obj_dict)
        selected_obj = random.choice(object_lst)
    else:
        random_key = random.choice(list(obj_dict.keys()))
        # One key can have multiple object files as value: select randomly from the list.
        selected_obj = random.choice(obj_dict.get(random_key))

    print("Selected object: ", os.path.basename(selected_obj))
    loaded_obj = ObjectLoader.load(selected_obj)

    # extract the name from the path:
    selected_dir_name = os.path.dirname(selected_obj)
    selected_name = ""
    if os.path.basename(selected_dir_name).startswith("IKEA_"):
        selected_name = os.path.basename(selected_dir_name)
    else:
        selected_dir_name = os.path.dirname(selected_dir_name)
        if os.path.basename(selected_dir_name).startswith("IKEA_"):
            selected_name = os.path.basename(selected_dir_name)
    if selected_name:
        for obj in loaded_obj:
            obj.set_name(selected_name)

    # extract the file unit from the .obj file to convert every object to meters
    file_unit = ""
    with open(selected_obj, "r") as file:
        first_lines = [next(file) for _ in range(5)]
        for line in first_lines:
            if "File units" in line:
                file_unit = line.strip().split(" ")[-1]
                if file_unit not in ["inches", "meters", "centimeters", "millimeters"]:
                    raise Exception("The file unit type could not be found, check the selected "
                                    "file: {}".format(selected_obj))
                break

    for obj in loaded_obj:
        # convert all objects to meters
        if file_unit == "inches":
            scale = 0.0254
        elif file_unit == "centimeters":
            scale = 0.01
        elif file_unit == "millimeters":
            scale = 0.001
        elif file_unit == "meters":
            scale = 1.0
        else:
            raise Exception("The file unit type: {} is not defined".format(file_unit))
        if scale != 1.0:
            # move all object centers to the world origin and set the bounding box correctly
            bpy.ops.object.select_all(action='DESELECT')
            obj.select()
            bpy.context.view_layer.objects.active = obj.blender_obj
            # scale object down
            bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.transform.resize(value=(scale, scale, scale))
            bpy.ops.object.mode_set(mode='OBJECT')
            bpy.context.view_layer.update()
            bpy.ops.object.select_all(action='DESELECT')

    # Remove the X-axis rotation introduced by importing .obj files.
    # The object has the same pose as before, just that the rotation_euler is now [0, 0, 0].
    for obj in loaded_obj:
        obj.persist_transformation_into_mesh(location=False, rotation=True, scale=False)

    # Move the origin of the object to the world origin and on top of the X-Y plane.
    # This makes it easier to place the objects later on; it does not change the `.location`.
    for obj in loaded_obj:
        obj.move_origin_to_bottom_mean_point()
    bpy.ops.object.select_all(action='DESELECT')

    return loaded_obj
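# A minimal usage sketch (not from the source): the import path and the local IKEA dataset location are
# assumptions.
from src.utility.loader.IKEALoader import IKEALoader  # assumed module path

objs = IKEALoader.load(data_dir="resources/IKEA", obj_categories=["chair"], obj_style="hemnes")
for obj in objs:
    print(obj.get_name())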
def run(self):
    if not self.config.has_param('data_path'):
        raise Exception("Need the path to be set")
    if self.config.has_param('category') and self.config.has_param('categories'):
        raise Exception("Either set category or categories, but not both.")

    modelnet_path = self.config.get_string('data_path')
    modelnet_path = Utility.resolve_path(modelnet_path)
    if not os.path.exists(modelnet_path):
        raise Exception("ModelNet path incorrect: {}".format(modelnet_path))

    train = self.config.get_bool('train', True)
    train_str = 'train' if train else 'test'
    samples = self.config.get_int('samples', 1)
    replacement = self.config.get_bool('replacement', False)

    # the file paths are mapped here to object names
    cache_objects = {} if not replacement else None

    loaded_objects = []
    if self.config.has_param('model_name'):
        model_name = self.config.get_string('model_name')
        model_id = model_name + '.off'
        category = model_name.split('_')[0]
        category_path = os.path.join(modelnet_path, category)
        file_path = os.path.join(category_path, train_str, model_id)
        if not os.path.exists(file_path):
            raise Exception("model {} does not exist in {}".format(model_id,
                                                                   os.path.join(category_path, train_str)))
        for i in range(samples):
            current_objects = ObjectLoader.load(filepath=file_path, cached_objects=cache_objects)
            for obj in current_objects:
                obj.set_cp("category_id", category)
            loaded_objects.extend(current_objects)
    elif self.config.has_param('category'):
        category = self.config.get_string('category')
        category_path = os.path.join(modelnet_path, category)
        files = glob.glob(os.path.join(category_path, train_str, "*.off"))
        if replacement:
            selected_files = random.choices(files, k=samples)
        else:
            selected_files = random.sample(files, k=samples)
        for file_path in selected_files:
            current_objects = ObjectLoader.load(filepath=file_path, cached_objects=cache_objects)
            for obj in current_objects:
                obj.set_cp("category_id", category)
            loaded_objects.extend(current_objects)
    elif self.config.has_param('categories'):
        files = []
        for category in self.config.get_list('categories'):
            # sample the categories, making sure that the different categories are balanced in their representation
            files_in_category = glob.glob(os.path.join(modelnet_path, category, train_str, "*.off"))
            len_files_in_category = len(files_in_category)
            files += random.sample(files_in_category, k=min(len_files_in_category, 2 * samples))
        if replacement:
            selected_files = random.choices(files, k=samples)
        else:
            selected_files = random.sample(files, k=samples)
        for file_path in selected_files:
            category = file_path.split('/')[-1].split('_')[0]
            current_objects = ObjectLoader.load(filepath=file_path, cached_objects=cache_objects)
            for obj in current_objects:
                obj.set_cp("category_id", category)
            loaded_objects.extend(current_objects)
    else:
        files = []
        for entry in os.listdir(modelnet_path):
            # build the full path, otherwise the isdir check and the glob below miss the category folders
            entry_path = os.path.join(modelnet_path, entry)
            if not os.path.isdir(entry_path):
                continue
            # sample the categories, making sure that the different categories are balanced in their representation
            files_in_category = glob.glob(os.path.join(entry_path, train_str, "*.off"))
            len_files_in_category = len(files_in_category)
            files += random.sample(files_in_category, k=min(len_files_in_category, 2 * samples))
        if replacement:
            selected_files = random.choices(files, k=samples)
        else:
            selected_files = random.sample(files, k=samples)
        for file_path in selected_files:
            category = file_path.split('/')[-1].split('_')[0]
            current_objects = ObjectLoader.load(filepath=file_path, cached_objects=cache_objects)
            # set_cp returns None, so the loaded objects must not be overwritten by the result of the call
            for obj in current_objects:
                obj.set_cp("category_id", category)
            loaded_objects.extend(current_objects)

    if not loaded_objects:
        raise Exception("No objects have been loaded here, check the config.")

    if self.config.has_param('scale_path'):
        path = Utility.resolve_path(self.config.get_string('scale_path'))
        with open(path, "r") as scale_file:
            category_scale_dict = json.load(scale_file)
    else:
        category_scale_dict = None

    bpy.ops.object.select_all(action='DESELECT')
    for obj in loaded_objects:
        category = obj.get_cp("category_id")
        if self.config.get_float("manual_scale", 0.0) > 0.:
            s_value = self.config.get_float("manual_scale")
        elif category_scale_dict is None:
            s_value = 1.0
        else:
            if category in category_scale_dict:
                s_value = random.uniform(*category_scale_dict[category])
            else:
                s_value = random.uniform(*category_scale_dict['fallback'])

        # obj.select()
        # bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
        bpy.context.view_layer.objects.active = obj.blender_obj
        # scale the object so that its bounding box diagonal has length s_value
        bb = obj.get_bound_box()
        diagonal = bb[-2] - bb[0]
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.transform.resize(value=[s_value / diagonal.length] * 3)
        bpy.ops.object.mode_set(mode='OBJECT')
        obj.move_origin_to_bottom_mean_point()
        # bb = obj.get_bound_box()
        # diagonal = bb[-2] - bb[0]
        # setattr(obj.blender_obj, 'scale', [1.0 / diagonal.length] * 3)
        # print('diagonal length {}'.format(diagonal.length))
    bpy.ops.object.select_all(action='DESELECT')

    # Set the add_properties of all imported objects
    self._set_properties(loaded_objects)
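# A small side sketch (not from the source) of the scale factor used above: the object is resized so that the
# vector between two opposite bounding-box corners (bb[-2] and bb[0]) ends up with length s_value. The corner
# ordering below is a hypothetical example.
from mathutils import Vector

bb = [Vector((0.0, 0.0, 0.0)), Vector((1.0, 0.0, 0.0)), Vector((1.0, 2.0, 0.0)), Vector((0.0, 2.0, 0.0)),
      Vector((0.0, 0.0, 3.0)), Vector((1.0, 0.0, 3.0)), Vector((1.0, 2.0, 3.0)), Vector((0.0, 2.0, 3.0))]
s_value = 1.0
diagonal = bb[-2] - bb[0]                 # here: (1, 2, 3), length sqrt(14)
scale_factor = s_value / diagonal.length  # applying this factor gives a diagonal of length s_value
print(scale_factor)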