def __init__(self, config):
    """Set up the Pix3D loader: resolve the data path and pre-compute the
    list of files belonging to the requested category.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    data_path = self.config.get_string("data_path", "resources/pix3d")
    self._data_path = Utility.resolve_path(data_path)
    category = self.config.get_string("used_category")
    self._used_category = category
    # collect the candidate files once, so run() only has to pick from them
    self._files_with_fitting_category = Pix3DLoader.get_files_with_category(category, self._data_path)
def _set_shading(self, entity, value):
    """Switch the shading mode of the given entity.

    :param entity: An entity to modify. Type: bpy.types.Object
    :param value: Configuration data. Type: dict.
    """
    # the shared helper operates on a list, so wrap the single entity
    entities = [entity]
    LoaderInterface.change_shading_mode(entities, value)
def __init__(self, config):
    """Resolve the house file path and derive the SUNCG root directory.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self.house_path = Utility.resolve_path(self.config.get_string("path"))
    # by default the dataset root is assumed two directories above the house file
    default_suncg_dir = os.path.join(os.path.dirname(self.house_path), "../..")
    self.suncg_dir = self.config.get_string("suncg_path", default_suncg_dir)
def run(self):
    """Extracts floors in the following steps:
    1. Searches for the specified objects.
    2. Splits the surfaces which point upwards at a specified level away.

    Non-mesh selections are skipped with a warning; optional custom
    properties are assigned to the newly split floor objects.
    """
    mesh_objects = []
    for obj in self.config.get_list("selector"):
        if obj.type == "MESH":
            mesh_objects.append(MeshObject(obj))
        else:
            warnings.warn("The object: {} is not a mesh but was selected in the FloorExtractor!".format(obj.name))
    floors = FloorExtractor.extract(
        mesh_objects=mesh_objects,
        compare_angle_degrees=radians(self.config.get_float('compare_angle_degrees', 7.5)),
        compare_height=self.config.get_float('compare_height', 0.15),
        new_name_for_object=self.config.get_string("name_for_split_obj", "Floor"),
        should_skip_if_object_is_already_there=self.config.get_bool("should_skip_if_object_is_already_there", False))
    # forward any requested custom properties to the freshly created floors
    add_properties = self.config.get_raw_dict("add_properties", {})
    if add_properties:
        property_config = Config({"add_properties": add_properties})
        LoaderInterface(property_config)._set_properties(floors)
def run(self):
    """Load the 3D-FRONT scene: room meshes from the json file plus the
    referenced 3D-FUTURE furniture objects.

    :raises Exception: If the json path, its suffix, or the 3D-FUTURE
                       model folder is invalid.
    """
    if not os.path.exists(self.json_path):
        raise Exception("The given path does not exists: {}".format(
            self.json_path))
    if not self.json_path.endswith(".json"):
        raise Exception(
            "The given path does not point to a .json file: {}".format(
                self.json_path))
    if not os.path.exists(self.future_model_path):
        raise Exception(
            "The 3D future model path does not exist: {}".format(
                self.future_model_path))
    # load data from json file
    with open(self.json_path, "r") as json_file:
        data = json.load(json_file)
    self._create_mesh_objects_from_file(data)
    all_loaded_furniture = self._load_furniture_objs(data)
    self._move_and_duplicate_furniture(data, all_loaded_furniture)
    # add an identifier to the obj
    for obj in self.created_objects:
        obj["is_3d_front"] = True
    # BUGFIX: _set_properties is an instance method; the previous unbound
    # call `LoaderInterface._set_properties(self.created_objects)` passed the
    # object list as `self` and omitted the objects argument.
    self._set_properties(self.created_objects)
def __init__(self, config):
    """Resolve all dataset paths and pre-compute the list of ShapeNet files
    and cc texture assets to use.

    Fixes a resource leak: the json files were previously opened via
    ``json.load(open(...))`` and never closed; they are now read with
    context managers. Asset names are extracted with ``os.path.basename``
    instead of a non-portable ``split("/")``.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self._shapenet_path = Utility.resolve_path(
        self.config.get_string("shapenet_path", SHAPNET_PATH))
    self._num_objects = self.config.get_int("num_objects", 3)
    self._output_dir = Utility.resolve_path(
        self.config.get_string("output_dir"))
    with open(Utility.resolve_path(SHAPENET_OBJECTS_JSON_PATH), 'r') as f:
        self._objects_used = json.load(f)
    with open(Utility.resolve_path(SHAPENET_TABLES_JSON_PATH), 'r') as f:
        self._tables_used = json.load(f)
    with open(Utility.resolve_path(TAXNOMY_FILE_PATH), 'r') as f:
        self._taxonomy = json.load(f)
    self._files_used = []
    for synset_name, obj_ids in self._objects_used.items():
        # map the human-readable synset name back to its ShapeNet synset id
        synset_id = next(tax['synsetId'] for tax in self._taxonomy
                         if tax['name'] == synset_name)
        for obj_id in obj_ids:
            self._files_used.append({
                "shapenet_synset_id": synset_id,
                "shapenet_obj_id": obj_id,
                "shapenet_synset_name": synset_name
            })
    self._cctexture_path = Utility.resolve_path(
        self.config.get_string("cctexture_path", CCTEXTURE_PATH))
    # the available cc texture assets are the folder names inside the texture path
    self._cc_assets = [
        os.path.basename(f)
        for f in glob.glob(os.path.join(self._cctexture_path, "*"))
    ]
def __init__(self, config):
    """Resolve the scene file and texture folders and initialise the NYU
    label id mapping (used for semantic segmentation).

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self._file_path = Utility.resolve_path(self.config.get_string("file_path"))
    self._texture_folder = Utility.resolve_path(self.config.get_string("texture_folder"))
    # the default unknown texture folder is not included inside of the scenenet texture folder;
    # the textures in this folder are used, if the object has no available texture
    fallback_folder = os.path.join(self._texture_folder, "unknown")
    self._unknown_texture_folder = Utility.resolve_path(
        self.config.get_string("unknown_texture_folder", fallback_folder))
    mapping_path = os.path.join('resources', 'id_mappings', 'nyu_idset.csv')
    LabelIdMapping.assign_mapping(Utility.resolve_path(mapping_path))
    if not LabelIdMapping.label_id_map:
        print("Warning: The category labeling file could not be found -> no semantic segmentation available!")
    else:
        bpy.context.scene.world["category_id"] = LabelIdMapping.label_id_map["void"]
def __init__(self, config):
    """Read all BOP-related parameters from the config and derive dataset flags.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    # make the bop_toolkit importable if its path was handed in
    for sys_path in self.config.get_list("sys_paths"):
        if 'bop_toolkit' in sys_path:
            sys.path.append(sys_path)
    self.sample_objects = self.config.get_bool("sample_objects", False)
    if self.sample_objects:
        self.num_of_objs_to_sample = self.config.get_int("num_of_objs_to_sample")
        self.obj_instances_limit = self.config.get_int("obj_instances_limit", -1)
    self.cam_type = self.config.get_string("cam_type", "")
    self.source_frame = self.config.get_list("source_frame", ["X", "-Y", "-Z"])
    self.bop_dataset_path = self.config.get_string("bop_dataset_path")
    self.scene_id = self.config.get_int("scene_id", -1)
    self.obj_ids = self.config.get_list("obj_ids", [])
    # explicit object ids or random sampling implies duplicates may be loaded
    self.allow_duplication = bool(self.obj_ids) or self.sample_objects
    self.split = self.config.get_string("split", "test")
    self.model_type = self.config.get_string("model_type", "")
    self.scale = 0.001 if self.config.get_bool("mm2m", False) else 1
    self.bop_dataset_name = os.path.basename(self.bop_dataset_path)
    # these datasets ship their textures as separate image files
    self._has_external_texture = self.bop_dataset_name in ["ycbv", "ruapc"]
def run(self):
    """ Uses the loaded .obj files and picks one randomly and loads it """
    chosen_file = random.choice(self._files_with_fitting_synset)
    objs = Utility.import_objects(chosen_file)
    # the source id is encoded in the path: .../<synset>/<source_id>/models/model.obj
    source_id = pathlib.PurePath(chosen_file).parts[-3]
    for obj in objs:
        obj["used_synset_id"] = self._used_synset_id
        obj["used_source_id"] = source_id
    self._correct_materials(objs)
    self._set_properties(objs)
    if "void" in LabelIdMapping.label_id_map:  # Check if using an id map
        for obj in objs:
            obj["category_id"] = LabelIdMapping.label_id_map["void"]
    # removes the x axis rotation found in all ShapeNet objects, this is caused by importing .obj files
    # the object has the same pose as before, just that the rotation_euler is now [0, 0, 0]
    LoaderInterface.remove_x_axis_rotation(objs)
    # move the origin of the object to the world origin and on top of the X-Y plane
    # makes it easier to place them later on, this does not change the `.location`
    LoaderInterface.move_obj_origin_to_bottom_mean_point(objs)
    bpy.ops.object.select_all(action='DESELECT')
def run(self):
    """ use the pose parameters to generate the mesh and loads it to the scene. """
    pose_body, betas = self._get_pose_parameters()
    # load parametric Model
    body_model, faces = self._load_parametric_body_model()
    # Generate Body representations using SMPL model
    body_repr = body_model(pose_body=pose_body, betas=betas)
    # Generate .obj file represents the selected pose
    obj_file = self._write_body_mesh_to_obj_file(body_repr, faces)
    objs = Utility.import_objects(obj_file)
    self._correct_materials(objs)
    self._set_properties(objs)
    # set the shading mode explicitly to smooth
    self.change_shading_mode(objs, "SMOOTH")
    if "void" in LabelIdMapping.label_id_map:  # Check if using an id map
        for obj in objs:
            obj['category_id'] = LabelIdMapping.label_id_map["void"]
    # importing .obj files adds an x-axis rotation; remove it so that the
    # rotation_euler becomes [0, 0, 0] while the pose stays the same
    LoaderInterface.remove_x_axis_rotation(objs)
    # move the origin to the bottom mean point so the body sits on the X-Y
    # plane; this does not change the `.location`
    LoaderInterface.move_obj_origin_to_bottom_mean_point(objs)
    bpy.ops.object.select_all(action='DESELECT')
def __init__(self, config):
    """Resolve the ShapeNet path and collect all files of the used synset.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self._data_path = Utility.resolve_path(self.config.get_string("data_path"))
    self._used_synset_id = self.config.get_string("used_synset_id")
    self._used_source_id = self.config.get_string("used_source_id", "")
    taxonomy_json = os.path.join(self._data_path, "taxonomy.json")
    # collect all files fitting the requested synset/source id up front
    self._files_with_fitting_synset = ShapeNetLoader.get_files_with_synset(
        self._used_synset_id, self._used_source_id, taxonomy_json, self._data_path)
def __init__(self, config):
    """Resolve the house path, derive the SUNCG root, prepare the object and
    material caches, and initialise the NYU label id mapping.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self.house_path = Utility.resolve_path(self.config.get_string("path"))
    # the SUNCG root is by default two directories above the house file
    default_suncg_dir = os.path.join(os.path.dirname(self.house_path), "../..")
    self.suncg_dir = self.config.get_string("suncg_path", default_suncg_dir)
    self._collection_of_loaded_objs = {}
    # there are only two types of materials, textures and diffuse
    self._collection_of_loaded_mats = {"texture": {}, "diffuse": {}}
    mapping_csv = os.path.join('resources', 'id_mappings', 'nyu_idset.csv')
    LabelIdMapping.assign_mapping(Utility.resolve_path(mapping_csv))
def __init__(self, config: Config):
    """Resolve the 3D-FRONT label mapping file and verify that it exists.

    :param config: The module configuration.
    :raises Exception: If the mapping csv cannot be found.
    """
    LoaderInterface.__init__(self, config)
    default_mapping = os.path.join("resources", "front_3D", "3D_front_mapping.csv")
    self.mapping_file = Utility.resolve_path(
        self.config.get_string("mapping_file", default_mapping))
    if not os.path.exists(self.mapping_file):
        raise Exception("The mapping file could not be found: {}".format(self.mapping_file))
def __init__(self, config):
    """Resolve the haven data path and check that it contains the expected
    "hdris" sub-folder.

    Uses guard clauses instead of nesting and fixes the error message,
    which previously referred to a misspelled "hdfris" folder.

    :param config: The module configuration.
    :raises Exception: If the data path or its "hdris" sub-folder is missing.
    """
    LoaderInterface.__init__(self, config)
    self.data_path = self.config.get_string("data_path", os.path.join("resources", "haven"))
    if not os.path.exists(self.data_path):
        raise Exception("The data path does not exists: {}".format(self.data_path))
    self.data_path = os.path.join(self.data_path, "hdris")
    if not os.path.exists(self.data_path):
        # BUGFIX: message previously said "folder name hdfris" for the "hdris" folder
        raise Exception("The folder: {} does not contain a folder named hdris. Please use the "
                        "download script.".format(self.data_path))
def __init__(self, config):
    """Resolve the house/SUNCG paths and initialise the NYU label id mapping.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self.house_path = Utility.resolve_path(self.config.get_string("path"))
    # the SUNCG root is by default two directories above the house file
    default_suncg_dir = os.path.join(os.path.dirname(self.house_path), "../..")
    self.suncg_dir = self.config.get_string("suncg_path", default_suncg_dir)
    mapping_csv = os.path.join('resources', 'id_mappings', 'nyu_idset.csv')
    LabelIdMapping.assign_mapping(Utility.resolve_path(mapping_csv))
def __init__(self, config):
    """Resolve the IKEA data dir, build the object dictionary and read the
    optional type/style filters.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    default_dir = os.path.join("resources", "IKEA")
    self._data_dir = Utility.resolve_path(self.config.get_string("data_dir", default_dir))
    self._obj_dict = dict()
    self._generate_object_dict()
    # both filters are optional; None means "no restriction"
    self._obj_type = self.config.get_raw_value("obj_type", None)
    self._obj_style = self.config.get_raw_value("obj_style", None)
def __init__(self, config):
    """Store the supported mapping of blend-file section names to Blender
    datablock parameter names.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    # supported pairs of (ID type/section name, datablock parameter name)
    supported_pairs = [
        ("/Camera", "cameras"),
        ("/Collection", "collections"),
        ("/Image", "images"),
        ("/Light", "lights"),
        ("/Material", "materials"),
        ("/Mesh", "meshes"),
        ("/Object", "objects"),
        ("/Texture", "textures"),
    ]
    self.known_datablock_names = dict(supported_pairs)
def __init__(self, config):
    """Resolve the BigBird dataset path and build name<->id lookup tables.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self._bigbird_path = Utility.resolve_path(self.config.get_string("bigbird_path"))
    self._num_objects = self.config.get_int("num_objects", 3)
    # bidirectional mapping between object names and their integer ids
    self._obj_name2id = {name: idx for idx, name in enumerate(OBJECT_NAMES)}
    self._obj_id2name = {idx: name for name, idx in self._obj_name2id.items()}
    self._obj_ids = list(self._obj_id2name.keys())
def __init__(self, config):
    """Resolve the ShapeNet/MSCOCO paths and load the object list json.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self._shapenet_path = Utility.resolve_path(self.config.get_string("shapenet_path", SHAPNET_PATH))
    self._mscoco_path = Utility.resolve_path(self.config.get_string("mscoco_path", MSCOCO_PATH))
    self._obj_list_path = Utility.resolve_path(self.config.get_string("obj_list_path", OBJ_LIST_PATH))
    self._num_objects = self.config.get_int("num_objects", 3)
    # the json file lists the candidate objects available for loading
    with open(self._obj_list_path) as obj_list_file:
        self._obj_list = json.load(obj_list_file)
def __init__(self, config):
    """Read the IKEA data directory plus optional category/style filters.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    default_dir = os.path.join("resources", "IKEA")
    self._data_dir = Utility.resolve_path(self.config.get_string("data_dir", default_dir))
    self._obj_categories = None
    if self.config.has_param("category"):
        categories = self.config.get_raw_value("category", None)
        # normalise to a list, even when a single category was given
        self._obj_categories = categories if isinstance(categories, list) else [categories]
    self._obj_style = self.config.get_raw_value("style", None)
def __init__(self, config):
    """Resolve the scene file path and the texture folders.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self._file_path = Utility.resolve_path(self.config.get_string("file_path"))
    self._texture_folder = Utility.resolve_path(self.config.get_string("texture_folder"))
    # the default unknown texture folder is not included inside of the scenenet
    # texture folder; its textures are used whenever an object has no texture of its own
    fallback = os.path.join(self._texture_folder, "unknown")
    self._unknown_texture_folder = Utility.resolve_path(
        self.config.get_string("unknown_texture_folder", fallback))
def __init__(self, config):
    """Read the AMASS body-model and pose configuration values.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    default_data_path = os.path.join("resources", "AMASS")
    self._data_path = Utility.resolve_path(self.config.get_string("data_path", default_data_path))
    # Body Model Specs: if no gender is configured, one is picked at random
    random_gender = random.choice(["male", "female", "neutral"])
    self._used_body_model_gender = self.config.get_string("body_model_gender", random_gender)
    # These numbers are based on a recommendation from the authors. refer to visualization tutorial from the
    # authors: https://github.com/nghorbani/amass/blob/master/notebooks/01-AMASS_Visualization.ipynb
    self._num_betas = 10  # number of body parameters
    self._num_dmpls = 8  # number of DMPL parameters
    # Pose Specs
    self._used_sub_dataset_id = self.config.get_string("sub_dataset_id")
    self._used_subject_id = self.config.get_string("subject_id", "")
    self._used_sequence_id = self.config.get_int("sequence_id", -1)
    self._used_frame_id = self.config.get_int("frame_id", -1)
def __init__(self, config: Config):
    """Resolve all 3D-FRONT paths, check the mapping file and read it.

    :param config: The module configuration.
    :raises Exception: If the mapping csv cannot be found.
    """
    LoaderInterface.__init__(self, config)
    self.json_path = Utility.resolve_path(self.config.get_string("json_path"))
    self.future_model_path = Utility.resolve_path(self.config.get_string("3D_future_model_path"))
    default_mapping = os.path.join("resources", "front_3D", "3D_front_mapping.csv")
    self.mapping_file = Utility.resolve_path(self.config.get_string("mapping_file", default_mapping))
    if not os.path.exists(self.mapping_file):
        raise Exception("The mapping file could not be found: {}".format(self.mapping_file))
    _, self.mapping = LabelIdMapping.read_csv_mapping(self.mapping_file)
    # a list of all newly created objects
    self.created_objects = []
def __init__(self, config):
    """Read the AMASS configuration and discover the supported mocap datasets.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    default_data_path = os.path.join("resources", "AMASS")
    self._data_path = Utility.resolve_path(self.config.get_string("data_path", default_data_path))
    # Body Model Specs
    self._used_body_model_gender = self.config.get_string("body_model_gender")
    # These numbers are based on a recommendation from the authors; see the visualization
    # tutorial: https://github.com/nghorbani/amass/blob/master/notebooks/01-AMASS_Visualization.ipynb
    self._num_betas = 10  # number of body parameters
    self._num_dmpls = 8  # number of DMPL parameters
    # Pose Specs
    self._used_sub_dataset_id = self.config.get_string("sub_dataset_id")
    self._used_subject_id = self.config.get_string("subject_id")
    self._used_sequence_id = self.config.get_string("sequence_id")
    self._used_frame_id = self.config.get_string("frame_id", "")
    # Get the currently supported mocap datasets by this loader
    taxonomy_file_path = os.path.join(self._data_path, "taxonomy.json")
    AMASSLoader._get_supported_mocap_datasets(taxonomy_file_path, self._data_path)
def __init__(self, config):
    """Read the OCRTOC camera intrinsics and object metadata.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    self.bop_dataset_name = "OCRTOC"
    # camera intrinsics; keys: cx, cy, fx, fy, depth_scale, height, width
    with open(self.config.get_string('camera_config_path')) as camera_file:
        self.camera_info = json.load(camera_file)
    self.sample_objects = self.config.get_bool("sample_objects", False)
    if self.sample_objects:
        self.num_of_objs_to_sample = self.config.get_int("num_of_objs_to_sample")
        self.obj_instances_limit = self.config.get_int("obj_instances_limit", -1)
    self.dataset = self.config.get_string("dataset", "/home/lsy/dataset/ocrtoc")
    self.allow_duplication = self.config.get_bool("allow_duplication", False)
    self.scale = 1
    # table describing every available object in the dataset
    self.obj_df = pd.read_csv(os.path.join(self.dataset, 'objects.csv'))
def __init__(self, config):
    """Read all BOP parameters and the model-path-to-category-id mapping.

    Cleans up the previous mid-function ``from os.path import expanduser``
    import (marked "ugly, but it works") and the string-concatenated path by
    using ``os.path.expanduser`` with ``os.path.join`` instead.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    # make the bop_toolkit importable if its path was handed in
    sys_paths = self.config.get_list("sys_paths")
    for sys_path in sys_paths:
        if 'bop_toolkit' in sys_path:
            sys.path.append(sys_path)
    self.sample_objects = self.config.get_bool("sample_objects", False)
    if self.sample_objects:
        self.num_of_objs_to_sample = self.config.get_int("num_of_objs_to_sample")
        self.obj_instances_limit = self.config.get_int("obj_instances_limit", -1)
    self.cam_type = self.config.get_string("cam_type", "")
    self.source_frame = self.config.get_list("source_frame", ["X", "-Y", "-Z"])
    self.bop_dataset_path = self.config.get_string("bop_dataset_path")
    self.scene_id = self.config.get_int("scene_id", -1)
    self.obj_ids = self.config.get_list("obj_ids", [])
    # explicit object ids or random sampling implies duplicates may be loaded
    if self.obj_ids or self.sample_objects:
        self.allow_duplication = True
    else:
        self.allow_duplication = False
    self.split = self.config.get_string("split", "test")
    self.model_type = self.config.get_string("model_type", "")
    self.scale = 0.001 if self.config.get_bool("mm2m", False) else 1
    self.bop_dataset_name = os.path.basename(self.bop_dataset_path)
    # these datasets ship their textures as separate image files
    self._has_external_texture = self.bop_dataset_name in ["ycbv", "ruapc"]
    # load the mapping from model paths to category ids
    path_to_category_id_path = os.path.join(
        os.path.expanduser("~"), "BOP", "synthetic", "path_to_category_id.yaml")
    with open(path_to_category_id_path, 'r') as f:
        self.path_to_category_id = yaml.safe_load(f)
def __init__(self, config):
    """Collect the names of all datablock types supported by the Blender
    Python API.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    # every subclass of bpy.types.ID is a datablock type Blender can store
    datablock_names = []
    for cls in bpy.types.ID.__subclasses__():
        datablock_names.append(cls.__name__)
    self.known_datablock_names = datablock_names
def __init__(self, config):
    """Initialise the loader and remember the RE ground material pattern.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
    # set a RE-specific material name pattern to look for in the selected objects
    material_pattern = "re_ground_mat.*"
    self.target_material = material_pattern
def __init__(self, config):
    """Forward the configuration to the common LoaderInterface base class.

    :param config: The module configuration.
    """
    LoaderInterface.__init__(self, config)
def run(self):
    """ Extracts floors in the following steps:
    1. Searchs for the specified object.
    2. Splits the surfaces which point upwards at a specified level away.
    """
    # read all configuration values up front
    entities = self.config.get_list("selector")
    compare_angle = radians(self.config.get_float('compare_angle_degrees', 7.5))
    compare_height = self.config.get_float('compare_height', 0.15)
    new_name_for_object = self.config.get_string("name_for_split_obj", "Floor")
    add_properties = self.config.get_raw_dict("add_properties", {})
    # set the up_vector
    up_vec = mathutils.Vector([0, 0, 1])
    up_vec_upwards = self.config.get_bool("up_vector_upwards", True)
    if not up_vec_upwards:
        up_vec *= -1.0
    # optional explicit list of split heights, read from a file
    height_list = []
    if self.config.has_param("height_list_path"):
        height_file_path = Utility.resolve_path(self.config.get_string('height_list_path'))
        with open(height_file_path) as file:
            import ast
            height_list = [float(val) for val in ast.literal_eval(file.read())]
    bpy.ops.object.select_all(action='DESELECT')
    newly_created_objects = []
    for obj in entities:
        # only mesh objects can be split
        if obj.type != "MESH":
            warnings.warn("The object: {} is not a mesh but was selected in the FloorExtractor!".format(obj.name))
            continue
        obj.select_set(True)
        bpy.context.view_layer.objects.active = obj
        # edit mode is required for bmesh-based face selection
        bpy.ops.object.mode_set(mode='EDIT')
        mesh = obj.data
        bm = bmesh.from_edit_mesh(mesh)
        bm.faces.ensure_lookup_table()
        if height_list:
            # select faces at each of the given heights, then separate them
            bpy.ops.mesh.select_all(action='DESELECT')
            counter = 0
            for height_val in height_list:
                counter = FloorExtractor.split_at_height_value(bm, height_val, compare_height, up_vec, compare_angle, obj.matrix_world)
            if counter:
                bpy.ops.mesh.separate(type='SELECTED')
        else:
            try:
                from sklearn.cluster import MeanShift, estimate_bandwidth
            except ImportError:
                raise ImportError("If no height_list_path is defined, the sklearn lib has to be installed: "
                                  "By adding \"scikit-learn\" to the \"setup\"/\"pip\" in the config file.")
            # no height list was provided, try to estimate them on its own
            # first get a list of all height values of the median points, which are inside of the defined
            # compare angle range
            list_of_median_poses = [FloorExtractor.get_median_face_pose(f, obj.matrix_world)[2] for f in bm.faces
                                    if FloorExtractor.check_face_angle(f, obj.matrix_world, up_vec, compare_angle)]
            list_of_median_poses = np.reshape(list_of_median_poses, (-1, 1))
            # The following bandwidth can be automatically detected using
            bandwidth = estimate_bandwidth(list_of_median_poses, quantile=0.2, n_samples=500)
            ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
            ms.fit(list_of_median_poses)
            # if the up vector is negative the maximum value is searched
            if up_vec_upwards:
                height_value = np.min(ms.cluster_centers_)
            else:
                height_value = np.max(ms.cluster_centers_)
            bpy.ops.mesh.select_all(action='DESELECT')
            counter = FloorExtractor.split_at_height_value(bm, height_value, compare_height, up_vec, compare_angle, obj.matrix_world)
            if counter:
                bpy.ops.mesh.separate(type='SELECTED')
        # after separating, exactly one new object should be selected besides the active one
        selected_objects = bpy.context.selected_objects
        if selected_objects:
            if len(selected_objects) == 2:
                selected_objects = [o for o in selected_objects if o != bpy.context.view_layer.objects.active]
                selected_objects[0].name = new_name_for_object
                newly_created_objects.append(selected_objects[0])
            else:
                raise Exception("There is more than one selection after splitting, this should not happen!")
        else:
            raise Exception("No floor object was constructed!")
        bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='DESELECT')
    # optionally assign custom properties to the newly created floor objects
    if add_properties:
        config = Config({"add_properties": add_properties})
        loader_interface = LoaderInterface(config)
        loader_interface._set_properties(newly_created_objects)