def _import_object(self):
    """Import the mesh of the target object from a .blend or .ply file."""
    bpy.ops.object.select_all(action='DESELECT')
    class_name = expandpath(self.config.scenario_setup.target_object)
    blendfile = expandpath(self.config.parts[class_name], check_file=False)
    # try blender file first
    if os.path.exists(blendfile):
        try:
            bpy_obj_name = self.config.parts['name'][class_name]
        except KeyError:
            bpy_obj_name = class_name
        blnd.append_object(blendfile, bpy_obj_name)
        new_obj = bpy.data.objects[bpy_obj_name]
        scale_type = 'blend_scale'
    # if no blender file is given, try the ply file
    else:
        ply_path = expandpath(self.config.parts.ply[class_name], check_file=True)
        bpy.ops.import_mesh.ply(filepath=ply_path)
        new_obj = bpy.context.object
        scale_type = 'ply_scale'
    new_obj.name = class_name
    self.obj = new_obj
    self._rescale_object(scale_type)

def are_xml_element_equal(e1, e2):
    """Test two given XML elements for equality."""
    if len(e1) != len(e2):
        return False
    if e1.tag != e2.tag:
        return False
    if expandpath(e1.text) != expandpath(e2.text):
        return False
    if e1.tail != e2.tail:
        return False
    if e1.attrib != e2.attrib:
        return False
    return True

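# Minimal usage sketch (elements below are hypothetical, assuming
# xml.etree.ElementTree and that expandpath resolves environment variables
# such as $DATA):
#
#   import xml.etree.ElementTree as ET
#   e1 = ET.fromstring('<path>$DATA/obj.ply</path>')
#   e2 = ET.fromstring('<path>/home/user/data/obj.ply</path>')
#   are_xml_element_equal(e1, e2)  # True if $DATA expands to /home/user/data
#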
def build_directory_info(base_path: str, **kwargs):
    """Build a dynamic struct with the directory configuration of a RenderedObject dataset.

    The base_path should be expanded and not contain environment variables or
    other system-dependent abbreviations.

    Args:
        base_path (str): path to the root directory of the dataset
        **dense_features (bool): true if the dataset contains a dense feature
            representation of the object
    """
    # initialize
    dir_info = DynamicStruct()
    dir_info.images = DynamicStruct()
    dir_info.annotations = DynamicStruct()

    # setup all path related information
    dir_info.base_path = expandpath(base_path)
    dir_info.annotations.base_path = os.path.join(dir_info.base_path, 'Annotations')
    dir_info.annotations.opengl = os.path.join(dir_info.annotations.base_path, 'OpenGL')
    dir_info.annotations.opencv = os.path.join(dir_info.annotations.base_path, 'OpenCV')
    dir_info.images.base_path = os.path.join(dir_info.base_path, 'Images')
    dir_info.images.rgb = os.path.join(dir_info.images.base_path, 'rgb')
    dir_info.images.range = os.path.join(dir_info.images.base_path, 'range')
    dir_info.images.depth = os.path.join(dir_info.images.base_path, 'depth')
    dir_info.images.mask = os.path.join(dir_info.images.base_path, 'mask')
    dir_info.images.backdrop = os.path.join(dir_info.images.base_path, 'backdrop')

    dense_features = kwargs.get('dense_features', False)
    if dense_features:
        dir_info.images.dense_features = os.path.join(dir_info.images.base_path, 'dense_features')

    return dir_info

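# Minimal usage sketch (the dataset root below is hypothetical):
#
#   dir_info = build_directory_info('/data/RenderedObjects/MyDataset', dense_features=True)
#   dir_info.images.rgb            # -> '/data/RenderedObjects/MyDataset/Images/rgb'
#   dir_info.annotations.opengl    # -> '/data/RenderedObjects/MyDataset/Annotations/OpenGL'
#   dir_info.images.dense_features # only present because dense_features=True
#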
def test_class(self):
    # test class integrity
    self._instance = bsm.BaseSceneManager()
    self.assertIsInstance(self._instance, bsm.BaseSceneManager)

    # this is not an active test, but it should raise an error if something is wrong
    self._instance.set_environment_texture(expandpath(self._test_texture_path))

def setup_scene(self):
    """Set up the entire scene.

    Here, we simply load the main blender file from disk.
    """
    bpy.ops.wm.open_mainfile(filepath=expandpath(self.config.scene_setup.blend_file))

    # we need to hide all dropboxes and dropzones in the viewport, otherwise
    # occlusion testing will not work, because blender's ray_cast method
    # returns hits on empties!
    self.logger.info("Hiding all dropzones from viewport")
    bpy.data.collections['Dropzones'].hide_viewport = True

def get_environment_textures(base_path):
    """Determine if the user wants to set a specific environment texture, or
    randomly select from a directory.

    Args:
        base_path(str): path to a single environment texture file, or to a
            directory containing environment textures
    """
    environment_textures = expandpath(base_path)
    if os.path.isdir(environment_textures):
        files = os.listdir(environment_textures)
        environment_textures = [os.path.join(environment_textures, f) for f in files]
    else:
        environment_textures = [environment_textures]

    return environment_textures

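# Minimal usage sketch (the texture path below is hypothetical):
#
#   textures = get_environment_textures('~/textures/hdri')
#   # if the expanded path is a directory -> list of all files inside it,
#   # otherwise -> a single-element list with the expanded path itself
#   env_txt = random.choice(textures)
#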
def setUp(self):
    self._testfile = 'test.blend'
    self._testpath = os.path.join(os.getcwd(), 'tests', 'data', self._testfile)

    # load test file (2 objects, 1 camera, and 1 light)
    bpy.ops.wm.open_mainfile(filepath=expandpath(self._testpath))

    # get objects
    self._obj1 = bpy.context.scene.objects['Obj1']
    self._obj2 = bpy.context.scene.objects['Obj2']
    self._obj_non_visible = bpy.context.scene.objects['NonVisibleObj']
    self._cam = bpy.context.scene.objects['Camera']
    self._zeroing = Vector((0, 0, 0))
    self._w = 640
    self._h = 480
    self._setup_renderer()

def setUp(self):
    cam_info = DynamicStruct()
    cam_info.intrinsic = None
    cam_info.width = 640
    cam_info.height = 480
    cam_info.sensor_width = 0
    cam_info.focal_length = 0
    cam_info.hfov = 0
    cam_info.intrinsics_conversion_mode = None
    self._cam_info = cam_info

    # load blender test file
    self._testfile = 'test.blend'
    self._testpath = os.path.join(os.getcwd(), 'tests', 'data', self._testfile)

    # load test file (2 objects, 1 camera, and 1 light)
    bpy.ops.wm.open_mainfile(filepath=expandpath(self._testpath))
    self._cam = bpy.context.scene.objects['Camera']

def main():
    parser = get_cmd_argparser()
    args = parser.parse_known_args(args=get_argv())[0]
    import_abr(args.abr_path)

    if not os.path.exists(args.path) or not os.path.isdir(args.path):
        raise RuntimeError(
            f'Path "{args.path}" does not exist or is not a directory')

    dirpath_range = os.path.join(args.path, 'Images', 'range')
    dirpath_depth = os.path.join(args.path, 'Images', 'depth')
    if not os.path.exists(dirpath_depth):
        os.mkdir(dirpath_depth)

    # get and parse config
    config = BaseConfiguration()
    config_filepath = os.path.join(expandpath(args.path), 'Dataset.cfg')
    if not os.path.exists(config_filepath):
        raise RuntimeError(f'File {config_filepath} does not exist')
    config.parse_file(config_filepath)

    # get specific configs
    fx, fy, cx, cy = camera_utils._intrinsics_to_numpy(config.camera_info)
    calibration_matrix = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    res_x = config.camera_info.width
    res_y = config.camera_info.height
    if res_x in [None, 0] or res_y in [None, 0]:
        res_x = cx * 2
        res_y = cy * 2

    # loop over files
    for fpath_in in pathlib.Path(dirpath_range).iterdir():
        if not fpath_in.is_file():
            continue
        fpath_out = os.path.join(dirpath_depth, fpath_in.stem + '.png')
        fpath_in = str(fpath_in)
        camera_utils.project_pinhole_range_to_rectified_depth(
            fpath_in, fpath_out, calibration_matrix, res_x, res_y, args.scale)

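# For reference, the calibration matrix assembled above is the standard pinhole
# intrinsic matrix (the numeric values here are purely illustrative, e.g.
# fx = fy = 600, cx = 320, cy = 240):
#
#   K = [[fx,  0, cx],     [[600,   0, 320],
#        [ 0, fy, cy],  =   [  0, 600, 240],
#        [ 0,  0,  1]]      [  0,   0,   1]]
#
# With these example values, the resolution fallback (cx * 2, cy * 2)
# corresponds to 640 x 480.
#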
def randomize_textured_objects_textures(self):
    for obj_name in self.config.scenario_setup.textured_objects:
        obj_txt_filepath = expandpath(random.choice(self.objects_textures))
        self.renderman.set_object_texture(obj_name, obj_txt_filepath)

def randomize_environment_texture(self):
    # set some environment texture, randomize, and render
    env_txt_filepath = expandpath(random.choice(self.environment_textures))
    self.renderman.set_environment_texture(env_txt_filepath)

def setup_objects(self, objects: list, bpy_collection: str = 'TargetObjects'):
    """This method populates the scene with objects.

    Object types and number of objects will be taken from the configuration.
    The format to specify objects is ObjectType:Number, where ObjectType should
    be the name of an object that exists in the blender file, and Number
    indicates how often the object shall be duplicated.

    Args:
        objects(list): list of ObjectType:Number to set up
        bpy_collection(str): Name of the bpy collection the given objects are
            linked to in the .blend file. Default: TargetObjects
            If the given objects are non-target (i.e., they populate the scene
            but no information regarding them is stored), use a different
            collection.

    Returns:
        objs(list): list of dicts to handle the desired objects
    """
    # let's start with an empty list
    objs = []
    obk = ObjectBookkeeper()

    # first reset the render pass index for all panda model objects (links,
    # hand, etc)
    links = [f'Link-{i}' for i in range(8)] + ['Finger-Left', 'Finger-Right', 'Hand']
    for link in links:
        bpy.data.objects[link].pass_index = 0

    # extract all objects from the configuration. An object has a certain
    # type, as well as an own id. This information is stored in the objs
    # list, which contains a dict. The dict contains the following keys:
    #       id_mask             used for mask computation, computed below
    #       object_class_name   type-name of the object
    #       object_class_id     model type ID (simply incremental numbers)
    #       object_id           instance ID of the object
    #       bpy                 blender object reference
    for class_id, obj_spec in enumerate(objects):
        if obj_spec is None or obj_spec == '':
            return
        class_name, obj_count = obj_spec.split(':')

        # here we distinguish if we copy a part from the proto objects
        # within a scene, or if we have to load it from file
        is_proto_object = not class_name.startswith('parts.')
        if not is_proto_object:
            # split off the prefix for all files that we load from blender
            class_name = class_name[6:]

        # TODO: file loading happens only very late in this loop. This might
        # be an issue for large object counts and could be changed to
        # load-once copy-often.
        for j in range(int(obj_count)):
            # First, deselect everything
            bpy.ops.object.select_all(action='DESELECT')
            if is_proto_object:
                # duplicate proto-object
                blnd.select_object(class_name)
                bpy.ops.object.duplicate()
                new_obj = bpy.context.object
            else:
                # we need to load this object from file. This could be
                # either a blender file, or a PLY file
                blendfile = expandpath(self.config.parts[class_name], check_file=False)
                if os.path.exists(blendfile):
                    # this is a blender file, so we should load it
                    # we can now load the object into blender
                    # try-except logic to handle objects from the same blend file but
                    # different class names, to allow loading the same objects with,
                    # e.g., different scales
                    try:
                        bpy_obj_name = self.config.parts['name'][class_name]
                    except KeyError:
                        bpy_obj_name = class_name
                    blnd.append_object(blendfile, bpy_obj_name)
                    # NOTE: bpy.context.object is **not** the object that we are
                    # interested in here! We need to select it via its original name
                    # first, then we rename it to be able to select additional
                    # objects later on
                    new_obj = bpy.data.objects[bpy_obj_name]
                    new_obj.name = f'{class_name}.{j:03d}'
                    # try to rescale object according to its blend_scale if given in the config
                    try:
                        new_obj.scale = Vector(self.config.parts.blend_scale[class_name])
                        bpy.ops.object.transform_apply(location=False,
                                                       rotation=False,
                                                       scale=True,
                                                       properties=False)
                    except KeyError:
                        # log and keep going
                        self.logger.info(
                            f'No blend_scale for obj {class_name} given. Skipping!')
                else:
                    # no blender file given, so we will load the PLY file
                    # NOTE: no try-except logic for ply since we are not bound to
                    # object names as for .blend
                    ply_path = expandpath(self.config.parts.ply[class_name], check_file=True)
                    bpy.ops.import_mesh.ply(filepath=ply_path)
                    # here we can use bpy.context.object!
                    new_obj = bpy.context.object
                    new_obj.name = f'{class_name}.{j:03d}'
                    # try to rescale object according to its ply_scale if given in the config
                    try:
                        new_obj.scale = Vector(self.config.parts.ply_scale[class_name])
                        bpy.ops.object.transform_apply(location=False,
                                                       rotation=False,
                                                       scale=True,
                                                       properties=False)
                    except KeyError:
                        # log and keep going
                        self.logger.info(
                            f'No ply_scale for obj {class_name} given. Skipping!')

            # move object to collection: in case of debugging
            try:
                collection = bpy.data.collections[bpy_collection]
            except KeyError:
                collection = bpy.data.collections.new(bpy_collection)
                bpy.context.scene.collection.children.link(collection)
            if new_obj.name not in collection.objects:
                collection.objects.link(new_obj)

            # bookkeep instance
            obk.add(class_name)

            # append all information
            objs.append({
                'id_mask': '',
                'object_class_name': class_name,
                'object_class_id': class_id,
                'object_id': j,
                'bpy': new_obj,
                'visible': None
            })

    # build mask ids for the compositor of the format _N_M, where N is the model
    # id, and M is the object id
    w_class = ceil(log(len(obk), 10)) if len(obk) else 0  # format width for number of model types
    for i, obj in enumerate(objs):
        # format width for objects with the same model
        w_obj = ceil(log(obk[obj['object_class_name']]['instances'], 10))
        id_mask = f"_{obj['object_class_id']:0{w_class}}_{obj['object_id']:0{w_obj}}"
        obj['id_mask'] = id_mask

    return objs

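# Illustrative call (the object names below are hypothetical and would have to
# exist in the .blend file or under config.parts, respectively):
#
#   objs = self.setup_objects(['TargetCube:2', 'parts.cap:1'],
#                             bpy_collection='TargetObjects')
#   # 'TargetCube' is duplicated twice from the proto object in the scene,
#   # 'parts.cap' is loaded once from the file configured in config.parts['cap'],
#   # and each returned dict carries an id_mask such as '_0_0' for the compositor.
#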
def test_expandpath(self):
    self.assertEqual(self._test_path, io.expandpath('$TESTVAR/test'))

def main():
    # parse command arguments
    cmd_parser = get_cmd_argparser()
    cmd_args = cmd_parser.parse_known_args(args=get_argv())[0]  # need to parse to get aps and abr

    # print help if requested
    # NOTE: we check for config because, if a config is given, all the available
    #       config options will be printed as well. However, if no config is
    #       given, calling --help will still work.
    if cmd_args.help and 'config' not in cmd_args:
        cmd_parser.print_help()
        sys.exit(0)

    # import abr
    import_abr(cmd_args.abr_path)

    # get logger instance
    logger = configure_logger(cmd_args.logging_level)

    # pretty print available scenarios?
    scene_types = get_scene_types()
    if cmd_args.list_scenes:
        print("List of possible scenes:")
        for k, _ in scene_types.items():
            print(f"  {k}")
        sys.exit(0)

    # change all keys to lower-case
    scene_types = dict((k.lower(), v) for k, v in scene_types.items())

    # check scene_type in config
    scene_type_str = determine_scene_type(cmd_args.config)
    if scene_type_str.lower() not in scene_types:
        raise RuntimeError(
            f"Invalid configuration: Unknown scene_type {scene_type_str}")

    # instantiate configuration
    config = scene_types[scene_type_str.lower()]['config']()

    # combine parsers and parse command line arguments
    parser = argparse.ArgumentParser(prog="blender -b -P " + __file__,
                                     parents=[cmd_parser] + config.get_argparsers(),
                                     add_help=False)
    argv = get_argv()
    args = parser.parse_args(args=argv)

    # show help only here, because this will include the help for the dataset
    # configuration
    if args.help:
        parser.print_help()
        sys.exit(0)

    # check if the configuration file exists
    configfile = expandpath(args.config, check_file=True)

    # parse configuration from file, and then update with arguments
    config.parse_file(configfile)
    config.parse_args(argv=argv)

    # instantiate the scene.
    # NOTE: we do not automatically create splitting configs anymore. You need
    #       to run the script twice, with two different configurations, to
    #       generate the split. This is significantly easier than internally
    #       maintaining split configurations.
    scene = scene_types[scene_type_str.lower()]['scene'](config=config,
                                                         render_mode=cmd_args.render_mode)

    # save the config early. In case something goes wrong during rendering, we
    # at least have the config + potentially some images
    scene.dump_config()

    # generate the dataset
    success = False
    success = scene.generate_dataset()
    if not success:
        logger.error("Error while generating dataset")

    # tear down scene. should be handled by blender, but a scene might have
    # other things opened that it should close gracefully
    scene.teardown()