def process_scene_objects(args):
    for object in bpy.context.scene.objects:
        if object.name in ['Camera', 'Lamp']:
            continue
        bpy.context.scene.objects.active = object
        object.select = True
        if object.name == 'sphere':
            # do not touch the sphere model, which is meant to give a white albedo color for the background
            continue
        else:
            if args.remove_doubles:
                bpy.ops.object.mode_set(mode='EDIT')
                bpy.ops.mesh.remove_doubles()
                bpy.ops.object.mode_set(mode='OBJECT')
            if args.remove_iso_verts:
                bpy.ops.object.mode_set(mode='EDIT')
                bpy.ops.mesh.delete_loose(use_verts=True, use_edges=True, use_faces=False)
                bpy.ops.object.mode_set(mode='OBJECT')
            if args.edge_split:
                bpy.ops.object.modifier_add(type='EDGE_SPLIT')
                bpy.context.object.modifiers["EdgeSplit"].split_angle = 1.32645
                bpy.ops.object.modifier_apply(apply_as='DATA', modifier="EdgeSplit")
            if args.normalization_mode is not None:
                # scale to be within a unit sphere (r=0.5, d=1)
                '''
                v = object.data.vertices
                verts_np = util.read_verts(object)  # NOTE: gets vertices in object local space
                trans_v, scale_f = util.pc_normalize(verts_np, norm_type=args.normalization_mode)
                # the axis conversion done at import does not change the data in-place,
                # so we do it manually
                trans_v_axis_replaced = trans_v.copy()
                trans_v_axis_replaced[0] = trans_v[0]
                trans_v_axis_replaced[1] = -trans_v[2]
                trans_v_axis_replaced[2] = trans_v[1]
                '''
                verts_np = get_obj_verts(object, read_global=True)
                trans_v, scale_f = util.pc_normalize(
                    verts_np, norm_type=args.normalization_mode)
                trans_v_axis_replaced = trans_v
                bpy.ops.transform.translate(value=(trans_v_axis_replaced[0],
                                                   trans_v_axis_replaced[1],
                                                   trans_v_axis_replaced[2]))
                bpy.ops.object.transform_apply(location=True)
                bpy.ops.transform.resize(value=(scale_f, scale_f, scale_f))
                bpy.ops.object.transform_apply(scale=True)
                bpy.ops.export_scene.obj(filepath='./test.obj', use_selection=True)  # debug export
        object.select = False
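# NOTE: get_obj_verts() is a local helper (also used via blender_util below) whose
# implementation is not shown in this section. The following is a minimal sketch of
# the assumed behavior, not the repo's actual code: it reads an object's mesh
# vertices as an (N, 3) numpy array, optionally transformed into world space
# (Blender 2.7x API, where Matrix * Vector is valid).
import bpy
import numpy as np

def get_obj_verts(obj, read_global=False):
    verts = []
    for v in obj.data.vertices:
        # transform into global (world) coordinates when requested
        co = obj.matrix_world * v.co if read_global else v.co
        verts.append([co[0], co[1], co[2]])
    return np.array(verts)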
            if args.remove_iso_verts:
                bpy.ops.object.mode_set(mode='EDIT')
                bpy.ops.mesh.delete_loose(use_verts=True, use_edges=True, use_faces=False)
                bpy.ops.object.mode_set(mode='OBJECT')
            if args.edge_split:
                bpy.ops.object.modifier_add(type='EDGE_SPLIT')
                bpy.context.object.modifiers["EdgeSplit"].split_angle = 1.32645
                bpy.ops.object.modifier_apply(apply_as='DATA', modifier="EdgeSplit")
            if args.normalization_mode is not None:
                # scale to be within a unit sphere (r=0.5, d=1)
                v = object.data.vertices
                verts_np = util.read_verts(object)
                trans_v, scale_f = util.pc_normalize(
                    verts_np, norm_type=args.normalization_mode)
                # the axis conversion done at import does not change the data in-place,
                # so we do it manually
                trans_v_axis_replaced = trans_v.copy()
                trans_v_axis_replaced[1] = -trans_v[2]
                trans_v_axis_replaced[2] = trans_v[1]
                bpy.ops.transform.translate(value=(trans_v_axis_replaced[0],
                                                   trans_v_axis_replaced[1],
                                                   trans_v_axis_replaced[2]))
                bpy.ops.object.transform_apply(location=True)
                bpy.ops.transform.resize(value=(scale_f, scale_f, scale_f))
                bpy.ops.object.transform_apply(scale=True)
                #bpy.ops.export_scene.obj(filepath='test.obj', use_selection=True)
        object.select = False
def process_scene_objects_CYCLES(args):
    # only worry about data in the startup scene
    # remove lamps and cameras
    for bpy_data_iter in (
            bpy.data.lamps,
            bpy.data.cameras
    ):
        for id_data in bpy_data_iter:
            bpy_data_iter.remove(id_data)
    for ob in bpy.context.scene.objects:
        if ob.type != 'MESH':
            ob.select = True
        else:
            ob.select = False
    bpy.ops.object.delete()

    # join all objects together:
    # make the first one the active object, select the rest, then join
    '''
    for ob in bpy.context.scene.objects:
        if ob.type == 'MESH':
            ob.select = True
            bpy.context.scene.objects.active = ob
        else:
            ob.select = False
    bpy.ops.object.join()
    '''
    #assert len(bpy.context.scene.objects) == 1
    '''
    bpy.ops.object.select_all(action='DESELECT')
    obj_names = []
    for i, object in enumerate(bpy.context.scene.objects):
        obj_names.append(object.name)
    bpy.ops.object.mode_set(mode='EDIT')
    for obj_name in obj_names:
        bpy.data.objects[obj_name].select = True
    bpy.ops.mesh.separate(type='LOOSE')
    bpy.ops.object.select_all(action='DESELECT')
    '''

    # normalization
    # apply all modifiers on all objects to get the actual meshes
    for i, object in enumerate(bpy.context.scene.objects):
        bpy.context.scene.objects.active = object
        object.select = True
        for modifier in object.modifiers:
            bpy.ops.object.modifier_apply(modifier=modifier.name)

    # gather all actual points for calculating the transformation
    all_verts = []
    for i, object in enumerate(bpy.context.scene.objects):
        verts_np = get_obj_verts(object, read_global=True)
        all_verts += list(verts_np)
    all_verts_np = np.array(all_verts)
    trans_v, scale_f = util.pc_normalize(all_verts_np, norm_type=args.normalization_mode)
    #bpy.ops.wm.save_as_mainfile(filepath='test0.blend')

    for i, object in enumerate(bpy.context.scene.objects):
        bpy.context.scene.objects.active = object
        object.select = True
        if args.normalization_mode is not None:
            # too complicated to scale the object here, so we only translate it to the origin without touching the scale
            translate_obj(object, trans_v)
            # debug
            rotate_obj(object, [0, 0, 180])
            #scale_obj(object, [scale_f, scale_f, scale_f])
            #bpy.ops.transform.resize(value=(scale_f, scale_f, scale_f))
            #bpy.ops.object.transform_apply(scale=True)
            #bpy.ops.export_scene.obj(filepath='test.obj', use_selection=True)
        object.select = False

    # save for debugging
    bpy.ops.wm.save_as_mainfile(filepath='test1.blend')
    diag_length = 1 / scale_f
    return diag_length
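# NOTE: util.pc_normalize() is not shown in this section. The sketch below is an
# assumption about its contract, inferred only from how its outputs are used here:
# (points + trans_v) * scale_f centers the cloud at the origin, and for
# norm_type='diag2sphere' scales the bounding-box diagonal to 1 (so the
# diag_length returned above equals 1 / scale_f). It is not the repo's actual
# implementation.
import numpy as np

def pc_normalize(points, norm_type='diag2sphere'):
    bb_min = points.min(axis=0)
    bb_max = points.max(axis=0)
    trans_v = -(bb_min + bb_max) / 2.0  # move the bounding-box center to the origin
    if norm_type == 'diag2sphere':
        scale_f = 1.0 / np.linalg.norm(bb_max - bb_min)  # bounding-box diagonal -> 1
    else:
        centered = points + trans_v
        scale_f = 0.5 / np.max(np.linalg.norm(centered, axis=1))  # fit inside an r=0.5 sphere
    return trans_v, scale_f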
bpy.ops.wm.open_mainfile(filepath=args.scene)

# process the object in the scene
if not args.no_obj_normalization:
    # Get the filename only from the initial file path.
    filename = os.path.basename(args.scene)
    # Use splitext() to get filename and extension separately.
    (target_obj_name, ext) = os.path.splitext(filename)
    for object in bpy.context.scene.objects:
        if object.name != target_obj_name:
            continue
        bpy.context.scene.objects.active = object
        object.select = True
        verts_np = blender_util.get_obj_verts(object, read_global=True)
        trans_v, scale_f = util.pc_normalize(verts_np, norm_type='diag2sphere')
        trans_v_axis_replaced = trans_v
        bpy.ops.transform.translate(value=(trans_v_axis_replaced[0],
                                           trans_v_axis_replaced[1],
                                           trans_v_axis_replaced[2]))
        bpy.ops.object.transform_apply(location=True)
        bpy.ops.transform.resize(value=(scale_f, scale_f, scale_f))
        bpy.ops.object.transform_apply(scale=True)
        #bpy.ops.export_scene.obj(filepath='./test.obj', use_selection=True)
        object.select = False

bpy.data.scenes["Scene"].render.engine = 'CYCLES'
depth_file_output, normal_file_output, albedo_file_output, matidx_file_output = blender_util.rendering_pass_setup(
    args)
bpy.ops.wm.quit_blender()

vox_mesh = util.mesh_from_voxels(
    vox_mat_filename, int(256 / args.vox_resolution))  # already diagonal=1, centered at zero
#vox_mesh.export('vox_mesh.obj')

# read in segmentation points and labels, then normalize them
import shapenet_part_io as spart
if not spart.segmentation_exists(cls_id, modelname):
    print('Segmentation does not exist, skip!')
    bpy.ops.wm.quit_blender()
seg_points_labels, num_label = spart.read_pts_label_data(cls_id, modelname)
seg_points_labels[:, :3] = util.transform_points(
    seg_points_labels[:, :3], util.transformation_ShapeNet_v1tov2)
trans_v, scale_f = util.pc_normalize(seg_points_labels[:, :3], norm_type=args.normalization_mode)
seg_points_labels[:, :3] = seg_points_labels[:, :3] + trans_v
seg_points_labels[:, :3] = seg_points_labels[:, :3] * scale_f
#util.write_ply(seg_points_labels[:, :3], 'seg_points.ply')

blender_util.clear_scene_objects()
depth_file_output, normal_file_output, albedo_file_output, matidx_file_output = blender_util.rendering_pass_setup(
    args)
# this axis conversion does not change the data in-place
bpy.ops.import_scene.obj(filepath=args.obj, use_smooth_groups=False,
                         use_split_objects=False, use_split_groups=False)
blender_util.process_scene_objects(args)  # including normalization