transformation_list.append(transformation)
# static_point_cloud = util.get_point_cloud(lidar_points)
# static_point_cloud.transform(np.linalg.inv(transformation))
# point_cloud_mapping += static_point_cloud

# redraw the ego-vehicle box under the current frame's transformation
ax.clear()
plot_start_self_box_points, plot_end_self_box_points = util.get_start_end_box_points(self_box)
plot_start_self_box_points = util.transform_points(transformation, plot_start_self_box_points)
plot_end_self_box_points = util.transform_points(transformation, plot_end_self_box_points)
util.plot_box_lines(ax, plot_start_self_box_points, plot_end_self_box_points, 'green')

# transform and scatter the ground points in the same frame
plot_ground_points = util.transform_points(transformation, plot_ground_points)
ax.scatter(plot_ground_points[:, 0], plot_ground_points[:, 1], plot_ground_points[:, 2],
           c='orange', s=1)
def render_passes_CYCLES(depth_file_output, normal_file_output, albedo_file_output,
                         matidx_file_output, glossdir_file_output, args, rot_angles_list,
                         diag_length=1., subfolder_name='gt', output_format='exr'):
    scaling_factor_unit2scene = diag_length / 1.
    scaling_factor_scene2unit = 1. / diag_length
    scene = bpy.context.scene

    ######### filename for output ##############
    if 'ShapeNetCore' not in args.obj:
        model_identifier = args.obj.split('/')[-1].split('.')[0]
    else:
        model_identifier = args.obj.split('/')[-3]
    fp = os.path.join(args.output_folder, subfolder_name, model_identifier)
    scene.render.image_settings.file_format = 'PNG'  # set output format to .png

    # setup camera
    cam_init_location = (0.0 * scaling_factor_unit2scene,
                         0.5 * scaling_factor_unit2scene,
                         0.0 * scaling_factor_unit2scene)
    cam = get_default_camera()
    cam.location = cam_init_location
    cam.data.type = 'ORTHO'
    cam.data.ortho_scale = args.orth_scale * scaling_factor_unit2scene
    cam.data.clip_start = 0
    cam.data.clip_end = 100  # a value that is large enough
    cam_constraint = cam.constraints.new(type='TRACK_TO')
    cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
    cam_constraint.up_axis = 'UP_Y'
    b_empty = get_lookat_target(cam)
    cam_constraint.target = b_empty  # track to an empty object at the origin

    # setup light
    sun_lamp = setup_sunlamp(b_empty)

    for xyz_angle in rot_angles_list:
        # rotate the camera around the origin
        euler_rot_mat = euler2mat(radians(xyz_angle[0]), radians(xyz_angle[1]),
                                  radians(xyz_angle[2]), 'sxyz')
        new_cam_location = np.dot(euler_rot_mat, np.array(cam_init_location))
        cam.location = new_cam_location
        # the sun lamp follows the camera
        sun_lamp.location = new_cam_location

        scene.render.filepath = fp + '-rotx=%.2f_roty=%.2f_rotz=%.2f' % (
            xyz_angle[0], xyz_angle[1], xyz_angle[2])
        depth_file_output.file_slots[0].path = scene.render.filepath + "_depth"
        normal_file_output.file_slots[0].path = scene.render.filepath + "_normal"
        albedo_file_output.file_slots[0].path = scene.render.filepath + "_albedo"
        matidx_file_output.file_slots[0].path = scene.render.filepath + "_matidx"
        glossdir_file_output.file_slots[0].path = scene.render.filepath + "_glossdir"

        # render and write out
        bpy.ops.render.render(write_still=True, animation=False)  # render still

        depth_arr, hard_mask_arr = util.read_depth_and_get_mask(
            scene.render.filepath + "_depth0001.exr",
            depth_scaling_factor=scaling_factor_scene2unit)
        normal_arr = util.read_normal(scene.render.filepath + "_normal0001.exr",
                                      mask_arr=hard_mask_arr)
        # in CYCLES, the normal is in the world system; rotate it into the camera system
        normal_cam_arr = util.transform_points(
            np.reshape(normal_arr, (-1, 3)),
            blender_camera_util.get_world2bcam_R_matrix_from_blender(cam))
        normal_arr = np.reshape(normal_cam_arr, normal_arr.shape)
        albedo_arr = util.read_exr_image(scene.render.filepath + "_albedo0001.exr")
        matidx_arr = util.read_exr_image(scene.render.filepath + "_matidx0001.exr")[:, :, 0]
        glossdir_arr = util.read_exr_image(scene.render.filepath + "_glossdir0001.exr")

        # clip the value ranges
        depth_arr = np.clip(depth_arr, a_min=0, a_max=1)
        normal_arr = np.clip(normal_arr, a_min=-1, a_max=1)
        albedo_arr = np.clip(albedo_arr, a_min=0, a_max=1)
        glossdir_arr = np.clip(glossdir_arr, a_min=0, a_max=1)
        # both output branches below write roughness, so this assignment must stay enabled
        roughness_arr = assemble_roughness_map(matidx_arr)

        # write out passes
        if output_format == 'exr':
            util.write_exr_image(depth_arr, scene.render.filepath + "_depth.exr")
            #util.write_exr_image(xyz_sworld_arr, scene.render.filepath + "_wxyz.exr")
            util.write_exr_image(normal_arr, scene.render.filepath + "_normal.exr")
            #util.write_exr_image(normal_sworld_arr, scene.render.filepath + "_wnormal.exr")
            util.write_exr_image(albedo_arr, scene.render.filepath + "_albedo.exr")
            util.write_exr_image(hard_mask_arr, scene.render.filepath + "_mask.exr")
            util.write_exr_image(glossdir_arr, scene.render.filepath + "_glossdir.exr")
            util.write_exr_image(roughness_arr, scene.render.filepath + "_roughness.exr")
        elif output_format == 'png':
            depth_arr = np.array(depth_arr * 255, dtype=np.uint8)
            depth_pil = Image.fromarray(depth_arr)
            depth_pil.save(scene.render.filepath + "_depth.png")
            # map normals from [-1, 1] to [0, 255]
            normal_arr = np.array((normal_arr + 1) / 2. * 255, dtype=np.uint8)
            normal_pil = Image.fromarray(normal_arr)
            normal_pil.save(scene.render.filepath + "_normal.png")
            albedo_arr = np.array(albedo_arr * 255, dtype=np.uint8)
            albedo_pil = Image.fromarray(albedo_arr)
            albedo_pil.save(scene.render.filepath + "_albedo.png")
            hard_mask_arr = np.array(hard_mask_arr * 255, dtype=np.uint8)
            mask_pil = Image.fromarray(hard_mask_arr)
            mask_pil.save(scene.render.filepath + "_mask.png")
            glossdir_arr = np.array(glossdir_arr * 255, dtype=np.uint8)
            glossdir_pil = Image.fromarray(glossdir_arr)
            glossdir_pil.save(scene.render.filepath + "_glossdir.png")
            roughness_arr = np.array(roughness_arr * 255, dtype=np.uint8)
            roughness_pil = Image.fromarray(roughness_arr)
            roughness_pil.save(scene.render.filepath + "_roughness.png")

        # remove intermediate renderings
        #os.remove(scene.render.filepath + '.png')
        os.remove(scene.render.filepath + "_normal0001.exr")
        os.remove(scene.render.filepath + "_depth0001.exr")
        os.remove(scene.render.filepath + "_albedo0001.exr")
        os.remove(scene.render.filepath + "_matidx0001.exr")
        os.remove(scene.render.filepath + "_glossdir0001.exr")
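
# Example driver for render_passes_CYCLES (a minimal sketch, not part of the original
# pipeline). It assumes the five compositor file-output nodes already exist, e.g.
# created by a rendering_pass_setup-style helper; the variable names below are
# hypothetical.
#
#   rot_angles_list = [[0, 0, z] for z in range(0, 360, 45)]  # orbit about the z axis
#   render_passes_CYCLES(depth_out, normal_out, albedo_out, matidx_out, gloss_out,
#                        args, rot_angles_list, diag_length=1.,
#                        subfolder_name='gt', output_format='png')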
def scan_point_cloud(depth_file_output, normal_file_output, albedo_file_output,
                     matidx_file_output, args):
    scene = bpy.context.scene

    ######### filename for output ##############
    if 'ShapeNetCore' not in args.obj:
        model_identifier = args.obj.split('/')[-1].split('.')[0]
        correct_normal = False
    else:
        model_identifier = args.obj.split('/')[-3]
        correct_normal = True
    fp = os.path.join(args.output_folder, model_identifier)
    scene.render.image_settings.file_format = 'PNG'  # set output format to .png

    # scan the shapenet shape into a point cloud with features
    all_points_normals_colors_mindices = None
    for i in range(0, 3):
        cam = get_default_camera()
        if i == 1:
            cam_init_location = (0, 0, 0.5)
        else:
            cam_init_location = (0, 0.5, 0)
        cam.data.type = 'ORTHO'
        cam.data.ortho_scale = args.orth_scale
        cam.data.clip_start = 0
        cam.data.clip_end = 100  # a value that is large enough
        cam_constraint = cam.constraints.new(type='TRACK_TO')
        cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
        cam_constraint.up_axis = 'UP_Y'
        b_empty = get_lookat_target(cam)
        cam_constraint.target = b_empty  # track to an empty object at the origin

        for rot_angle in range(0, 359, 20):
            if i == 0:
                xyz_angle = [rot_angle, 0, 0]
            elif i == 1:
                xyz_angle = [0, rot_angle, 0]
            elif i == 2:
                xyz_angle = [0, 0, rot_angle]
            # rotate camera
            euler_rot_mat = euler2mat(radians(xyz_angle[0]), radians(xyz_angle[1]),
                                      radians(xyz_angle[2]), 'sxyz')
            new_cam_location = np.dot(euler_rot_mat, np.array(cam_init_location))
            cam.location = new_cam_location

            scene.render.filepath = fp + '-rotx=%.2f_roty=%.2f_rotz=%.2f' % (
                xyz_angle[0], xyz_angle[1], xyz_angle[2])
            depth_file_output.file_slots[0].path = scene.render.filepath + "_depth"
            normal_file_output.file_slots[0].path = scene.render.filepath + "_normal"
            albedo_file_output.file_slots[0].path = scene.render.filepath + "_albedo"
            matidx_file_output.file_slots[0].path = scene.render.filepath + "_matidx"

            # render and write out
            bpy.ops.render.render(write_still=True)  # render still

            depth_arr, hard_mask_arr = util.read_depth_and_get_mask(
                scene.render.filepath + "_depth0001.exr")
            normal_arr = util.read_and_correct_normal(
                scene.render.filepath + "_normal0001.exr",
                correct_normal=correct_normal, mask_arr=hard_mask_arr)
            albedo_arr = util.read_exr_image(scene.render.filepath + "_albedo0001.exr")
            matidx_arr = util.read_exr_image(scene.render.filepath + "_matidx0001.exr")[:, :, 0]

            # clip the value ranges
            depth_arr = np.clip(depth_arr, a_min=0, a_max=1)
            normal_arr = np.clip(normal_arr, a_min=-1, a_max=1)
            albedo_arr = np.clip(albedo_arr, a_min=0, a_max=1)

            # process renderings to get the point cloud
            xyz_arr = util.get_3D_points_from_ortho_depth(depth_arr, args.orth_scale)
            xyz_normal_rgb_midx = np.reshape(
                np.concatenate([xyz_arr, normal_arr, albedo_arr,
                                np.expand_dims(matidx_arr, -1)], axis=-1),
                (-1, 10))
            xyz_normal_rgb_midx = util.remove_bg_points(xyz_normal_rgb_midx)

            # transform from depth to a 3D world point cloud;
            # R_axis_switching_BtoS switches the axes back to the obj file convention for output
            RT_bcam2world = blender_camera_util.get_bcam2world_RT_matrix_from_blender(cam)
            xyz_normal_rgb_midx[:, :3] = util.transform_points(
                xyz_normal_rgb_midx[:, :3],
                np.dot(R_axis_switching_BtoS, RT_bcam2world))
            xyz_normal_rgb_midx[:, 3:6] = util.transform_points(
                xyz_normal_rgb_midx[:, 3:6],
                np.dot(R_axis_switching_BtoS, RT_bcam2world))

            if all_points_normals_colors_mindices is None:
                all_points_normals_colors_mindices = xyz_normal_rgb_midx
            else:
                all_points_normals_colors_mindices = np.concatenate(
                    [all_points_normals_colors_mindices, xyz_normal_rgb_midx], axis=0)

            # remove intermediate renderings
            os.remove(scene.render.filepath + '.png')
            os.remove(scene.render.filepath + "_normal0001.exr")
            os.remove(scene.render.filepath + "_depth0001.exr")
            os.remove(scene.render.filepath + "_albedo0001.exr")
            os.remove(scene.render.filepath + "_matidx0001.exr")

    return all_points_normals_colors_mindices
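
# Usage sketch for scan_point_cloud (not in the original file). The returned array has
# 10 columns: xyz (0:3), normal (3:6), albedo rgb (6:9), material index (9).
# util.write_ply taking (points, path) is an assumption based on the commented-out
# call in the segmentation code below.
#
#   pts = scan_point_cloud(depth_file_output, normal_file_output,
#                          albedo_file_output, matidx_file_output, args)
#   if pts is not None:
#       util.write_ply(pts[:, :3], 'scan_points.ply')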
if not os.path.exists(vox_mat_filename):
    print('Voxelization file does not exist, skip!')
    bpy.ops.wm.quit_blender()
# the voxel mesh already has diagonal=1 and is centered at zero
vox_mesh = util.mesh_from_voxels(vox_mat_filename, int(256 / args.vox_resolution))
#vox_mesh.export('vox_mesh.obj')

# read in segmentation points and labels, then normalize them
import shapenet_part_io as spart
if not spart.segmentation_exists(cls_id, modelname):
    print('Segmentation does not exist, skip!')
    bpy.ops.wm.quit_blender()
seg_points_labels, num_label = spart.read_pts_label_data(cls_id, modelname)
seg_points_labels[:, :3] = util.transform_points(seg_points_labels[:, :3],
                                                 util.transformation_ShapeNet_v1tov2)
trans_v, scale_f = util.pc_normalize(seg_points_labels[:, :3],
                                     norm_type=args.normalization_mode)
seg_points_labels[:, :3] = seg_points_labels[:, :3] + trans_v
seg_points_labels[:, :3] = seg_points_labels[:, :3] * scale_f
#util.write_ply(seg_points_labels[:, :3], 'seg_points.ply')

blender_util.clear_scene_objects()
depth_file_output, normal_file_output, albedo_file_output, matidx_file_output = \
    blender_util.rendering_pass_setup(args)
# this axis conversion does not change the data in-place
bpy.ops.import_scene.obj(filepath=args.obj, use_smooth_groups=False,
                         use_split_objects=False, use_split_groups=False)
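
# For reference, a minimal sketch of what a pc_normalize-style helper computes; the
# real util.pc_normalize is not shown in this file, so the norm_type handling here is
# an assumption. The returned pair is applied as points = (points + trans_v) * scale_f,
# matching the two assignments above.
def pc_normalize_sketch(points, norm_type='diagonal'):
    bbox_min = points.min(axis=0)
    bbox_max = points.max(axis=0)
    trans_v = -(bbox_min + bbox_max) / 2.0       # move the bounding-box center to the origin
    extents = bbox_max - bbox_min
    if norm_type == 'diagonal':
        scale_f = 1.0 / np.linalg.norm(extents)  # bounding-box diagonal becomes 1
    else:
        scale_f = 1.0 / extents.max()            # longest side becomes 1
    return trans_v, scale_f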