down_factor = args.downsample_factor

# Rescale the intrinsics for the downsampled images; the last row stays (0, 0, 1)
intrinsic /= down_factor
intrinsic[2, 2] = 1

# Only use gt pose 0 to align the first frame.
# DO NOT use other gt poses here.
T_cam_to_world = gt_poses[0]

T_gt = []
T_est = []
for i in range(args.start_idx, args.end_idx + 1):
    print('loading frame {}'.format(i))

    # Load depth (TUM uint16 convention), convert to meters, and downsample
    depth = o3d.io.read_image('{}/{}.png'.format(depth_path, i))
    depth = np.asarray(depth) / depth_scale
    depth = depth[::down_factor, ::down_factor]

    # Unproject depth to a per-pixel vertex map in the camera frame
    vertex_map = transforms.unproject(depth, intrinsic)

    # Load color, normalize to [0, 1], and downsample
    color_map = np.asarray(o3d.io.read_image('{}/{}.png'.format(
        rgb_path, i))).astype(float) / 255.0
    color_map = color_map[::down_factor, ::down_factor]

    # Load the precomputed normal map and downsample
    normal_map = np.load('{}/{}.npy'.format(normal_path, i))
    normal_map = normal_map[::down_factor, ::down_factor]

    if i > 1:
        print('Frame-to-model icp')
        # Register the current frame against the (subsampled) model points and
        # normals, starting from the previous camera pose
        T_world_to_cam = np.linalg.inv(T_cam_to_world)
        T_world_to_cam = icp(m.points[::down_factor], m.normals[::down_factor],
                             vertex_map, normal_map, intrinsic,
intrinsic_struct = o3d.io.read_pinhole_camera_intrinsic('intrinsics.json')
intrinsic = np.array(intrinsic_struct.intrinsic_matrix)

depth_path = os.path.join(args.path, 'depth')
normal_path = os.path.join(args.path, 'normal')

# TUM convention -- uint16 depth value to float meters
depth_scale = 5000.0

# Source: load depth and rescale to meters
source_depth = o3d.io.read_image('{}/{}.png'.format(
    depth_path, args.source_idx))
source_depth = np.asarray(source_depth) / depth_scale

# Unproject depth to a vertex map (H, W, 3) and reshape to a point cloud (H*W, 3)
source_vertex_map = transforms.unproject(source_depth, intrinsic)
source_points = source_vertex_map.reshape((-1, 3))

# Load the normal map (H, W, 3) and reshape to point cloud normals (H*W, 3)
source_normal_map = np.load('{}/{}.npy'.format(normal_path, args.source_idx))
source_normals = source_normal_map.reshape((-1, 3))

# Prepare the target similarly, but keep the image format for projective association
target_depth = o3d.io.read_image('{}/{}.png'.format(
    depth_path, args.target_idx))
target_depth = np.asarray(target_depth) / depth_scale
target_vertex_map = transforms.unproject(target_depth, intrinsic)
target_normal_map = np.load('{}/{}.npy'.format(normal_path, args.target_idx))
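
# For reference: a minimal sketch of what a pinhole unprojection such as
# transforms.unproject could look like. The real transforms module is not shown
# in this snippet, so the name unproject_sketch, its signature, and its body
# are assumptions rather than the module's actual code.
import numpy as np

def unproject_sketch(depth, intrinsic):
    """Back-project an (H, W) depth map in meters into an (H, W, 3) vertex map
    expressed in the camera frame, given a 3x3 pinhole intrinsic matrix."""
    fx, fy = intrinsic[0, 0], intrinsic[1, 1]
    cx, cy = intrinsic[0, 2], intrinsic[1, 2]

    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))

    # Pinhole model: z = depth, x = (u - cx) * z / fx, y = (v - cy) * z / fy
    z = depth
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.stack((x, y, z), axis=-1)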
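
# A minimal sketch of the projective data association mentioned above: project
# points that are already expressed in the target camera frame into the target
# image, then read the target vertex/normal maps at the projected pixels.
# projective_associate and the depth_diff_thresh value are hypothetical and not
# part of the original code.
def projective_associate(points_in_target_cam, target_vertex_map,
                         target_normal_map, intrinsic, depth_diff_thresh=0.07):
    fx, fy = intrinsic[0, 0], intrinsic[1, 1]
    cx, cy = intrinsic[0, 2], intrinsic[1, 2]
    h, w = target_vertex_map.shape[:2]

    # Keep only points in front of the camera before projecting
    p = points_in_target_cam[points_in_target_cam[:, 2] > 0]
    u = np.round(fx * p[:, 0] / p[:, 2] + cx).astype(int)
    v = np.round(fy * p[:, 1] / p[:, 2] + cy).astype(int)

    # Keep projections that land inside the target image
    in_bounds = (u >= 0) & (u < w) & (v >= 0) & (v < h)
    p, u, v = p[in_bounds], u[in_bounds], v[in_bounds]

    q = target_vertex_map[v, u]   # associated target vertices
    n = target_normal_map[v, u]   # associated target normals

    # Reject pairs whose depths disagree too much (occlusions, invalid pixels)
    close = np.abs(q[:, 2] - p[:, 2]) < depth_diff_thresh
    return p[close], q[close], n[close]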
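
# The icp() used in the frame-to-model loop is also not shown here. As a rough
# illustration only, one Gauss-Newton step of a point-to-plane objective over
# associated pairs (p_i, q_i, n_i) could look like the sketch below; the actual
# icp() signature and internals may differ.
def point_to_plane_step(source_points, target_points, target_normals):
    # Residuals r_i = n_i . (p_i - q_i)
    r = np.sum(target_normals * (source_points - target_points), axis=1)

    # Linearization around the identity with update delta = (omega, t):
    # r_i(delta) ~= r_i + (p_i x n_i) . omega + n_i . t
    J = np.hstack((np.cross(source_points, target_normals), target_normals))
    delta, *_ = np.linalg.lstsq(J, -r, rcond=None)

    omega, t = delta[:3], delta[3:]
    # First-order (small-angle) rotation update; a full implementation would
    # use the SO(3) exponential map instead
    R = np.eye(3) + np.array([[0.0, -omega[2], omega[1]],
                              [omega[2], 0.0, -omega[0]],
                              [-omega[1], omega[0], 0.0]])
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T  # incremental transform to compose onto the current pose estimate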