def preprocess_and_convert_to_numpy(nifti_scan: nib.Nifti1Image,
                                    nifti_mask: nib.Nifti1Image) -> tuple:
    """
    Convert scan and label to numpy arrays and perform preprocessing

    Return: Tuple(np.array, np.array)
    """
    np_scan = nifti_scan.get_fdata()
    np_label = nifti_mask.get_fdata()
    # Release the cached file data once the arrays have been extracted
    nifti_mask.uncache()
    nifti_scan.uncache()

    np_scan = preprocess_scan(np_scan)
    np_label = rotate_label(np_label)

    assert np_scan.shape == np_label.shape
    return np_scan, np_label
def analyze_kidney(seg: Nifti1Image, align=False) -> list:
    '''Voxel extent of each kidney within the array.

    args:
        seg: Nifti1Image, voxel-wise segmentation labels
        align: bool, whether to grow each kidney's bounding box outward from its centroid
    return:
        list: list[tuple(slice, N)...], slice objects covering each kidney along every dimension (N is the dimensionality)
        centers: ndarray, centroid coordinates of the two kidneys
    '''
    data = seg.get_fdata()
    indexs = argwhere(data > 0)
    # Split the foreground voxels into two clusters, one per kidney
    kmeans = KMeans(n_clusters=2).fit(indexs)
    if align:
        ranges = []
        for center in kmeans.cluster_centers_:
            # Use integer voxel coordinates of the cluster centre as the seed box
            d, w, h = center.astype(int)
            d_min, d_max = d - 1, d + 1
            w_min, w_max = w - 1, w + 1
            h_min, h_max = h - 1, h + 1
            # Grow each face of the box until a slab without kidney voxels is reached
            while d_min > 0:
                if KINDEY not in data[d_min, :, :]:
                    break
                d_min -= 1
            while w_min > 0:
                if KINDEY not in data[:, w_min, :]:
                    break
                w_min -= 1
            while h_min > 0:
                if KINDEY not in data[:, :, h_min]:
                    break
                h_min -= 1
            while d_max < data.shape[0]:
                if KINDEY not in data[d_max, :, :]:
                    break
                d_max += 1
            while w_max < data.shape[1]:
                if KINDEY not in data[:, w_max, :]:
                    break
                w_max += 1
            while h_max < data.shape[2]:
                if KINDEY not in data[:, :, h_max]:
                    break
                h_max += 1
            ranges.append((slice(d_min, d_max), slice(w_min, w_max), slice(h_min, h_max)))
        return ranges, array(kmeans.cluster_centers_, dtype=int)
    else:
        # Tight bounding box of each cluster's voxels
        labels = kmeans.predict(indexs)
        kidney1 = indexs[labels == 0]
        kidney2 = indexs[labels == 1]
        d_min_1, w_min_1, h_min_1 = kidney1.min(axis=0)
        d_max_1, w_max_1, h_max_1 = kidney1.max(axis=0)
        d_min_2, w_min_2, h_min_2 = kidney2.min(axis=0)
        d_max_2, w_max_2, h_max_2 = kidney2.max(axis=0)
        return [
            (slice(d_min_1, d_max_1), slice(w_min_1, w_max_1), slice(h_min_1, h_max_1)),
            (slice(d_min_2, d_max_2), slice(w_min_2, w_max_2), slice(h_min_2, h_max_2)),
        ], array(kmeans.cluster_centers_, dtype=int)
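# Usage sketch for analyze_kidney (illustrative only): the file path below is
# hypothetical, and the snippet assumes the module-level imports the function
# already relies on (numpy's argwhere/array, sklearn.cluster.KMeans, nibabel,
# and the KINDEY label constant).
import nibabel as nib

seg = nib.load("case_00000/segmentation.nii.gz")   # hypothetical label file
boxes, centers = analyze_kidney(seg, align=False)

volume = seg.get_fdata()
kidney_a = volume[boxes[0]]                        # crop the first kidney's bounding box
kidney_b = volume[boxes[1]]                        # crop the second kidney's bounding box
print(centers)                                     # two [d, w, h] centroids (int)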
def decompose(img: nib.Nifti1Image, name: str, orientation: str, scale: int, pad_crop: list):
    """
    Split a nibabel Nifti1Image into separate PNG slices along the given
    orientation ('S', 'C' or 'T')
    """
    # Force RAS+ orientation
    # img = nib.as_closest_canonical(img)

    # get orientation code (axis to slice along)
    orient_code = {'S': 0, 'C': 1, 'T': 2}[orientation]

    # get volume data
    data = img.get_fdata()
    dims = data.shape

    # scale data into the 16-bit range
    data = np.round((data / scale) * 65535)

    # write images to disk
    for n in range(dims[orient_code]):
        output_filename = name + '_slice-{:0>4d}.png'.format(n)
        data_slice = np.flip(simple_slice(data, n, orient_code).T.astype('uint16'), axis=0)
        if pad_crop:
            # do padding/crop if enabled
            data_slice = resize_slice(data_slice, pad_crop)
        # write slice to file
        imwrite(output_filename, data_slice)
def _clear_steady_state(img: Nifti1Image, confounds: pd.DataFrame,
                        drop_trs: Optional[int] = None):
    """
    Remove steady state volumes
    """
    if not drop_trs:
        # Infer the number of initial volumes to drop from the steady-state confound columns
        steady_cols = [c for c in confounds.columns if "steady" in c]
        if steady_cols:
            steady_df = confounds[steady_cols].sum(axis=1).diff()
            steady_ind = np.where(steady_df < 0)[0]
            drop_trs = int(steady_ind[0])
        else:
            raise ValueError(
                "drop_trs not supplied and steady_state volumes not"
                " found in confounds dataframe!")

    # Construct new image object
    new_conf = confounds.loc[drop_trs:, :]
    new_img = nimg.new_img_like(
        img,
        img.get_fdata(caching="unchanged")[:, :, :, drop_trs:],
        copy_header=True)

    return (new_img, new_conf)
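# Usage sketch for _clear_steady_state (illustrative only): the paths are
# hypothetical, and nimg is assumed to be nilearn.image, as the new_img_like
# call above suggests.
import nibabel as nib
import pandas as pd

bold = nib.load("sub-01_task-rest_bold.nii.gz")            # hypothetical 4D BOLD series
conf = pd.read_csv("sub-01_task-rest_confounds.tsv", sep="\t")

# Either let the function infer the cut-off from the "steady" confound columns...
img_clean, conf_clean = _clear_steady_state(bold, conf)
# ...or drop a fixed number of initial volumes explicitly.
img_clean, conf_clean = _clear_steady_state(bold, conf, drop_trs=4)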
def _get_vol_index(img: Nifti1Image, inds: npt.ArrayLike) -> Nifti1Image:
    """ Select the volumes `inds` along the last (time) axis of `img` """
    return nimg.new_img_like(
        img,
        img.get_fdata(caching="unchanged")[:, :, :, inds],
        img.affine,
        copy_header=True,
    )
def _image_to_signals(img: Nifti1Image) -> npt.ArrayLike:
    """ Transform a Nibabel image into an [NVOX x T] array """
    nvox = np.prod(img.shape[:-1])
    return img.get_fdata(caching="unchanged").reshape((nvox, -1))
def fix_mosaic(mosaic_nifti: nib.Nifti1Image, acq_dims: tuple):
    """
    Fixes incorrectly-processed NIFTIs by dcm2niix where they still remain mosaics
    due to a lack of NumberOfImagesInMosaic header. This function implements a hack
    to split the mosaic back into slices with a sliding window algorithm.

    :param mosaic_nifti: the nifti image object that needs to be fixed.
        Should be of shape m x n x 1
    :param acq_dims: the (row, col) acquisition dimensions for rows and columns
        from the AcquisitionMatrix DICOM field. Used to determine the appropriate
        kernel size to use for the sliding window algorithm
    :return: new_nifti; a 3D NIFTI that is no longer mosaic
    """
    acq_rows, acq_cols = acq_dims
    # Get the shape and array values of the mosaic (flatten the latter into a 2D array)
    img_shape = mosaic_nifti.shape
    # noinspection PyTypeChecker
    img_data = np.rot90(np.squeeze(mosaic_nifti.get_fdata()))

    # If this is a square, and the rows perfectly divide the mosaic
    if img_shape[0] == img_shape[1] and img_shape[0] % acq_rows == 0:
        nsplits_w, nsplits_h = img_shape[0] / acq_rows, img_shape[0] / acq_rows
        kernel_w, kernel_h = acq_rows, acq_rows
    # If this is a square, and the cols perfectly divide the mosaic
    elif img_shape[0] == img_shape[1] and img_shape[0] % acq_cols == 0:
        nsplits_w, nsplits_h = img_shape[0] / acq_cols, img_shape[0] / acq_cols
        kernel_w, kernel_h = acq_cols, acq_cols
    # If this is a rectangle
    elif all([img_shape[0] != img_shape[1],
              img_shape[0] % acq_rows == 0,
              img_shape[1] % acq_cols == 0
              ]):
        nsplits_w, nsplits_h = img_shape[0] / acq_rows, img_shape[1] / acq_cols
        kernel_w, kernel_h = acq_rows, acq_cols
    else:
        return

    # Initialize the data that will house the split mosaic into slices
    new_img_data = np.zeros(shape=(kernel_w, kernel_h, int(nsplits_w * nsplits_h)))
    slice_num = 0

    # Sliding window algorithm
    for ii in range(int(nsplits_w)):
        for jj in range(int(nsplits_h)):
            x_start, x_end = ii * kernel_w, (ii + 1) * kernel_w
            y_start, y_end = jj * kernel_h, (jj + 1) * kernel_h
            img_slice = img_data[x_start:x_end, y_start:y_end]
            # Disregard slices that are only zeros
            if np.nanmax(img_slice) == 0:
                continue
            # Otherwise update the zeros array at the appropriate slice with the new values
            else:
                new_img_data[:, :, slice_num] = img_slice
                slice_num += 1

    # Filter off slices that had only zeros
    new_img_data = np.rot90(new_img_data[:, :, 0:slice_num], 3)
    new_nifti = image.new_img_like(mosaic_nifti, new_img_data, affine=mosaic_nifti.affine)
    return new_nifti
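# Usage sketch for fix_mosaic (illustrative only): the path and acquisition
# matrix values are hypothetical, and `image` above is assumed to be
# nilearn.image (providing new_img_like).
import nibabel as nib

mosaic = nib.load("sub-01_bold_mosaic.nii.gz")     # hypothetical m x n x 1 mosaic volume
fixed = fix_mosaic(mosaic, acq_dims=(64, 64))      # (rows, cols) from AcquisitionMatrix
if fixed is not None:
    nib.save(fixed, "sub-01_bold_3d.nii.gz")
else:
    print("acq_dims do not tile the mosaic; no output written")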
def get_data(img: Nifti1Image) -> ndarray:
    '''Return the image data as an array.

    args:
        img: Nifti1Image
    return:
        data: ndarray of size [d, w, h]
    '''
    return img.get_fdata()
def analyze_cubes(seg: Nifti1Image) -> list:
    '''Voxel extent of every foreground class (fine segmentation) within the array.

    args:
        seg: Nifti1Image, voxel-wise segmentation labels
    return:
        list: list[tuple(slice, N)...], slice objects covering each foreground object along every dimension (N is the dimensionality)
    '''
    return ndimage.find_objects(seg.get_fdata().astype(int))
def robust_set_limits_in_mask(data_img: nib.Nifti1Image,
                              mask_img: nib.Nifti1Image) -> dict[str, float]:
    plot_params: dict[str, float] = dict()
    mask = np.asanyarray(mask_img.dataobj).astype(bool)
    data = data_img.get_fdata()[mask]
    plot_params = robust_set_limits(data.reshape(-1), plot_params)
    return plot_params
def analyze_cube(seg: Nifti1Image) -> tuple:
    '''Voxel extent of the foreground (coarse segmentation) within the array.

    args:
        seg: Nifti1Image, voxel-wise segmentation labels
    return:
        slice_tuple: tuple(slice, N), slice objects covering the foreground along every dimension (N is the dimensionality)
    '''
    indexs = argwhere(seg.get_fdata() != 0)
    return tuple(slice(start, end + 1)
                 for start, end in zip(indexs.min(axis=0), indexs.max(axis=0)))
def analyze_kidney_center(seg: Nifti1Image) -> list:
    '''Centroid of each kidney in 3D image coordinates.

    args:
        seg: Nifti1Image, voxel-wise segmentation labels
    return:
        list: [[d, w, h], [d, w, h]]
    '''
    indexs = argwhere(seg.get_fdata() > 0)
    # Two clusters of foreground voxels, one per kidney
    kmeans = KMeans(n_clusters=2).fit(indexs)
    return array(kmeans.cluster_centers_, dtype=int)
def from_image(cls, image: Nifti1Image, space: Space, ignore_affine=False):
    """Construct a bounding box from a nifti image"""
    bounds = cls._determine_bounds(image.get_fdata())
    if bounds is None:
        return None

    if ignore_affine:
        target_space = None
    else:
        bounds = np.dot(image.affine, bounds)
        target_space = space

    return cls(point1=bounds[:3, 0], point2=bounds[:3, 1], space=target_space)
def analyze_mean_std(img: Nifti1Image):
    '''Mean and standard deviation of the image voxels.

    args:
        img: Nifti1Image, image to analyze
    return:
        mean: float, mean value
        std: float, standard deviation
    '''
    data = img.get_fdata()
    mean = data.mean()
    std = data.std()
    return mean, std
def resample_image(img: Nifti1Image,
                   space_target: ndarray = array([3.22, 1.62, 1.62])) -> Nifti1Image:
    '''Resample the image data (as a Nifti1Image).

    args:
        img: Nifti1Image, original image
        space_target: target voxel spacing
    return:
        data: Nifti1Image, resampled image
    '''
    # Build an affine matching the target spacing, with the axes reordered
    aff = -diag([*space_target, -1])
    aff = matmul(array([
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 1],
    ]), aff)
    scales = get_spacing(img) / space_target
    data = img.get_fdata()
    resample = ndimage.zoom(data, scales, mode="reflect")
    return Nifti1Image(resample, aff)
def resample_segmentation(seg: Nifti1Image,
                          space_target: ndarray = array([3.22, 1.62, 1.62])) -> Nifti1Image:
    '''Resample the segmentation labels (as a Nifti1Image).

    args:
        seg: Nifti1Image, original segmentation
        space_target: target voxel spacing
    return:
        data: Nifti1Image, resampled segmentation labels
    '''
    # Same affine construction as in resample_image
    aff = -diag([*space_target, -1])
    aff = matmul(array([
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 1],
    ]), aff)
    scales = get_spacing(seg) / space_target
    data = seg.get_fdata()
    # Linear interpolation, then round back to integer label values
    resample = ndimage.zoom(data, scales, order=1, mode="reflect")
    resample = around(resample, 0).astype(int)
    return Nifti1Image(resample, aff)
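# Usage sketch for the two resampling helpers (illustrative only): the paths
# are hypothetical, and get_spacing is assumed to return the voxel spacing in
# the same [d, w, h] order used above.
import nibabel as nib
from numpy import array

img = nib.load("case_00000/imaging.nii.gz")        # hypothetical CT volume
seg = nib.load("case_00000/segmentation.nii.gz")   # matching label volume

target = array([3.22, 1.62, 1.62])
img_rs = resample_image(img, target)               # spline-interpolated intensities
seg_rs = resample_segmentation(seg, target)        # order-1 zoom, rounded back to int labels

assert img_rs.shape == seg_rs.shape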
def weight(echo: nib.Nifti1Image, TE: float):
    # Weight each echo by TE times mean/std (temporal SNR) computed over the
    # last n_vols volumes; n_vols is expected to be defined at module scope.
    data = echo.get_fdata()
    mean = data[..., -n_vols:].mean(axis=-1)
    std = data[..., -n_vols:].std(axis=-1)
    return TE * mean / std