def test_brainMaskVolume(self):
    """Check that the b0-derived brain mask has a plausible human volume.

    The mask voxel count times the per-voxel volume (converted from mm^3
    to cm^3) must lie strictly between 500 and 1500 cubic centimeters.
    """
    image = read_siemens_scil_b0()
    volume = np.squeeze(image.get_data())
    # Voxel edge lengths are in millimeters; 1 mm^3 == 1e-3 cm^3.
    zooms = image.header.get_zooms()[:3]
    voxel_cc = 1e-3 * np.prod(zooms)
    mask = createBrainMaskFromb0Data(volume)[1]
    brain_cc = voxel_cc * np.sum(mask)
    self.assertTrue(500 < brain_cc < 1500)
First import the necessary modules: """ import numpy as np import nibabel as nib """ Download and read the data for this tutorial. The scil_b0 dataset contains different data from different companies and models. For this example, the data comes from a 1.5 tesla Siemens MRI. """ from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0 fetch_scil_b0() img = read_siemens_scil_b0() data = np.squeeze(img.get_data()) """ ``img`` contains a nibabel Nifti1Image object. Data is the actual brain data as a numpy ndarray. Segment the brain using dipy's mask module. ``median_otsu`` returns the segmented brain data and a binary mask of the brain. It is possible to fine tune the parameters of ``median_otsu`` (``median_radius`` and ``num_pass``) if extraction yields incorrect results but the default parameters work well on most volumes. For this example, we used 2 as ``median_radius`` and 1 as ``num_pass`` """ from dipy.segment.mask import median_otsu
# NOTE(review): this is a dipy brain-extraction tutorial script collapsed onto
# one physical line. It opens mid-string: the leading text ("First import the
# necessary modules:") is the tail of a narrative docstring whose opening
# triple quote lies before this chunk, so the line is left byte-identical
# rather than reformatted. ``fetch_scil_b0()`` downloads the dataset on first
# use (network side effect) — presumably the original file had each statement
# and docstring on its own line; confirm against the full source before
# restoring the formatting.
First import the necessary modules: """ import numpy as np import nibabel as nib """ Download and read the data for this tutorial. The scil_b0 dataset contains different data from different companies and models. For this example, the data comes from a 1.5 tesla Siemens MRI. """ from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0 fetch_scil_b0() img = read_siemens_scil_b0() data = np.squeeze(img.get_data()) """ ``img`` contains a nibabel Nifti1Image object. Data is the actual brain data as a numpy ndarray. Segment the brain using dipy's mask module. ``median_otsu`` returns the segmented brain data and a binary mask of the brain. It is possible to fine tune the parameters of ``median_otsu`` (``median_radius`` and ``num_pass``) if extraction yields incorrect results but the default parameters work well on most volumes. For this example, we used 2 as ``median_radius`` and 1 as ``num_pass`` """
# NOTE(review): this chunk duplicates the preceding tutorial script except for
# the trailing ``median_otsu`` import — likely a paste artifact; verify
# against the original file(s) before deduplicating. It also opens mid-string
# (the opening triple quote of the narrative docstring is outside this view),
# so the line is left byte-identical rather than reformatted.
def load_data(name="sherbrooke", a=.5, flat=True):
    """Load raw sample data (MRI or EM) and estimate its noise level.

    :param name: str
        options: "sherbrooke", "hardi", "siemens" (all MRI), "j0126" (EM)
    :param a: float
        specifies the position (fraction of the 4th axis) of the used MRI
        volume, 0 <= a < 1; ignored for "j0126"
    :param flat: bool
        True: transform data into the DB^3 block format used along with spark
    :return: either
        new_data: list of [linear block index, [data_block, sigma_block]]
        or
        (data, sigma): cropped 3d raw data and its noise estimate
    :raises ValueError: if ``name`` is not one of the supported datasets
    """
    if name == "sherbrooke":
        img, gtab = read_sherbrooke_3shell()
    elif name == "hardi":
        img, gtab = read_stanford_hardi()
    elif name == "siemens":
        img = read_siemens_scil_b0()
    elif name == "j0126":
        data = np.load(path_to_folder + "/data/j0126_sample.npy")
    else:
        # was a bare ``raise Exception()``; ValueError is still caught by
        # any existing ``except Exception`` handler
        raise ValueError("unknown dataset name: %r" % (name,))

    if "j0126" not in name:
        data = img.get_data()
        # ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
        # exact equivalent (platform-default integer dtype).
        d_type = int
    else:
        d_type = np.uint8

    print("Original shape:", data.shape)
    # Center the crop along z; ``sh`` is the module-level target shape.
    z = int((data.shape[2] - sh[2]) / 2)
    if "j0126" not in name:
        # Pick a single 3d volume at fraction ``a`` of the 4th axis, then crop.
        a = int(data.shape[3] * a)
        data = data[: sh[0], : sh[1], z: z + sh[2], a].astype(np.int32)
    else:
        data = data[: sh[0], : sh[1], z: z + sh[2]].astype(np.int32)

    sigma = noise_estimate.estimate_sigma(data, N=4)
    print(np.mean(sigma))

    if not flat:
        return data, sigma

    # Broadcast a 1d noise estimate over the whole volume so sigma can be
    # tiled alongside the data.
    if len(sigma.shape) == 1:
        sigma = np.ones_like(data) * sigma[0]
    new_data = []
    # Tile the volume into DB^3 blocks, zero-padding data (ones for sigma)
    # at the borders. NOTE: the innermost ``z`` deliberately shadows the
    # crop offset computed above; it is no longer needed at this point.
    for x in range(0, data.shape[0], DB):
        for y in range(0, data.shape[1], DB):
            for z in range(0, data.shape[2], DB):
                data_block = np.zeros([DB, DB, DB], dtype=d_type)
                sigma_block = np.ones([DB, DB, DB], dtype=d_type)
                d_sh = data[x: x + DB, y: y + DB, z: z + DB].shape
                data_block[0: d_sh[0], 0: d_sh[1], 0: d_sh[2]] = \
                    data[x: x + DB, y: y + DB, z: z + DB]
                sigma_block[0: d_sh[0], 0: d_sh[1], 0: d_sh[2]] = \
                    sigma[x: x + DB, y: y + DB, z: z + DB]
                new_data.append([_coordinate_to_index([x, y, z]),
                                 [data_block, sigma_block]])
    return new_data