Example #1
def test_median_otsu():
    fname = get_data('S0_10')
    img = nib.load(fname)
    data = img.get_data()
    data = np.squeeze(data)
    dummy_mask = data > data.mean()
    data_masked, mask = median_otsu(data, median_radius=3, numpass=2,
                                    autocrop=False, vol_idx=None,
                                    dilate=None)
    assert_equal(mask.sum() < dummy_mask.sum(), True)
    data2 = np.zeros(data.shape + (2,))
    data2[..., 0] = data
    data2[..., 1] = data
    data2_masked, mask2 = median_otsu(data2, median_radius=3, numpass=2,
                                      autocrop=False, vol_idx=[0, 1],
                                      dilate=None)
    assert_equal(mask.sum() == mask2.sum(), True)

    _, mask3 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1],
                           dilate=1)
    assert_equal(mask2.sum() < mask3.sum(), True)

    _, mask4 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1],
                           dilate=2)
    assert_equal(mask3.sum() < mask4.sum(), True)
Example #2
def test_median_otsu():
    fname = get_fnames('S0_10')
    img = nib.load(fname)
    data = img.get_data()
    data = np.squeeze(data.astype('f8'))
    dummy_mask = data > data.mean()
    data_masked, mask = median_otsu(data, median_radius=3, numpass=2,
                                    autocrop=False, vol_idx=None,
                                    dilate=None)
    assert_equal(mask.sum() < dummy_mask.sum(), True)
    data2 = np.zeros(data.shape + (2,))
    data2[..., 0] = data
    data2[..., 1] = data

    data2_masked, mask2 = median_otsu(data2, median_radius=3, numpass=2,
                                      autocrop=False, vol_idx=[0, 1],
                                      dilate=None)
    assert_almost_equal(mask.sum(), mask2.sum())

    _, mask3 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1],
                           dilate=1)
    assert_equal(mask2.sum() < mask3.sum(), True)

    _, mask4 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1],
                           dilate=2)
    assert_equal(mask3.sum() < mask4.sum(), True)
Example #3
def test_median_otsu():
    fname = get_fnames('S0_10')
    data = load_nifti_data(fname)
    data = np.squeeze(data.astype('f8'))
    dummy_mask = data > data.mean()
    data_masked, mask = median_otsu(data, median_radius=3, numpass=2,
                                    autocrop=False, vol_idx=None,
                                    dilate=None)
    assert_equal(mask.sum() < dummy_mask.sum(), True)
    data2 = np.zeros(data.shape + (2,))
    data2[..., 0] = data
    data2[..., 1] = data

    data2_masked, mask2 = median_otsu(data2, median_radius=3, numpass=2,
                                      autocrop=False, vol_idx=[0, 1],
                                      dilate=None)
    assert_almost_equal(mask.sum(), mask2.sum())

    _, mask3 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1],
                           dilate=1)
    assert_equal(mask2.sum() < mask3.sum(), True)

    _, mask4 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1],
                           dilate=2)
    assert_equal(mask3.sum() < mask4.sum(), True)

    # For 4D volumes, can't call without vol_idx input:
    assert_raises(ValueError, median_otsu, data2)
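The three tests above exercise dipy.segment.mask.median_otsu directly. As a point of reference, here is a minimal standalone sketch of the same call pattern; the file names below are placeholders, and the vol_idx/median_radius values simply mirror the tests.

import numpy as np
import nibabel as nib
from dipy.segment.mask import median_otsu

# Load a 4D diffusion volume (placeholder file name).
img = nib.load('dwi.nii.gz')
data = np.asarray(img.dataobj)

# For 4D input, vol_idx selects the volumes used to build the mask.
masked_data, mask = median_otsu(data, median_radius=3, numpass=2,
                                autocrop=False, vol_idx=[0, 1], dilate=1)

# Save the binary mask next to the input (placeholder output name).
nib.save(nib.Nifti1Image(mask.astype(np.uint8), img.affine), 'dwi_mask.nii.gz')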
Example #4
def exampleDipy():
	
    # example obtained from: http://nipy.org/dipy/examples_built/syn_registration_2d.html
    import ssl
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context
    from dipy.data import fetch_stanford_hardi, read_stanford_hardi
    fetch_stanford_hardi()
    nib_stanford, gtab_stanford = read_stanford_hardi()
    stanford_b0 = np.squeeze(nib_stanford.get_data())[..., 0]

    from dipy.data.fetcher import fetch_syn_data, read_syn_data
    fetch_syn_data()
    nib_syn_t1, nib_syn_b0 = read_syn_data()
    syn_b0 = np.array(nib_syn_b0.get_data())

    from dipy.segment.mask import median_otsu

    stanford_b0_masked, stanford_b0_mask = median_otsu(stanford_b0, 4, 4)
    syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

    static = stanford_b0_masked
    static_affine = nib_stanford.affine
    moving = syn_b0_masked
    moving_affine = nib_syn_b0.affine

    pre_align = np.array(
        [[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
         [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
         [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

    from dipy.align.imaffine import AffineMap
    from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
    from dipy.align.metrics import CCMetric
    from dipy.viz import regtools
    affine_map = AffineMap(pre_align,
                           static.shape, static_affine,
                           moving.shape, moving_affine)

    resampled = affine_map.transform(moving)

    metric = CCMetric(3)

    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           pre_align)

    warped_moving = mapping.transform(moving)

    for slice in range(41 - 12, 41 + 13):
        regtools.overlay_slices(static, resampled, slice, 1, 'Static',
                                'Pre Moving',
                                'GIFexample1/' + str(slice) + 'T1pre.png')
        regtools.overlay_slices(static, warped_moving, slice, 1, 'Static',
                                'Post moving',
                                'GIFexample1/' + str(slice) + 'T1post.png')
Example #5
def main(dti_file, bvals_file, bvecs_file, b_ss=1000):

    # Load the image data
    nii = nib.load(dti_file)
    img_data = nii.get_data()

    # Read in the b-shell values and gradient directions
    bvals, bvecs = read_bvals_bvecs(bvals_file, bvecs_file)

    # Boolean array to identify entries with either b = 0 or b = b_ss
    bvals_eq_0_b_ss = (bvals == 0) | (bvals == b_ss)

    # Extract info needed to run single-compartment dti model
    dti_bvals = bvals[bvals_eq_0_b_ss].copy()
    dti_bvecs = bvecs[bvals_eq_0_b_ss].copy()
    dti_img_data = img_data[:, :, :, bvals_eq_0_b_ss].copy()

    # Compute gradient table
    grad_table = gradient_table(dti_bvals, dti_bvecs)

    # Extract brain so we don't fit the background
    brain_img_data, brain_mask = median_otsu(dti_img_data, 2, 1)

    # Run the dti model and fit it to the brain extracted image data
    ten_model = dti.TensorModel(grad_table)
    ten_fit = ten_model.fit(brain_img_data)
Example #6
def test_median_otsu_flow():
    with nib.tmpdirs.InTemporaryDirectory() as out_dir:
        data_path, _, _ = get_data('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        median_otsu_flow(data_path, out_dir, save_masked, median_radius,
                         numpass, autocrop, vol_idx, dilate)

        masked, mask = median_otsu(volume, median_radius, numpass, autocrop,
                                   vol_idx, dilate)

        fname, _ = splitext(splitext(basename(data_path))[0])

        mask_fname = fname + '_mask.nii.gz'
        result_mask_data = nib.load(join(out_dir, mask_fname)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        masked_fname = fname + '_bet.nii.gz'
        result_masked_data = nib.load(join(out_dir, masked_fname)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
Example #7
def test_dwi_sh_conversion(data_dwi, gtab):
    rtol = 0.05
    atol = 1e-8

    b0_mask_crop, mask_crop = median_otsu(data_dwi,
                                          vol_idx=gtab.b0s_mask,
                                          autocrop=False)
    mask_crop = mask_crop[..., None]

    data_dwi_true = shm.normalize_data(data_dwi, gtab.b0s_mask)
    data_dwi_true *= mask_crop

    sh_order = 4

    data_sh = shm.dwi_to_sh(data_dwi_true,
                            gtab,
                            smooth=0.006,
                            mask=mask_crop,
                            sh_order=sh_order)
    data_dwi_reconst = shm.sh_to_dwi(data_sh,
                                     gtab,
                                     mask=mask_crop,
                                     add_b0=False)

    data_dwi_true = data_dwi_true[..., ~gtab.b0s_mask]

    print(
        np.isclose(data_dwi_true, data_dwi_reconst, rtol=rtol,
                   atol=atol)[mask_crop[..., 0]].astype(int).sum() /
        (data_dwi_true.shape[-1] * np.sum(mask_crop)))

    assert data_dwi_reconst.shape == data_dwi_true.shape
    assert np.isclose(data_dwi_true, data_dwi_reconst, rtol=rtol,
                      atol=atol).all()
Example #8
def measure_denoising(n_datasets, ip_file_path):
    data_ids = range(n_datasets)

    ip_ports, ips = dh.read_ip_file(ip_file_path)

    tf_cluster = dt.TfCluster(ip_ports[:])

    datasets = []
    masks = []
    for data_id in data_ids:
        data, bvals_path, bvecs_path = dh.download(data_id)

        datasets.append(data)
        gtab = dpg.gradient_table(bvals_path, bvecs_path, b0_threshold=10)
        mean_b0 = np.mean(data[..., gtab.b0s_mask], -1)
        _, mask = median_otsu(mean_b0,
                              4,
                              2,
                              False,
                              vol_idx=np.where(gtab.b0s_mask),
                              dilate=1)
        masks.append(mask)

    with tf.Session("grpc://%s" % tf_cluster.host) as sess:
        dt.parallel_denoise(sess, tf_cluster, datasets, masks, depth=1)
Example #9
def test_median_otsu_flow():
    with nib.tmpdirs.InTemporaryDirectory() as out_dir:
        data_path, _, _ = get_data('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        median_otsu_flow(data_path, out_dir, save_masked, median_radius,
                         numpass, autocrop, vol_idx, dilate)

        masked, mask = median_otsu(volume, median_radius,
                                   numpass, autocrop,
                                   vol_idx, dilate)

        fname, _ = splitext(splitext(basename(data_path))[0])

        mask_fname = fname + '_mask.nii.gz'
        result_mask_data = nib.load(join(out_dir, mask_fname)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        masked_fname = fname + '_bet.nii.gz'
        result_masked_data = nib.load(join(out_dir, masked_fname)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
Example #10
def test_median_otsu_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_data('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        mo_flow = MedianOtsuFlow()
        mo_flow.run(data_path, out_dir=out_dir, save_masked=save_masked,
                             median_radius=median_radius, numpass=numpass,
                             autocrop=autocrop, vol_idx=vol_idx, dilate=dilate)

        mask_name = mo_flow.last_generated_outputs['out_mask']
        masked_name = mo_flow.last_generated_outputs['out_masked']

        masked, mask = median_otsu(volume, median_radius,
                                   numpass, autocrop,
                                   vol_idx, dilate)

        result_mask_data = nib.load(join(out_dir, mask_name)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        result_masked_data = nib.load(join(out_dir, masked_name)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
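The test above drives dipy's MedianOtsuFlow through its run() method. A minimal sketch of the same call outside the test harness, assuming the flow lives in dipy.workflows.mask as in the test's version of dipy; the input path and output directory are placeholders.

from dipy.workflows.mask import MedianOtsuFlow

mo_flow = MedianOtsuFlow()
mo_flow.run('dwi.nii.gz', save_masked=True, median_radius=3, numpass=3,
            autocrop=False, vol_idx=[0], out_dir='out')

# The generated file names are recorded on the flow instance.
print(mo_flow.last_generated_outputs['out_mask'])
print(mo_flow.last_generated_outputs['out_masked'])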
Example #11
def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file):
    colorfa = make_a_square(load_and_reorient(colorFA_file),
                            include_last_dim=False)
    b0 = make_a_square(load_and_reorient(b0_file)[:, :, :, 0])
    anat_mask = make_a_square(load_and_reorient(mask_file))

    # make a b0 sprite
    _, mask = median_otsu(b0)
    outb0 = create_sprite_from_tiles(b0, as_bytes=True)
    outb0['img_type'] = 'brainsprite'

    # make a colorFA sprite, masked by b0
    Q = make_a_square(colorfa, include_last_dim=False)
    Q[np.logical_not(mask)] = np.nan
    Q = np.moveaxis(Q, -2, -1)
    outcolorFA = create_sprite_from_tiles(Q, as_bytes=True)
    outcolorFA['img_type'] = 'brainsprite'

    # make an anat mask contour sprite
    outmask = create_sprite_from_tiles(
        make_a_square(anat_mask, include_last_dim=False))
    img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
    outmask['img'] = img

    return outb0, outcolorFA, outmask
Example #12
 def _brain_mask(self,
                 row,
                 median_radius=4,
                 numpass=1,
                 autocrop=False,
                 vol_idx=None,
                 dilate=10):
     brain_mask_file = self._get_fname(row, '_brain_mask.nii.gz')
     if self.force_recompute or not op.exists(brain_mask_file):
         b0_file = self._b0(row)
         mean_b0_img = nib.load(b0_file)
         mean_b0 = mean_b0_img.get_fdata()
         _, brain_mask = median_otsu(mean_b0,
                                     median_radius,
                                     numpass,
                                     autocrop,
                                     dilate=dilate)
         be_img = nib.Nifti1Image(brain_mask.astype(int),
                                  mean_b0_img.affine)
         nib.save(be_img, brain_mask_file)
         meta = dict(source=b0_file,
                     median_radius=median_radius,
                     numpass=numpass,
                     autocrop=autocrop,
                     vol_idx=vol_idx)
         meta_fname = self._get_fname(row, '_brain_mask.json')
         afd.write_json(meta_fname, meta)
     return brain_mask_file
Example #13
 def process(self, ds: Dataset = None, *args, **kwargs):
     median_radius = kwargs.get('median_radius', 5)
     num_pass = kwargs.get('num_pass', 4)
     brain, mask = median_otsu(ds.pixel_array,
                               median_radius=median_radius,
                               numpass=num_pass)
     return brain
Example #14
def auto_mask(data, raw_d=None, nskip=3, mask_bad_end_vols=False):
    from dipy.segment.mask import median_otsu
    mn = data[:, :, :, nskip:].mean(3)
    _, mask = median_otsu(mn, 3, 2) # oesteban: masked_data was not used
    mask = np.concatenate((
        np.tile(True, (data.shape[0], data.shape[1], data.shape[2], nskip)),
        np.tile(np.expand_dims(mask == 0, 3), (1, 1, 1, data.shape[3]-nskip))),
        axis=3)
    mask_vols = np.zeros(mask.shape[-1], dtype=bool)  # boolean per-volume flags
    if mask_bad_end_vols:
        # Some runs have corrupt volumes at the end (e.g., mux scans that are stopped prematurely). Mask those too.
        # But... motion correction might have interpolated the empty slices such that they aren't exactly zero.
        # So use the raw data to find these bad volumes.
        # 2015.10.29 RFD: this caused problems with some non-mux EPI scans that (inexplicably)
        # have empty slices at the top of the brain. So we'll disable it for
        # now.
        if raw_d is None:
            slice_max = data.max(0).max(0)
        else:
            slice_max = raw_d.max(0).max(0)

        bad = np.any(slice_max == 0, axis=0)
        # We don't want to miss a bad volume somewhere in the middle, as that could be a valid artifact.
        # So, only mask bad vols that are contiguous to the end.
        mask_vols = np.array([np.all(bad[i:]) for i in range(bad.shape[0])])
    # Mask out the skip volumes at the beginning
    mask_vols[0:nskip] = True
    mask[..., mask_vols] = True
    brain = np.ma.masked_array(data, mask=mask)
    good_vols = np.logical_not(mask_vols)
    return brain, mask, good_vols
Example #15
 def process(self, tup):
     if tup.values[0] == 0:
         image_data = tup.values[1]
         _, mask = median_otsu(image_data, 4, 2, False, dilate=1)
         self.log("emitting mask with shap {} edge: {} center: {}".format(
             mask.shape, mask[0][0][0], mask[32, 32, 15]))
         self.emit(['reference', image_data, mask])
Example #16
def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file):
    colorfa = load_and_reorient(colorFA_file)
    b0 = load_and_reorient(b0_file)[:, :, :, 0]
    anat_mask = load_and_reorient(mask_file)

    N = max(*b0.shape[:2])

    # make a b0 sprite
    b0 = reshape3D(b0, N)
    _, mask = median_otsu(b0)
    outb0 = create_sprite_from_tiles(b0, as_bytes=True)
    outb0['img_type'] = 'brainsprite'

    # make a colorFA sprite, masked by b0
    Q = reshape4D(colorfa, N)
    Q[np.logical_not(mask)] = np.nan
    Q = np.moveaxis(Q, -2, -1)
    outcolorFA = create_sprite_from_tiles(Q, as_bytes=True)
    outcolorFA['img_type'] = 'brainsprite'

    # make an anat mask contour sprite
    outmask = create_sprite_from_tiles(reshape3D(anat_mask, N))
    img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
    outmask['img'] = img

    return outb0, outcolorFA, outmask
Example #17
 def process(self, ds: Dataset = None, *args, **kwargs):
     mode = kwargs.get('mode', 'crop')
     # if mode == 'crop':
     median_radius = kwargs.get('median_radius', 5)
     num_pass = kwargs.get('num_pass', 4)
     threshold = kwargs.get('threshold', THRESHOLD)
     window = kwargs.get('window', (5, 5))
     iterations = kwargs.get('iterations', 3)
     brain, mask = median_otsu(ds.pixel_array,
                               median_radius=median_radius,
                               numpass=num_pass)
     flatten = brain.reshape((-1))
     for i in range(flatten.shape[0]):
         if flatten[i] < threshold:
             flatten[i] = 0
     # ret, thresh = cv.threshold(brain, threshold, MAX_BIT, cv.THRESH_BINARY)
     thresh = flatten.reshape(brain.shape)
     kernel = np.ones(window, dtype=np.uint16)
     res = cv.morphologyEx(thresh,
                           cv.MORPH_OPEN,
                           kernel,
                           iterations=iterations)
     res = res.reshape((-1))
     original = brain.reshape((-1))
     for i in range(res.shape[0]):
         if res[i] == 0:
             original[i] = 0
     return original.reshape(brain.shape)
Example #18
 def process(self, img, **kwargs):
     n_components = kwargs.get('n_components', 3)
     numpass = kwargs.get('numpass', 5)
     median_radius = kwargs.get('median_radius', 10)
     if isinstance(img, Dataset):
         img = img.pixel_array
     w, h = img.shape[1], img.shape[0]
     img, _ = median_otsu(img, numpass=numpass, median_radius=median_radius)
     img = (img - np.min(img)) / (np.max(img) - np.min(img))
     x = img.reshape((-1, 1))
     model = GaussianMixture(n_components=n_components,
                             covariance_type='spherical')
     model.fit(x)
     labels = model.predict(x)
     # c_index = np.argmax(k_means.cluster_centers_.reshape((-1)))
     # flat = np.full(img.shape[0] * img.shape[1], 0, dtype=np.uint8)
     # flat[k_means.labels_ == c_index] = 1
     # mask = flat.reshape(img.shape)
     # k1 = np.ones((3, 3), np.uint16)
     # k2 = np.ones((5, 5), np.uint16)
     # mask = cv.erode(mask, k2, iterations=1)
     # mask = cv.dilate(mask, k1, iterations=1)
     # mask = cv.erode(mask, k2, iterations=2)
     # mask = cv.dilate(mask, k1, iterations=5)
     return labels.reshape((h, w))
Example #19
    def apply_mask(self):
        """
        If self.mask is not None, will mask the raw_data with the mask provided.
        If self.mask is None, median_otsu is used to generate those files.
        """
        if self.mask is None:
            print("Generating mask with median_otsu.")
            raw_data = self.raw_data.get_data()
            masked_data, mask = median_otsu(raw_data, 2,2)

            #Update the instance
            self.data = nib.nifti1.Nifti1Image(masked_data.astype(np.float32), self.raw_data.get_affine())
            self.mask = nib.nifti1.Nifti1Image(mask.astype(np.int_), self.data.get_affine())
        else:
            print("Masking data with provided mask.")
            raw_data = self.raw_data.get_data()
            mask_data = self.mask.get_data()
            masked_data = raw_data * mask_data

            #Update the instance
            self.data = nib.nifti1.Nifti1Image(masked_data.astype(np.float32), self.raw_data.get_affine())

        #Regenerate an average B0 image
        values = np.array(self.gradient_table.bvals)
        ii = np.where(values == self.gradient_table.bvals.min())[0]
        self.b0_average = np.mean(self.data.get_data()[:,:,:,ii], axis=3)
Example #20
    def load_mask(self, make_mask):
        if make_mask is None:
            # empty mask
            self.mask = np.zeros_like(self.volume)
        else:
            # use dipy to isolate brain as an initial mask
            if make_mask.lower() == 'auto':
                print('creating brain mask...')
                _, self.mask = median_otsu(self.volume, median_radius=1,
                                           numpass=2)
                print('done.')
            elif make_mask.lower() == 'none':
                # empty mask
                self.mask = np.zeros_like(self.volume)
            else:
                print('reading mask file...')

                # same initial axes swap as for volume
                self.mask = nib.load(make_mask).get_fdata()
                print('done.')

        if config['remove small clusters'] and (np.sum(self.mask) > 0):
            # only keep biggest data cluster (brain) - removes small clusters
            label_image = label(self.mask)
            max_label = sorted([[np.sum(label_image == val), val]
                                for val in np.unique(label_image)[1:]])[-1][1]
            self.mask[label_image != max_label] = 0
Example #21
def mask(d, raw_d=None, nskip=3):
    mn = d[:, :, :, nskip:].mean(3)
    masked_data, mask = median_otsu(mn, 3, 2)
    mask = np.concatenate(
        (np.tile(True, (d.shape[0], d.shape[1], d.shape[2], nskip)),
         np.tile(np.expand_dims(mask == False, 3),
                 (1, 1, 1, d.shape[3] - nskip))),
        axis=3)
    # Some runs have corrupt volumes at the end (e.g., mux scans that are stopped prematurely). Mask those too.
    # But... motion correction might have interpolated the empty slices such that they aren't exactly zero.
    # So use the raw data to find these bad volumes.
    if raw_d is not None:
        slice_max = raw_d.max(0).max(0)
    else:
        slice_max = d.max(0).max(0)
    bad = np.any(slice_max == 0, axis=0)
    # We don't want to miss a bad volume somewhere in the middle, as that could be a valid artifact.
    # So, only mask bad vols that are contiguous to the end.
    mask_vols = np.array([np.all(bad[i:]) for i in range(bad.shape[0])])
    # Mask out the skip volumes at the beginning
    mask_vols[0:nskip] = True
    mask[:, :, :, mask_vols] = True
    brain = np.ma.masked_array(d, mask=mask)
    good_vols = np.logical_not(mask_vols)
    return brain, good_vols
Example #22
 def process(self, img, **kwargs):
     quantile = kwargs.get('quantile', 0.1)
     n_samples = kwargs.get('n_samples', 100)
     numpass = kwargs.get('numpass', 5)
     median_radius = kwargs.get('median_radius', 10)
     high_intensity_threshold = kwargs.get('high_intensity_threshold', 0.1)
     blur_radius = kwargs.get('blur_radius', 9)
     dilation_radius = kwargs.get('dilation_radius', 5)
     dilation_iterations = kwargs.get('dilation_iterations', 1)
     if isinstance(img, Dataset):
         img = img.pixel_array
     img, _ = median_otsu(img, numpass=numpass, median_radius=median_radius)
     original_shape = img.shape
     img = (img - np.min(img)) / (np.max(img) - np.min(img))
     blurred = cv.blur(img, (15, 15))
     edges = np.clip(img - blurred, 0.0, 1.0)
     edges[edges > high_intensity_threshold] = 1.0
     edges[edges <= high_intensity_threshold] = 0.0
     edges = cv.dilate(edges, np.ones((3, 3)), iterations=1)
     img = np.clip(img - edges, 0.0, 1.0)
     img = cv.erode(img, np.ones((3, 3)), iterations=1)
     img = cv.blur(img, (blur_radius, blur_radius))
     # Flatten image.
     x = np.reshape(img, [-1, 1])
     bandwidth = estimate_bandwidth(x, quantile=quantile, n_samples=n_samples)
     mean_shift = MeanShift(bandwidth=bandwidth, bin_seeding=True)
     mean_shift.fit(x)
     # c_index = np.argmax(mean_shift.cluster_centers_.reshape((-1)))
     # flat = np.full(original_shape[0] * original_shape[1], 0, dtype=np.uint8)
     # flat[mean_shift.labels_ == c_index] = 1
     mask = mean_shift.labels_.reshape(original_shape)
     # mask = cv.dilate(mask, np.ones((dilation_radius, dilation_radius)), iterations=dilation_iterations)
     return mask
Example #23
def createBrainMaskFromb0Data(b0Data, affineMatrix=None, saveDir=None):
    """Creates a mask of the brain from a b0 volume.
    The output is written to file if affineMatrix and saveDir are provided.

    Parameters:
    ----------
    b0Data : 3D ndarray
        MRI scan of the head without diffusion weighting
    affineMatrix : 4x4 array
        Affine transformation matrix as in Nifti
    saveDir : string
        Directory where the created mask will be saved as 'brainMask.nii.gz'.

    Returns:
    --------
    masked_b0 : 3D ndarray
        The b0 volume with the background masked out.
    mask : 3D ndarray
        The binary brain mask.
    """

    # Run median_otsu and keep both the masked b0 volume and the binary mask
    masked_b0, mask = median_otsu(b0Data, median_radius=3, numpass=5, dilate=2)
    mask = np.logical_and(mask == 1, masked_b0 > 0)

    if affineMatrix is not None and saveDir is not None:
        try:
            maskNifti = nib.Nifti1Image(mask.astype(np.float32), affineMatrix)
            nib.save(maskNifti, join(saveDir, 'brainMask.nii.gz'))
        except Exception as e:
            print('Saving of the brain mask '
                  'failed with message {}'.format(e))
    return masked_b0, mask
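A hypothetical call to the helper above; the file name is a placeholder and the image is assumed to be a single 3D b0 volume.

import nibabel as nib

b0_img = nib.load('b0.nii.gz')  # placeholder path
masked_b0, mask = createBrainMaskFromb0Data(b0_img.get_fdata(),
                                            affineMatrix=b0_img.affine,
                                            saveDir='.')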
Example #24
    def _run_interface(self, runtime):

        in_file = self.inputs.in_file

        b0_img = nb.load(in_file)
        b0_data = b0_img.get_fdata()

        masked_data, data_mask = median_otsu(
            b0_data,
            median_radius=self.inputs.median_radius,
            numpass=self.inputs.num_pass,
            autocrop=False,
            dilate=self.inputs.dilate)

        self._results['out_mask'] = fname_presuffix(in_file,
                                                    suffix='_mask',
                                                    newpath=runtime.cwd)

        self._results['masked_input'] = fname_presuffix(in_file,
                                                        suffix='_brain_masked',
                                                        newpath=runtime.cwd)

        masked_img = nb.Nifti1Image(masked_data, b0_img.affine, b0_img.header)
        masked_img.to_filename(self._results['masked_input'])

        mask_img = nb.Nifti1Image(data_mask.astype('f8'), b0_img.affine,
                                  b0_img.header)
        mask_img.to_filename(self._results['out_mask'])

        return runtime
Example #25
def auto_mask(data, raw_d=None, nskip=3, mask_bad_end_vols=False):
    from dipy.segment.mask import median_otsu
    mn = data[:, :, :, nskip:].mean(3)
    _, mask = median_otsu(mn, 3, 2) # oesteban: masked_data was not used
    mask = np.concatenate((
        np.tile(True, (data.shape[0], data.shape[1], data.shape[2], nskip)),
        np.tile(np.expand_dims(mask == 0, 3), (1, 1, 1, data.shape[3]-nskip))),
        axis=3)
    mask_vols = np.zeros(mask.shape[-1], dtype=bool)  # boolean per-volume flags
    if mask_bad_end_vols:
        # Some runs have corrupt volumes at the end (e.g., mux scans that are stopped prematurely). Mask those too.
        # But... motion correction might have interpolated the empty slices such that they aren't exactly zero.
        # So use the raw data to find these bad volumes.
        # 2015.10.29 RFD: this caused problems with some non-mux EPI scans that (inexplicably)
        # have empty slices at the top of the brain. So we'll disable it for
        # now.
        if raw_d is None:
            slice_max = data.max(0).max(0)
        else:
            slice_max = raw_d.max(0).max(0)

        bad = np.any(slice_max == 0, axis=0)
        # We don't want to miss a bad volume somewhere in the middle, as that could be a valid artifact.
        # So, only mask bad vols that are contiguous to the end.
        mask_vols = np.array([np.all(bad[i:]) for i in range(bad.shape[0])])
    # Mask out the skip volumes at the beginning
    mask_vols[0:nskip] = True
    mask[..., mask_vols] = True
    brain = np.ma.masked_array(data, mask=mask)
    good_vols = np.logical_not(mask_vols)
    return brain, mask, good_vols
Example #26
    def apply_mask(self):
        """
        If self.mask is not None, will mask the raw_data with the mask provided.
        If self.mask is None, median_otsu is used to generate those files.
        """
        if self.mask is None:
            print("Generating mask with median_otsu.")
            raw_data = self.raw_data.get_data()
            masked_data, mask = median_otsu(raw_data, self.median_radius, self.numpass)

            #Update the instance
            self.data = nib.nifti1.Nifti1Image(masked_data.astype(np.float32), self.raw_data.get_affine())
            self.mask = nib.nifti1.Nifti1Image(mask.astype(np.int_), self.data.get_affine())
        else:
            print("Masking data with provided mask.")
            raw_data = self.raw_data.get_data()
            mask_data = self.mask.get_data()
            masked_data = raw_data * mask_data

            #Update the instance
            self.data = nib.nifti1.Nifti1Image(masked_data.astype(np.float32), self.raw_data.get_affine())

        #Regenerate an average B0 image
        values = np.array(self.gradient_table.bvals)
        ii = np.where(values == self.gradient_table.bvals.min())[0]
        self.b0_average = np.mean(self.data.get_data()[:,:,:,ii], axis=3)
Example #27
    def _get_from_file_mapping(self, path, file_mapping: dict, b0_threshold: float = 10.0):

        path_mapping = {key: os.path.join(path, file_mapping[key]) for key in file_mapping}
        bvals, bvecs = read_bvals_bvecs(path_mapping['bvals'],
                                        path_mapping['bvecs'])

        # img, t1, gradient table, affine and dwi
        img = nb.load(path_mapping['img'])
        t1 = nb.load(path_mapping['t1']).get_data()

        dwi = img.get_data().astype("float32")

        aff = img.affine

        # binary mask
        if 'mask' in path_mapping:
            binary_mask = nb.load(path_mapping['mask']).get_data()
        else:
            _, binary_mask = median_otsu(dwi[..., 0], 2, 1)

        # calculating b0
        b0 = dwi[..., bvals < b0_threshold].mean(axis=-1)

        # Do not generate fa yet
        fa = None
        gtab = gradient_table(bvals, bvecs)
        data_container = DataContainer(bvals, bvecs, gtab, t1, dwi, aff, binary_mask, b0, fa)
        return self._preprocess(data_container)
Example #28
def medianOtsu(file_in, outPath, median_radius=4, num_pass=4):
    print('    - running Median Otsu algorithm...')

    finalFileName = outPath + utils.to_extract_filename(
        file_in) + d.id_median_otsu + '_maskedVolume' + d.extension
    binaryMaskFileName = outPath + utils.to_extract_filename(
        file_in) + d.id_median_otsu + '_binaryMask' + d.extension
    b0MaskedFileName = outPath + utils.to_extract_filename(
        file_in) + d.id_median_otsu + '_b0Masked' + d.extension

    if not (os.path.exists(finalFileName)):
        img = nib.load(file_in)
        data = img.get_data()
        maskedvolume, mask = median_otsu(data, median_radius, num_pass)

        nib.save(nib.Nifti1Image(maskedvolume.astype(np.float32), img.affine),
                 finalFileName)
        nib.save(nib.Nifti1Image(mask.astype(np.float32), img.affine),
                 binaryMaskFileName)
        nib.save(
            nib.Nifti1Image(
                maskedvolume[:, :, :, d.default_b0_ref].astype(np.float32),
                img.affine), b0MaskedFileName)

    return finalFileName, binaryMaskFileName
Example #29
 def process(self, img, **kwargs):
     n_clusters = kwargs.get('n_clusters', 3)
     numpass = kwargs.get('numpass', 5)
     median_radius = kwargs.get('median_radius', 10)
     high_intensity_threshold = kwargs.get('high_intensity_threshold', 0.1)
     blur_radius = kwargs.get('blur_radius', 5)
     dilation_radius = kwargs.get('dilation_radius', 5)
     dilation_iterations = kwargs.get('dilation_iterations', 1)
     if isinstance(img, Dataset):
         img = img.pixel_array
     img, _ = median_otsu(img, numpass=numpass, median_radius=median_radius)
     original_shape = img.shape
     img = (img - np.min(img)) / (np.max(img) - np.min(img))
     blurred = cv.blur(img, (15, 15))
     edges = np.clip(img - blurred, 0.0, 1.0)
     edges[edges > high_intensity_threshold] = 1.0
     edges[edges <= high_intensity_threshold] = 0.0
     edges = cv.dilate(edges, np.ones((3, 3)), iterations=1)
     img = np.clip(img - edges, 0.0, 1.0)
     img = cv.erode(img, np.ones((3, 3)), iterations=1)
     img = cv.blur(img, (blur_radius, blur_radius))
     # Flatten image.
     x = np.reshape(img, [-1, 1])
     k_means = KMeans(n_clusters=n_clusters, random_state=0).fit(x)
     c_index = np.argmax(k_means.cluster_centers_.reshape((-1)))
     flat = np.full(original_shape[0] * original_shape[1], 0, dtype=np.uint8)
     flat[k_means.labels_ == c_index] = 1
     mask = flat.reshape(original_shape)
     mask = cv.dilate(mask, np.ones((dilation_radius, dilation_radius)), iterations=dilation_iterations)
     return mask
Example #30
def test_median_otsu_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_data('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        mo_flow = MedianOtsuFlow()
        mo_flow.run(data_path, out_dir=out_dir, save_masked=save_masked,
                             median_radius=median_radius, numpass=numpass,
                             autocrop=autocrop, vol_idx=vol_idx, dilate=dilate)

        mask_name = mo_flow.last_generated_outputs['out_mask']
        masked_name = mo_flow.last_generated_outputs['out_masked']

        masked, mask = median_otsu(volume, median_radius,
                                   numpass, autocrop,
                                   vol_idx, dilate)

        result_mask_data = nib.load(join(out_dir, mask_name)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        result_masked_data = nib.load(join(out_dir, masked_name)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
Example #31
def process_data(path_dict, gtab, signal_parameters):
    processing_parameters = signal_parameters["processing_params"]
    try:
        data, affine = load_nifti(path_dict["dwi"])
    except Exception as e:
        print(path_dict['name'])
        raise e
    data = data[:].copy()
    # Crop the MRI

    try:
        mask, _ = load_nifti(path_dict["mask"])
    except FileNotFoundError:
        print('No mask found, generating one, may be erroneous')
        data, mask = median_otsu(data,
                                 vol_idx=gtab.b0s_mask,
                                 autocrop=True,
                                 **processing_parameters['median_otsu_params'])

    mask = np.expand_dims(mask.astype(int), axis=-1)

    mean_b0 = data[..., gtab.b0s_mask].mean(-1)
    mean_b0 = np.expand_dims(mean_b0, axis=-1)
    mean_b0 *= mask

    data = normalize_data(data, gtab.b0s_mask)

    sh_coeff = dwi_to_sh(data,
                         gtab,
                         mask=mask,
                         sh_order=signal_parameters['sh_order'],
                         **processing_parameters["sh_params"])

    # Pad the x,y,z axes so they can be divided by the respective patch size
    patch_size = np.array(signal_parameters["patch_size"])
    pad_needed = patch_size - sh_coeff.shape[:3] % patch_size
    pad_needed = [(x // 2, x // 2 + x % 2) for x in pad_needed] + [(0, 0)]

    sh_coeff = np.pad(sh_coeff, pad_width=pad_needed)
    mask = np.pad(mask, pad_width=pad_needed)
    mean_b0 = np.pad(mean_b0, pad_width=pad_needed)

    real_size = sh_coeff.shape[:3]

    sh_coeff = sh_coeff.astype(np.float32)
    mean_b0 = mean_b0.astype(np.float32)

    data = {
        'sh': sh_coeff,
        'mask': mask,
        'mean_b0': mean_b0,
        'real_size': real_size,
        'gtab': gtab
    }

    if 'site' in path_dict.keys():
        data['site'] = [path_dict['site']]

    return data
Example #32
def run_dmri_pipeline(subject_session, do_topup=True, do_edc=True):
    subject, session = subject_session
    data_dir = os.path.join(source_dir,  subject, session, 'dwi')
    write_dir = os.path.join(derivatives_dir, subject, session)
    dwi_dir = os.path.join(write_dir, 'dwi')
    # Apply topup to the images
    input_imgs = sorted(glob.glob('%s/sub*.nii.gz' % data_dir))
    dc_imgs = sorted(glob.glob(os.path.join(dwi_dir, 'dcsub*run*.nii.gz')))
    mem = Memory('/neurospin/tmp/bthirion/cache_dir')
    if len(dc_imgs) < len(input_imgs):
        se_maps = [
            os.path.join(source_dir, subject, session, 'fmap',
                         '%s_%s_dir-ap_epi.nii.gz' % (subject, session)),
            os.path.join(source_dir, subject, session, 'fmap',
                         '%s_%s_dir-pa_epi.nii.gz' % (subject, session))]

        if do_topup:
            fsl_topup(se_maps, input_imgs, mem, write_dir, 'dwi')

    # Then proceed with Eddy current correction
    # get the images
    dc_imgs = sorted(glob.glob(os.path.join(dwi_dir, 'dc*run*.nii.gz')))
    dc_img = os.path.join(dwi_dir, 'dc%s_%s_dwi.nii.gz' % (subject, session))
    concat_images(dc_imgs, dc_img)

    # get the bvals/bvec
    file_bvals = sorted(glob.glob('%s/sub*.bval' % data_dir))
    bvals = np.concatenate([np.loadtxt(fbval) for fbval in sorted(file_bvals)])
    bvals_file = os.path.join(dwi_dir, 'dc%s_%s_dwi.bval' % (subject, session))
    np.savetxt(bvals_file, bvals)
    file_bvecs = sorted(glob.glob('%s/sub*.bvec' % data_dir))
    bvecs = np.hstack([np.loadtxt(fbvec) for fbvec in sorted(file_bvecs)])
    bvecs_file = os.path.join(dwi_dir, 'dc%s_%s_dwi.bvec' % (subject, session))
    np.savetxt(bvecs_file, bvecs)

    # Get eddy-preprocessed images
    # eddy_img = nib.load(glob.glob(os.path.join(dwi_dir, 'eddc*.nii*'))[-1])

    # Get eddy-preprocessed images
    eddy_img = mem.cache(eddy_current_correction)(
        dc_img, bvals_file, bvecs_file, dwi_dir, mem)

    # load the data
    gtab = gradient_table(bvals, bvecs, b0_threshold=10)
    # Create a brain mask

    from dipy.segment.mask import median_otsu
    b0_mask, mask = median_otsu(eddy_img.get_data(), 2, 1)
    if subject == 'sub-13':
        from nilearn.masking import compute_epi_mask
        from nilearn.image import index_img
        imgs_ = [index_img(eddy_img, i)
                 for i in range(len(bvals)) if bvals[i] < 50]
        mask_img = compute_epi_mask(imgs_, upper_cutoff=.8)
        mask_img.to_filename('/tmp/mask.nii.gz')
        mask = mask_img.get_data()
    # do the tractography
    streamlines = tractography(eddy_img, gtab, mask, dwi_dir)
    return streamlines
Example #33
def brainMask(dwi):
    img = nib.load(dwi['denoised'])
    raw = img.get_data()

    b0_raw = raw[:, :, :, dwi['shellind'] == 0]
    if b0_raw.shape[3] > 0:
        b0_raw = np.mean(b0_raw, axis=3)

    raw = np.transpose(raw, np.hstack((dwi['perm'], 3)))
    if dwi['flip_sign'][0] < 0:
        raw = raw[::-1, :, :, :]

    if dwi['flip_sign'][1] < 0:
        raw = raw[:, ::-1, :, :]

    if dwi['flip_sign'][2] < 0:
        raw = raw[:, :, ::-1, :]

    b0 = raw[:, :, :, dwi['shellind'] == 0]
    b0 = np.median(b0, axis=3)
    mds = raw[:, :, :, dwi['shellind'] != 0]
    mds = np.median(mds, axis=3)

    # do bfc correction for better brainMasks
    import SimpleITK as sitk
    corrector = sitk.N4BiasFieldCorrectionImageFilter()

    inputImage = sitk.GetImageFromArray(b0)
    inputImageMask = sitk.Cast(inputImage, sitk.sitkInt32)
    maskImage = sitk.OtsuThreshold(inputImageMask, 0, 1, 200)
    inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
    b0ImageBfc = corrector.Execute(inputImage, maskImage)
    b0 = sitk.GetArrayFromImage(b0ImageBfc)

    inputImage = sitk.GetImageFromArray(mds)
    inputImageMask = sitk.Cast(inputImage, sitk.sitkInt32)
    maskImage = sitk.OtsuThreshold(inputImageMask, 0, 1, 200)
    inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
    mdsImageBfc = corrector.Execute(inputImage, maskImage)
    mds = sitk.GetArrayFromImage(mdsImageBfc)

    _, b0_mask = median_otsu(b0, 4, 4)
    _, mds_mask = median_otsu(mds, 4, 4)

    dwi['b0'] = b0_raw
    dwi['mask'] = np.bitwise_or(b0_mask, mds_mask)
Example #34
 def mask_getter(subses_dict, dwi_affine, data_imap):
     mean_b0_img = nib.load(data_imap["b0_file"])
     mean_b0 = mean_b0_img.get_fdata()
     _, mask_data = median_otsu(mean_b0, **self.median_otsu_kwargs)
     return mask_data, dict(
         source=data_imap["b0_file"],
         technique="median_otsu applied to b0",
         median_otsu_kwargs=self.median_otsu_kwargs)
Example #35
def remove_background(image):
    maskdata, mask = median_otsu(image,
                                 vol_idx=range(image.shape[-1]),
                                 median_radius=3,
                                 numpass=1,
                                 autocrop=True,
                                 dilate=2)
    return maskdata
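An illustrative call to remove_background; the path is a placeholder for a 4D diffusion image, and because the helper passes autocrop=True the returned array is cropped to the brain's bounding box.

import nibabel as nib

img = nib.load('dwi.nii.gz')  # placeholder path to a 4D volume
cropped = remove_background(img.get_fdata())
print(cropped.shape)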
Example #36
def smooth_mask(subject, run):
         """ Applies smoothing and computes mask. Applies mask to smoothed data """

         data = bold_data(subject, run)
         mean_data = np.mean(data,axis=-1)
         masked, mask = median_otsu(mean_data,2,1)
         smooth_data = gaussian_filter(data,[2,2,2,0])
         smooth_masked = smooth_data[mask]
         return smooth_masked.T
Example #37
 def handle(self):
     img = nib.load(self.dmri_file)
     data = img.get_data()
     bvals, bvecs = read_bvals_bvecs(self.fbvals, self.fbvecs)
     gtab = gradient_table(bvals, bvecs)
     maskdata, mask = median_otsu(data, 3, 1, True,\
                          vol_idx=range(10, 50), dilate=2)
     #print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)
     tenmodel = dti.TensorModel(gtab)
     self.tenfit = tenmodel.fit(maskdata)
Example #38
0
def brain_extraction(file):
    from dipy.segment.mask import median_otsu
    from os.path import splitext
    img = nib.load(file)
    data = img.get_data()
    masked_data, mask = median_otsu(data, 2, 1)
    mask_img = nib.Nifti1Image(mask.astype(np.int), img.get_affine())
    masked_img = nib.Nifti1Image(masked_data, img.get_affine())

    root, ext = splitext(file)

    nib.save(mask_img, root + '_binary_mask.nii')
    nib.save(masked_img, root + '_masked.nii')
Example #39
def design_matrix(subject, run, TR = 2.5):

      data = bold_data(subject,run)
      vol_shape, n_trs = data.shape[:-1], data.shape[-1]
      tr_times = np.arange(0,30,TR)
      hrf_at_trs = hrf(tr_times)
      col = 0
      X = np.ones((n_trs,14))

      #Smoothed and masked data
      mean_data = np.mean(data,axis=-1)
      masked, mask = median_otsu(mean_data,2,1)
      # smooth_data = gaussian_filter(data,[2,2,2,0])
      # Y = smooth_data[mask].T
      #omitted smoothing for now
      Y = data[mask].T
      
      #Adding onsets to design matrix
      for i in list_cond_file(subject,run):
          neural_prediction = events2neural_fixed(i, TR, n_trs)
          convolved = convolve(neural_prediction, hrf_at_trs)
          X[:,col] = convolved
          col = col+1


      ##PCA
      Y_demeaned = Y - np.mean(Y,axis=1).reshape([-1,1])
      unscaled_cov = Y_demeaned.dot(Y_demeaned.T) 
      U, S, V = npl.svd(unscaled_cov)
      X[:,8] = U[:,0]
      X[:,9:11] = U[:,6:8] 

     


      linear_drift = np.linspace(-1,1,n_trs)
      X[:,11] = linear_drift
      quadratic_drift = linear_drift ** 2
      quadratic_drift -= np.mean(quadratic_drift)
      X[:,12]= quadratic_drift

      betas = npl.pinv(X).dot(Y)
      betas_vols = np.zeros(vol_shape+(14,))
      betas_vols[mask,:] = betas.T
      
      projections = U.T.dot(Y_demeaned)
      projection_vols = np.zeros(data.shape)
      projection_vols[mask,:] = projections.T
          
      return X, Y, betas_vols, mask, U, Y_demeaned, mean_data, projection_vols
Example #40
def _brain_mask(row, median_radius=4, numpass=4, autocrop=False,
                vol_idx=None, dilate=None, force_recompute=False):
    brain_mask_file = _get_fname(row, '_brain_mask.nii.gz')
    if not op.exists(brain_mask_file) or force_recompute:
        img = nib.load(row['dwi_file'])
        data = img.get_data()
        gtab = row['gtab']
    mean_b0 = np.mean(data[..., gtab.b0s_mask], -1)  # average the b0 volumes
        _, brain_mask = median_otsu(mean_b0, median_radius, numpass,
                                    autocrop, dilate=dilate)
        be_img = nib.Nifti1Image(brain_mask.astype(int),
                                 img.affine)
        nib.save(be_img, brain_mask_file)
    return brain_mask_file
Example #41
def peaks_from_nifti(fdwi, fbvec=None, fbval=None, mask=None):

    if '.' not in fdwi:
        fbase = fdwi
        fdwi = fdwi+".nii.gz"
        if not fbval:
            fbval = fbase+".bval"
        if not fbvec:
            fbvec = fbase+".bvec"
    print(fdwi)
    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()
    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)
    gtab = dgrad.gradient_table(bval, bvec)


    if not mask:
        print('generate mask')
        maskdata, mask = median_otsu(data, 3, 1, False, vol_idx=range(10, 50), dilate=2)

    else:
        mask_img = nib.load(mask)
        mask = mask_img.get_data()

        from dipy.segment.mask import applymask
        maskdata = applymask(data, mask)

    print(maskdata.shape, mask.shape)


    from dipy.reconst.shm import QballModel, CsaOdfModel
    model = QballModel(gtab, 6)

    sphere = get_sphere('symmetric724')

    print "fit Qball peaks"
    proc_num = multiprocessing.cpu_count()-1
    print "peaks_from_model using core# =" + str(proc_num)

    peaks = peaks_from_model(model=model, data=maskdata, relative_peak_threshold=.5,
                            min_separation_angle=25,
        sphere=sphere, mask=mask, parallel=True, nbr_processes=proc_num)

    return peaks
Example #42
def mask(d, raw_d=None, nskip=3):
    mn = d[:,:,:,nskip:].mean(3)
    masked_data, mask = median_otsu(mn, 3, 2)
    mask = np.concatenate((np.tile(True, (d.shape[0], d.shape[1], d.shape[2], nskip)),
                           np.tile(np.expand_dims(mask==False, 3), (1,1,1,d.shape[3]-nskip))),
                           axis=3)
    # Some runs have corrupt volumes at the end (e.g., mux scans that are stopped prematurely). Mask those too.
    # But... motion correction might have interpolated the empty slices such that they aren't exactly zero.
    # So use the raw data to find these bad volumes.
    if raw_d is not None:
        slice_max = raw_d.max(0).max(0)
    else:
        slice_max = d.max(0).max(0)
    bad = np.any(slice_max==0, axis=0)
    # We don't want to miss a bad volume somewhere in the middle, as that could be a valid artifact.
    # So, only mask bad vols that are contiguous to the end.
    mask_vols = np.array([np.all(bad[i:]) for i in range(bad.shape[0])])
    # Mask out the skip volumes at the beginning
    mask_vols[0:nskip] = True
    mask[:,:,:,mask_vols] = True
    brain = np.ma.masked_array(d, mask=mask)
    good_vols = np.logical_not(mask_vols)
    return brain,good_vols
Example #43
def dodata(f_name,data_path):
    dipy_home = pjoin(os.path.expanduser('~'), 'dipy_data')
    folder = pjoin(dipy_home, data_path)
    fraw = pjoin(folder, f_name+'.nii.gz')
    fbval = pjoin(folder, f_name+'.bval')
    fbvec = pjoin(folder, f_name+'.bvec')
    flabels = pjoin(folder, f_name+'.nii-label.nii.gz')
    
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs)
    
    img = nib.load(fraw)
    data = img.get_data()
    affine = img.get_affine()
    
    label_img = nib.load(flabels)
    labels=label_img.get_data()
    lap=through_label_sl.label_position(labels, labelValue=1)    
    dataslice = data[40:80, 20:80, lap[2][2] // 2]
    #print lap[2][2]/2
    
    #get_csd_gfa(f_name,data,gtab,dataslice)
    
    maskdata, mask = median_otsu(data, 2, 1, False, vol_idx=range(10, 50), dilate=2)  # keep the background (autocrop=False)
    
    """ get fa and tensor evecs and ODF"""
    from dipy.reconst.dti import TensorModel,mean_diffusivity
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)
    
    sphere = get_sphere('symmetric724')
    
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
      
    np.save(os.getcwd()+'\zhibiao'+f_name+'_FA.npy',FA)
    fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
    nib.save(fa_img,os.getcwd()+'\zhibiao'+f_name+'_FA.nii.gz')
    print('Saving "DTI_tensor_fa.nii.gz" sucessful.')
    evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), affine)
    nib.save(evecs_img, os.getcwd()+'\zhibiao'+f_name+'_DTI_tensor_evecs.nii.gz')
    print('Saving "DTI_tensor_evecs.nii.gz" sucessful.')
    MD1 = mean_diffusivity(tenfit.evals)
    nib.save(nib.Nifti1Image(MD1.astype(np.float32), img.get_affine()), os.getcwd()+'\zhibiao'+f_name+'_MD.nii.gz')
    
    
    #tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere)
    #from dipy.reconst.odf import gfa
    #dti_gfa=gfa(tensor_odfs)
    
    wm_mask = (np.logical_or(FA >= 0.4, (np.logical_and(FA >= 0.15, MD1 >= 0.0011))))

    response = recursive_response(gtab, data, mask=wm_mask, sh_order=8,
                                  peak_thr=0.01, init_fa=0.08,
                                  init_trace=0.0021, iter=8, convergence=0.001,
                                  parallel=False)
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    
    #csd_fit = csd_model.fit(data)

    from dipy.direction import peaks_from_model

    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=False)
    
    GFA = csd_peaks.gfa
    
    nib.save(nib.Nifti1Image(GFA.astype(np.float32), affine),
             os.getcwd()+'\zhibiao'+f_name+'_MSD.nii.gz')
    print('Saving "GFA.nii.gz" sucessful.')
    
    from dipy.reconst.shore import ShoreModel
    asm = ShoreModel(gtab)
    print('Calculating...SHORE msd')
    asmfit = asm.fit(data,mask)
    msd = asmfit.msd()
    msd[np.isnan(msd)] = 0
    
    #print GFA[:,:,slice].T
    print('Saving msd_img.png')
    nib.save(nib.Nifti1Image(msd.astype(np.float32), affine),
             os.getcwd()+'\zhibiao'+f_name+'_GFA.nii.gz')
Example #44
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from dipy.segment.mask import median_otsu
import utils


if __name__ == "__main__":
    img = utils.load_nifti('b0.nii.gz')
    data = img.get_data()

    # b0_mask, mask = median_otsu(data, 2, 1)
    # b0_mask, mask = median_otsu(data, 3, 1)
    # b0_mask, mask = median_otsu(data, 1, 2)
    # b0_mask, mask = median_otsu(data, 1, 3)
    # b0_mask, mask = median_otsu(data, 3, 2)
    # b0_mask, mask = median_otsu(data, 2, 3)
    b0_mask, mask1 = median_otsu(data, 1, 1)
    b0_mask, mask2 = median_otsu(data, 4, 2)
    # chosen parameters
    b0_mask, mask = median_otsu(data, 2, 2)

    fig = plt.figure()
    fig.add_subplot(221)
    plt.title("Image")
    plt.axis('off')
    plt.imshow(data[data.shape[0] // 2, :, :], cmap=cm.gray)

    fig.add_subplot(222)
    plt.title("radius 2, numpass 2")
    plt.axis('off')
    plt.imshow(mask[mask.shape[0] // 2, :, :], cmap=cm.gray)
Example #45
img, gtab = read_cenir_multib(bvals)

data = img.get_data()

affine = img.get_affine()

"""
The function ``read_cenir_multib`` returns img and gtab, which contain,
respectively, a nibabel Nifti1Image object (from which the data can be
extracted) and a GradientTable object with information about the b-values and
b-vectors.

Before fitting the data, we perform some pre-processing. We first compute a
brain mask to avoid unnecessary calculations on the background of the image.
"""

maskdata, mask = median_otsu(data, 4, 2, False, vol_idx=[0, 1], dilate=1)

"""
Since the diffusion kurtosis model involves the estimation of a large number
of parameters [TaxCMW2015]_ and since the non-Gaussian components of the
diffusion signal are more sensitive to artefacts [NetoHe2012]_, a fundamental
pre-processing step for diffusion kurtosis fitting is to denoise our data. For
this, we use Dipy's non-local means filter (see :ref:`example-denoise-nlmeans`).
Note that, since the HCP-like data has a large number of diffusion-weighted
volumes, this procedure can take a couple of hours to compute for the entire
dataset. Therefore, to speed up the run time of this example we only denoise
an axial slice of the data.
"""

axial_slice = 40
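A minimal sketch of the denoising step the text describes, assuming dipy's nlmeans and estimate_sigma together with the data, mask and axial_slice variables defined above; the parameter choices here are illustrative rather than taken from the original example.

import numpy as np
from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma

# Estimate a single noise level and denoise only one axial slab of the data.
sigma = float(np.mean(estimate_sigma(data, N=4)))
slab = data[:, :, axial_slice:axial_slice + 1]
slab_mask = mask[:, :, axial_slice:axial_slice + 1]
data_denoised = nlmeans(slab, sigma=sigma, mask=slab_mask)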
Example #46
fval = sys.argv[7]+'.bval'
fvec = sys.argv[7]+'.bvec'
bvals,bvecs = read_bvals_bvecs(fval,fvec)
fname = sys.argv[7]+'.nii.gz'
gtab=gradient_table(bvals,bvecs)
img=nib.load(fname)
data = img.get_data()
data_b0=np.squeeze(data)[...,0]

from dipy.data.fetcher import fetch_syn_data, read_syn_data
fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
syn_b0 = np.array(nib_syn_b0.get_data())

from dipy.segment.mask import median_otsu
data_b0_masked, data_b0_mask = median_otsu(data_b0, 4, 4)
syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

static = data_b0_masked
static_affine = nib_stanford.get_affine()
moving = syn_b0_masked
moving_affine = nib_syn_b0.get_affine()

pre_align = np.array([[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
                      [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
                      [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
                      [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
import dipy.align.vector_fields as vfu

transform = np.linalg.inv(moving_affine).dot(pre_align.dot(static_affine))
resampled = vfu.warp_3d_affine(moving.astype(np.float32),
Example #47
    def run(self, data_files, bvals_files, bvecs_files, mask_files,
            bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
            out_file='product.json', out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """Compute the signal-to-noise ratio in the corpus callosum.

        Parameters
        ----------
        data_files : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        bvals_files : string
            Path of bvals.
        bvecs_files : string
            Path of bvecs.
        mask_files : string
            Path of brain mask
        bbox_threshold : variable float, optional
            Threshold for bounding box, values separated with commas for ex.
            [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')

        """
        io_it = self.get_io_iterator()

        for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            data, affine = load_nifti(dwi_path)
            bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
            gtab = gradient_table(bvals=bvals, bvecs=bvecs)

            logging.info('Computing brain mask...')
            _, calc_mask = median_otsu(data)

            mask, affine = load_nifti(mask_path)
            mask = np.array(calc_mask == mask.astype(bool)).astype(int)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info(
                'Computing worst-case/best-case SNR using the CC...')

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0],
                   bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            if len(bbox_threshold) != 6:
                raise IOError('bbox_threshold should have 6 float values')

            mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                                 bbox_threshold,
                                                 return_cfa=True)

            save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            mean_signal = np.mean(data[mask_cc_part], axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1]//2] = 1
            mask_noise = ~mask_noise

            save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = np.std(data[mask_noise, :])
            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs-np.array([1, 0, 0])) ** 2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 1, 0])) ** 2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 0, 1])) ** 2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0] / noise_std
                    logging.info("SNR for the b=0 image is : " + str(SNR))
                else:
                    SNR_directions.append(direction)
                    SNR = mean_signal[direction] / noise_std
                    logging.info("SNR for direction " + str(direction) + " " +
                                 str(gtab.bvecs[direction]) + " is : " +
                                 str(SNR))
                SNR_output.append(SNR)

            data = []
            data.append({
                        'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) +
                        ' ' + str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                        'directions': 'b0' + ' ' + str(SNR_directions[0]) +
                        ' ' + str(SNR_directions[1]) + ' ' +
                        str(SNR_directions[2])
                        })

            with open(os.path.join(out_dir, out_path), 'w') as myfile:
                json.dump(data, myfile)
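# Hypothetical invocation of the workflow method above, assuming it belongs to
# dipy's SNRinCCFlow (dipy.workflows.stats); the file names are placeholders.
from dipy.workflows.stats import SNRinCCFlow

snr_flow = SNRinCCFlow()
snr_flow.run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec', 'brain_mask.nii.gz',
             out_dir='snr_out')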
Example #48
0
File: segment.py Project: nipy/dipy
    def run(
        self,
        input_files,
        save_masked=False,
        median_radius=2,
        numpass=5,
        autocrop=False,
        vol_idx=None,
        dilate=None,
        out_dir="",
        out_mask="brain_mask.nii.gz",
        out_masked="dwi_masked.nii.gz",
    ):
        """Workflow wrapping the median_otsu segmentation method.

        Applies median_otsu segmentation on each file found by 'globing'
        ``input_files`` and saves the results in a directory specified by
        ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        save_masked : bool
            Save the masked volume as well. (default False)
        median_radius : int, optional
            Radius (in voxels) of the applied median filter (default 2)
        numpass : int, optional
            Number of pass of the median filter (default 5)
        autocrop : bool, optional
            If True, the masked input_volumes will also be cropped using the
            bounding box defined by the masked data. For example, if diffusion
            images are of 1x1x1 (mm^3) or higher resolution auto-cropping could
            reduce their size in memory and speed up some of the analysis.
            (default False)
        vol_idx : string, optional
            1D array representing indices of ``axis=3`` of a 4D `input_volume`
            'None' (the default) corresponds to ``(0,)`` (assumes first volume
            in 4D array)
        dilate : string, optional
            number of iterations for binary dilation (default 'None')
        out_dir : string, optional
            Output directory (default input file directory)
        out_mask : string, optional
            Name of the mask volume to be saved (default 'brain_mask.nii.gz')
        out_masked : string, optional
            Name of the masked volume to be saved (default 'dwi_masked.nii.gz')
        """
        io_it = self.get_io_iterator()

        for fpath, mask_out_path, masked_out_path in io_it:
            logging.info("Applying median_otsu segmentation on {0}".format(fpath))

            data, affine, img = load_nifti(fpath, return_img=True)

            masked_volume, mask_volume = median_otsu(data, median_radius, numpass, autocrop, vol_idx, dilate)

            save_nifti(mask_out_path, mask_volume.astype(np.float32), affine)

            logging.info("Mask saved as {0}".format(mask_out_path))

            if save_masked:
                save_nifti(masked_out_path, masked_volume, affine, img.header)

                logging.info("Masked volume saved as {0}".format(masked_out_path))

        return io_it
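# Hypothetical invocation of the workflow above, assuming it is exposed as
# MedianOtsuFlow in dipy.workflows.segment (as in recent dipy releases); the
# input path and options are placeholders.
from dipy.workflows.segment import MedianOtsuFlow

mo_flow = MedianOtsuFlow()
mo_flow.run('subject1_dwi.nii.gz', save_masked=True, median_radius=2,
            numpass=5, vol_idx=[0, 1], out_dir='out')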
Example #49
0
Load the raw diffusion data and the affine.
"""

data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)

"""
data.shape ``(81, 106, 76, 160)``

Remove most of the background using dipy's mask module.
"""

from dipy.segment.mask import median_otsu

maskdata, mask = median_otsu(data, 3, 2, True, range(0, 10))

"""
We instantiate our CSA model with spherical harmonic order of 4
"""

csamodel = CsaOdfModel(gtab, 4)

"""
`peaks_from_model` is used to calculate properties of the ODFs (Orientation
Distribution Functions) and returns, for example, the peaks and their indices,
or the GFA, which is similar to FA but for ODF-based models. This function
mainly needs a reconstruction model, the data and a sphere as input. The
sphere is an object that represents the discrete spherical grid on which the
ODF values will be evaluated.
"""
import sys
import numpy as np
import nibabel as nib
from dipy.reconst.dti import TensorModel, fractional_anisotropy
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)
from dipy.reconst.peaks import peaks_from_model
from dipy.tracking.eudx import EuDX
from dipy.data import get_sphere
from dipy.segment.mask import median_otsu
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs

fval = sys.argv[1] + '.bval'
fvec = sys.argv[1] + '.bvec'
bvals, bvecs = read_bvals_bvecs(fval, fvec)
fname = sys.argv[1] + '.nii.gz'
gtab = gradient_table(bvals, bvecs)
img = nib.load(fname)
data = img.get_data()
print(data.shape)
maskdata, mask = median_otsu(data, 3, 1, False,
                             vol_idx=range(1, 9), dilate=2)
response, ratio = auto_response(gtab, data, roi_radius=5, fa_thr=0.7)








Example #51
0
input_folder = '/Users/arokem/tmp/input/'

fdata = op.join(input_folder, "Ax DTI 30 DIRECTIONAL_aligned_trilin.nii.gz")
fbval = op.join(input_folder, "Ax DTI 30 DIRECTIONAL_aligned_trilin.bvals")
fbvec = op.join(input_folder, "Ax DTI 30 DIRECTIONAL_aligned_trilin.bvecs")

img = nib.load(fdata)
data = img.get_data()
gtab = dpg.gradient_table(fbval, fbvec)

mean_b0 = np.mean(data[..., gtab.b0s_mask], -1)

print("Calculating brain-mask")
if not op.exists('./brain_mask.nii.gz'):
    _, brain_mask = median_otsu(mean_b0, median_radius=4, numpass=4)
    nib.save(nib.Nifti1Image(brain_mask.astype(int),
                             img.affine), './brain_mask.nii.gz')
else:
    brain_mask = nib.load('./brain_mask.nii.gz').get_data().astype(bool)

print("Calculating DTI...")
if not op.exists('./dti_FA.nii.gz'):
    dti_params = dti.fit_dti(fdata, fbval, fbvec,
                             out_dir='.', mask=brain_mask)
else:
    dti_params = {'FA': './dti_FA.nii.gz',
                  'MD': './dti_MD.nii.gz',
                  'RD': './dti_RD.nii.gz',
                  'AD': './dti_AD.nii.gz',
                  'params': './dti_params.nii.gz'}
Example #52
0
"""
``img`` contains a nibabel Nifti1Image object. Data is the actual brain data as
a numpy ndarray.

Segment the brain using dipy's mask module.

``median_otsu`` returns the segmented brain data and a binary mask of the brain.
It is possible to fine tune the parameters of ``median_otsu`` (``median_radius``
and ``num_pass``) if extraction yields incorrect results but the default
parameters work well on most volumes. For this example, default parameters (4,
4) will be used.
"""

from dipy.segment.mask import median_otsu
b0_mask, mask = median_otsu(data, 4, 4)

"""
Saving the segmentation results is very easy with nibabel. We need the b0_mask
and the binary mask volumes. The affine matrix, which transforms the image
coordinates to world coordinates, is also needed. Here, we choose to save both
images as float32.
"""

mask_img = nib.Nifti1Image(mask.astype(np.float32), img.get_affine())
b0_img = nib.Nifti1Image(b0_mask.astype(np.float32), img.get_affine())

fname = 'ge_3t'
nib.save(mask_img, fname + '_binary_mask.nii.gz')
nib.save(b0_img, fname + '_mask.nii.gz')
Example #53
0
The selected b-values and gradient directions are then converted to Dipy's
GradientTable format.
"""

from dipy.core.gradients import gradient_table

gtab = gradient_table(selected_bvals, selected_bvecs)

"""
Before fitting the data, some pre-processing is done. First, we mask and crop
the data to avoid calculating tensors on the background of the image.
"""

from dipy.segment.mask import median_otsu

maskdata, mask = median_otsu(selected_data, 3, 1, True,
                             vol_idx=range(10, 50), dilate=2)

"""
Now that we have prepared the datasets, we can go forward with the voxel
reconstruction. This can be done by first instantiating the
DiffusionKurtosisModel in the following way.
"""

dkimodel = dki.DiffusionKurtosisModel(gtab)

"""
Fitting the data is very simple. We just need to call the fit method of the
DiffusionKurtosisModel in the following way:
"""

dkifit = dkimodel.fit(maskdata)
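"""
As a brief follow-up (not part of the original snippet), the standard kurtosis
statistics can then be read off the fit object; the kurtosis bounds of 0 and 3
below follow dipy's DKI example.
"""

FA = dkifit.fa
MD = dkifit.md
AD = dkifit.ad
RD = dkifit.rd

MK = dkifit.mk(0, 3)
AK = dkifit.ak(0, 3)
RK = dkifit.rk(0, 3)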
Example #54
0
def otsu_median(data, size, n_iter):
    from dipy.segment.mask import median_otsu
    data, mask = median_otsu(data, size, n_iter)
    return mask
Example #55
0
"""
The second one will be the same b0 we used for the 2D registration tutorial.
"""

from dipy.data.fetcher import fetch_syn_data, read_syn_data
fetch_syn_data()
nib_syn_t1, nib_syn_b0 = read_syn_data()
syn_b0 = np.array(nib_syn_b0.get_data())

"""
We first remove the skull from the b0 volumes.
"""

from dipy.segment.mask import median_otsu
stanford_b0_masked, stanford_b0_mask = median_otsu(stanford_b0, 4, 4)
syn_b0_masked, syn_b0_mask = median_otsu(syn_b0, 4, 4)

static = stanford_b0_masked
static_affine = nib_stanford.affine
moving = syn_b0_masked
moving_affine = nib_syn_b0.affine

"""
Suppose we have already done a linear registration to roughly align the two
images
"""

pre_align = np.array([[1.02783543e+00, -4.83019053e-02, -6.07735639e-02, -2.57654118e+00],
                      [4.34051706e-03, 9.41918267e-01, -2.66525861e-01, 3.23579799e+01],
                      [5.34288908e-02, 2.90262026e-01, 9.80820307e-01, -1.46216651e+01],
                      [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
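"""
A hedged sketch of how such a pre-alignment is typically fed into dipy's
diffeomorphic (SyN) registration, assuming the ``static``, ``moving`` and
affine objects defined above; the metric and iteration schedule follow dipy's
3D SyN example and are only illustrative.
"""

from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import CCMetric

metric = CCMetric(3)
level_iters = [10, 10, 5]
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                       pre_align)
warped_moving = mapping.transform(moving)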
Example #56
0
"""

from __future__ import division, print_function
import nibabel as nib
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel

fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
affine = img.get_affine()

print("Computing brain mask...")
b0_mask, mask = median_otsu(data)

print("Computing tensors...")
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)

"""Next, we set our red-green-blue thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.

Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.

The following lines perform these two operations and then save the computed mask.
"""
Example #57
0
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)

img = nib.load(fraw)
data = img.get_data()
affine = img.get_affine()

label_img = nib.load(flabels)
labels = label_img.get_data()
lap = through_label_sl.label_position(labels, labelValue=1)
dataslice = data[40:80, 20:80, lap[2][2] // 2]
# print(lap[2][2] // 2)

# get_csd_gfa(f_name, data, gtab, dataslice)

maskdata, mask = median_otsu(data, 2, 1, False,
                             vol_idx=range(10, 50), dilate=2)  # do not remove the background

""" get fa and tensor evecs and ODF"""
from dipy.reconst.dti import TensorModel, mean_diffusivity
tenmodel = TensorModel(gtab)
tenfit = tenmodel.fit(data, mask)

sphere = get_sphere('symmetric724')

FA = fractional_anisotropy(tenfit.evals)
FA[np.isnan(FA)] = 0

np.save(os.getcwd() + '/zhibiao/' + f_name + '_FA.npy', FA)
fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
print(FA.shape)
nib.save(fa_img, os.getcwd() + '/zhibiao/' + f_name + '_FA.nii.gz')
Example #58
0
############## 

# Loading Data and HDR
data = bold_data(subject, 1)
vol_shape, n_trs = data.shape[:-1], data.shape[-1]

TR = 2.5
tr_times = np.arange(0, 30, TR)
all_tr_times = np.arange(n_trs) * TR
hrf_at_trs = hrf(tr_times)

X = np.ones((n_trs, 14))
X_np = np.ones((n_trs, 14))

mean_data = np.mean(data, axis=-1)
masked, mask = median_otsu(mean_data, 2, 1)

Y = data[mask].T
col = 0
pred = 0

# Adding onsets to design matrix
for i in list_cond_file(subject, run):
    neural_prediction = events2neural_fixed(i, TR, n_trs)
    convolved = convolve(neural_prediction, hrf_at_trs)
    X[:, col] = convolved
    X_np[:, pred] = neural_prediction
    col = col + 1
    pred = pred + 1

plt.plot(all_tr_times, X_np[:, :8])
Example #59
0
Load the raw diffusion data and the affine.
"""

data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)

"""
data.shape ``(81, 106, 76, 160)``

Remove most of the background using dipy's mask module.
"""

from dipy.segment.mask import median_otsu

maskdata, mask = median_otsu(data, 3, 1, True,
                             vol_idx=range(10, 50), dilate=2)

"""
We instantiate our CSA model with spherical harmonic order of 4
"""

csamodel = CsaOdfModel(gtab, 4)

"""
`peaks_from_model` is used to calculate properties of the ODFs (Orientation
Distribution Functions) and returns, for example, the peaks and their indices,
or the GFA, which is similar to FA but for ODF-based models. This function
mainly needs a reconstruction model, the data and a sphere as input. The
sphere is an object that represents the discrete spherical grid on which the
ODF values will be evaluated.
"""
Example #60
0
def median_otsu_flow(input_files, out_dir='', save_masked=False,
                     median_radius=4, numpass=4, autocrop=False,
                     vol_idx=None, dilate=None):
    """ Workflow wrapping the median_otsu segmentation method.

    It applies median_otsu segmentation on each file found by 'globing'
    ``input_files`` and saves the results in a directory specified by
    ``out_dir``.

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to process
        multiple inputs at once.
    out_dir : string, optional
        Output directory (default input file directory)
    save_masked : bool
        Save mask
    median_radius : int, optional
        Radius (in voxels) of the applied median filter (default 4)
    numpass : int, optional
        Number of pass of the median filter (default 4)
    autocrop : bool, optional
        If True, the masked input_volumes will also be cropped using the
        bounding box defined by the masked data. Should be on if DWI is
        upsampled to 1x1x1 resolution. (default False)
    vol_idx : string, optional
        1D array representing indices of ``axis=3`` of a 4D `input_volume`
        'None' (the default) corresponds to ``(0,)`` (assumes first volume in
        4D array)
    dilate : string, optional
        number of iterations for binary dilation (default 'None')

    Outputs
    -------
    mask : Nifti File
           Binary volume representing the computed mask.
    masked : Nifti File
            Volume representing the masked input. This file is only saved
            if save_masked is True.
    """
    for fpath in glob(input_files):
        print('')
        print('Applying median_otsu segmentation on {0}'.format(fpath))
        img = nib.load(fpath)
        volume = img.get_data()

        masked, mask = median_otsu(volume, median_radius,
                                   numpass, autocrop,
                                   vol_idx, dilate)

        fname, ext = splitext(basename(fpath))
        if fname.endswith('.nii'):
            fname, _ = splitext(fname)
            ext = '.nii.gz'

        mask_fname = fname + '_mask' + ext

        out_dir_path = choose_create_out_dir(out_dir, fpath)

        mask_img = nib.Nifti1Image(mask.astype(np.float32), img.get_affine())
        mask_out_path = join(out_dir_path, mask_fname)
        mask_img.to_filename(mask_out_path)
        print('Mask saved as {0}'.format(mask_out_path))

        if save_masked:
            masked_fname = fname + '_bet' + ext
            masked_img = nib.Nifti1Image(
                masked, img.get_affine(), img.get_header())
            masked_out_path = join(out_dir_path, masked_fname)
            masked_img.to_filename(masked_out_path)
            print('Masked volume saved as {0}'.format(masked_out_path))
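# Hypothetical invocation of the workflow defined above; the glob pattern and
# output directory are placeholders.
median_otsu_flow('subjects/*_dwi.nii.gz', out_dir='masks', save_masked=True,
                 median_radius=4, numpass=4, dilate=2)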