Example no. 1
def get_data_unregistered(petmr_path, trio_path, scans_list, input_scanner):
    # This function loads the training scans specified in the list "scans_list".
    # The loaded scans are not registered between the scanners (hence "unregistered").

    train_val_test_inp = []
    train_val_test_out = []

    if input_scanner == "PETMR":
        input_path = petmr_path
        output_path = trio_path
    else:
        input_path = trio_path
        output_path = petmr_path

    for subj_scan in scans_list:

        input_scan_image = nib.load(
            str(input_path) + "/Subj" + str(subj_scan[0]) + "Scan" +
            str(subj_scan[1]) + "/Brain_Matched.nii.gz")
        input_scan_data = input_scan_image.get_data()

        # Load the corresponding output scan (unregistered)
        output_scan_image1 = nib.load(
            str(output_path) + "/Subj" + str(subj_scan[0]) + "Scan1" +
            "/Brain_Matched.nii.gz")
        output_scan_data1 = output_scan_image1.get_data()

        input_bvals_scan, input_bvecs_scan = read_bvals_bvecs(str(input_path) + "/Subj" + str(subj_scan[0]) + "Scan" + str(subj_scan[1]) + "/NODDI.bval",\
                                                      str(input_path) + "/Subj" + str(subj_scan[0]) + "Scan" + str(subj_scan[1]) + "/NODDI.bvec")

        output_bvals_scan1, output_bvecs_scan1 = read_bvals_bvecs(str(output_path) + "/Subj" + str(subj_scan[0]) + "Scan1/NODDI.bval",\
                                                      str(output_path) + "/Subj" + str(subj_scan[0]) + "Scan1/NODDI.bvec")

        # Set a b=0 threshold (needed for the TRIO dataset)
        input_gtab_scan = gradient_table(input_bvals_scan,
                                         input_bvecs_scan,
                                         b0_threshold=5)
        input_s0s_scan = input_scan_data[:, :, :, input_gtab_scan.b0s_mask]

        output_gtab_scan1 = gradient_table(output_bvals_scan1,
                                           output_bvecs_scan1,
                                           b0_threshold=5)
        output_s0s_scan1 = output_scan_data1[:, :, :,
                                             output_gtab_scan1.b0s_mask]

        print("Uploading unregistered Subject %s Scan %s" %
              (str(subj_scan[0]), str(subj_scan[1])))
        # Append the data to the lists containing the training inputs and outputs
        # upload the first volume of each slice only
        train_val_test_inp.append(input_s0s_scan[:, :, :, [0]])
        train_val_test_out.append(output_s0s_scan1[:, :, :, [0]])

    return (train_val_test_inp, train_val_test_out)
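A hypothetical call to the function above (the directory layout, the (subject, scan) pairs and the nibabel/dipy imports are assumptions based on the snippet, not part of the original project):

# Usage sketch only: paths and the scan list are invented.
import nibabel as nib
from dipy.io import read_bvals_bvecs
from dipy.core.gradients import gradient_table

scans_list = [(1, 2), (3, 1)]  # (subject id, scan id) pairs, read as subj_scan[0]/subj_scan[1]
inputs, outputs = get_data_unregistered("/data/PETMR", "/data/TRIO", scans_list, "PETMR")
print(len(inputs), inputs[0].shape)  # one (X, Y, Z, 1) b=0 volume per requested scan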
Example no. 2
def single_shell_extraction(dir_src, dir_out, verbose=False):

    fbval = pjoin(dir_src, 'bvals')
    fbvec = pjoin(dir_src, 'bvecs')
    fmask = pjoin(dir_src, 'nodif_brain_mask.nii.gz')
    fdwi = pjoin(dir_src, 'data.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    data, affine = load_nifti(fdwi, verbose)

    if par_b_shell == 1000:
        sind = (bvals < 10) | ((bvals < 1100) & (bvals > 900))
    elif par_b_shell == 2000:
        sind = (bvals < 10) | ((bvals < 2100) & (bvals > 1900))
    elif par_b_shell == 3000:
        sind = (bvals < 10) | ((bvals < 3100) & (bvals > 2900))

    shell_data = data[..., sind]
    shell_gtab = gradient_table(bvals[sind], bvecs[sind, :],
                                  b0_threshold=par_b0_threshold)

    fname = 'data_' + par_b_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, fname), shell_data, affine)
    np.savetxt(pjoin(dir_out, 'bvals_' + par_b_tag), shell_gtab.bvals)
    np.savetxt(pjoin(dir_out, 'bvecs_' + par_b_tag), shell_gtab.bvecs.T)
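single_shell_extraction depends on module-level settings (par_b_shell, par_b_tag, par_b0_threshold) and on pjoin, np, read_bvals_bvecs, gradient_table, load_nifti and save_nifti being imported elsewhere in its source file (load_nifti here takes a verbose flag, so it looks like a project-local wrapper rather than dipy's own). A hedged sketch of how those globals might be set before calling it:

# Assumed module-level configuration; names come from the snippet, values are illustrative.
par_b_shell = 1000       # shell to extract; values other than 1000/2000/3000 leave sind undefined
par_b_tag = 'b1k'        # tag appended to the output file names
par_b0_threshold = 10    # passed to gradient_table

single_shell_extraction('/data/subj01', '/data/subj01/derived', verbose=False)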
Example no. 3
    def load_bval_bvec_dti(self, fbval, fbvec, dti_file, dti_file_out):
        """
        Takes bval and bvec files and produces a structure in dipy format

        **Positional Arguments:**
        """

        # Load Data
        img = nb.load(dti_file)
        data = img.get_data()

        bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

        # Get rid of spurious scans
        idx = np.where((bvecs[:, 0] == 100) & (bvecs[:, 1] == 100)
                       & (bvecs[:, 2] == 100))
        bvecs = np.delete(bvecs, idx, axis=0)
        bvals = np.delete(bvals, idx, axis=0)
        data = np.delete(data, idx, axis=3)

        # Save corrected DTI volume
        dti_new = nb.Nifti1Image(data,
                                 affine=img.get_affine(),
                                 header=img.get_header())
        dti_new.update_header()
        nb.save(dti_new, dti_file_out)

        gtab = gradient_table(bvals, bvecs, atol=0.01)

        print(gtab.info)
        return gtab
Example no. 4
def load_diff_files(subj_folder, file_names):
    from dipy.io import read_bvals_bvecs

    # Load diff nii file:
    diff_file = nib.load(os.path.join(subj_folder,file_names['data']))
    diff_img = diff_file.get_fdata()  # i1
    info_nii = diff_file.header
    data = np.asarray(diff_img, dtype='float64')
    data[data < 0] = 0
    affine = diff_file.affine

    # Load bval/bvec:
    bval, bvec = read_bvals_bvecs(os.path.join(subj_folder,file_names['bval']), os.path.join(subj_folder,file_names['bvec']))
    bvec = np.reshape(bvec, [len(bval), -1]);

    # Round bval to closest 50 and divide by 1000:
    bval2 = 2 * np.asarray(bval)
    bval2 = bval2.round(-2)
    bval = bval2 / 2000

    # Remove bval<1000 from calc:
    blow_locs = np.intersect1d(np.where(bval > 0)[0], np.where(bval < 1)[0])
    bval = np.delete(bval, blow_locs)
    bvec = np.delete(bvec, blow_locs, 0)
    data = np.delete(data, blow_locs, 3)

    # Load mask:
    mask = nib.load(os.path.join(subj_folder,file_names['mask'])).get_fdata()

    return data, affine, info_nii, bval, bvec, mask
Example no. 5
def gtabs(path_dicts):
    gtabs = []
    for patient, path_dict in path_dicts.items():
        bvals, bvecs = read_bvals_bvecs(path_dict["bval"], path_dict["bvec"])
        gtab = gradient_table(bvals, bvecs)
        gtabs.append(gtab)
    return gtabs
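gtabs expects a dict mapping each patient to a dict with 'bval' and 'bvec' paths; a hypothetical call (file names invented):

# Illustrative input layout for gtabs().
path_dicts = {
    "patient01": {"bval": "/data/patient01/dwi.bval", "bvec": "/data/patient01/dwi.bvec"},
    "patient02": {"bval": "/data/patient02/dwi.bval", "bvec": "/data/patient02/dwi.bvec"},
}
tables = gtabs(path_dicts)
print(tables[0].bvals.shape, tables[0].b0s_mask.sum())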
Example no. 6
File: utils.py Project: gkiar/ndmg
def load_bval_bvec_dwi(fbval, fbvec, dwi_file, dwi_file_out):
    """
    Takes bval and bvec files and produces a structure in dipy format

    **Positional Arguments:**
    """

    # Load Data
    img = nb.load(dwi_file)
    data = img.get_data()

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

    # Get rid of spurious scans
    idx = np.where((bvecs[:, 0] == 100) & (bvecs[:, 1] == 100) &
                   (bvecs[:, 2] == 100))
    bvecs = np.delete(bvecs, idx, axis=0)
    bvals = np.delete(bvals, idx, axis=0)
    data = np.delete(data, idx, axis=3)

    # Save corrected DTI volume
    dwi_new = nb.Nifti1Image(data, affine=img.get_affine(),
                             header=img.get_header())
    dwi_new.update_header()
    nb.save(dwi_new, dwi_file_out)

    gtab = gradient_table(bvals, bvecs, atol=0.01)

    print(gtab.info)
    return gtab
Example no. 7
def shell_extraction(src_dwi,
                     src_bvec,
                     src_bval,
                     out_b0,
                     delta=100,
                     verbose=False):

    shell = [0]

    bvals, bvecs = read_bvals_bvecs(src_bval, src_bvec)

    img = nib.load(src_dwi)
    data = img.get_data()
    affine = img.get_affine()
    header = img.header

    sind = np.zeros((bvals.size), dtype=bool)
    for b in shell:
        tind = (bvals < b + delta) & (bvals > b - delta)
        sind = sind | tind

    shell_data = data[..., sind].mean(axis=3)

    shell_img = nib.Nifti1Image(shell_data, affine, header)
    shell_img.update_header()
    nib.save(shell_img, out_b0)

    return sind.sum()
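A hedged usage sketch for shell_extraction above: it averages the volumes whose b-value falls within delta of the listed shells (here only b=0), writes the mean image to out_b0 and returns how many volumes were averaged (paths are placeholders; nib, np and read_bvals_bvecs must already be in scope):

n_b0 = shell_extraction('/data/subj01/dwi.nii.gz',
                        '/data/subj01/dwi.bvec',
                        '/data/subj01/dwi.bval',
                        '/data/subj01/mean_b0.nii.gz',
                        delta=50)
print('averaged %d b=0 volumes' % n_b0)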
Example no. 8
def main(dti_file, bvals_file, bvecs_file, b_ss=1000):

    # Load the image data
    nii = nib.load(dti_file)
    img_data = nii.get_data()

    # Read in the b-shell values and gradient directions
    bvals, bvecs = read_bvals_bvecs(bvals_file, bvecs_file)

    # Boolean array to identify entries with either b = 0 or b = b_ss
    bvals_eq_0_b_ss = (bvals == 0) | (bvals == b_ss)

    # Extract info needed to run single-compartment dti model
    dti_bvals = bvals[bvals_eq_0_b_ss].copy()
    dti_bvecs = bvecs[bvals_eq_0_b_ss].copy()
    dti_img_data = img_data[:, :, :, bvals_eq_0_b_ss].copy()

    # Compute gradient table
    grad_table = gradient_table(dti_bvals, dti_bvecs)

    # Extract brain so we don't fit the background
    brain_img_data, brain_mask = median_otsu(dti_img_data, 2, 1)

    # Run the dti model and fit it to the brain extracted image data
    ten_model = dti.TensorModel(grad_table)
    ten_fit = ten_model.fit(brain_img_data)
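The snippet above ends right after fitting the tensor model. A minimal continuation one might add inside main (assuming nibabel is imported as nib, and reusing nii and ten_fit from above) would be to save an FA map:

# Sketch only: derive and save fractional anisotropy from the fit computed above.
fa = ten_fit.fa                                   # 3D FA map from the TensorFit object
nib.save(nib.Nifti1Image(fa.astype('float32'), nii.affine), 'dti_FA.nii.gz')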
Example no. 9
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.dwi, args.bvals, args.bvecs])
    assert_outputs_exist(
        parser, args, [args.output_dwi, args.output_bvals, args.output_bvecs])

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    # Find the volume indices that correspond to the shells to extract.
    tol = args.tolerance

    img = nib.load(args.dwi)

    outputs = extract_dwi_shell(img, bvals, bvecs, args.bvals_to_extract, tol,
                                args.block_size)
    indices, shell_data, new_bvals, new_bvecs = outputs

    logging.info("Selected indices: {}".format(indices))

    np.savetxt(args.output_bvals, new_bvals, '%d')
    np.savetxt(args.output_bvecs, new_bvecs.T, '%0.15f')
    nib.save(nib.Nifti1Image(shell_data, img.affine, img.header),
             args.output_dwi)
Example no. 10
def main(args=None):

    if not args:
        args = sys.argv[1:]

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(args)
    fname_in = arguments['-i']
    if '-o' in arguments:
        fname_out = arguments['-o']
    else:
        fname_out = ''
    verbose = int(arguments['-v'])

    # get bvecs in proper orientation
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(None, fname_in)

    # # Transpose bvecs
    # printv('Transpose bvecs...', verbose)
    # # from numpy import transpose
    # bvecs = bvecs.transpose()

    # Write new file
    if fname_out == '':
        path_in, file_in, ext_in = extract_fname(fname_in)
        fname_out = path_in+file_in+ext_in
    fid = open(fname_out, 'w')
    for iLine in range(bvecs.shape[0]):
        fid.write(' '.join(str(i) for i in bvecs[iLine, :])+'\n')
    fid.close()

    # display message
    printv('Created file:\n--> '+fname_out+'\n', verbose, 'info')
Example no. 11
def prepare_q4half_257vol(fimg, fbtext, fbval, fbvec, fmask, flipy):

    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]

    # Read b-values from bval file
    bvals, temp = read_bvals_bvecs(fbval, fbvec)
    bvals = bvals[0:257]

    # Read Wedeen file grad514.txt (fbtxt)
    bmat = np.loadtxt(fbtext) 
    bvecs = bmat[0:257, 1:]
    # Normalize diffusion directions
    bvecs = bvecs / np.sqrt(np.sum(bvecs ** 2, axis=1))[:, None]
    bvecs[np.isnan(bvecs)] = 0

    if flipy:
        bvecs[:,1] = -bvecs[:,1]      

    # Delete empty DSI volumes
    data = data[:,:,:, 0:257]

    # Read mask
    img_mask = nib.load(fmask)
    mask = img_mask.get_data()

    return data, affine, zooms, bvals, bvecs, mask
Example no. 12
def execution( self, context):
    bvals, bvecs = read_bvals_bvecs(self.bvals.fullPath(), self.bvecs.fullPath())
    if self.round_bvals:
        context.write("Rouding bvalues to : useful for shell based models")
        bvals = np.round(bvals,-2)
    try:
        minf = self.diffusion_data.minf()
        t = minf['storage_to_memory']
    except KeyError:
        context.write("No storage_to_memory field in the  minf file associated to the volume, using the one of the header of the volume")
        dwi = aims.read(self.diffusion_data.fullPath())
        header = dwi.header()
        t = header['storage_to_memory']
    finally:
        try :
            t1 = aims.AffineTransformation3d(t).toMatrix()
            aff = np.diag(t1)[:-1]
            affine = np.diag(aff)
        except:
            context.write("Warning!: there is no storage to memory matrix, I assume bvecs have an RAS (Nifti convention) orientation")
            affine = -1.0*np.eye(3)

    context.write("The following transformation is going to be applied:", affine)
    bvecs = np.dot(bvecs, np.transpose(affine))
    context.write("Transforming bvecs coordinate from storage to Aims referential")


    gtab = gradient_table(bvals, bvecs,b0_threshold=self.b0_threshold)
    dump(gtab, self.gradient_table.fullPath(), compress=9)

    #Handling metadata
    self.gradient_table.setMinf('rounded_bvals', self.round_bvals)
    self.gradient_table.setMinf('bvalues_uuid', self.bvals.uuid())
    self.gradient_table.setMinf('bvectors_uuid',self.bvecs.uuid())
Example no. 13
def main(args=None):

    if not args:
        args = sys.argv[1:]

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(args)
    fname_in = arguments['-bvec']
    if '-o' in arguments:
        fname_out = arguments['-o']
    else:
        fname_out = ''
    verbose = int(arguments['-v'])

    # get bvecs in proper orientation
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(None, fname_in)

    # # Transpose bvecs
    # printv('Transpose bvecs...', verbose)
    # # from numpy import transpose
    # bvecs = bvecs.transpose()

    # Write new file
    if fname_out == '':
        path_in, file_in, ext_in = extract_fname(fname_in)
        fname_out = path_in+file_in+ext_in
    fid = open(fname_out, 'w')
    for iLine in range(bvecs.shape[0]):
        fid.write(' '.join(str(i) for i in bvecs[iLine, :])+'\n')
    fid.close()

    # display message
    printv('Created file:\n--> '+fname_out+'\n', verbose, 'info')
Example no. 14
 def bvecs(self):
     if self._bvecs is None:
         bvals, bvecs = read_bvals_bvecs(self.filename_bval,
                                         self.filename_bvecs)
         self._bvals = bvals
         self._bvecs = bvecs
     return self._bvecs
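The bvecs accessor above is an excerpt from a class; presumably it is exposed as a read-only property with matching bval/bvec filename attributes. A minimal sketch of the assumed surrounding class (every name except read_bvals_bvecs is invented):

from dipy.io import read_bvals_bvecs

class GradientFiles:  # hypothetical container mirroring the excerpt
    def __init__(self, filename_bval, filename_bvecs):
        self.filename_bval = filename_bval
        self.filename_bvecs = filename_bvecs
        self._bvals = None
        self._bvecs = None

    @property
    def bvecs(self):
        # Lazy loading: read both files once and cache both arrays.
        if self._bvecs is None:
            bvals, bvecs = read_bvals_bvecs(self.filename_bval,
                                            self.filename_bvecs)
            self._bvals = bvals
            self._bvecs = bvecs
        return self._bvecs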
Example no. 15
def main(args=None):

    parser = get_parser()
    if args:
        arguments = parser.parse_args(args)
    else:
        arguments = parser.parse_args(
            args=None if sys.argv[1:] else ['--help'])

    fname_in = arguments.bvec
    fname_out = arguments.o
    verbose = int(arguments.v)
    init_sct(log_level=verbose, update=True)  # Update log level

    # get bvecs in proper orientation
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(None, fname_in)

    # # Transpose bvecs
    # printv('Transpose bvecs...', verbose)
    # # from numpy import transpose
    # bvecs = bvecs.transpose()

    # Write new file
    if fname_out == '':
        path_in, file_in, ext_in = extract_fname(fname_in)
        fname_out = path_in + file_in + ext_in
    fid = open(fname_out, 'w')
    for iLine in range(bvecs.shape[0]):
        fid.write(' '.join(str(i) for i in bvecs[iLine, :]) + '\n')
    fid.close()

    # display message
    printv('Created file:\n--> ' + fname_out + '\n', verbose, 'info')
Example no. 16
def load_dwi(nifti_path,
             bval_path,
             bvec_path,
             mask_path=None,
             b0_threshold=250):
    """Load the data needed to process a diffusion-weighted image.

    Parameters
    ----------
    nifti_path : string
        Path to the nifti DWI
    bval_path : string
        Path to the .bval file
    bvec_path : string
        Path to the .bvec file
    mask_path : string, optional
        Path to the nifti mask, if one exists
    b0_threshold
        Threshold below which a b-value is considered zero

    Returns
    -------
    img : DiffusionWeightedImage
        The DWI data with a mask, if applicable.
    """

    img = nib.load(nifti_path)
    bvals, bvecs = read_bvals_bvecs(bval_path, bvec_path)
    gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)

    if mask_path is not None:
        mask = nib.load(mask_path)
        return MaskedDiffusionWeightedImage(img, gtab, mask.get_data())
    else:
        return DiffusionWeightedImage(img, gtab)
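A hypothetical call to load_dwi (DiffusionWeightedImage and MaskedDiffusionWeightedImage are project classes not shown in this snippet; paths are placeholders):

dwi_img = load_dwi('/data/subj01/dwi.nii.gz',
                   '/data/subj01/dwi.bval',
                   '/data/subj01/dwi.bvec',
                   mask_path='/data/subj01/brain_mask.nii.gz',
                   b0_threshold=250)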
Example no. 17
def compute_tensor_model(dir_src, dir_out, verbose=False):

    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi =  pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    ten_model = TensorModel(gtab)
    ten_fit = ten_model.fit(data, mask)

    FA = ten_fit.fa
    MD = ten_fit.md
    EV = ten_fit.evecs.astype(np.float32)

    fa_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_FA.nii.gz'
    save_nifti(pjoin(dir_out, fa_name), FA, affine)
    md_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_MD.nii.gz'
    save_nifti(pjoin(dir_out, md_name), MD, affine)
    ev_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_EV.nii.gz'
    save_nifti(pjoin(dir_out, ev_name), EV, affine)
Example no. 18
def load_fibercup_tractography_derivatives_2D():
    niftipath = 'data\\fibercup\\R3.nii.gz'
    bvalspath = 'data\\fibercup\\R3.bvals'
    bvecspath = 'data\\fibercup\\R3.bvecs'

    print("Fetching data")
    data, affine = load_nifti(niftipath)

    bvals, bvecs = read_bvals_bvecs(bvalspath, bvecspath)

    gtab = gradient_table(bvals, bvecs)

    print("Fitting tensor model...")
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data)
    quadratic_form = tenfit.quadratic_form

    evals, evecs = np.linalg.eig(quadratic_form)

    max_directions = np.argmax(evals, axis=2)
    directions = np.array([[[
        evals[i, j, k][max_directions[i, j, k]] *
        evecs[i, j, k][max_directions[i, j, k]]
        for k in range(len(max_directions[0, 0]))
    ] for j in range(len(max_directions[0]))]
                           for i in range(len(max_directions))])

    return np.real(directions[:, :, 1, :2])
Example no. 19
def to_estimate_dti(file_in, file_inMask, outPath, fbval, fbvec):
    print(d.separador + 'building DTI Model...')

    ref_name = utils.to_extract_filename(file_in)

    if (
            not (os.path.exists(outPath + ref_name + d.id_evecs + d.extension))
    ) | (not (os.path.exists(outPath + ref_name + d.id_evals + d.extension))):
        try:
            os.remove(outPath + ref_name + d.id_evecs + d.extension)
            os.remove(outPath + ref_name + d.id_evals + d.extension)
        except:
            print("Unexpected error:", sys.exc_info()[0])

        img = nib.load(file_in)
        data = img.get_data()
        mask = nib.load(file_inMask)
        mask = mask.get_data()

        bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
        gtab = gradient_table(bvals, bvecs)

        tensor_model = dti.TensorModel(gtab)
        tensor_fitted = tensor_model.fit(data, mask)

        nib.save(
            nib.Nifti1Image(tensor_fitted.evecs.astype(np.float32),
                            img.affine),
            outPath + ref_name + d.id_evecs + d.extension)
        nib.save(
            nib.Nifti1Image(tensor_fitted.evals.astype(np.float32),
                            img.affine),
            outPath + ref_name + d.id_evals + d.extension)

    return outPath + ref_name + d.id_evecs + d.extension, outPath + ref_name + d.id_evals + d.extension
Example no. 20
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_bval)
    assert_outputs_exist(parser, args, args.out_bval)

    bvals, bvecs = read_bvals_bvecs(args.in_bval, None)
    # Find the volume indices that correspond to the shells to extract.
    tol = args.tolerance

    sorted_centroids, sorted_indices = identify_shells(bvals, tol, sort=True)

    bvals_to_extract = np.sort(args.bvals_to_extract)
    n_shells = np.shape(bvals_to_extract)[0]

    logging.info("number of shells: {}".format(n_shells))
    logging.info("bvals to extract: {}".format(bvals_to_extract))
    logging.info("estimated centroids: {}".format(sorted_centroids))
    logging.info("original bvals: {}".format(bvals))
    logging.info("selected indices: {}".format(sorted_indices))

    new_bvals = bvals
    for i in range(n_shells):
        if np.abs(sorted_centroids[i] - bvals_to_extract[i]) <= tol:
            new_bvals[np.where(sorted_indices == i)] = bvals_to_extract[i]
        else:
            parser.error("No bvals to resample: tolerance is too low.")

    logging.info("new bvals: {}".format(new_bvals))
    new_bvals.shape = (1, len(new_bvals))
    np.savetxt(args.out_bval, new_bvals, '%d')
Example no. 21
    def test_bvalMap(self):
        inPath= pjoin(FILEDIR, 'connectom_prisma/connectom/A/')
        inPrefix= pjoin(inPath, 'dwi_A_connectom_st_b1200')

        lowResImgPath= inPrefix+'.nii.gz'
        bvalPath= inPrefix+'.bval'
        lowResMaskPath= inPrefix+'_mask.nii.gz'

        # load signal attributes for pre-processing ----------------------------------------------------------------
        imgPath = nrrd2nifti(lowResImgPath)
        dwi = load(imgPath)

        maskPath = nrrd2nifti(lowResMaskPath)
        mask = load(maskPath)

        bvals, _ = read_bvals_bvecs(bvalPath, None)

        bNew= 1000.

        print('B value mapping ', imgPath)
        dwiNew, bvalsNew= remapBval(dwi.get_fdata(), mask.get_fdata(), bvals, bNew)

        outPrefix = imgPath.split('.nii')[0] + '_bmapped'
        save_nifti(outPrefix + '.nii.gz', dwiNew, dwi.affine, dwi.header)
        copyfile(inPrefix + '.bvec', outPrefix + '.bvec')
        write_bvals(outPrefix + '.bval', bvals)
Example no. 22
    def _get_from_file_mapping(self, path, file_mapping: dict, b0_threshold: float = 10.0):

        path_mapping = {key: os.path.join(path, file_mapping[key]) for key in file_mapping}
        bvals, bvecs = read_bvals_bvecs(path_mapping['bvals'],
                                        path_mapping['bvecs'])

        # img, t1, gradient table, affine and dwi
        img = nb.load(path_mapping['img'])
        t1 = nb.load(path_mapping['t1']).get_data()

        dwi = img.get_data().astype("float32")

        aff = img.affine

        # binary mask
        if 'mask' in path_mapping:
            binary_mask = nb.load(path_mapping['mask']).get_data()
        else:
            _, binary_mask = median_otsu(dwi[..., 0], 2, 1)

        # calculating b0
        b0 = dwi[..., bvals < b0_threshold].mean(axis=-1)

        # Do not generate fa yet
        fa = None
        gtab = gradient_table(bvals, bvecs)
        data_container = DataContainer(bvals, bvecs, gtab, t1, dwi, aff, binary_mask, b0, fa)
        return self._preprocess(data_container)
Example no. 23
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    fname_in = arguments.bvec
    fname_out = arguments.o

    # get bvecs in proper orientation
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(None, fname_in)

    # # Transpose bvecs
    # printv('Transpose bvecs...', verbose)
    # # from numpy import transpose
    # bvecs = bvecs.transpose()

    # Write new file
    if fname_out == '':
        path_in, file_in, ext_in = extract_fname(fname_in)
        fname_out = path_in + file_in + ext_in
    fid = open(fname_out, 'w')
    for iLine in range(bvecs.shape[0]):
        fid.write(' '.join(str(i) for i in bvecs[iLine, :]) + '\n')
    fid.close()

    # display message
    printv('Created file:\n--> ' + fname_out + '\n', verbose, 'info')
Example no. 24
def constrained_spherical_deconvolution(dir_src, dir_out, verbose=False):

    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi =  pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724')

    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius, 
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(data, mask=mask)

    # Saving Spherical Harmonic Coefficient
    out_peaks = 'sh_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, out_peaks), csd_fit.shm_coeff, affine)
Example no. 25
def compute_tensor_model(dir_src, dir_out, verbose=False):

    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    ten_model = TensorModel(gtab)
    ten_fit = ten_model.fit(data, mask)

    FA = ten_fit.fa
    MD = ten_fit.md
    EV = ten_fit.evecs.astype(np.float32)

    fa_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_FA.nii.gz'
    save_nifti(pjoin(dir_out, fa_name), FA, affine)
    md_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_MD.nii.gz'
    save_nifti(pjoin(dir_out, md_name), MD, affine)
    ev_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_EV.nii.gz'
    save_nifti(pjoin(dir_out, ev_name), EV, affine)
Example no. 26
def single_shell_extraction(dir_src, dir_out, verbose=False):

    fbval = pjoin(dir_src, 'bvals')
    fbvec = pjoin(dir_src, 'bvecs')
    fmask = pjoin(dir_src, 'nodif_brain_mask.nii.gz')
    fdwi = pjoin(dir_src, 'data.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    data, affine = load_nifti(fdwi, verbose)

    if par_b_shell == 1000:
        sind = (bvals < 10) | ((bvals < 1100) & (bvals > 900))
    elif par_b_shell == 2000:
        sind = (bvals < 10) | ((bvals < 2100) & (bvals > 1900))
    elif par_b_shell == 3000:
        sind = (bvals < 10) | ((bvals < 3100) & (bvals > 2900))

    shell_data = data[..., sind]
    shell_gtab = gradient_table(bvals[sind],
                                bvecs[sind, :],
                                b0_threshold=par_b0_threshold)

    fname = 'data_' + par_b_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, fname), shell_data, affine)
    np.savetxt(pjoin(dir_out, 'bvals_' + par_b_tag), shell_gtab.bvals)
    np.savetxt(pjoin(dir_out, 'bvecs_' + par_b_tag), shell_gtab.bvecs.T)
Example no. 27
def constrained_spherical_deconvolution(dir_src, dir_out, verbose=False):

    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src,
                  'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724')

    response, ratio = auto_response(gtab,
                                    data,
                                    roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(data, mask=mask)

    # Saving Spherical Harmonic Coefficient
    out_peaks = 'sh_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, out_peaks), csd_fit.shm_coeff, affine)
Example no. 28
def nifti_read_gtab(filename, meta):
    fsbvals, fsbvecs = nifti_get_specific_bvals_bvecs_filenames(filename)
    fgbvals, fgbvecs = nifti_get_global_bvals_bvecs_filenames(filename)

    # where to find the files?
    if os.path.isfile(fsbvals) and os.path.isfile(fsbvecs):
        # specific bvals/bvecs for this nifti file?
        bvals, bvecs = read_bvals_bvecs(fsbvals, fsbvecs)
    elif os.path.isfile(fgbvals) and os.path.isfile(fgbvecs):
        # general bvals/bvecs in the same directory?
        bvals, bvecs = read_bvals_bvecs(fgbvals, fgbvecs)
    else:
        raise Exception('no bvals/bvecs files found for ' + filename)

    if fields.auto_convert_world_space:
        bvecs = _nifti_bvecs_to_worldspace(bvecs, meta)
    return GradientTable(bvals, bvecs)
Example no. 29
def bval_vec(bval_file):
    from dipy.io import read_bvals_bvecs

    bvals = read_bvals_bvecs(bval_file, None)[0]
    bvals = np.around(bvals, decimals=-1)
    b0i = np.where(bvals == 0)[0]

    return bvals, b0i
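bval_vec rounds the b-values to the nearest 10 and returns them together with the indices of the b=0 volumes; a short, hedged example (the file path is invented and np must be imported in the module):

bvals, b0_indices = bval_vec('/data/subj01/dwi.bval')
print('shells found:', sorted(set(bvals)))
print('b=0 volumes at indices:', b0_indices)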
Example no. 30
def load_hcp_data(data_path, bvals_path, bvecs_path):
    # load HCP data
    img = nib.load(data_path)
    data = img.get_data()

    # load b-values and vectors
    bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
    return data, bvals, bvecs
Example no. 31
    def _retrieve_data(self, file_names, denoise=False, b0_threshold=10):
        """
        Reads data from specific files and returns them as object.

        This functions reads the filenames of the DWI image and loads/parses them accordingly.
        Also, it denoises them, if specified and generates a b0 image.

        The `file_names` param should be a dict with the following keys:
        `['bvals', 'bvecs', 'img', 't1', 'mask']`

        Parameters
        ----------
        file_names : dict
            The filenames, or relative paths from `self.path`.
        denoise : bool, optional
            A boolean indicating whether the given data should be denoised, by default False
        b0_threshold : float, optional
            A single value indicating the b0 threshold used for b0 calculation, by default 10.0

        Returns
        -------
        RawData
            An object holding all data as attributes, usable for further processing.

        Raises
        ------
        DataContainerNotLoadableError
            This error is thrown if one or multiple files cannot be found.
        """
        data = RawData()
        try:
            data.bvals, data.bvecs = read_bvals_bvecs(os.path.join(self.path, file_names['bvals']),
                                                      os.path.join(self.path, file_names['bvecs']))
            data.img = nb.load(os.path.join(self.path, file_names['img']))
            data.t1 = nb.load(os.path.join(self.path, file_names['t1'])).get_data()
        except FileNotFoundError as error:
            raise DataContainerNotLoadableError(self.path, error.filename) from None

        data.gtab = gradient_table(bvals=data.bvals, bvecs=data.bvecs)
        data.dwi = data.img.get_data().astype("float32")
        data.aff = data.img.affine
        data.fa = None

        if denoise:
            sigma = pca_noise_estimate(data.dwi, data.gtab, correct_bias=True,
                                       smooth=Config.get_config().getint("denoise", "smooth",
                                                                         fallback="3"))
            data.dwi = localpca(data.dwi, sigma=sigma,
                                patch_radius=Config.get_config().getint("denoise", "pathRadius",
                                                                        fallback="2"))
        if 'mask' in file_names:
            data.binarymask = nb.load(os.path.join(self.path, file_names['mask'])).get_data()
        else:
            _, data.binarymask = median_otsu(data.dwi[..., 0], 2, 1)

        data.b0 = data.dwi[..., data.bvals < b0_threshold].mean(axis=-1)

        return data
Example no. 32
 def handle(self):
     img = nib.load(self.dmri_file)
     data = img.get_data()
     bvals, bvecs = read_bvals_bvecs(self.fbvals, self.fbvecs)
     gtab = gradient_table(bvals, bvecs)
     maskdata, mask = median_otsu(data, 3, 1, True,\
                          vol_idx=range(10, 50), dilate=2)
     #print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)
     tenmodel = dti.TensorModel(gtab)
     self.tenfit = tenmodel.fit(maskdata)
Example no. 33
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix):
    """
    Compute DTI.
    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :return: True/False
    """
    # Open file.
    from msct_image import Image
    nii = Image(fname_in)
    data = nii.data
    print('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)

    # # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
    # from dipy.segment.mask import median_otsu
    # maskdata, mask = median_otsu(data, 3, 1, True, vol_idx=range(10, 50), dilate=2)
    # print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)

    # fit tensor model
    import dipy.reconst.dti as dti
    tenmodel = dti.TensorModel(gtab)
    tenfit = tenmodel.fit(data)

    # Compute metrics
    printv('Computing metrics...', param.verbose)
    # FA
    from dipy.reconst.dti import fractional_anisotropy
    nii.data = fractional_anisotropy(tenfit.evals)
    nii.setFileName(prefix + 'FA.nii.gz')
    nii.save('float32')
    # MD
    from dipy.reconst.dti import mean_diffusivity
    nii.data = mean_diffusivity(tenfit.evals)
    nii.setFileName(prefix + 'MD.nii.gz')
    nii.save('float32')
    # RD
    from dipy.reconst.dti import radial_diffusivity
    nii.data = radial_diffusivity(tenfit.evals)
    nii.setFileName(prefix + 'RD.nii.gz')
    nii.save('float32')
    # AD
    from dipy.reconst.dti import axial_diffusivity
    nii.data = axial_diffusivity(tenfit.evals)
    nii.setFileName(prefix + 'AD.nii.gz')
    nii.save('float32')

    return True
Example no. 34
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix):
    """
    Compute DTI.
    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :return: True/False
    """
    # Open file.
    from msct_image import Image
    nii = Image(fname_in)
    data = nii.data
    print('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)

    # # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
    # from dipy.segment.mask import median_otsu
    # maskdata, mask = median_otsu(data, 3, 1, True, vol_idx=range(10, 50), dilate=2)
    # print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)

    # fit tensor model
    import dipy.reconst.dti as dti
    tenmodel = dti.TensorModel(gtab)
    tenfit = tenmodel.fit(data)

    # Compute metrics
    printv('Computing metrics...', param.verbose)
    # FA
    from dipy.reconst.dti import fractional_anisotropy
    nii.data = fractional_anisotropy(tenfit.evals)
    nii.setFileName(prefix+'FA.nii.gz')
    nii.save('float32')
    # MD
    from dipy.reconst.dti import mean_diffusivity
    nii.data = mean_diffusivity(tenfit.evals)
    nii.setFileName(prefix+'MD.nii.gz')
    nii.save('float32')
    # RD
    from dipy.reconst.dti import radial_diffusivity
    nii.data = radial_diffusivity(tenfit.evals)
    nii.setFileName(prefix+'RD.nii.gz')
    nii.save('float32')
    # AD
    from dipy.reconst.dti import axial_diffusivity
    nii.data = axial_diffusivity(tenfit.evals)
    nii.setFileName(prefix+'AD.nii.gz')
    nii.save('float32')

    return True
Example no. 35
def run_to_estimate_dti(path_input,
                        path_output,
                        fbval="",
                        fbvec="",
                        file_inMask=""):
    if fbval == "" or fbvec == "":
        folder_sujeto = path_output
        for l in os.listdir(folder_sujeto):
            if "TENSOR" in l and "bval" in l:
                fbval = os.path.join(folder_sujeto, l)
            if "TENSOR" in l and "bvec" in l:
                fbvec = os.path.join(folder_sujeto, l)

    if file_inMask == "":
        folder = os.path.dirname(path_input)
        for i in os.listdir(folder):
            if "masked_mask" in i:
                file_inMask = os.path.join(folder, i)

    #def to_estimate_dti(file_in, file_inMask, outPath, fbval, fbvec):
    print(d.separador + 'building DTI Model...')

    ref_name = utils.to_extract_filename(path_input)
    file_evecs = os.path.join(path_output, ref_name + d.id_evecs + d.extension)
    file_evals = os.path.join(path_output, ref_name + d.id_evals + d.extension)

    if (not (os.path.exists(file_evecs))) | (not (os.path.exists(file_evals))):
        try:
            os.remove(file_evecs)
            os.remove(file_evals)
        except:
            print("Unexpected error:", sys.exc_info()[0])

        img = nib.load(path_input)
        data = img.get_data()
        mask = nib.load(file_inMask)
        mask = mask.get_data()

        bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
        gtab = gradient_table(bvals, bvecs)

        tensor_model = dti.TensorModel(gtab)
        tensor_fitted = tensor_model.fit(data, mask)

        nib.save(
            nib.Nifti1Image(tensor_fitted.evecs.astype(np.float32),
                            img.affine), file_evecs)
        nib.save(
            nib.Nifti1Image(tensor_fitted.evals.astype(np.float32),
                            img.affine), file_evals)

    print(file_evecs)
    print(file_evals)
    print(path_input)
    return path_input
Example no. 36
def merge_multiple_phase_encodes(input_dwi_up, input_bvals_up, input_bvecs_up, input_index_up, input_acqparam_up, input_dwi_down, input_bvals_down, input_bvecs_down, input_index_down, input_acqparam_down, output_dwi, output_bvals, output_bvecs, output_index, output_acqparam):

    #First, get the size of the images
    img_up = nib.load(input_dwi_up)
    img_dn = nib.load(input_dwi_down)

    bvals_up, bvecs_up = read_bvals_bvecs(input_bvals_up, input_bvecs_up)
    bvals_dn, bvecs_dn = read_bvals_bvecs(input_bvals_down, input_bvecs_down)

    index_up = np.loadtxt(input_index_up)
    index_dn = np.loadtxt(input_index_down)

    acqparam_up = np.loadtxt(input_acqparam_up)
    acqparam_dn = np.loadtxt(input_acqparam_down)

    numImages_up = img_up.header.get_data_shape()[3]
    numImages_dn = img_dn.header.get_data_shape()[3]

    if bvals_up.shape[0] != numImages_up:
        indices_to_remove_up = np.arange(numImages_up, bvals_up.shape[0])
        bvals_up = np.delete(bvals_up, indices_to_remove_up)
        bvecs_up = np.delete(bvecs_up, indices_to_remove_up, 0)

    if bvals_dn.shape[0] != numImages_dn:
        indices_to_remove_dn = np.arange(numImages_dn, bvals_dn.shape[0])
        bvals_dn = np.delete(bvals_dn, indices_to_remove_dn)
        bvecs_dn = np.delete(bvecs_dn, indices_to_remove_dn, 0)


    #Read in the DWI ACQPARAMS FILE, DETERMINE WHICH IMAGES CORRESPOND TO UP AND DOWN, AND MERGE INTO SEPARATE FILES
    os.system('fslmerge -t ' + output_dwi + ' ' + input_dwi_up + ' ' + input_dwi_down)

    bvals = np.concatenate((bvals_up, bvals_dn), axis=0)
    bvecs = np.concatenate((bvecs_up, bvecs_dn), axis=0)
    index = np.concatenate((index_up, 2*index_dn), axis=0)
    acqparam = np.vstack((acqparam_up, acqparam_dn))


    np.savetxt(output_bvals, bvals, fmt='%i', newline=' ')
    np.savetxt(output_bvecs, bvecs.transpose(), fmt='%.8f')
    np.savetxt(output_index, index, fmt='%i', newline=' ')
    np.savetxt(output_acqparam, acqparam, fmt='%.5f')
Example no. 37
def main():

    parser = build_args_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    verify_overwrite(args.output_dwi, parser, args.force)
    verify_overwrite(args.output_bvals, parser, args.force)
    verify_overwrite(args.output_bvecs, parser, args.force)

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    # Find the volume indices that correspond to the shells to extract.
    tol = args.tolerance

    def get_shell_indices(bval):
        return np.where(np.logical_and(bvals < bval + tol,
                                       bvals > bval - tol))[0]

    indices = [get_shell_indices(b) for b in args.bvals_to_extract]
    indices = np.sort(np.hstack(indices))

    if len(indices) == 0:
        parser.error('There are no volumes that have the supplied b-values.')

    logging.info(
        'Extracting shells [{}], with number of images per shell [{}], '
        'from {} images from {}.'.format(
            ' '.join([str(b) for b in args.bvals_to_extract]), ' '.join([
                str(len(get_shell_indices(b))) for b in args.bvals_to_extract
            ]), len(bvals), args.dwi))

    img = nib.load(args.dwi)

    if args.block_size is None:
        args.block_size = img.shape[-1]

    # Load the shells by iterating through blocks of volumes. This approach
    # is slower for small files, but allows very big files to be split
    # with less memory usage.
    shell_data = np.zeros((img.shape[:-1] + (len(indices), )),
                          dtype=img.get_data_dtype())
    for vi, data in volumes(img, args.block_size):
        in_volume = np.array([i in vi for i in indices])
        in_data = np.array([i in indices for i in vi])
        shell_data[..., in_volume] = data[..., in_data]

    bvals = bvals[indices].astype(int)
    bvals.shape = (1, len(bvals))
    np.savetxt(args.output_bvals, bvals, '%d')
    np.savetxt(args.output_bvecs, bvecs[indices, :].T, '%0.15f')
    nib.save(nib.Nifti1Image(shell_data, img.affine), args.output_dwi)
Example no. 38
File: utils.py Project: gkiar/ndmg
def load_bval_bvec(fbval, fbvec):
    """
    Takes bval and bvec files and produces a structure in dipy format

    **Positional Arguments:**
    """

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, atol=0.01)

    print(gtab.info)
    return gtab
Example no. 39
def to_register_dwi_to_mni(path_in, path_out, path_bvec, path_bval):
    ref_name = utils.to_extract_filename(path_in)

    # if not os.path.exists(path_out + ref_name + '_normalized' + d.extension):

    img_DWI = nib.load(path_in)
    data_DWI = img_DWI.get_data()
    affine_DWI = img_DWI.affine

    bvals, bvecs = read_bvals_bvecs(path_bval, path_bvec)
    gtab = gradient_table(bvals, bvecs)

    b0 = data_DWI[..., gtab.b0s_mask]

    mean_b0 = np.mean(b0, -1)

    mni_t2 = nib.load(d.standard_t2)
    mni_t2_data = mni_t2.get_data()
    MNI_T2_affine = mni_t2.affine

    directionWarped = np.zeros((mni_t2_data.shape[0], mni_t2_data.shape[1],
                                mni_t2_data.shape[2], data_DWI.shape[-1]))
    rangos = range(data_DWI.shape[-1])

    affine, starting_affine = tools.affine_registration(
        mean_b0,
        mni_t2_data,
        moving_grid2world=affine_DWI,
        static_grid2world=MNI_T2_affine)

    warped_moving, mapping = tools.syn_registration(
        mean_b0,
        mni_t2_data,
        moving_grid2world=affine_DWI,
        static_grid2world=MNI_T2_affine,
        # step_length=0.1,
        # sigma_diff=2.0,
        metric='CC',
        dim=3,
        level_iters=[10, 10, 5],
        # prealign=affine.affine)
        prealign=starting_affine)

    for gradientDirection in rangos:
        # print(gradientDirection)
        directionWarped[:, :, :, gradientDirection] = mapping.transform(
            data_DWI[:, :, :, gradientDirection].astype(int),
            interpolation='nearest')

    nib.save(nib.Nifti1Image(directionWarped, MNI_T2_affine),
             path_out + ref_name + '_normalized' + d.extension)

    return path_out + ref_name + '_normalized' + d.extension, mapping
Example no. 40
def tracking_eudx4csd(dir_src, dir_out, verbose=False):

    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi =  pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724') 

    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius, 
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_peaks = peaks_from_model(csd_model, 
                                 data, 
                                 sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=False)

    # Computation of streamlines
    streamlines = EuDX(csd_peaks.peak_values,
                       csd_peaks.peak_indices, 
                       seeds=par_eudx_seeds,
                       odf_vertices= sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography
    voxel_size =  (par_dim_vox,) * 3
    dims = mask.shape[:3]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    hdr['vox_to_ras'] = affine
    strm = ((sl, None, None) for sl in streamlines)
    trk_name = 'tractogram_' + par_b_tag + '_' + par_dim_tag + '_' + par_csd_tag + '_' + par_eudx_tag + '.trk'
    trk_out = os.path.join(dir_out, trk_name)
    nib.trackvis.write(trk_out, strm, hdr, points_space='voxel')    
Example no. 41
def peaks_from_nifti(fdwi, fbvec=None, fbval=None, mask=None):

    if '.' not in fdwi:
        fbase = fdwi
        fdwi = fdwi+".nii.gz"
        if not fbval:
            fbval = fbase+".bval"
        if not fbvec:
            fbvec = fbase+".bvec"
    print(fdwi)
    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()
    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)
    gtab = dgrad.gradient_table(bval, bvec)


    if not mask:
        print('generate mask')
        maskdata, mask = median_otsu(data, 3, 1, False, vol_idx=range(10, 50), dilate=2)

    else:
        mask_img = nib.load(mask)
        mask = mask_img.get_data()

        from dipy.segment.mask import applymask
        maskdata = applymask(data, mask)

    print(maskdata.shape, mask.shape)


    from dipy.reconst.shm import QballModel, CsaOdfModel
    model = QballModel(gtab, 6)

    sphere = get_sphere('symmetric724')

    print "fit Qball peaks"
    proc_num = multiprocessing.cpu_count()-1
    print "peaks_from_model using core# =" + str(proc_num)

    peaks = peaks_from_model(model=model, data=maskdata, relative_peak_threshold=.5,
                            min_separation_angle=25,
        sphere=sphere, mask=mask, parallel=True, nbr_processes=proc_num)

    return peaks
Example no. 42
    def load_bval_bvec(self, fbval, fbvec):
        """
        Takes bval and bvec files and produces a structure in dipy format

        **Positional Arguments:**

                streamlines:
                    - Fiber streamlines either file or array in a dipy EuDX
                      or compatible format.
        """

        bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

        gtab = gradient_table(bvals, bvecs, atol=0.01)

        print(gtab.info)
        return gtab
Example no. 43
def reconstruction(dwi, bval_file, bvec_file, mask=None, type='dti', b0=0., order=4):
    """ Uses Dipy to reconstruct an fODF for each voxel.

    Parameters
    ----------
    dwi: numpy array (mandatory)
        Holds the diffusion weighted image in a 4D-array (see nibabel).
    bval_file: string (mandatory)
        Path to the b-value file (FSL format).
    bvec_file: string (mandatory)
        Path to the b-vectors file (FSL format).
    mask: numpy array
        Holds the mask in a 3D array (see nibabel).
    type: string, one of {'dti', 'csd', 'csa'} (default = 'dti')
        The type of the ODF reconstruction.
    b0: float (default = 0)
        Threshold to use for defining b0 images.
    order: int (default = 4)
        Order to use for constrained spherical deconvolution (csd) or constant solid angle (csa).

    Returns
    -------
    model_fit: Dipy Object (depends on the type)
        Represents the fitted model for each voxel.
    """
    # b-values and b-vectors
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = gradient_table(bvals, bvecs, b0_threshold=b0)

    # reconstruction
    if type == 'dti':
        model = TensorModel(gtab, fit_method='WLS')
    elif type == 'csd':
        response, ratio = auto_response(gtab, dwi, roi_radius=10, fa_thr=0.7)
        model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=order)
    elif type == 'csa':
        model = CsaOdfModel(gtab, order)

    if mask is not None:
        model_fit = model.fit(dwi, mask=mask)
    else:
        model_fit = model.fit(dwi)

    return model_fit
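A hedged usage sketch for reconstruction above, fitting the default DTI model to a DWI loaded with nibabel (paths are invented; TensorModel, gradient_table, read_bvals_bvecs, etc. must already be imported in the module, as the snippet assumes):

import nibabel as nib

dwi_img = nib.load('/data/subj01/dwi.nii.gz')
fit = reconstruction(dwi_img.get_fdata(),
                     '/data/subj01/dwi.bval',
                     '/data/subj01/dwi.bvec',
                     type='dti',
                     b0=50.)
fa_map = fit.fa  # TensorFit exposes FA directly in the 'dti' case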
Example no. 44
def prepare_q4half(fimg, fbval, fbvec):

    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

    bvecs[257: 257 + 256] = -bvecs[1:257]
    bvals[257: 257 + 256] = bvals[1:257]
    data[..., 257: 257 + 256] = data[..., 1:257]

    bvals = np.delete(bvals, [513, 514], 0)
    bvecs = np.delete(bvecs, [513, 514], 0)
    data = np.delete(data, [513, 514], -1)

    return data, affine, zooms, bvals, bvecs
Example no. 45
def prepare_q5half(fimg, fbtext, fbval, fbvec, fmask, flipy):

    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]

    bvals, temp = read_bvals_bvecs(fbval, fbvec)

    bmat = np.loadtxt(fbtext)
    bvecs = bmat[:, 1:]
    # Normalize bvec to unit norm
    bvecs = bvecs / np.sqrt(np.sum(bvecs ** 2, axis=1))[:, None]
    bvecs[np.isnan(bvecs)] = 0

    if flipy:
        bvecs[:,1] = -bvecs[:,1]

    img_mask = nib.load(fmask)
    mask = img_mask.get_data()

    return data, affine, zooms, bvals, bvecs, mask    
Example no. 46
def make_dti_data(out_fbval, out_fbvec, out_fdata, out_shape=(5, 6, 7)):
    """
    Create a synthetic data-set with a single shell acquisition

    out_fbval, out_fbvec, out_fdata : str
        Full paths to generated data and bval/bvec files

    out_shape : tuple
        The 3D shape of the output volume

    """
    fimg, fbvals, fbvecs = dpd.get_data('small_64D')
    img = nib.load(fimg)
    bvals, bvecs = dio.read_bvals_bvecs(fbvals, fbvecs)
    gtab = dpg.gradient_table(bvals, bvecs)

    # Simulate a signal based on the DTI model:
    signal = single_tensor(gtab, S0=100)
    DWI = np.zeros(out_shape + (len(gtab.bvals), ))
    DWI[:] = signal
    nib.save(nib.Nifti1Image(DWI, img.affine), out_fdata)
    np.savetxt(out_fbval, bvals)
    np.savetxt(out_fbvec, bvecs)
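make_dti_data is handy for generating a small synthetic dataset in tests; a hypothetical call (output paths invented):

# Writes a 5x6x7 single-shell volume plus matching bval/bvec text files.
make_dti_data('/tmp/sim.bval', '/tmp/sim.bvec', '/tmp/sim_dwi.nii.gz', out_shape=(5, 6, 7))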
Example no. 47
def make_dki_data(out_fbval, out_fbvec, out_fdata, out_shape=(5, 6, 7)):
    """
    Create a synthetic data-set with a 2-shell acquisition

    out_fbval, out_fbvec, out_fdata : str
        Full paths to generated data and bval/bvec files

    out_shape : tuple
        The 3D shape of the output volume

    """
    # This is one-shell (b=1000) data:
    fimg, fbvals, fbvecs = dpd.get_data('small_64D')
    img = nib.load(fimg)
    bvals, bvecs = dio.read_bvals_bvecs(fbvals, fbvecs)
    # So we create two shells out of it
    bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
    bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    gtab_2s = dpg.gradient_table(bvals_2s, bvecs_2s)

    # Simulate a signal based on the DKI model:
    mevals_cross = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
                             [0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
    angles_cross = [(80, 10), (80, 10), (20, 30), (20, 30)]
    fie = 0.49
    frac_cross = [fie * 50, (1 - fie) * 50, fie * 50, (1 - fie) * 50]
    # Noise-free simulated signal
    signal_cross, dt_cross, kt_cross = multi_tensor_dki(gtab_2s, mevals_cross,
                                                        S0=100,
                                                        angles=angles_cross,
                                                        fractions=frac_cross,
                                                        snr=None)
    DWI = np.zeros(out_shape + (len(gtab_2s.bvals), ))
    DWI[:] = signal_cross
    nib.save(nib.Nifti1Image(DWI, img.affine), out_fdata)
    np.savetxt(out_fbval, bvals_2s)
    np.savetxt(out_fbvec, bvecs_2s)
Example no. 48
    def load_bval_bvec_dti(self, fbval, fbvec, dti_file, dti_file_out):
        """
        Takes bval and bvec files and produces a structure in dipy format

        **Positional Arguments:**

                streamlines:
                    - Fiber streamlines either file or array in a dipy EuDX
                      or compatible format.
        """

        # Load Data
        startTime = datetime.now()

        img = nb.load(dti_file)
        data = img.get_data()

        bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

        # Get rid of spurious scans
        idx = np.where((bvecs[:, 0] == 100) & (bvecs[:, 1] == 100) &
                       (bvecs[:, 2] == 100))
        bvecs = np.delete(bvecs, idx, axis=0)
        bvals = np.delete(bvals, idx, axis=0)
        data = np.delete(data, idx, axis=3)

        # Save corrected DTI volume
        dti_new = nb.Nifti1Image(data, affine=img.get_affine(),
                                 header=img.get_header())
        dti_new.update_header()
        nb.save(dti_new, dti_file_out)

        gtab = gradient_table(bvals, bvecs, atol=0.01)

        print(gtab.info)
        return gtab
Example no. 49
def shell_extraction(src_dwi, src_bvec, src_bval, out_b0,
                     delta=100, verbose=False):

    shell = [0]
    
    bvals, bvecs = read_bvals_bvecs(src_bval, src_bvec)

    img = nib.load(src_dwi)
    data = img.get_data()
    affine = img.get_affine()
    header = img.header

    sind = np.zeros((bvals.size), dtype=bool)
    for b in shell:
        tind = (bvals < b+delta) & (bvals > b-delta)
        sind = sind | tind          

    shell_data = data[..., sind].mean(axis=3)
    
    shell_img = nib.Nifti1Image(shell_data, affine, header)
    shell_img.update_header()
    nib.save(shell_img, out_b0)
    
    return sind.sum()
Example no. 50
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs, file_mask):
    """
    Compute DTI.
    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti
    :param evecs: bool: output diffusion tensor eigenvectors and eigenvalues
    :return: True/False
    """
    # Open file.
    from spinalcordtoolbox.image import Image
    nii = Image(fname_in)
    data = nii.data
    sct.printv('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
    if not file_mask == '':
        sct.printv('Open mask file...', param.verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    sct.printv('Computing tensor using "' + method + '" method...', param.verbose)
    import dipy.reconst.dti as dti
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        import dipy.denoise.noise_estimate as ne
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    sct.printv('Computing metrics...', param.verbose)
    # FA
    nii.data = tenfit.fa
    nii.save(prefix + 'FA.nii.gz', dtype='float32')
    # MD
    nii.data = tenfit.md
    nii.save(prefix + 'MD.nii.gz', dtype='float32')
    # RD
    nii.data = tenfit.rd
    nii.save(prefix + 'RD.nii.gz', dtype='float32')
    # AD
    nii.data = tenfit.ad
    nii.save(prefix + 'AD.nii.gz', dtype='float32')
    if evecs:
        data_evecs = tenfit.evecs
        data_evals = tenfit.evals
        # output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data
        for idim in range(3):
            nii.data = data_evecs[:, :, :, :, idim]
            nii.save(prefix + 'V' + str(idim+1) + '.nii.gz', dtype="float32")
            nii.data = data_evals[:, :, :, idim]
            nii.save(prefix + 'E' + str(idim+1) + '.nii.gz', dtype="float32")

    return True
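# A minimal usage sketch for compute_dti; the file names are illustrative, and the
# function also relies on the module-level `sct` and `param` objects defined
# elsewhere in the script:
compute_dti('dmri_moco.nii.gz', 'bvals.txt', 'bvecs.txt',
            prefix='dti_', method='standard', evecs=False, file_mask='')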
Example n. 51
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, file_mask):
    """
    Compute DTI.
    :param fname_in: input 4d file.
    :param fname_bvals: bvals txt file
    :param fname_bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algorithm for computing DTI ('standard' or 'restore')
    :param file_mask: binary mask restricting the tensor fit (empty string: no mask)
    :return: True/False
    """
    # Open file.
    from msct_image import Image
    nii = Image(fname_in)
    data = nii.data
    print('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
    if not file_mask == '':
        printv('Open mask file...', param.verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    printv('Computing tensor using "'+method+'" method...', param.verbose)
    import dipy.reconst.dti as dti
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        import dipy.denoise.noise_estimate as ne
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    printv('Computing metrics...', param.verbose)
    # FA
    from dipy.reconst.dti import fractional_anisotropy
    nii.data = fractional_anisotropy(tenfit.evals)
    nii.setFileName(prefix+'FA.nii.gz')
    nii.save('float32')
    # MD
    from dipy.reconst.dti import mean_diffusivity
    nii.data = mean_diffusivity(tenfit.evals)
    nii.setFileName(prefix+'MD.nii.gz')
    nii.save('float32')
    # RD
    from dipy.reconst.dti import radial_diffusivity
    nii.data = radial_diffusivity(tenfit.evals)
    nii.setFileName(prefix+'RD.nii.gz')
    nii.save('float32')
    # AD
    from dipy.reconst.dti import axial_diffusivity
    nii.data = axial_diffusivity(tenfit.evals)
    nii.setFileName(prefix+'AD.nii.gz')
    nii.save('float32')

    return True
Example n. 52
plt.imshow(data[:, :, axial_middle, 10].T, cmap='gray', origin='lower')
plt.show()
plt.savefig('data.png', bbox_inches='tight')

"""
.. figure:: data.png
   :align: center

   **Showing the middle axial slice without (left) and with (right) diffusion weighting**.

The next step is to load the b-values and b-vectors from the disk using 
the function ``read_bvals_bvecs``.
"""

from dipy.io import read_bvals_bvecs
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

"""
In Dipy, we use an object called ``GradientTable`` which holds all the
acquisition-specific parameters, e.g. b-values, b-vectors, timings and others.
To create this object you can use the function ``gradient_table``.
"""

from dipy.core.gradients import gradient_table
gtab = gradient_table(bvals, bvecs)

"""
Finally, you can use ``gtab`` (the GradientTable object) to show some information about the
acquisition parameters
"""
def identify_b0(fname_bvecs, fname_bvals, bval_min, verbose):

    # Identify b=0 and DWI images
    sct.printv('\nIdentify b=0 and DWI images...', verbose)
    index_b0 = []
    index_dwi = []

    # if bval is not provided
    if not fname_bvals:
        # Open bvecs file
        bvecs = []
        with open(fname_bvecs) as f:
            for line in f:
                bvecs_new = [x for x in map(float, line.split())]
                bvecs.append(bvecs_new)

        # Check if bvecs file is nx3
        if not len(bvecs[0][:]) == 3:
            sct.printv('  WARNING: bvecs file is 3xn instead of nx3. Consider using sct_dmri_transpose_bvecs.', verbose, 'warning')
            sct.printv('  Transpose bvecs...', verbose)
            # transpose bvecs
            bvecs = list(zip(*bvecs))

        # get number of lines
        nt = len(bvecs)

        # identify b=0 and dwi
        for it in range(0, nt):
            if math.sqrt(math.fsum([i**2 for i in bvecs[it]])) < 0.01:
                index_b0.append(it)
            else:
                index_dwi.append(it)

    # if bval is provided
    else:

        # Open bvals file
        from dipy.io import read_bvals_bvecs
        bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)

        # get number of lines
        nt = len(bvals)

        # Identify b=0 and DWI images
        sct.printv('\nIdentify b=0 and DWI images...', verbose)
        for it in range(0, nt):
            if bvals[it] < bval_min:
                index_b0.append(it)
            else:
                index_dwi.append(it)

    # check if no b=0 images were detected
    if index_b0 == []:
        sct.printv('ERROR: no b=0 images detected. Maybe you are using non-null low bvals? in that case use flag -bvalmin. Exit program.', 1, 'error')
        sys.exit(2)

    # display stuff
    nb_b0 = len(index_b0)
    nb_dwi = len(index_dwi)
    sct.printv('  Number of b=0: ' + str(nb_b0) + ' ' + str(index_b0), verbose)
    sct.printv('  Number of DWI: ' + str(nb_dwi) + ' ' + str(index_dwi), verbose)

    # return
    return index_b0, index_dwi, nb_b0, nb_dwi
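# A minimal usage sketch for identify_b0 with illustrative file names; any volume
# whose b-value is below `bval_min` is counted as b=0:
index_b0, index_dwi, nb_b0, nb_dwi = identify_b0('bvecs.txt', 'bvals.txt',
                                                 bval_min=10, verbose=1)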
import sys
import numpy as np
import nibabel as nib
import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs
fval = sys.argv[7]+'.bval'
fvec = sys.argv[7]+'.bvec'
bvals,bvecs = read_bvals_bvecs(fval,fvec)
fname = sys.argv[7]+'.nii.gz'
gtab=gradient_table(bvals,bvecs)
img=nib.load(fname)
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
x_f=int(sys.argv[1])
y_f=int(sys.argv[2])
z_f=int(sys.argv[3])
x_t=int(sys.argv[4])
y_t=int(sys.argv[5])
z_t=int(sys.argv[6])
mask = data[..., 0] > 50
tenmodel = dti.TensorModel(gtab)
print('Tensor fitting computation')
tenfit = tenmodel.fit(data, mask)
print('Computing anisotropy measures (FA, MD, RGB)')
from dipy.reconst.dti import fractional_anisotropy, color_fa, lower_triangular
FA = fractional_anisotropy(tenfit.evals)
FA[np.isnan(FA)] = 0
fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
nib.save(fa_img, sys.argv[7]+'_fa.nii.gz')
evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), img.get_affine())
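# The excerpt above imports color_fa but stops before using it. A minimal sketch of
# how the remaining imports are typically combined (the output file names are
# assumptions):
nib.save(evecs_img, sys.argv[7]+'_evecs.nii.gz')
RGB = color_fa(FA, tenfit.evecs)
nib.save(nib.Nifti1Image(np.array(255 * RGB, 'uint8'), img.get_affine()),
         sys.argv[7]+'_rgb.nii.gz')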
Example n. 55
def run(rawargs):
    arguments = docopt(doc, argv=rawargs, version='Fit v{0}'.format(Version))

    configuration = {}

    #Try to load the image data. If successful, save it to configuration as "data"
    try:
        configuration["data"] = nib.load(arguments["--image"])
    except:
        print("The image you specified does not exist, or cannot be read.")
        sys.exit(1)

    #Try to load the mask data. If successful, save it to configuration as "mask"
    if arguments["--mask"] != None and arguments["--mask"] != "None":
        try:
            configuration["mask"] = nib.load(arguments["--mask"])
        except:
            print("The mask image you specified does not exist, or cannot be read.")
            sys.exit(1)
    else:
        configuration["mask"] = None

    #Try to load the bvec, bvals files. If successful, save it to configuration as "gradient_table"
    try:
        bvals, bvecs = read_bvals_bvecs(arguments["--bvals"], arguments["--bvecs"])
        configuration["gradient_table"] = gradient_table(bvals, bvecs)
    except:
        print("Could not read bvec and/or bval file")
        sys.exit(1)

    #Update configuration with more basic settings
    lookup = {"--out_dti": "out_dti",
              "--out_dki": "out_dki",
              "--out_residual": "out_residual",
              "--out_noise": "out_noise",
              "--out_snr": "out_snr",
              "--out_fa": "out_fa",
              "--out_md": "out_md",
              "--out_rd": "out_rd",
              "--out_ad": "out_ad",
              "--out_mk": "out_mk",
              "--out_rk": "out_rk",
              "--out_ak": "out_ak",
              "--mask_median_radius": "median_radius",
              "--mask_numpass": "******"}

    for key, value in lookup.items():
        if arguments[key] == "None" or arguments[key] == None:
            configuration[value] = None
        else:
            configuration[value] = arguments[key]


    if arguments["--fit_method"].upper() in ["WLS", "OLS"]:
        configuration["fit_method"] = arguments["--fit_method"].upper()
    else:
        print("'{0}' is not a valid fit method. Choose either 'WLS', 'OLS'".format(arguments["--fit_method"].upper()))
        sys.exit(1)

    fitter = Fitter(**configuration)
    fitter.apply_mask()
    fitter.fit()

    fitter.extract_scalars()
    fitter.save()
    sys.exit(0)
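# run() above is a docopt entry point, so it expects the raw command-line arguments.
# A minimal invocation sketch (the flag values are illustrative; doc, Version and
# Fitter are defined elsewhere in the module, and run() exits the interpreter when
# it finishes):
run(["--image", "dwi.nii.gz", "--bvals", "dwi.bval", "--bvecs", "dwi.bvec",
     "--mask", "None", "--fit_method", "WLS", "--out_fa", "fa.nii.gz"])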
# Load the parcellation and white-matter volumes
parcellation_img = nib.load(parcellation_file)
parcellation_data = parcellation_img.get_data().astype(np.int)

wm_img = nib.load(wm_file)
wm_data = wm_img.get_data()

# Binarise the white-matter map
wm_data_bin = np.copy(wm_data)
wm_data_bin[wm_data_bin > 0] = 1

# Mask the dwi_data so that you're only investigating voxels inside the brain!
dwi_data = dwi_data * wm_data_bin.reshape([wm_data_bin.shape[0],
                                           wm_data_bin.shape[1],
                                           wm_data_bin.shape[2],
                                           1])

bvals, bvecs = read_bvals_bvecs(bvals_file, bvecs_file)
gtab = gradient_table(bvals, bvecs)

# Restrict the parcellation to brain-masked white matter
mask_data_bin[mask_data_bin > 0] = 1
parcellation_data = parcellation_data * mask_data_bin
parcellation_wm_data = parcellation_data * wm_data_bin
parcellation_wm_data = parcellation_wm_data.astype(np.int)


#=============================================================================
# Track all of white matter using EuDX
#=============================================================================

if not os.path.exists(Msym_file) and not os.path.exists(Mdir_file):
Example n. 57
    def run(self, data_files, bvals_files, bvecs_files, mask_files,
            bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
            out_file='product.json', out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """Compute the signal-to-noise ratio in the corpus callosum.

        Parameters
        ----------
        data_files : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        bvals_files : string
            Path of bvals.
        bvecs_files : string
            Path of bvecs.
        mask_files : string
            Path of brain mask
        bbox_threshold : variable float, optional
            Threshold for bounding box, values separated with commas for ex.
            [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')

        """
        io_it = self.get_io_iterator()

        for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            data, affine = load_nifti(dwi_path)
            bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
            gtab = gradient_table(bvals=bvals, bvecs=bvecs)

            logging.info('Computing brain mask...')
            _, calc_mask = median_otsu(data)

            mask, affine = load_nifti(mask_path)
            mask = np.array(calc_mask == mask.astype(bool)).astype(int)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info(
                'Computing worst-case/best-case SNR using the CC...')

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0],
                   bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            if len(bbox_threshold) != 6:
                raise IOError('bbox_threshold should have 6 float values')

            mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                                 bbox_threshold,
                                                 return_cfa=True)

            save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            mean_signal = np.mean(data[mask_cc_part], axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1]//2] = 1
            mask_noise = ~mask_noise

            save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = np.std(data[mask_noise, :])
            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs-np.array([1, 0, 0])) ** 2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 1, 0])) ** 2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs-np.array([0, 0, 1])) ** 2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0]/noise_std
                    logging.info("SNR for the b=0 image is :" + str(SNR))
                else:
                    SNR = mean_signal[direction]/noise_std
                    logging.info("SNR for direction " + str(direction) +
                                 " " + str(gtab.bvecs[direction]) + " is :" +
                                 str(SNR))
                    SNR_directions.append(direction)
                SNR_output.append(SNR)

            data = []
            data.append({
                        'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) +
                        ' ' + str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                        'directions': 'b0' + ' ' + str(SNR_directions[0]) +
                        ' ' + str(SNR_directions[1]) + ' ' +
                        str(SNR_directions[2])
                        })

            with open(os.path.join(out_dir, out_path), 'w') as myfile:
                json.dump(data, myfile)
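# The SNR reported above follows the usual definition: mean corpus-callosum signal
# divided by the standard deviation of the background noise. A condensed sketch of
# that step, where `dwi` stands for the 4D DWI array and the two masks are those
# computed in the loop body above:
cc_mean_signal = np.mean(dwi[mask_cc_part], axis=0)
background_sigma = np.std(dwi[mask_noise, :])
snr_b0 = cc_mean_signal[0] / background_sigma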
Example n. 58
def load_data(fraw, fmask, fbval, fbvec):
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=10)
    data, affine = load_nifti(fraw)
    mask, _ = load_nifti(fmask)
    return gtab, data, affine, mask
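# A minimal usage sketch for load_data with illustrative file names:
gtab, data, affine, mask = load_data('dwi.nii.gz', 'brain_mask.nii.gz',
                                     'dwi.bval', 'dwi.bvec')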
Example n. 59
print(image.get_header().get_zooms()[:3])

import matplotlib.pyplot as plt

axial_middle = data.shape[2] // 2
plt.figure('Showing the datasets')
plt.subplot(1,2,1).set_axis_off()
plt.imshow(data[:,:, axial_middle, 0].T, cmap='gray', origin='lower')

plt.subplot(1,2,2).set_axis_off()
plt.imshow(data[:,:, axial_middle, 10].T, cmap='gray', origin='lower')
plt.show()

from dipy.io import read_bvals_bvecs
bvals, bvecs = read_bvals_bvecs(file_bval, file_bvec)

from dipy.core.gradients import gradient_table
gtab = gradient_table(bvals, bvecs)

print(gtab.info)
print(gtab.bvals)
print(gtab.bvecs[:10,:])

S0s = data[:,:,:, gtab.b0s_mask]

print(S0s.shape)

nib.save(nib.Nifti1Image(S0s, image.get_affine()), 'teste_S0s.nii.gz')

import numpy as np
def run(rawargs):
    arguments = docopt(doc, argv=rawargs, version='Orientation Check v{0}'.format(Version))
    inputs = [{"Value":"image file", "Flag": "--image"}, {"Value":"bval file", "Flag": "--bvals"}, {"Value":"bvec file", "Flag": "--bvecs"}]
    for inputinfo in inputs:
        if not exists(arguments[inputinfo["Flag"]]):
            print("The {0} specified does not exist!".format(inputinfo["Value"]))
            sys.exit(1)

    rawimage = nib.load(arguments["--image"])
    bvals, bvecs = read_bvals_bvecs(arguments['--bvals'], arguments['--bvecs'])
    print("Generating gradient table.")
    gtab = gradient_table(bvals, bvecs)

    #Define the tensor model
    print("Generating the tensor model.")
    dti_wls = dti.TensorModel(gtab, fit_method="NLLS")

    image_data = rawimage.get_data()


    print("Masking the brain.")
    image_masked, mask = median_otsu(image_data, 3, 1, autocrop=True, dilate=2)
    #print(image_masked)
    #image_masked_data = nib.nifti1.Nifti1Image(image_masked.astype(np.float32), image_data.get_affine())
    #print("Saving masked brain image")
    #nib.nifti1.save(image_masked_data, "./imagemasked.nii.gz")
    print("Resampling the brain to a standard resolution.")
    image, affine1 = reslice(image_masked, rawimage.get_affine(), rawimage.get_header().get_zooms()[:3], (3.0,3.0,3.0))
    mask, maskaffine1 = reslice(mask.astype(numpy.int), rawimage.get_affine(), rawimage.get_header().get_zooms()[:3], (3.0,3.0,3.0))
    #print(len([type(mask) for i in range(0,image.shape[3])]))
    #mask = numpy.expand_dims(mask,3)
    #print(mask)
    #print(mask.shape)
    #image=image*mask
    print(image[0][0][0])

    print("Checking the image dimensions")
    Xsize, Ysize, Zsize, directions = image.shape
    print("X: {0}\nY: {1}\nZ: {2}".format(Xsize, Ysize, Zsize))

    #Define Image Scopes
    print("Defining the image scopes.")
    imagedict = {"axial": {"dropdim": [0,1], "scope": (slice(0,Xsize), slice(0,Ysize), slice(math.floor(Zsize/2),math.floor(Zsize/2)+1))},
                 "coronal": {"dropdim": [0,2], "scope": (slice(0,Xsize), slice(math.floor(Ysize/2),math.floor(Ysize/2)+1), slice(0, Zsize))},
                 "sagittal": {"dropdim": [1,2], "scope": (slice(math.floor(Xsize/2),math.floor(Xsize/2)+1), slice(0,Ysize), slice(0, Zsize))}}


    #roi_idx = (slice(0,image.shape[0]), slice(0,image.shape[1]), slice(middleslice,middleslice+1))#(slice(0,image.shape[0]), slice(0,image.shape[1]), slice(int(image.shape[2]/2),int(image.shape[2]/2)+1))
    print("Defining sphere.")
    sphere = get_sphere('symmetric724')
    #sphere = dpd.get_sphere('symmetric362')

    #Slice the whole dataset by the scope
    print("Slicing the dataset with the scopes.")
    for view in ["sagittal", "coronal", "axial"]:
        imagedict[view]["image"] = image[imagedict[view]["scope"]]
        imagedict[view]["mask"] = mask[imagedict[view]["scope"]]
        print("Fitting {0} data.".format(view))
        fit_wls = dti_wls.fit(imagedict[view]["image"])
        print("Extracting {0} FA.".format(view))
        fa1 = fit_wls.fa * imagedict[view]["mask"]
        print("Extracting {0} EVALS.".format(view))
        evals1 = fit_wls.evals
        print("Extracting {0} EVECS.".format(view))
        evecs1 = fit_wls.evecs
        print("Extracting {0} Color FA.".format(view))
        cfa1 = dti.color_fa(fa1, evecs1)
        cfa1 = cfa1/cfa1.max()
        print("Defining {0} renderer.".format(view))
        render = fvtk.ren()
        print("Generating {0} image.".format(view))
        x =cfa1.shape[imagedict[view]["dropdim"][0]]
        y =cfa1.shape[imagedict[view]["dropdim"][1]]

        #print(x, y, 1, 3)
        cfa2 = cfa1.reshape(x, y, 1, 3)
        evals2 = evals1.reshape(x, y, 1, 3)*1.25
        evecs2 = evecs1.reshape(x, y, 1, 3, 3)*1.25
        print("Adding render.")
        fvtk.add(render, fvtk.tensor(evals2, evecs2, cfa2, sphere))
        print("Recording render.")
        with Xvfb() as xvfb:
            fvtk.record(render, out_path=arguments["--out"+view], size=(800,800), magnification=2)
        print("Image Saved")

    sys.exit(0)
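# As in Example n. 55, run() here is a docopt entry point. A minimal invocation
# sketch (file names are illustrative; doc and Version come from elsewhere in the
# module):
run(["--image", "dwi.nii.gz", "--bvals", "dwi.bval", "--bvecs", "dwi.bvec",
     "--outaxial", "axial.png", "--outcoronal", "coronal.png",
     "--outsagittal", "sagittal.png"])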