Example 1
def trk_loader(filename):
    try:
        with InTemporaryDirectory():
            load_trk(filename, filepath_dix['gs.nii'])
        return True
    except ValueError:
        return False
Example 2
def read_hcp_atlas_16_bundles():
    """
    Read the 16-bundle HCP atlas, returning a dict with the whole-brain
    tractogram and, per bundle, its streamlines ('sl') and centroid ('centroid').
    """
    bundle_dict = {}
    _, folder = fetch_hcp_atlas_16_bundles()
    whole_brain, _ = load_trk(
        op.join(folder, 'Atlas_in_MNI_Space_16_bundles', 'whole_brain',
                'whole_brain_MNI.trk'))
    bundle_dict['whole_brain'] = whole_brain
    bundle_files = glob(
        op.join(folder, "Atlas_in_MNI_Space_16_bundles", "bundles", "*.trk"))

    for bundle_file in bundle_files:
        bundle = op.splitext(op.split(bundle_file)[-1])[0]
        bundle_dict[bundle] = {}
        bundle_dict[bundle]['sl'] = load_trk(bundle_file)[0]

        feature = ResampleFeature(nb_points=100)
        metric = AveragePointwiseEuclideanMetric(feature)
        qb = QuickBundles(np.inf, metric=metric)
        cluster = qb.cluster(bundle_dict[bundle]['sl'])
        bundle_dict[bundle]['centroid'] = cluster.centroids[0]

    # For some reason, this file-name has a 0 in it, instead of an O:
    bundle_dict["IFOF_R"] = bundle_dict["IF0F_R"]
    del bundle_dict["IF0F_R"]
    return bundle_dict
Example 3
def test_recobundles_flow():
    with TemporaryDirectory() as out_dir:
        data_path = get_fnames('fornix')
        streams, hdr = nib.trackvis.read(data_path)
        fornix = [s[0] for s in streams]

        f = Streamlines(fornix)
        f1 = f.copy()

        f2 = f1[:15].copy()
        f2._data += np.array([40, 0, 0])

        f.extend(f2)

        f2_path = pjoin(out_dir, "f2.trk")
        save_trk(f2_path, f2, affine=np.eye(4))

        f1_path = pjoin(out_dir, "f1.trk")
        save_trk(f1_path, f, affine=np.eye(4))

        rb_flow = RecoBundlesFlow(force=True)
        rb_flow.run(f1_path,
                    f2_path,
                    greater_than=0,
                    clust_thr=10,
                    model_clust_thr=5.,
                    reduction_thr=10,
                    out_dir=out_dir)

        labels = rb_flow.last_generated_outputs['out_recognized_labels']
        recog_trk = rb_flow.last_generated_outputs['out_recognized_transf']

        rec_bundle, _ = load_trk(recog_trk)
        npt.assert_equal(len(rec_bundle) == len(f2), True)

        label_flow = LabelsBundlesFlow(force=True)
        label_flow.run(f1_path, labels)

        recog_bundle = label_flow.last_generated_outputs['out_bundle']
        rec_bundle_org, _ = load_trk(recog_bundle)

        BMD = BundleMinDistanceMetric()
        nb_pts = 20
        static = set_number_of_points(f2, nb_pts)
        moving = set_number_of_points(rec_bundle_org, nb_pts)

        BMD.setup(static, moving)
        x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1, 0, 0, 0])  # affine
        bmd_value = BMD.distance(x0.tolist())

        npt.assert_equal(bmd_value < 1, True)
Example 4
def bundle_extract(atlas_track_path, atlas_bundle_path, target_track_path):

    time0 = time.time()

    atlas_file = atlas_track_path
    target_file = target_track_path

    print('loading data, elapsed time:', time.time() - time0)

    sft_atlas = load_trk(atlas_file, "same", bbox_valid_check=False)
    atlas = sft_atlas.streamlines
    atlas_header = create_tractogram_header(atlas_file,
                                            *sft_atlas.space_attributes)

    sft_target = load_trk(target_file, "same", bbox_valid_check=False)
    target = sft_target.streamlines
    target_header = create_tractogram_header(target_file,
                                             *sft_target.space_attributes)

    moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
        atlas,
        target,
        x0='affine',
        verbose=True,
        progressive=True,
        rng=np.random.RandomState(1984))

    bundle_track = StatefulTractogram(moved, target_header, Space.RASMM)
    save_trk(bundle_track, 'moved.trk', bbox_valid_check=False)

    np.save("slr_transform.npy", transform)

    model_bundle_file = atlas_bundle_path
    model_bundle = load_trk(model_bundle_file, "same", bbox_valid_check=False)
    model_bundle = model_bundle.streamlines

    print('comparing, elapsed time:', time.time() - time0)

    rb = RecoBundles(moved, verbose=True, rng=np.random.RandomState(2001))

    recognized_bundle, bundle_labels = rb.recognize(model_bundle=model_bundle,
                                                    model_clust_thr=0,
                                                    reduction_thr=20,
                                                    reduction_distance='mam',
                                                    slr=True,
                                                    slr_metric='asymmetric',
                                                    pruning_distance='mam')

    bundle_track = StatefulTractogram(target[bundle_labels], target_header,
                                      Space.RASMM)
    return bundle_track
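
A usage sketch for the function above, with hypothetical file names; the atlas
and the target must both be whole-brain tractograms in compatible spaces:

# Hypothetical paths, for illustration only.
extracted = bundle_extract('atlas_whole_brain.trk', 'atlas_CST_L.trk',
                           'subject_whole_brain.trk')
save_trk(extracted, 'subject_CST_L.trk', bbox_valid_check=False)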
Example 5
def get_tract_params(mypath, subject, str_identifier, pruned=False, verbose=False):

    trkpath, exists = gettrkpath(mypath, subject, str_identifier, pruned, verbose)
    if trkpath is not None:
        trkdata = load_trk(trkpath, "same")
        if verbose:
            print("loaded")
        # trkdata.to_vox()
        if hasattr(trkdata, 'space_attribute'):
            header = trkdata.space_attribute
        elif hasattr(trkdata, 'space_attributes'):
            header = trkdata.space_attributes
        affine = trkdata.affine
        lengths = length(trkdata.streamlines)
        #del trkdata
        # lengths = list(length(trkstreamlines))
        lengths = list(lengths)
        numtracts = np.size(lengths)
        minlength = np.min(lengths)
        maxlength = np.max(lengths)
        meanlength = np.mean(lengths)
        stdlength = np.std(lengths)
        if verbose:
            print("For subject " + subject + " the number of tracts is " + str(numtracts) + ", the minimum length is " +
                  str(minlength) + ", the maximum length is " + str(maxlength) + ", the mean length is " + str(meanlength)
                  + ", the std is " + str(stdlength))
        return subject, numtracts, minlength, maxlength, meanlength, stdlength, header, affine, trkdata
    else:
        print("Error, trkfile not found")
Example 6
def get_data(in_fn, out_fn):
    # Load volume
    tom = nib.load(in_fn).get_data() # 144 x 144 x 144 x 3
    #tom = np.sum(tom, axis=0)
    #tom = cv2.resize(tom, (256, 256))

    # Preprocess input
    tom = (tom - np.min(tom)) / (np.max(tom) - np.min(tom)) # normalise into range [0,1]
    tom = torch.from_numpy(tom)
    tom = tom.permute(3, 0, 1, 2) # channels first for pytorch
    
    # Load the tractogram
    tractogram = load_trk(out_fn, 'same', bbox_valid_check=False)
    streamlines = tractogram.streamlines

    # Preprocess the streamlines
    streamlines = select_random_set_of_streamlines(streamlines, 1024)
    streamlines = set_number_of_points(streamlines, 100)
    streamlines = np.array(streamlines)
    if len(streamlines) < 1024:
        temp_streamlines = np.zeros((1024, 100, 3))
        temp_streamlines[:streamlines.shape[0],:streamlines.shape[1], :streamlines.shape[2]] = streamlines
        streamlines = np.float32(temp_streamlines)
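    # 1024 streamlines x 100 points x 3 coordinates == 32 x 32 x 300: pack the
    # streamlines into an image-like tensor (assuming the downstream network
    # expects 300 channels of 32 x 32).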
    streamlines = np.reshape(streamlines, (32, 32, 300))

    #tractogram = (tractogram - np.min(tractogram)) / (np.max(tractogram) - np.min(tractogram))
    tractogram = torch.from_numpy(streamlines)
    tractogram = tractogram.permute(2, 0, 1) # channels first for pytorch

    return [tom, tractogram]
Example 7
def load_ft(tract_path, nii_file):
    from dipy.io.streamline import load_trk, Space

    streams = load_trk(tract_path, nii_file, Space.RASMM)
    streamlines = streams.get_streamlines_copy()

    return streamlines
Example 8
def test_io_streamline():
    with InTemporaryDirectory():
        fname = 'test.trk'
        affine = np.eye(4)

        # Test save
        save_trk(fname, streamlines, affine, vox_size=np.array([2, 1.5, 1.5]), shape=np.array([50, 50, 50]))
        tfile = nib.streamlines.load(fname)
        npt.assert_array_equal(affine, tfile.affine)
        npt.assert_array_equal(np.array([2, 1.5, 1.5]), tfile.header.get('voxel_sizes'))
        npt.assert_array_equal(np.array([50, 50, 50]), tfile.header.get('dimensions'))
        npt.assert_equal(len(tfile.streamlines), len(streamlines))
        npt.assert_array_almost_equal(tfile.streamlines[1], streamline, decimal=4)

        # Test basic save
        save_trk(fname, streamlines, affine)
        tfile = nib.streamlines.load(fname)
        npt.assert_array_equal(affine, tfile.affine)
        npt.assert_equal(len(tfile.streamlines), len(streamlines))
        npt.assert_array_almost_equal(tfile.streamlines[1], streamline, decimal=5)

        # Test Load
        local_streamlines, hdr = load_trk(fname)
        npt.assert_equal(len(local_streamlines), len(streamlines))
        for arr1, arr2 in zip(local_streamlines, streamlines):
            npt.assert_allclose(arr1, arr2)
Example 9
def path_length(streamlines=None, track_path=None):

    if streamlines is not None:
        return list(length(streamlines))
    else:
        streams, hdr = load_trk(track_path)
        return list(length(streams))
Example 10
def eval_method(name=None, method=None, track_path=None, data_path=None):

    if track_path is None:
        track_path = './Result/Track/tractogram_' + method + '_' + name + '.trk'
    if data_path is None:
        data_path = './data/DWI/' + name + '/'

    if not op.exists(track_path):
        print('no tracking')
        return 0

    from dipy.io.gradients import read_bvals_bvecs
    from dipy.io.image import load_nifti_data, load_nifti
    from dipy.core.gradients import gradient_table

    data, affine, hardi_img = load_nifti(data_path + 'norm.nii.gz',
                                         return_img=True)
    print(data.shape)
    labels = load_nifti_data(data_path + 'seg.nii.gz')
    # t1_data = load_nifti_data('./data/tanenci_20170601/b0.nii.gz')
    bvals, bvecs = read_bvals_bvecs(data_path + 'DWI.bval',
                                    data_path + 'DWI.bvec')
    gtab = gradient_table(bvals, bvecs)

    # Read the candidates from file in voxel space:
    candidate_sl_sft = load_trk(track_path, 'same', bbox_valid_check=False)
    candidate_sl_sft.to_vox()
    candidate_sl = candidate_sl_sft.streamlines

    print('loading finished, begin weighting')

    fiber_model = life.FiberModel(gtab)
    inv_affine = np.linalg.inv(hardi_img.affine)
    fiber_fit = fiber_model.fit(data,
                                reduct(candidate_sl, data[:, :, :, 0]),
                                affine=np.eye(4))

    print('weighting finished, begin prediction')

    beta_baseline = np.zeros(fiber_fit.beta.shape[0])
    pred_weighted = np.reshape(
        opt.spdot(fiber_fit.life_matrix, beta_baseline),
        (fiber_fit.vox_coords.shape[0], np.sum(~gtab.b0s_mask)))

    model_predict = fiber_fit.predict()
    model_error = model_predict - fiber_fit.data
    model_rmse = np.sqrt(np.mean(model_error[:, 10:]**2, -1))
    #print('model_rmse:', model_rmse.shape)

    vol_model = np.zeros(data.shape[:3]) * np.nan
    vol_model[fiber_fit.vox_coords[:, 0], fiber_fit.vox_coords[:, 1],
              fiber_fit.vox_coords[:, 2]] = model_rmse

    #print('error:', np.sum(vol_model) / model_rmse.shape[0])

    return np.sum(model_rmse) / model_rmse.shape[0], vol_model, affine
Example 11
def read_sl(fname):
    """
    Reads streamlines from file.
    """
    streams, hdr = load_trk(fname)
    sl = Streamlines(streams)

    return sl
Example 13
def streamline_registration(moving,
                            static,
                            n_points=100,
                            native_resampled=False):
    """
    Register two collections of streamlines ('bundles') to each other

    Parameters
    ----------
    moving, static : lists of 3 by n, or str
        The two bundles to be registered. Given either as lists of arrays with
        3D coordinates, or strings containing full paths to these files.

    n_points : int, optional
        How many points to resample to. Default: 100.

    native_resampled : bool, optional
        Whether to return the moving bundle in the original space, but
        resampled in the static space to n_points.

    Returns
    -------
    aligned : list
        Streamlines from the moving group, moved to be closely matched to
        the static group.

    matrix : array (4, 4)
        The affine transformation that takes us from 'moving' to 'static'
    """
    # Load the streamlines, if you were given a file-name
    if isinstance(moving, str):
        moving = load_trk(moving, 'same', bbox_valid_check=False).streamlines
    if isinstance(static, str):
        static = load_trk(static, 'same', bbox_valid_check=False).streamlines

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=set_number_of_points(static, n_points),
                       moving=set_number_of_points(moving, n_points))

    aligned = srm.transform(moving)
    if native_resampled:
        aligned = set_number_of_points(aligned, n_points)
        aligned = transform_tracking_output(aligned, np.linalg.inv(srm.matrix))

    return aligned, srm.matrix
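
A brief usage sketch with hypothetical file names (file paths and
already-loaded streamline lists are both accepted):

# Hypothetical paths, for illustration only.
aligned, mat = streamline_registration('moving_CST.trk', 'static_CST.trk')
print(mat.shape)  # (4, 4) affine taking 'moving' into the 'static' space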
Example 14
def read_trk(fname):
    r""" Read trk file

    :param fname: str,
        The file name to read
    :return: list
        The streamlines contained in the file
    """

    return load_trk(fname)[0]
Example 15
def load_tracts(fn):
    print('Loading streamlines...')

    # Loaded into 'rasmm' space by default
    # According to: https://nipy.org/nibabel/reference/nibabel.trackvis.html
    #               'rasmm' = "points are expressed in mm space according to the affine."
    # TODO PROBLEM: ************* NOT SURE IF I SHOULD BE USING 'RASMM' or 'VOXMM'
    tract = load_trk(fn, 'same', bbox_valid_check=False)
    streamlines = tract.streamlines
    return streamlines
Example 16
def transform_bundles(wb_tracts_name):
    atlas_file, all_bundles_files = get_bundle_atlas_hcp842()
    sft_atlas = load_trk(atlas_file, "same", bbox_valid_check=False)
    atlas = sft_atlas.streamlines
    sft_target = load_trk(wb_tracts_name, "same", bbox_valid_check=False)

    target = sft_target.streamlines
    #show_atlas_target_graph(atlas, target,out_path=folder_name+r'\try_atlas_target',interactive=True)
    atlas = set_number_of_points(atlas, 20)
    target = set_number_of_points(target, 20)
    moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
        atlas,
        target,
        x0='affine',
        verbose=True,
        progressive=True,
        rng=np.random.RandomState(1984))
    #np.save("slf_L_transform.npy", transform)
    #show_atlas_target_graph(atlas, moved,out_path=r'',interactive=True)

    return moved, target
Example 17
def load_ft():
    from dipy.io.streamline import load_trk
    from dipy.viz import window, actor, colormap as cmap

    streams, hdr = load_trk(tract_name)
    streamlines = Streamlines(streams)

    streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)
Example 18
def ROI_atlas_dipy(atlasobj, track_path):
    trk = load_trk(track_path, 'same')
    streamlines = trk.streamlines
    streamlines = reduct(streamlines)
    atlas = atlasobj.get_data(2)
    roi_feature = np.zeros(atlasobj.count)
    for i in range(1, atlasobj.count + 1):
        ROI_mask = atlas == i
        cc_streamlines = utils.target(streamlines, trk.affine, ROI_mask)
        cc_streamlines = Streamlines(cc_streamlines)
        roi_feature[i - 1] = len(cc_streamlines)
        print('roi ', i, ' finished!', atlasobj.count, 'total')
    return roi_feature
Example 19
def save_as_trk(output, fn):
    output = OutputToStreamlines(output)

    ref_trk = load_trk(
        '../../DATASETS/TRACTSEG_105_SUBJECTS/tractograms/672756/tracts/CST_left.trk',
        'same',
        bbox_valid_check=False)

    sls = []
    for item in output:
        sls.append(item)
    ref_trk.streamlines = sls
    save_trk(ref_trk, fn + '.trk', bbox_valid_check=False)
Example 20
def read_stanford_hardi_tractography():
    """
    Reads a minimal tractography from the Stanford dataset.
    """
    files, folder = fetch_stanford_hardi_tractography()
    files_dict = {}
    files_dict['mapping.nii.gz'] = nib.load(
        op.join(afq_home, 'stanford_hardi_tractography', 'mapping.nii.gz'))

    files_dict['tractography_subsampled.trk'], _ = load_trk(
        op.join(afq_home, 'stanford_hardi_tractography',
                'tractography_subsampled.trk'))
    return files_dict
Example 21
def load_ft(tract_path, nii_ref):
    from dipy.io.streamline import load_trk, load_tck, Space

    if tract_path.endswith('.trk'):
        streams = load_trk(tract_path, nii_ref, Space.RASMM)
    elif tract_path.endswith('.tck'):
        streams = load_tck(tract_path, nii_ref, Space.RASMM)
    else:
        raise ValueError("Couldn't recognize streamline file type")

    streamlines = streams.get_streamlines_copy()

    return streamlines
Example 22
def run_recobundles(input_folder, atlas_file, ground_truth_folder,
                    output_folder):
    print('Running Recobundles in ' + input_folder)

    # make a folder to save output
    try:
        Path(output_folder).mkdir(parents=True, exist_ok=True)
    except OSError:
        print('Could not create output dir. Aborting...')
        return

    # Uncomment for first exemplary use
    # target_file, target_folder = fetch_target_tractogram_hcp()
    # atlas_file, atlas_folder = fetch_bundle_atlas_hcp842()

    # target_file = get_target_tractogram_hcp()

    target_file = input_folder + 'whole_brain.trk'

    # use this line to select tracts if necessary
    sel_tracts = tracts
    # sel_bundle_paths = [data_path + 'tractseg/599469/tracts/AF_left.trk']
    # print(sel_bundle_paths)
    sft_atlas = load_trk(atlas_file, 'same', bbox_valid_check=False)
    atlas = sft_atlas.streamlines

    sft_target = load_trk(target_file, 'same', bbox_valid_check=False)
    target = sft_target.streamlines
    target_header = create_tractogram_header(target_file,
                                             *sft_atlas.space_attributes)

    target, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
        target, atlas, x0='affine', progressive=True)
    print(transform)
    sft_rec = StatefulTractogram(
        nib.streamlines.array_sequence.concatenate([target, atlas], 0),
        target_header, Space.RASMM)
    save_trk(sft_rec, output_folder + 'test.trk', bbox_valid_check=False)
Example 23
def save_roisubset(trkfile, roislist, roisexcel, labelmask):
    # Loads the trk file, the list of ROI groups, the full structure => label
    # correspondence and the label mask, then saves the tracts traversing each region

    trkdata = load_trk(trkfile, 'same')
    trkdata.to_vox()
    if hasattr(trkdata, 'space_attribute'):
        header = trkdata.space_attribute
    elif hasattr(trkdata, 'space_attributes'):
        header = trkdata.space_attributes
    trkstreamlines = trkdata.streamlines
    import pandas as pd
    df = pd.read_excel(roisexcel, sheet_name='Sheet1')
    df['Structure'] = df['Structure'].str.lower()

    for rois in roislist:

        labelslist = []
        for roi in rois:
            rslt_df = df.loc[df['Structure'] == roi.lower()]
            if rois[0].lower() == "wholebrain" or rois[0].lower() == "brain":
                labelslist = None
            else:
                labelslist = np.concatenate((labelslist, np.array(rslt_df.index2)))
        print(labelslist)
        if isempty(labelslist) and roi.lower() != "wholebrain" and roi.lower() != "brain":
            txt = "Warning: Unrecognized roi, will take whole brain as ROI. The roi specified was: " + roi
            print(txt)

        if isempty(labelslist):
            if labelmask is None:
                roimask = (fdwi_data[:, :, :, 0] > 0)
            else:
                roimask = np.where(labelmask == 0, False, True)
        else:
            if labelmask is None:
                raise FileNotFoundError("labels requested but labels file could not be found at " + dwipath + " for subject " + subject)
            roimask = np.zeros(np.shape(labelmask), dtype=int)
            for label in labelslist:
                roimask = roimask + (labelmask == label)

        trkroipath = trkfile.replace(".trk", "_" + "_".join(rois) + ".trk")
        if not os.path.exists(trkroipath):
            affinetemp = np.eye(4)
            trkroistreamlines = target(trkstreamlines, affinetemp, roimask, include=True, strict="longstring")
            trkroistreamlines = Streamlines(trkroistreamlines)
            myheader = create_tractogram_header(trkroipath, *header)
            roi_sl = lambda: (s for s in trkroistreamlines)
            tract_save.save_trk_heavy_duty(trkroipath, streamlines=roi_sl,
                                           affine=header[0], header=myheader)
Example 24
def read_sl_mni(fname):
    """
    Reads streamlines from file in MNI space.
    """
    streams, hdr = load_trk(fname)
    sl = Streamlines(streams)
    sl_mni = []
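    # The hard-coded mapping below appears to invert a standard MNI152 1 mm
    # affine: flip x and offset by (90, 126, 72), taking RAS mm coordinates to
    # template voxel indices (an inference from the constants, not documented
    # in the original).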
    for i in range(len(sl)):
        tmp = sl[i]
        tmp2 = np.zeros([len(tmp), 3])
        tmp2[:, 0] = tmp[:, 0] * -1 + 90
        tmp2[:, 1] = tmp[:, 1] + 126
        tmp2[:, 2] = tmp[:, 2] + 72
        sl_mni.append(np.round(tmp2))
    return sl_mni
Example 25
def mean_bundle_weight(folder_name, bundle_file_name):

    if not os.path.exists(bundle_file_name):
        mean_bundle = np.nan
    else:
        bundle = load_trk(bundle_file_name, "same", bbox_valid_check=False)
        bundle = bundle.streamlines
        bvec_file = load_dwi_files(folder_name)[6]
        mean_vols = weighting_streamlines(folder_name,
                                          bundle,
                                          bvec_file,
                                          weight_by='AxPasi7')
        mean_bundle = np.nanmean(mean_vols)

    return mean_bundle
Example 26
def load_trk_tensor(filename):
    streamlines = load_trk(filename)

    # just the coordinates
    streamlines = streamlines[0]
    the_one_true_shape = streamlines[0].shape

    # check lengths
    for s in streamlines:
        if s.shape != the_one_true_shape:
            raise ValueError(
                "Streamlines have not been resampled to same length, aborting."
            )
    return np.stack(streamlines)
Example 27
def ROI_atlas(atlasobj, track_path=None, streamlines=None):
    atlas = atlasobj.get_data(2)
    if streamlines is None:
        sft = load_trk(track_path, 'same')
        sft.to_vox()
        streamlines = sft.streamlines
    streamlines = reduct(streamlines)

    roi_feature = np.zeros(atlasobj.count)

    for i in range(1, atlasobj.count + 1):
        ROI_mask = atlas == i
        cc_streamlines = select_by_ROI(ROI_mask=ROI_mask,
                                       streamlines=streamlines)
        roi_feature[i - 1] = len(cc_streamlines)
        print('roi ', i, ' finished! ans = ', roi_feature[i - 1])
    return roi_feature
Example 28
def trktotck(trk_path, overwrite=False):

    import warnings

    if nib.streamlines.detect_format(trk_path) is not nib.streamlines.TrkFile:
        warnings.warn("Skipping non TRK file: '{}'".format(trk_path))
        return

    output_filename = trk_path[:-4] + '.tck'
    if os.path.isfile(output_filename) and not overwrite:
        warnings.warn("Skipping existing file: '{}'. Set overwrite to True".format(output_filename))
        return

    # Conversion is done directly through nibabel's streamlines API.
    trk = nib.streamlines.load(trk_path)
    nib.streamlines.save(trk.tractogram, output_filename)
Example 29
def make_atlas():
    print('Making atlas...')
    trk_list = []
    sft = None
    for t in tracts:
        print('\r' + t + '      ', end='')
        sft = load_trk(tractseg_dir + training_ids[1] + '/tracts/' + t +
                       '.trk',
                       'same',
                       bbox_valid_check=False)
        trk_list.append(sft.streamlines)
    out_file = training_paths[1] + 'T1w/Diffusion/atlas.trk'
    target_header = create_tractogram_header(out_file, *sft.space_attributes)
    sft_rec = StatefulTractogram(
        nib.streamlines.array_sequence.concatenate(trk_list, 0), target_header,
        Space.RASMM)
    save_trk(sft_rec, out_file, bbox_valid_check=False)
Example 30
def read_stanford_hardi_tractography():
    """
    Reads a minimal tractography from the Stanford dataset.
    """
    files, folder = fetch_stanford_hardi_tractography()
    files_dict = {}
    files_dict['mapping.nii.gz'] = nib.load(
        op.join(afq_home, 'stanford_hardi_tractography', 'mapping.nii.gz'))

    files_dict['tractography_subsampled.trk'] = load_trk(
        op.join(afq_home, 'stanford_hardi_tractography',
                'tractography_subsampled.trk'),
        nib.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4)),
        bbox_valid_check=False,
        trk_header_check=False).streamlines

    return files_dict
Example 31
def main():

    parser = build_argparser()
    args = parser.parse_args()

    # Load the streamlines from COMMIT
    streamlines_name = args.input + '/dictionary_TRK_fibers.trk'
    print(streamlines_name)

    streams, hdr = load_trk(streamlines_name)
    streamlines = Streamlines(streams)

    # Load weights
    weights_name = args.input + '/Results_StickZeppelinBall/xic.txt'

    # Check lines counts of both text files
    def file_len(fname):
        with open(fname) as f:
            for i, l in enumerate(f):
                pass
        return i + 1

    print "Number of lines (streamlines) for file with SZB (COMMIT framework) weights is {}".format(
        file_len(weights_name))

    for j, k in enumerate(streamlines):
        pass
    print j + 1
    print "Number of streamlines from COMMIT framework is".format(j)

    # Iterate simultaneously through both files and extract non-zero weights
    # and the corresponding streamlines
    filtered_streamlines = []
    with open(args.input + '/' + args.subject + '_SZB_filtered_weights.txt',
              "w+") as fil_wgh:
        for line_from_weights, line_from_streamlines in zip(
                open(weights_name), streamlines):
            if float(line_from_weights) > 0:
                filtered_streamlines.append(line_from_streamlines)
                fil_wgh.write(line_from_weights)

    save_trk(
        args.input + '/' + args.subject + '_' + args.tracks_type + '_' +
        args.model_type + '_streamlines.trk', filtered_streamlines, np.eye(4))
Example 32
    def run(self, streamline_files, labels_files,
            out_dir='',
            out_bundle='recognized_orig.trk'):
        """ Extract bundles using existing indices (labels)

        Parameters
        ----------
        streamline_files : string
            The path of streamline files where you want to recognize bundles
        labels_files : string
            The path of model bundle files
        out_dir : string, optional
            Output directory (default input file directory)
        out_bundle : string, optional
            Recognized bundle in the space of the model bundle
            (default 'recognized_orig.trk')

        References
        ----------
        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
         bundles using local and global streamline-based registration and
         clustering, Neuroimage, 2017.

        """
        logging.info('### Labels to Bundles ###')

        io_it = self.get_io_iterator()
        for sf, lb, out_bundle in io_it:

            logging.info(sf)
            streamlines, header = load_trk(sf)
            logging.info(lb)
            location = np.load(lb)
            logging.info('Saving output files ...')
            save_trk(out_bundle, streamlines[location], np.eye(4))
            logging.info(out_bundle)
Example 33
def bundle_analysis(model_bundle_folder, bundle_folder, orig_bundle_folder,
                    metric_folder, group, subject, no_disks=100,
                    out_dir=''):
    """
    Applies statistical analysis on bundles and saves the results
    in a directory specified by ``out_dir``.

    Parameters
    ----------
    model_bundle_folder : string
        Path to the input model bundle files. This path may contain
        wildcards to process multiple inputs at once.
    bundle_folder : string
        Path to the input bundle files in common space. This path may
        contain wildcards to process multiple inputs at once.
    orig_bundle_folder : string
        Path to the input bundle files in native space. This path may
        contain wildcards to process multiple inputs at once.
    metric_folder : string
        Path to the input dti metric or/and peak files. It will be used as
        metric for statistical analysis of bundles.
    group : string
        what group subject belongs to e.g. control or patient
    subject : string
        subject id e.g. 10001
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)
    out_dir : string, optional
        Output directory (default input file directory)

    References
    ----------
    .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
    E. Garyfallidis, Bundle Analytics: a computational and statistical
    analyses framework for tractometric studies, Proceedings of:
    International Society of Magnetic Resonance in Medicine (ISMRM),
    Montreal, Canada, 2019.

    """

    dt = dict()

    mb = os.listdir(model_bundle_folder)
    mb.sort()
    bd = os.listdir(bundle_folder)
    bd.sort()
    org_bd = os.listdir(orig_bundle_folder)
    org_bd.sort()
    n = len(org_bd)

    for io in range(n):
        mbundles, _ = load_trk(os.path.join(model_bundle_folder, mb[io]))
        bundles, _ = load_trk(os.path.join(bundle_folder, bd[io]))
        orig_bundles, _ = load_trk(os.path.join(orig_bundle_folder,
                                   org_bd[io]))

        mbundle_streamlines = set_number_of_points(mbundles,
                                                   nb_points=no_disks)

        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=25., metric=metric)
        clusters = qb.cluster(mbundle_streamlines)
        centroids = Streamlines(clusters.centroids)

        print('Number of centroids ', len(centroids.data))
        print('Model bundle ', mb[io])
        print('Number of streamlines in bundle in common space ',
              len(bundles))
        print('Number of streamlines in bundle in original space ',
              len(orig_bundles))

        _, indx = cKDTree(centroids.data, 1,
                          copy_data=True).query(bundles.data, k=1)

        metric_files_names = os.listdir(metric_folder)
        _, affine = load_nifti(os.path.join(metric_folder, "fa.nii.gz"))

        affine_r = np.linalg.inv(affine)
        transformed_orig_bundles = transform_streamlines(orig_bundles,
                                                         affine_r)

        for mn in range(0, len(metric_files_names)):

            ind = np.array(indx)
            fm = metric_files_names[mn][:2]
            bm = mb[io][:-4]
            dt = dict()
            metric_name = os.path.join(metric_folder,
                                       metric_files_names[mn])

            if metric_files_names[mn][2:] == '.nii.gz':
                metric, _ = load_nifti(metric_name)

                dti_measures(transformed_orig_bundles, metric, dt, fm,
                             bm, subject, group, ind, out_dir)

            else:
                fm = metric_files_names[mn][:3]
                metric = load_peaks(metric_name)
                peak_values(bundles, metric, dt, fm, bm, subject, group,
                            ind, out_dir)
Example 34
"""

import numpy as np
from dipy.data import get_fnames
from dipy.io.streamline import load_trk, save_trk
from dipy.tracking.streamline import Streamlines

"""
1. Read/write streamline files with DIPY.
"""

fname = get_fnames('fornix')
print(fname)

# Read Streamlines
streams, hdr = load_trk(fname)
streamlines = Streamlines(streams)

# Save Streamlines
save_trk("my_streamlines.trk", streamlines=streamlines, affine=np.eye(4))


"""
2. We also work on our HDF5 based file format which can read/write massive
   datasets (as big as the size of your free disk space). With `Dpy` we can
   support

  * direct indexing from the disk
  * memory usage always low
  * extensions to include different arrays in the same file
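
  (The fragment is truncated here; what follows is a minimal sketch of the
  `Dpy` read/write cycle it describes, assuming the `dipy.io.dpy.Dpy` API.)
"""

from dipy.io.dpy import Dpy

# Write the streamlines to an HDF5-backed .dpy file.
dpy_out = Dpy('my_streamlines.dpy', mode='w')
dpy_out.write_tracks(streamlines)
dpy_out.close()

# Read back only selected streamlines, with direct indexing from disk.
dpy_in = Dpy('my_streamlines.dpy', mode='r')
some_streamlines = dpy_in.read_tracksi([0, 10, 20])
dpy_in.close()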
Example 35
    def run(self, streamline_files, model_bundle_files,
            greater_than=50, less_than=1000000,
            no_slr=False, clust_thr=15.,
            reduction_thr=15.,
            reduction_distance='mdf',
            model_clust_thr=2.5,
            pruning_thr=8.,
            pruning_distance='mdf',
            slr_metric='symmetric',
            slr_transform='similarity',
            slr_matrix='small',
            refine=False, r_reduction_thr=12.,
            r_pruning_thr=6., no_r_slr=False,
            out_dir='',
            out_recognized_transf='recognized.trk',
            out_recognized_labels='labels.npy'):
        """ Recognize bundles

        Parameters
        ----------
        streamline_files : string
            The path of streamline files where you want to recognize bundles
        model_bundle_files : string
            The path of model bundle files
        greater_than : int, optional
            Keep streamlines that have length greater than
            this value (default 50) in mm.
        less_than : int, optional
            Keep streamlines that have length less than this value
            (default 1000000) in mm.
        no_slr : bool, optional
            Don't enable local Streamline-based Linear
            Registration (default False).
        clust_thr : float, optional
            MDF distance threshold for all streamlines (default 15)
        reduction_thr : float, optional
            Reduce search space by (mm) (default 15)
        reduction_distance : string, optional
            Reduction distance type can be mdf or mam (default mdf)
        model_clust_thr : float, optional
            MDF distance threshold for the model bundles (default 2.5)
        pruning_thr : float, optional
            Pruning after matching (default 8).
        pruning_distance : string, optional
            Pruning distance type can be mdf or mam (default mdf)
        slr_metric : string, optional
            Options are None, symmetric, asymmetric or diagonal
            (default symmetric).
        slr_transform : string, optional
            Transformation allowed. translation, rigid, similarity or scaling
            (Default 'similarity').
        slr_matrix : string, optional
            Options are 'nano', 'tiny', 'small', 'medium', 'large', 'huge'
            (default 'small')
        refine : bool, optional
            Enable refinement of the recognized bundle (default False)
        r_reduction_thr : float, optional
            Refine reduce search space by (mm) (default 12)
        r_pruning_thr : float, optional
            Refine pruning after matching (default 6).
        no_r_slr : bool, optional
            Don't enable Refine local Streamline-based Linear
            Registration (default False).
        out_dir : string, optional
            Output directory (default input file directory)
        out_recognized_transf : string, optional
            Recognized bundle in the space of the model bundle
            (default 'recognized.trk')
        out_recognized_labels : string, optional
            Indices of recognized bundle in the original tractogram
            (default 'labels.npy')

        References
        ----------
        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
         bundles using local and global streamline-based registration and
         clustering, Neuroimage, 2017.
        """
        slr = not no_slr
        r_slr = not no_r_slr

        bounds = [(-30, 30), (-30, 30), (-30, 30),
                  (-45, 45), (-45, 45), (-45, 45),
                  (0.8, 1.2), (0.8, 1.2), (0.8, 1.2)]

        slr_matrix = slr_matrix.lower()
        if slr_matrix == 'nano':
            slr_select = (100, 100)
        if slr_matrix == 'tiny':
            slr_select = (250, 250)
        if slr_matrix == 'small':
            slr_select = (400, 400)
        if slr_matrix == 'medium':
            slr_select = (600, 600)
        if slr_matrix == 'large':
            slr_select = (800, 800)
        if slr_matrix == 'huge':
            slr_select = (1200, 1200)

        slr_transform = slr_transform.lower()
        if slr_transform == 'translation':
            bounds = bounds[:3]
        if slr_transform == 'rigid':
            bounds = bounds[:6]
        if slr_transform == 'similarity':
            bounds = bounds[:7]
        if slr_transform == 'scaling':
            bounds = bounds[:9]

        logging.info('### RecoBundles ###')

        io_it = self.get_io_iterator()

        t = time()
        logging.info(streamline_files)
        streamlines, header = load_trk(streamline_files)

        logging.info(' Loading time %0.3f sec' % (time() - t,))

        rb = RecoBundles(streamlines, greater_than=greater_than,
                         less_than=less_than)

        for _, mb, out_rec, out_labels in io_it:
            t = time()
            logging.info(mb)
            model_bundle, _ = load_trk(mb)
            logging.info(' Loading time %0.3f sec' % (time() - t,))
            logging.info("model file = ")
            logging.info(mb)

            recognized_bundle, labels = \
                rb.recognize(
                    model_bundle,
                    model_clust_thr=model_clust_thr,
                    reduction_thr=reduction_thr,
                    reduction_distance=reduction_distance,
                    pruning_thr=pruning_thr,
                    pruning_distance=pruning_distance,
                    slr=slr,
                    slr_metric=slr_metric,
                    slr_x0=slr_transform,
                    slr_bounds=bounds,
                    slr_select=slr_select,
                    slr_method='L-BFGS-B')

            if refine:

                if len(recognized_bundle) > 1:

                    # affine
                    x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1, 0, 0, 0])
                    affine_bounds = [(-30, 30), (-30, 30), (-30, 30),
                                     (-45, 45), (-45, 45), (-45, 45),
                                     (0.8, 1.2), (0.8, 1.2), (0.8, 1.2),
                                     (-10, 10), (-10, 10), (-10, 10)]

                    recognized_bundle, labels = \
                        rb.refine(
                            model_bundle,
                            recognized_bundle,
                            model_clust_thr=model_clust_thr,
                            reduction_thr=r_reduction_thr,
                            reduction_distance=reduction_distance,
                            pruning_thr=r_pruning_thr,
                            pruning_distance=pruning_distance,
                            slr=r_slr,
                            slr_metric=slr_metric,
                            slr_x0=x0,
                            slr_bounds=affine_bounds,
                            slr_select=slr_select,
                            slr_method='L-BFGS-B')

            if len(labels) > 0:
                ba, bmd = rb.evaluate_results(
                             model_bundle, recognized_bundle,
                             slr_select)

                logging.info("Bundle adjacency Metric {0}".format(ba))
                logging.info("Bundle Min Distance Metric {0}".format(bmd))

            save_trk(out_rec, recognized_bundle, np.eye(4))

            logging.info('Saving output files ...')
            np.save(out_labels, np.array(labels))
            logging.info(out_rec)
            logging.info(out_labels)
Example 36
if not op.exists('lr-superiorfrontal.trk'):
    from streamline_tools import *
else:
    # We'll need to know where the corpus callosum is from these variables:
    from dipy.data import (read_stanford_labels,
                           fetch_stanford_t1,
                           read_stanford_t1)
    hardi_img, gtab, labels_img = read_stanford_labels()
    labels = labels_img.get_data()
    cc_slice = labels == 2
    fetch_stanford_t1()
    t1 = read_stanford_t1()
    t1_data = t1.get_data()
    data = hardi_img.get_data()
# Read the candidates from file in voxel space:

candidate_sl, hdr = load_trk('lr-superiorfrontal.trk')
# candidate_sl = [s[0] for s in nib.trackvis.read('lr-superiorfrontal.trk',
#                                                  points_space='voxel')[0]]

"""

The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):

"""


"""

Let's visualize the initial candidate group of streamlines in 3D, relative to
the anatomical structure of this brain:
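
(The example's rendering step is truncated here; below is a minimal sketch,
assuming DIPY's `window`/`actor` visualization API and the `candidate_sl` and
`t1_data` variables defined above.)
"""

from dipy.viz import window, actor, colormap as cmap

# Show the candidate streamlines over a slice of the T1 volume.
candidate_streamlines_actor = actor.streamtube(
    candidate_sl, cmap.line_colors(candidate_sl))
vol_actor = actor.slicer(t1_data)

ren = window.Renderer()
ren.add(candidate_streamlines_actor)
ren.add(vol_actor)
window.record(ren, out_path='life_candidates.png', size=(800, 800))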
Example 37
    def run(self, static_files, moving_files,
            x0='affine',
            rm_small_clusters=50,
            qbx_thr=[40, 30, 20, 15],
            num_threads=None,
            greater_than=50,
            less_than=250,
            nb_pts=20,
            progressive=True,
            out_dir='',
            out_moved='moved.trk',
            out_affine='affine.txt',
            out_stat_centroids='static_centroids.trk',
            out_moving_centroids='moving_centroids.trk',
            out_moved_centroids='moved_centroids.trk'):
        """ Streamline-based linear registration.

        For efficiency we apply the registration on cluster centroids and
        remove small clusters.

        Parameters
        ----------
        static_files : string
        moving_files : string
        x0 : string, optional
            rigid, similarity or affine transformation model (default affine)
        rm_small_clusters : int, optional
            Remove clusters that have fewer than `rm_small_clusters`
            streamlines (default 50)
        qbx_thr : variable int, optional
            Thresholds for QuickBundlesX (default [40, 30, 20, 15])
        num_threads : int, optional
            Number of threads. If None (default) then all available threads
            will be used. Only metrics using OpenMP will use this variable.
        greater_than : int, optional
            Keep streamlines that have length greater than
            this value (default 50)
        less_than : int, optional
            Keep streamlines that have length less than this value
            (default 250)
        nb_pts : int, optional
            Number of points for discretizing each streamline (default 20)
        progressive : boolean, optional
            (default True)
        out_dir : string, optional
            Output directory (default input file directory)
        out_moved : string, optional
            Filename of moved tractogram (default 'moved.trk')
        out_affine : string, optional
            Filename of affine for SLR transformation (default 'affine.txt')
        out_stat_centroids : string, optional
            Filename of static centroids (default 'static_centroids.trk')
        out_moving_centroids : string, optional
            Filename of moving centroids (default 'moving_centroids.trk')
        out_moved_centroids : string, optional
            Filename of moved centroids (default 'moved_centroids.trk')

        Notes
        -----
        The order of operations is the following. First short or long
        streamlines are removed. Second the tractogram or a random selection
        of the tractogram is clustered with QuickBundlesX. Then SLR
        [Garyfallidis15]_ is applied.

        References
        ----------
        .. [Garyfallidis15] Garyfallidis et al. "Robust and efficient linear
        registration of white-matter fascicles in the space of
        streamlines", NeuroImage, 117, 124--140, 2015

        .. [Garyfallidis14] Garyfallidis et al., "Direct native-space fiber
        bundle alignment for group comparisons", ISMRM, 2014.

        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
        bundles using local and global streamline-based registration
        and clustering, Neuroimage, 2017.
        """
        io_it = self.get_io_iterator()

        logging.info("QuickBundlesX clustering is in use")
        logging.info('QBX thresholds {0}'.format(qbx_thr))

        for static_file, moving_file, out_moved_file, out_affine_file, \
                static_centroids_file, moving_centroids_file, \
                moved_centroids_file in io_it:

            logging.info('Loading static file {0}'.format(static_file))
            logging.info('Loading moving file {0}'.format(moving_file))

            static, static_header = load_trk(static_file)
            moving, moving_header = load_trk(moving_file)

            moved, affine, centroids_static, centroids_moving = \
                slr_with_qbx(
                    static, moving, x0, rm_small_clusters=rm_small_clusters,
                    greater_than=greater_than, less_than=less_than,
                    qbx_thr=qbx_thr)

            logging.info('Saving output file {0}'.format(out_moved_file))
            save_trk(out_moved_file, moved, affine=np.eye(4),
                     header=static_header)

            logging.info('Saving output file {0}'.format(out_affine_file))
            np.savetxt(out_affine_file, affine)

            logging.info('Saving output file {0}'
                         .format(static_centroids_file))
            save_trk(static_centroids_file, centroids_static, affine=np.eye(4),
                     header=static_header)

            logging.info('Saving output file {0}'
                         .format(moving_centroids_file))
            save_trk(moving_centroids_file, centroids_moving,
                     affine=np.eye(4),
                     header=static_header)

            centroids_moved = transform_streamlines(centroids_moving, affine)

            logging.info('Saving output file {0}'
                         .format(moved_centroids_file))
            save_trk(moved_centroids_file, centroids_moved, affine=np.eye(4),
                     header=static_header)
Example 38
"""

if not (op.exists("CST_L.trk") and
        op.exists("AF_L.trk") and
        op.exists("slr_transform.npy")):
    import bundle_extraction

"""

Either way, we can use the `dipy.io` API to read in the bundles from file.
`load_trk` returns both the streamlines, as well as header information.

"""

from dipy.io.streamline import load_trk
cst_l, hdr = load_trk("CST_L.trk")
af_l, hdr = load_trk("AF_L.trk")

transform = np.load("slr_transform.npy")

"""

In the next step, we need to make sure that all the streamlines in each bundle
are oriented the same way. For example, for the CST, we want to make sure that
all the bundles have their cortical termination at one end of the streamline.
This is so that when we later extract values from a volume, we won't have
different streamlines going in opposite directions.

To orient all the streamlines in each bundle, we will create standard
streamlines, by finding the centroids of the left AF and CST bundle models.
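
(A minimal sketch of that orientation step, assuming DIPY's `QuickBundles` and
`orient_by_streamline`; the variable names are illustrative.)
"""

import numpy as np
from dipy.segment.clustering import QuickBundles
from dipy.tracking.streamline import (orient_by_streamline,
                                      set_number_of_points)

# One centroid per bundle serves as the standard streamline.
qb = QuickBundles(np.inf)
cst_l_standard = qb.cluster(set_number_of_points(cst_l, 100)).centroids[0]

# Flip any streamline whose point order disagrees with the standard.
cst_l_oriented = orient_by_streamline(cst_l, cst_l_standard)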