Example 1
def load_gifti_func(path_to_file):
    """
    #Wrapper function to load functional data from
    #a gifti file using nibabel. Returns data in shape
    #<num_verts x num_timepoints>
    """

    gifti_img = nib_load(path_to_file)
    gifti_list = [x.data for x in gifti_img.darrays]
    gifti_data = np.vstack(gifti_list).transpose()

    return gifti_data
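
Throughout these examples, nib_load is nibabel's load function imported under an alias (Example 16 spells this out). A minimal usage sketch for the function above, assuming those imports and a hypothetical GIFTI file name:

import numpy as np
from nibabel import load as nib_load  # alias used throughout these examples

# Hypothetical path; any GIFTI functional file works here
gifti_data = load_gifti_func("sub-01_task-rest_hemi-L.func.gii")
print(gifti_data.shape)  # (num_verts, num_timepoints)
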
Example 2
def _fetch_cammoun_parcellation(template: str, n_regions: int,
                                data_dir: Path) -> List[np.ndarray]:
    """Fetches Cammoun parcellations."""
    key = f"scale{n_regions:03}"
    bunch = nnt_datasets.fetch_cammoun2012(version=template,
                                           data_dir=str(data_dir))
    if template == "fslr32k":
        gifti = [nib_load(file) for file in bunch[key]]
        parcellations = [x.darrays[0].data for x in gifti]
    else:
        parcellations = [read_annot(file)[0] for file in bunch[key]]
    return parcellations
Example 3
def get_mri_js_html(record):
    mri_file = record.mri_file
    if mri_file.name != "":
        if os.path.isfile(mri_file.path) and (mri_file.path.endswith("nii")
                                              or mri_file.path.endswith("gz")):
            img = nib_load(mri_file.path)
            mri_js_html = ni_view_img(img,
                                      colorbar=False,
                                      bg_img=False,
                                      black_bg=True,
                                      cmap='gray')
            return mri_js_html, None
        return None, "ERROR: Either MRI file doesn't exist or doesn't end with .nii or .gz!"
    return None, None
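
Here ni_view_img is presumably nilearn's view_img (from nilearn.plotting) under an alias; the object it returns can be embedded in a page or written out as standalone HTML. A rough sketch under that assumption, with a hypothetical file path:

from nibabel import load as nib_load
from nilearn.plotting import view_img as ni_view_img  # assumed source of the alias

img = nib_load("sub-01_T1w.nii.gz")  # hypothetical path
view = ni_view_img(img, colorbar=False, bg_img=False, black_bg=True, cmap='gray')
view.save_as_html("mri_view.html")   # or pass the view along, as get_mri_js_html does
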
Example 4
def _fetch_glasser_parcellation(template: str,
                                data_dir: Path) -> List[np.ndarray]:
    """Fetches Glasser parcellation."""
    urls = read_data_fetcher_json(
    )["parcellations"]["glasser"][template]["url"]
    filepaths = []
    for i, hemi in enumerate(("lh", "rh")):
        filename = "_".join(("glasser", "360", template, hemi)) + ".label.gii"
        filepaths.append(data_dir / filename)
        _download_file(urls[i], filepaths[i])
    gifti = [nib_load(file) for file in filepaths]
    parcellations = [x.darrays[0].data for x in gifti]
    parcellations[1] = (parcellations[1] + 180) * (parcellations[1] > 0)
    return parcellations
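
The last line offsets right-hemisphere labels by 180 so the two hemispheres do not share label values, while multiplying by (parcellations[1] > 0) keeps unlabeled vertices at 0. A toy illustration of that relabeling with made-up labels:

import numpy as np

rh = np.array([0, 1, 2, 0, 180])   # made-up right-hemisphere labels; 0 = unlabeled
rh_offset = (rh + 180) * (rh > 0)
print(rh_offset)                   # [  0 181 182   0 360] -- background stays 0
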
Example 5
def _fetch_schaefer_parcellation(template: str, n_regions: int,
                                 seven_networks: bool,
                                 data_dir: Path) -> List[np.ndarray]:
    """Fetches Schaefer parcellations."""
    n_networks = 7 if seven_networks else 17
    key = f"{n_regions}Parcels{n_networks}Networks"
    bunch = nnt_datasets.fetch_schaefer2018(version=template,
                                            data_dir=str(data_dir))
    if template == "fslr32k":
        cifti = nib_load(bunch[key])
        parcellation_full = np.squeeze(cifti.get_fdata())
        parcellations = [x for x in np.reshape(parcellation_full, (2, -1))]
    else:
        parcellations = [read_annot(file)[0] for file in bunch[key]]
        parcellations[1][parcellations[1] != 0] += n_regions // 2
    return parcellations
Example 6
def load_gifti_func_to_hdf5(path_to_func_file,
                            loaded_hdf5_file,
                            new_dataset_name,
                            num_verts_in_chunk=1000,
                            compression=None):
    """Function to load gifti data

    Wrapper to nibabel's load function that load's the gifti file and
    return's its data elements

    Parameters
    ----------

    path_to_file : str
        path to gifti file

    Returns
    -------

    gifti_data : np.ndarray
        array with gifti data having shape <n_vertices, n_dimensions>
    """

    gifti_img = nib_load(path_to_func_file)

    gifti_shape = (gifti_img.darrays[0].dims[0], len(gifti_img.darrays))

    #If it is a small gifti file with fewer vertices than the default
    #chunk size, shrink the chunk so it fits within the dataset
    if num_verts_in_chunk > gifti_shape[0]:
        num_verts_in_chunk = gifti_shape[0]

    #Create dataset with/without compression
    if compression is None:
        new_dataset = loaded_hdf5_file.create_dataset(
            new_dataset_name,
            gifti_shape,
            chunks=(num_verts_in_chunk, gifti_shape[1]))
    else:
        new_dataset = loaded_hdf5_file.create_dataset(
            new_dataset_name,
            gifti_shape,
            compression=compression,
            chunks=(num_verts_in_chunk, gifti_shape[1]))
    gifti_list = [x.data for x in gifti_img.darrays]
    new_dataset[...] = np.vstack(gifti_list).transpose()

    return
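
A minimal usage sketch for load_gifti_func_to_hdf5, assuming h5py is available; the file names and dataset name are placeholders:

import h5py

with h5py.File("func_data.hdf5", "w") as hdf5_file:
    load_gifti_func_to_hdf5("sub-01_hemi-L.func.gii",   # hypothetical gifti path
                            hdf5_file,
                            "sub01_lh_func",
                            num_verts_in_chunk=1000,
                            compression="gzip")
    print(hdf5_file["sub01_lh_func"].shape)  # (n_vertices, n_timepoints)
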
Example 7

    def _run_interface(self, runtime):

        # Load the input timeseries
        img = nib_load(self.inputs.in_file)
        data = img.get_fdata()
        aff = img.affine
        hdr = img.header

        # Trim off the equilibrium TRs
        data = self.trim_timeseries(data)

        # Save the output timeseries as NIFTI1_PAIR (for spm .mat registration)
        new_img = Nifti1Pair(data, aff, hdr)
        self.fname = op.splitext(op.basename(self.inputs.in_file))[0] + ".img"
        new_img.to_filename(self.fname)

        return runtime
Example 8
def _fetch_yeo_parcellation(template: str, n_regions: int,
                            data_dir: Path) -> List[np.ndarray]:
    """Fetches Yeo parcellation."""
    filenames = [
        data_dir / f"{template}_{hemi}_yeo{n_regions}.label.gii"
        for hemi in ("lh", "rh")
    ]
    if not all([x.exists() for x in filenames]):
        url = read_data_fetcher_json()["parcellations"]["yeo"]["url"]
        with tempfile.NamedTemporaryFile(suffix=".zip") as f:
            downloaded_file = Path(f.name)
        try:
            _download_file(url, downloaded_file)
            with zipfile.ZipFile(downloaded_file, "r") as zip_ref:
                zip_ref.extractall(data_dir)
        finally:
            downloaded_file.unlink()

    return [nib_load(file).darrays[0].data for file in filenames]
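
The download step above uses NamedTemporaryFile only to obtain a unique .zip path, then fetches the archive and extracts it into data_dir. A standalone sketch of that pattern, with urllib.request.urlretrieve standing in for the module's _download_file helper and a placeholder URL:

import tempfile
import zipfile
import urllib.request
from pathlib import Path

url = "https://example.com/yeo_parcellation.zip"   # placeholder URL
data_dir = Path("parcellations")
data_dir.mkdir(exist_ok=True)

with tempfile.NamedTemporaryFile(suffix=".zip") as f:
    downloaded_file = Path(f.name)                 # unique path; the temp file itself is deleted on exit
try:
    urllib.request.urlretrieve(url, downloaded_file)
    with zipfile.ZipFile(downloaded_file, "r") as zip_ref:
        zip_ref.extractall(data_dir)
finally:
    downloaded_file.unlink(missing_ok=True)        # clean up even if the download failed
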
Example 9
def process_data(each_patient, HM_SLICES, height_size, width_size):
    '''
    :param each_patient: path to one patient's scan
    :param HM_SLICES: target number of slices (z)
    :param height_size: target height of each slice
    :param width_size: target width of each slice
    :return: resized voxel array of shape (HM_SLICES, height_size, width_size)
    '''

    voxels = nib_load(each_patient)
    voxels = np.array(voxels.get_fdata()) # (400, 400, 110) ~ (x, y, z)

    print("original shape",voxels.shape)
    voxels = np.transpose(voxels, (2, 0, 1)) # (110, 400, 400) ~ (z, x, y)
    voxels = np.rot90(voxels, k=1, axes=(1, 2))

    voxels = resize_3D(np.array(voxels), (HM_SLICES, height_size, width_size))

    # new_slices = []
    # slices = [cv2.resize(np.array(each_slice), (IMG_PX_SIZE,IMG_PX_SIZE)) for each_slice in voxels]

    # for slice_chunk in slice_it(slices, HM_SLICES):
    #     new_slices.append(np.mean(slice_chunk,axis=0))

    return np.array(voxels) # I changed for the objects to use only pythonic list
Example 10
    def _read_nifti_img(self, source_path):
        """
        
        :param source_path: 
        :return: list of Image obj 
        """
        nib_img = nib_load(source_path)
        nib_img = np.asanyarray(nib_img.dataobj)
        if self._is2D:
            nib_img = np.rot90(nib_img, k=1, axes=(0, 1))
            return Image.fromarray(nib_img)
        else:
            if self._view == "axial" or self._view == "transaxial":
                nib_img = np.rot90(nib_img, k=1, axes=(0, 1))  # (95, 79, 68)
            elif self._view == "saggital":
                nib_img = np.rot90(nib_img, k=1, axes=(0, 2))  #
            elif self._view == "coronal":
                nib_img = np.rot90(nib_img, k=1, axes=(1, 2))  #
                nib_img = np.rot90(nib_img, k=3, axes=(0, 1))

            nib_img = nib_img.astype(np.uint8)
            nib_img = np.transpose(nib_img, [2, 0, 1])
            # print("3D shape", nib_img.shape)
            return [Image.fromarray(img) for img in nib_img]
Example 11
def load_gifti_func(path_to_file):
    """Function to load gifti data

    Wrapper to nibabel's load function that loads the gifti file and
    returns its data elements

    Parameters
    ----------

    path_to_file : str
        path to gifti file

    Returns
    -------

    gifti_data : np.ndarray
        array with gifti data having shape <n_vertices, n_dimensions>
    """

    gifti_img = nib_load(path_to_file)
    gifti_list = [x.data for x in gifti_img.darrays]
    gifti_data = np.vstack(gifti_list).transpose()

    return gifti_data
Example 12
    #print("D changed", t_instance.shape)
    result = np.transpose(t_instance, [1, 0, 2])
    #print("result", result.shape)
    return result


objective_path = "/home/galaxy2/database/home/galaxy2/dataset/npy_data"
filename = "norm_amyloid_68_79_95.npy"
voxel = np.load(os.path.join(objective_path, filename))
print("#########3", np.array(voxel[0]).shape)
print("#########3", np.array(voxel[1]).shape)
print("#########3", np.array(voxel[2]).shape)

mask_path = "/home/galaxy2/database/home/galaxy2/dataset/mask/threshold_0.5/revised"
mask_name = "brainmask_grey_resize_79_95_68.nii"
mask = nib_load(os.path.join(mask_path, mask_name))
mask = np.array(mask.get_fdata())
mask_s = mask.shape

resized_mask = np.resize(mask, (mask_s[0], mask_s[1], mask_s[2]))
#resized_mask = resize_3D(mask, (mask_s[0], mask_s[1], mask_s[2]))
resized_mask = np.transpose(resized_mask, [2, 0, 1])

#print(resized_voxel.shape)

# for ax in mask_voxel:
#     plt.imshow(ax)
#     plt.show()

#masked_voxel = p_voxel*resized_voxel
# for ax in masked_voxel:
Example 13
def calc_run_stats(path_to_confounds,
                   high_std_dvars_thresh=1.5,
                   high_motion_thresh=0.5):
    """Function to extract values from confounds.tsv

    Parameters
    ----------

    path_to_confounds : str
        path to fmriprep confounds tsv file
    high_std_dvars_thresh : float, optional
        the threshold to use for determining
        high std_dvars timepoints (defaults
        to 1.5)
    high_motion_thresh : float, optional
        the threshold to use for determining
        high motion timepoints (defaults to 0.5)


    Returns
    -------
        output_dict : dict
            dictionary with different statistics from the
            confounds file

    """

    confounds_df = pd.read_csv(path_to_confounds, delimiter='\t')
    output_dict = {}

    output_dict['mean_gs'] = np.nanmean(confounds_df['global_signal'].values)
    output_dict['mean_wm'] = np.nanmean(confounds_df['white_matter'].values)
    output_dict['mean_csf'] = np.nanmean(confounds_df['csf'].values)
    output_dict['mean_std_dvars'] = np.nanmean(
        confounds_df['std_dvars'].values)
    output_dict['num_high_std_dvars_tps'] = np.where(
        confounds_df['std_dvars'] > high_std_dvars_thresh)[0].shape[0]
    output_dict['max_std_dvars'] = np.nanmax(confounds_df['std_dvars'].values)
    output_dict['mean_dvars'] = np.nanmean(confounds_df['dvars'].values)
    output_dict['mean_fd'] = np.nanmean(
        confounds_df['framewise_displacement'].values)
    output_dict['num_high_motion_tps'] = np.where(
        confounds_df['framewise_displacement'] > high_motion_thresh
    )[0].shape[0]
    output_dict['max_fd'] = np.nanmax(
        confounds_df['framewise_displacement'].values)

    #Now calculate some metrics that need the image loaded....
    confounds_beginning = path_to_confounds[:-len(
        'desc-confounds_regressors.tsv')]
    reference_img_path = confounds_beginning + 'space-T1w_boldref.nii.gz'
    aparcaseg_img_path = confounds_beginning + 'space-T1w_desc-aparcaseg_dseg.nii.gz'
    brainmask_img_path = confounds_beginning + 'space-T1w_desc-brain_mask.nii.gz'

    reference_img_data = nib_load(reference_img_path).get_fdata()
    aparcaseg_img_data = nib_load(aparcaseg_img_path).get_fdata()
    brainmask_img_data = nib_load(brainmask_img_path).get_fdata()

    #local_dev_ratio, brainmask_var_component_ratio, gm_skin_1dil_var_component_ratio = batch_calc_alignment_metrics(reference_img_data, aparcaseg_img_data, brainmask_img_data)

    #output_dict['local_dev_ratio'] = local_dev_ratio
    #output_dict['brainmask_var_component_ratio'] = brainmask_var_component_ratio
    #output_dict['gm_skin_1dil_var_component_ratio'] = gm_skin_1dil_var_component_ratio

    return output_dict
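
A sketch of calling calc_run_stats on a hypothetical fmriprep confounds file; the path has to end in 'desc-confounds_regressors.tsv' because the boldref, aparcaseg, and brain-mask paths are derived from it above:

confounds_path = "sub-01/func/sub-01_task-rest_desc-confounds_regressors.tsv"  # hypothetical

run_stats = calc_run_stats(confounds_path,
                           high_std_dvars_thresh=1.5,
                           high_motion_thresh=0.5)
print(run_stats["mean_fd"], run_stats["num_high_motion_tps"])
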
Example 14
def extract_meta(fmris_meta, config):
    """
    Extract additional metadata from fMRIs listed in a dataframe by analyzing
    the stat-maps
    Warning: Since this function loads all stat-maps into memory and analyzes
             their values, it can take quite a long time for thousands of fMRIs.
             It is one of the main bottlenecks of this fetching and filtering
             pipeline and should be parallelized if possible.

    :param fmris_meta: pandas.DataFrame
        Dataframe with the metadata loaded from the sources (Neurovault, HCP...)
    :param config: dict
        Description of some of the data to extract

    :return: pandas.DataFrame
        Input dataframe with additional metadata.
    """
    for idx, row in fmris_meta.iterrows():
        try:
            fmri = nib_load(row["absolute_path"])
        except BaseException:
            print("Error with", row["absolute_path"])
            raise RuntimeError("Error unzipping a file")

        res_x, res_y, res_z = fmri.header.get_zooms()
        dim_x, dim_y, dim_z = fmri.header.get_data_shape()
        fmris_meta.at[idx, "res_x"] = res_x
        fmris_meta.at[idx, "res_y"] = res_y
        fmris_meta.at[idx, "res_z"] = res_z
        fmris_meta.at[idx, "dim_x"] = dim_x
        fmris_meta.at[idx, "dim_y"] = dim_y
        fmris_meta.at[idx, "dim_z"] = dim_z

        try:
            mat = fmri.get_fdata()
        except BaseException:
            print("Error with", row["absolute_path"])
            raise RuntimeError("Error unzipping a file")

        # mat = np.nan_to_num(mat)
        mat[mat == np.inf] = np.nan
        mat[mat == -np.inf] = np.nan
        fmris_meta.at[idx, "n_values"] = len(np.unique(mat[~np.isnan(mat)]))
        fmris_meta.at[idx, "min_value"] = np.nanmin(mat)
        fmris_meta.at[idx, "max_value"] = np.nanmax(mat)
        try:
            fmris_meta.at[idx, "min_pos_value"] = mat[mat > 0].nanmin()
        except BaseException:
            fmris_meta.at[idx, "min_pos_value"] = 0
        try:
            fmris_meta.at[idx, "max_neg_value"] = mat[mat < 0].nanmax()
        except BaseException:
            fmris_meta.at[idx, "max_neg_value"] = 0

        if mat[(~np.isnan(mat)) & (mat != 0)].any():
            fmris_meta.at[idx, "first_quantile"] = np.percentile(
                mat[(~np.isnan(mat)) & (mat != 0)],
                config["centered_param"]
            )
            fmris_meta.at[idx, "last_quantile"] = np.percentile(
                mat[(~np.isnan(mat)) & (mat != 0)],
                100 - config["centered_param"]
            )
        else:
            fmris_meta.at[idx, "first_quantile"] = 0
            fmris_meta.at[idx, "last_quantile"] = 0

        fmris_meta.at[idx, "hash"] = hash(mat[~np.isnan(mat)].tostring())
        fmri.uncache()

    return fmris_meta
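
The docstring warns that this loop is a bottleneck and "should be parallelized if possible". One way to do that, sketched here with joblib (an assumption, not part of the original pipeline), is to split the dataframe into chunks and run extract_meta on each chunk in a separate worker:

import numpy as np
import pandas as pd
from joblib import Parallel, delayed

def extract_meta_parallel(fmris_meta, config, n_jobs=4):
    # Split the rows into roughly equal chunks, enrich each chunk
    # independently, then stitch the results back together
    chunks = np.array_split(fmris_meta, n_jobs)
    results = Parallel(n_jobs=n_jobs)(
        delayed(extract_meta)(chunk, config) for chunk in chunks)
    return pd.concat(results)
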
Example 15
def load_cifti_func(path_to_file):

    cifti_img = nib_load(path_to_file)
    return np.asarray(cifti_img.dataobj).transpose()
Example 16
def _read_nifti(filename):
    from nibabel import load as nib_load
    return np.asarray(
        nib_load(filename).get_fdata(
            caching='unchanged', dtype=np.float32))