Example #1
def _save_sprite(img,
                 vmax,
                 vmin,
                 output_sprite=None,
                 mask=None,
                 cmap="Greys",
                 format="png"):
    """ Generate a sprite from a 3D Niimg-like object.
        Returns: sprite
    """

    # Create sprite
    sprite = _data_to_sprite(_safe_get_data(img, ensure_finite=True))

    # Mask the sprite
    if mask is not None:
        mask = _data_to_sprite(_safe_get_data(mask, ensure_finite=True))
        sprite = np.ma.array(sprite, mask=mask)

    # Save the sprite
    if output_sprite is None:
        output_sprite = BytesIO()
        imsave(output_sprite,
               sprite,
               vmin=vmin,
               vmax=vmax,
               cmap=cmap,
               format=format)
        output_sprite = _bytesIO_to_base64(output_sprite)
    else:
        imsave(output_sprite, sprite, vmin=vmin, vmax=vmax, cmap=cmap, format=format)

    return output_sprite
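
A minimal usage sketch (hypothetical): it assumes this private helper lives next to the module-level utilities it calls (_data_to_sprite, _safe_get_data, _bytesIO_to_base64 and matplotlib's imsave), and that "stat_map.nii.gz" is any 3D NIfTI volume.

import nibabel as nib

img = nib.load("stat_map.nii.gz")  # any 3D volume
# With output_sprite=None the helper returns the sprite as a base64 string.
sprite_b64 = _save_sprite(img, vmax=5.0, vmin=-5.0, cmap="Greys", format="png")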
Example #2
def _get_volume(img,
                threshold=0,
                atlas=None,
                stride=1,
                t_start=0,
                t_end=-1,
                n_t=50,
                t_r=None,
                marker_size=3,
                cmap=cm.cold_hot,
                symmetric_cmap=True,
                vmax=None,
                vmin=None):
    connectome = {}
    img = check_niimg_4d(img)
    t_unit = "" if not t_r else " s"
    if not t_r:
        t_r = 1
    if t_end < 0:
        t_end = img.shape[3] + t_end
    if not n_t:
        n_t = t_end - t_start
    t_idx = np.round(np.linspace(t_start, t_end, n_t)).astype(int)
    t_labels = [str(t_r * t) + t_unit for t in t_idx]
    data = _safe_get_data(img)[::stride, ::stride, ::stride, t_idx]
    mask = np.abs(data[:, :, :, 0]) > threshold
    i, j, k = mask.nonzero()
    x, y, z = coord_transform(i * stride, j * stride, k * stride, img.affine)
    for coord, cname in [(x, "x"), (y, "y"), (z, "z")]:
        connectome["_con_{}".format(cname)] = encode(
            np.asarray(coord, dtype='<f4'))
    colors = colorscale(cmap,
                        data.ravel(),
                        symmetric_cmap=symmetric_cmap,
                        vmax=vmax,
                        vmin=vmin)
    if atlas:
        atlas = check_niimg_3d(atlas)
        atlas_data = _safe_get_data(atlas)[::stride, ::stride, ::stride]
        connectome['atlas'] = encode(
            np.asarray(atlas_data[i, j, k], dtype='<f4'))
        connectome['atlas_nb'] = int(np.max(atlas_data))
    connectome['colorscale'] = colors['colors']
    connectome['cmin'] = float(colors['vmin'])
    connectome['cmax'] = float(colors['vmax'])
    connectome['n_time'] = n_t
    connectome['t_labels'] = t_labels
    values = [
        encode(np.asarray(data[i, j, k, t], dtype='<f4'))
        for t in range(data.shape[3])
    ]
    connectome['values'] = values

    return connectome
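
A hypothetical call, assuming the nilearn helpers imported by this module (check_niimg_4d, _safe_get_data, coord_transform, colorscale, encode) are in scope and "func.nii.gz" is any 4D functional image.

# Build the volumetric "connectome" dict for 20 frames, keeping every 3rd voxel.
connectome = _get_volume("func.nii.gz", threshold=1.0, stride=3, n_t=20, t_r=2.0)
print(connectome["n_time"], len(connectome["values"]))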
Example #3
def _draw_colorbar(
        stat_map_img,
        axes,
        threshold=.1,
        nb_ticks=5,
        edge_color="0.5",
        edge_alpha=1,
        aspect=40,
        fraction=0.025,
        anchor=(10.0, 0.5),
):
    if isinstance(stat_map_img, str):
        stat_map_img = path.abspath(path.expanduser(stat_map_img))
        stat_map_img = nib.load(stat_map_img)
    _, _, vmin, vmax = _get_colorbar_and_data_ranges(
        _safe_get_data(stat_map_img, ensure_finite=True), None, "auto", "")
    cbar_ax, p_ax = make_axes(
        axes,
        aspect=aspect,
        fraction=fraction,
        # pad=-0.5,
        anchor=anchor,
        # panchor=(-110.0, 0.5),
    )
    ticks = np.linspace(vmin, vmax, nb_ticks)
    bounds = np.linspace(vmin, vmax, MYMAP.N)
    norm = mcolors.Normalize(vmin=vmin, vmax=vmax)
    # some colormap hacking
    cmaplist = [MYMAP(i) for i in range(MYMAP.N)]
    istart = int(norm(-threshold, clip=True) * (MYMAP.N - 1))
    istop = int(norm(threshold, clip=True) * (MYMAP.N - 1))
    for i in range(istart, istop):
        cmaplist[i] = (0.5, 0.5, 0.5, 1.)  # just an average gray color
    our_cmap = MYMAP.from_list('Custom cmap', cmaplist, MYMAP.N)

    cbar = ColorbarBase(
        cbar_ax,
        ticks=ticks,
        norm=norm,
        orientation="vertical",
        cmap=our_cmap,
        boundaries=bounds,
        spacing="proportional",
        format="%.2g",
    )

    cbar.outline.set_edgecolor(edge_color)
    cbar.outline.set_alpha(edge_alpha)

    cbar_ax.yaxis.tick_left()
    tick_color = 'k'
    for tick in cbar_ax.yaxis.get_ticklabels():
        tick.set_color(tick_color)
    cbar_ax.yaxis.set_tick_params(width=0)

    return cbar_ax, p_ax
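
A hypothetical call, assuming the module-level MYMAP colormap referenced above is defined; the colorbar is attached next to an existing matplotlib axes.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
cbar_ax, p_ax = _draw_colorbar("stat_map.nii.gz", ax, threshold=1.0, nb_ticks=7)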
Example #4
def _mask_stat_map(stat_map_img, threshold=None):
    """ Load a stat map and apply a threshold.
        Returns: mask_img, stat_map_img, data, threshold
    """
    # Load stat map
    stat_map_img = check_niimg_3d(stat_map_img, dtype="auto")
    data = _safe_get_data(stat_map_img, ensure_finite=True)

    # threshold the stat_map
    if threshold is not None:
        data, mask, threshold = _threshold_data(data, threshold)
        mask_img = new_img_like(stat_map_img, mask, stat_map_img.affine)
    else:
        mask_img = new_img_like(stat_map_img, np.zeros(data.shape),
                                stat_map_img.affine)
    return mask_img, stat_map_img, data, threshold
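
A hypothetical call on a z-map file, assuming the _threshold_data helper used above is available in the same module.

mask_img, stat_img, data, threshold = _mask_stat_map("zmap.nii.gz", threshold=3.0)
print(data.shape, threshold)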
Example #5
def data_info(img):
    """Tool to report the image data shape, affine, voxel size

    Parameters
    ----------
    img : Nifti like image/object

    Returns
    -------
    shape, affine, vox_size
    """
    img = load_img(img)
    img_data = _safe_get_data(img)

    if len(img.shape) > 3:
        shape = img.shape[:3]
    else:
        shape = img.shape

    affine = img.get_affine()
    vox_size = np.prod(np.diag(abs(affine[:3])))

    return shape, affine, vox_size
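
A hypothetical call; load_img accepts either a path or an in-memory image.

shape, affine, vox_size = data_info("anat.nii.gz")
print("shape:", shape, "voxel volume (mm^3):", vox_size)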
def _region_extractor_labels_image(atlas, extract_type='connected_components',
                                   min_region_size=0):
    """ Function takes atlas image denoted as labels for each region
        and then imposes region extraction algorithm on the image to
        split them into regions apart.

    Parameters
    ----------
    atlas : 3D Nifti-like image
        An image contains labelled regions.

    extract_type : 'connected_components', 'local_regions'
        See nilearn.regions.connected_regions for full documentation

    min_region_size : in mm^3
        Minimum size of voxels in a region to be kept.

    """
    atlas_img = check_niimg(atlas)
    atlas_data = _safe_get_data(atlas_img)
    affine = atlas_img.get_affine()

    n_labels = np.unique(np.asarray(atlas_data))

    reg_imgs = []
    for label_id in n_labels:
        if label_id == 0:
            continue
        print("[Region Extraction] Processing with label {0}".format(label_id))
        region = (atlas_data == label_id) * atlas_data
        reg_img = new_img_like(atlas_img, region)
        regions, _ = connected_regions(reg_img, extract_type=extract_type,
                                       min_region_size=min_region_size)
        reg_imgs.append(regions)
    regions_extracted = concat_niimgs(reg_imgs)

    return regions_extracted, n_labels
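
A hypothetical call using a real nilearn atlas fetcher (fetch_atlas_harvard_oxford); the keyword values are illustrative only.

from nilearn import datasets

atlas = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
regions_img, label_ids = _region_extractor_labels_image(
    atlas.maps, extract_type='connected_components', min_region_size=100)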
Example #7
def save_sprite(img,
                output_sprite,
                output_cmap=None,
                output_json=None,
                vmax=None,
                vmin=None,
                cmap='Greys',
                threshold=None,
                n_colors=256,
                format='png',
                resample=True,
                interpolation='nearest'):
    """ Generate a sprite from a 3D Niimg-like object.

        Parameters
        ----------
        img :  Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
        output_sprite : string or file-like
            Path string to a filename, or a Python file-like object.
            If *format* is *None* and *fname* is a string, the output
            format is deduced from the extension of the filename.
        output_cmap : string, file-like or None, optional (default None)
            Path string to a filename, or a Python file-like object.
            The color map will be saved in that file (unless it is None).
            If *format* is *None* and *fname* is a string, the output format is
            deduced from the extension of the filename.
        output_json : string, file-like or None, optional (default None)
            Path string to a filename, or a Python file-like object.
            The parameters of the sprite will be saved in that file
            (unless it is None): Y and Z sizes, vmin, vmax, affine transform.
        vmax : float, or None, optional (default None)
            max value for mapping colors.
        vmin : float, or None, optional (default None)
            min value for mapping color.
        cmap : name of a matplotlib colormap, optional (default 'Greys')
            The colormap for the sprite. A matplotlib colormap can also
            be passed directly in cmap.
        threshold : a number, None, or 'auto', optional (default None)
            If None is given, the image is not thresholded.
            If a number is given, it is used to threshold the image:
            values below the threshold (in absolute value) are plotted
            as transparent. If auto is given, the threshold is determined
            magically by analysis of the image.
        n_colors : integer, optional (default 256)
            The number of discrete colors to use in the colormap, if it is
            generated.
        format : string, optional (default 'png')
            One of the file extensions supported by the active backend.  Most
            backends support png, pdf, ps, eps and svg.
        resample : boolean, optional (default True)
            Resample to isotropic voxels, with a LR/AP/VD orientation.
            This is necessary for proper rendering of arbitrary Niimg volumes,
            but not necessary if the image is in an isotropic standard space.
        interpolation : string, optional (default nearest)
            The interpolation method for resampling
            See nilearn.image.resample_img

        Returns
        ----------
        sprite : numpy array with the sprite
    """

    # Get cmap
    if isinstance(cmap, str):
        cmap = plt.cm.get_cmap(cmap)

    img = check_niimg_3d(img, dtype='auto')

    # resample to isotropic voxel with standard orientation
    if resample:
        img = _resample_to_self(img, interpolation)

    # Read data
    data = _safe_get_data(img, ensure_finite=True)
    if np.isnan(np.sum(data)):
        data = np.nan_to_num(data)

    # Deal with automatic settings of plot parameters
    if threshold == 'auto':
        # Threshold epsilon below a percentile value, to be sure that some
        # voxels pass the threshold
        threshold = fast_abs_percentile(data) - 1e-5

    # threshold
    threshold = float(threshold) if threshold is not None else None

    # Get vmin vmax
    show_nan_msg = False
    if vmax is not None and np.isnan(vmax):
        vmax = None
        show_nan_msg = True
    if vmin is not None and np.isnan(vmin):
        vmin = None
        show_nan_msg = True
    if show_nan_msg:
        nan_msg = ('NaN is not permitted for the vmax and vmin arguments.\n'
                   'Tip: Use np.nanmax() instead of np.max().')
        warnings.warn(nan_msg)

    if vmax is None:
        vmax = np.nanmax(data)
    if vmin is None:
        vmin = np.nanmin(data)

    # Create sprite
    sprite = _data2sprite(data)

    # Mask sprite
    if threshold is not None:
        if threshold == 0:
            sprite = np.ma.masked_equal(sprite, 0, copy=False)
        else:
            sprite = np.ma.masked_inside(sprite,
                                         -threshold,
                                         threshold,
                                         copy=False)
    # Save the sprite
    imsave(output_sprite,
           sprite,
           vmin=vmin,
           vmax=vmax,
           cmap=cmap,
           format=format)

    # Save the parameters
    if type(vmin).__module__ == 'numpy':
        vmin = vmin.tolist()  # json does not deal with numpy array
    if type(vmax).__module__ == 'numpy':
        vmax = vmax.tolist()  # json does not deal with numpy array

    if output_json is not None:
        params = {
            'nbSlice': {
                'X': data.shape[0],
                'Y': data.shape[1],
                'Z': data.shape[2]
            },
            'min': vmin,
            'max': vmax,
            'affine': img.affine.tolist()
        }
        if isinstance(output_json, str):
            f = open(output_json, 'w')
            f.write(json.dumps(params))
            f.close()
        else:
            output_json.write(json.dumps(params))

    # save the colormap
    if output_cmap is not None:
        data = np.arange(0, n_colors) / (n_colors - 1)
        data = data.reshape([1, n_colors])
        imsave(output_cmap, data, cmap=cmap, format=format)

    return sprite
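
A hypothetical call that writes the sprite, the colormap strip and the JSON parameters for one statistical map; it assumes the private helpers used above (_resample_to_self, _data2sprite, fast_abs_percentile) are importable alongside this function.

save_sprite("zmap.nii.gz",
            output_sprite="sprite.png",
            output_cmap="colormap.png",
            output_json="sprite.json",
            cmap="viridis",
            threshold=3.0)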
Example #8
def get_stat_map(stat_map_img,
                 bg_img,
                 cut_coords=None,
                 colorbar=True,
                 title=None,
                 threshold=None,
                 annotate=True,
                 draw_cross=True,
                 black_bg='auto',
                 cmap=cm.cold_hot,
                 symmetric_cbar='auto',
                 dim='auto',
                 vmax=None,
                 resampling_interpolation='continuous',
                 n_colors=256,
                 opacity=1,
                 **kwargs):
    """
    Interactive viewer of a statistical map, with optional background.

    Parameters
    ----------
    stat_map_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The statistical map image.
    bg_img : Niimg-like object (default='MNI152')
        See http://nilearn.github.io/manipulating_images/input_output.html
        The background image that the stat map will be plotted on top of.
        If nothing is specified, the MNI152 template will be used.
        To turn off background image, just pass "bg_img=False".
    cut_coords : None, a tuple of floats, or an integer (default None)
        The MNI coordinates of the point where the cut is performed
        as a 3-tuple: (x, y, z). If None is given, the cut position is
        calculated automatically.
        This parameter is not currently supported.
    colorbar : boolean, optional (default True)
        If True, display a colorbar next to the plots.
    title : string or None (default=None)
        The title displayed on the figure (or None: no title).
        This parameter is not currently supported.
    threshold : str, number or None  (default=None)
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    annotate : boolean (default=True)
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean (default=True)
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean (default='auto')
        If True, the background of the image is set to be black.
        Otherwise, a white background is used.
        If set to auto, an educated guess is made to find if the background
        is white or black.
    cmap : matplotlib colormap, optional
        The colormap for specified image. The colormap *must* be
        symmetrical.
    symmetric_cbar : boolean or 'auto' (default='auto')
        Specifies whether the colorbar should range from -vmax to vmax
        or from vmin to vmax. Setting to 'auto' will select the latter if
        the range of the whole image is either positive or negative.
        Note: The colormap will always be set to range from -vmax to vmax.
    dim : float, 'auto' (default='auto')
        Dimming factor applied to background image. By default, automatic
        heuristics are applied based upon the background image intensity.
        Accepted float values, where a typical scan is between -2 and 2
        (-2 = increase contrast; 2 = decrease contrast), but larger values
        can be used for a more pronounced effect. 0 means no dimming.
    vmax : float, or None (default=None)
        max value for mapping colors.
    resampling_interpolation : string, optional (default 'continuous')
        The interpolation method for resampling
        See nilearn.image.resample_img
    n_colors : integer (default=256)
        The number of discrete colors to use in the colormap, if it is
        generated.
    opacity : float in [0,1] (default 1)
        The level of opacity of the overlay (0: transparent, 1: opaque)

    Returns
    -------
    StatMapView : plot of the stat map.
        It can be saved as an html page or rendered (transparently) by the
        Jupyter notebook.
    """

    # Load stat map
    stat_map_img = check_niimg_3d(stat_map_img, dtype='auto')

    _, _, vmin, vmax = _get_colorbar_and_data_ranges(
        _safe_get_data(stat_map_img, ensure_finite=True), vmax, symmetric_cbar,
        kwargs)

    # load background image, and resample stat map
    if bg_img is not None and bg_img is not False:
        bg_img, black_bg, bg_min, bg_max = _load_anat(bg_img,
                                                      dim=dim,
                                                      black_bg=black_bg)
        bg_img = _resample_to_self(bg_img,
                                   interpolation=resampling_interpolation)
        stat_map_img = image.resample_to_img(
            stat_map_img, bg_img, interpolation=resampling_interpolation)

    else:
        stat_map_img = _resample_to_self(
            stat_map_img, interpolation=resampling_interpolation)
        bg_img = image.new_img_like(stat_map_img, np.zeros(stat_map_img.shape),
                                    stat_map_img.affine)
        bg_min = 0
        bg_max = 0
        if black_bg == 'auto':
            black_bg = False

    # Select coordinates for the cut
    # https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/displays.py#L943
    if isinstance(cut_coords, numbers.Number):
        raise ValueError(
            "The input given for display_mode='ortho' needs to be "
            "a list of 3d world coordinates in (x, y, z). "
            "You provided single cut, cut_coords={0}".format(cut_coords))
    if cut_coords is None:
        cut_coords = find_xyz_cut_coords(stat_map_img,
                                         activation_threshold=threshold)
    print(cut_coords)

    # Create a base64 sprite for the background
    bg_sprite = BytesIO()
    save_sprite(bg_img,
                output_sprite=bg_sprite,
                cmap='gray',
                format='jpg',
                resample=False,
                vmin=bg_min,
                vmax=bg_max)
    bg_sprite.seek(0)
    bg_base64 = encodebytes(bg_sprite.read()).decode('utf-8')
    bg_sprite.close()

    # Create a base64 sprite for the stat map
    # Possibly, also generate a file with the colormap
    stat_map_sprite = BytesIO()
    stat_map_json = StringIO()
    if colorbar:
        stat_map_cm = BytesIO()
    else:
        stat_map_cm = None
    cmap_c = _custom_cmap(cmap, vmin, vmax, threshold)
    save_sprite(stat_map_img, stat_map_sprite, stat_map_cm, stat_map_json,
                vmax, vmin, cmap_c, threshold, n_colors, 'png', False)

    # Convert the sprite and colormap to base64
    stat_map_sprite.seek(0)
    stat_map_base64 = encodebytes(stat_map_sprite.read()).decode('utf-8')
    stat_map_sprite.close()

    if colorbar:
        stat_map_cm.seek(0)
        cm_base64 = encodebytes(stat_map_cm.read()).decode('utf-8')
        stat_map_cm.close()
    else:
        cm_base64 = ''
    # Load the sprite meta-data from the json dump
    stat_map_json.seek(0)
    params = json.load(stat_map_json)
    stat_map_json.close()

    # Convert cut coordinates into cut slices
    cut_slices = np.round(
        nb.affines.apply_affine(np.linalg.inv(stat_map_img.affine),
                                cut_coords))

    # Create a json-like structure
    # with all the brain sprite parameters
    sprite_params = {
        'canvas': '3Dviewer',
        'sprite': 'spriteImg',
        'nbSlice': params['nbSlice'],
        'overlay': {
            'sprite': 'overlayImg',
            'nbSlice': params['nbSlice'],
            'opacity': opacity
        },
        'colorBackground': '#000000',
        'colorFont': '#ffffff',
        'colorCrosshair': '#de101d',
        'crosshair': draw_cross,
        'affine': params['affine'],
        'flagCoordinates': annotate,
        'title': title,
        'flagValue': annotate,
        'numSlice': {
            'X': cut_slices[0],
            'Y': cut_slices[1],
            'Z': cut_slices[2]
        },
    }
    if colorbar:
        sprite_params['colorMap'] = {
            'img': 'colorMap',
            'min': params['min'],
            'max': params['max']
        }

    return sprite_params, bg_base64, stat_map_base64, cm_base64
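
A hypothetical call building the brainsprite parameters for a thresholded map on top of the MNI template (load_mni152_template is a real nilearn function).

from nilearn.datasets import load_mni152_template

params, bg_b64, stat_b64, cm_b64 = get_stat_map("zmap.nii.gz",
                                                bg_img=load_mni152_template(),
                                                threshold=3.0,
                                                opacity=0.8)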
Example #9
def get_clusters_table(stat_img,
                       stat_threshold,
                       cluster_threshold=None,
                       two_sided=False,
                       min_distance=8.):
    """Creates pandas dataframe with img cluster statistics.

    This function should work on any statistical maps where more extreme values
    indicate greater statistical significance.
    For example, z-statistic or -log10(p) maps are valid inputs, but a p-value
    map is not.

    .. important::

        For binary clusters (clusters comprised of only one value),
        the table reports the center of mass of the cluster,
        rather than any peaks/subpeaks.

        This center of mass may, in some cases, appear outside of the cluster.

    Parameters
    ----------
    stat_img : Niimg-like object
       Statistical image to threshold and summarize.

    stat_threshold : :obj:`float`
        Cluster forming threshold. This value must be in the same scale as
        ``stat_img``.

    cluster_threshold : :obj:`int` or None, optional
        Cluster size threshold, in :term:`voxels<voxel>`.
        If None, then no cluster size threshold will be applied. Default=None.

    two_sided : :obj:`bool`, optional
        Whether to employ two-sided thresholding or to evaluate positive values
        only. Default=False.

    min_distance : :obj:`float`, optional
        Minimum distance between subpeaks, in millimeters. Default=8.

        .. note::
            If two different clusters are closer than ``min_distance``, it can
            result in peaks closer than ``min_distance``.

    Returns
    -------
    df : :obj:`pandas.DataFrame`
        Table with peaks and subpeaks from thresholded ``stat_img``.
        The columns in this table include:

        ================== ====================================================
        Cluster ID         The cluster number. Subpeaks have letters after the
                           number.
        X/Y/Z              The coordinate for the peak, in millimeters.
        Peak Stat          The statistical value associated with the peak.
                           The statistic type is dependent on the type of the
                           statistical image.
        Cluster Size (mm3) The size of the cluster, in millimeters cubed.
                           Rows corresponding to subpeaks will not have a value
                           in this column.
        ================== ====================================================
    """
    cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
    # Replace None with 0
    cluster_threshold = 0 if cluster_threshold is None else cluster_threshold

    # check that stat_img is niimg-like object and 3D
    stat_img = check_niimg_3d(stat_img)

    # Apply threshold(s) to image
    stat_img = threshold_img(
        img=stat_img,
        threshold=stat_threshold,
        cluster_threshold=cluster_threshold,
        two_sided=two_sided,
        mask_img=None,
        copy=True,
    )

    # If cluster threshold is used, there is chance that stat_map will be
    # modified, therefore copy is needed
    stat_map = _safe_get_data(stat_img,
                              ensure_finite=True,
                              copy_data=(cluster_threshold is not None))

    # Define array for 6-connectivity, aka NN1 or "faces"
    bin_struct = ndimage.generate_binary_structure(rank=3, connectivity=1)

    voxel_size = np.prod(stat_img.header.get_zooms())

    signs = [1, -1] if two_sided else [1]
    no_clusters_found = True
    rows = []
    for sign in signs:
        # Flip map if necessary
        temp_stat_map = stat_map * sign

        # Binarize using cluster-defining threshold
        binarized = temp_stat_map > stat_threshold
        binarized = binarized.astype(int)

        # If the stat threshold is too high simply return an empty dataframe
        if np.sum(binarized) == 0:
            warnings.warn(
                'Attention: No clusters with stat {0} than {1}'.format(
                    'higher' if sign == 1 else 'lower',
                    stat_threshold * sign,
                ))
            continue

        # Now re-label and create table
        label_map = ndimage.measurements.label(binarized, bin_struct)[0]
        clust_ids = sorted(list(np.unique(label_map)[1:]))
        peak_vals = np.array(
            [np.max(temp_stat_map * (label_map == c)) for c in clust_ids])
        # Sort by descending max value
        clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]

        for c_id, c_val in enumerate(clust_ids):
            cluster_mask = label_map == c_val
            masked_data = temp_stat_map * cluster_mask

            cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)

            # Get peaks, subpeaks and associated statistics
            subpeak_ijk, subpeak_vals = _local_max(
                masked_data,
                stat_img.affine,
                min_distance=min_distance,
            )
            subpeak_vals *= sign  # flip signs if necessary
            subpeak_xyz = np.asarray(
                coord_transform(
                    subpeak_ijk[:, 0],
                    subpeak_ijk[:, 1],
                    subpeak_ijk[:, 2],
                    stat_img.affine,
                )).tolist()
            subpeak_xyz = np.array(subpeak_xyz).T

            # Only report peak and, at most, top 3 subpeaks.
            n_subpeaks = np.min((len(subpeak_vals), 4))
            for subpeak in range(n_subpeaks):
                if subpeak == 0:
                    row = [
                        c_id + 1,
                        subpeak_xyz[subpeak, 0],
                        subpeak_xyz[subpeak, 1],
                        subpeak_xyz[subpeak, 2],
                        subpeak_vals[subpeak],
                        cluster_size_mm,
                    ]
                else:
                    # Subpeak naming convention is cluster num+letter:
                    # 1a, 1b, etc
                    sp_id = '{0}{1}'.format(
                        c_id + 1,
                        ascii_lowercase[subpeak - 1],
                    )
                    row = [
                        sp_id,
                        subpeak_xyz[subpeak, 0],
                        subpeak_xyz[subpeak, 1],
                        subpeak_xyz[subpeak, 2],
                        subpeak_vals[subpeak],
                        '',
                    ]
                rows += [row]

        # If we reach this point, there are clusters in this sign
        no_clusters_found = False

    if no_clusters_found:
        df = pd.DataFrame(columns=cols)
    else:
        df = pd.DataFrame(columns=cols, data=rows)

    return df
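
A usage sketch (the thresholds below are illustrative): summarize clusters surviving z > 3.1 with at least 10 voxels, on both tails.

table = get_clusters_table("zmap.nii.gz",
                           stat_threshold=3.1,
                           cluster_threshold=10,
                           two_sided=True)
print(table.head())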
Example #10
    def transform(self, X, y=None, **kwargs):

        if len(X) < 1:
            raise Exception("Brain Atlas: Did not get any data in parameter X")

        if self.collection_mode == "list" or self.collection_mode == "concat":
            collection_mode = self.collection_mode
        else:
            collection_mode = "concat"
            logger.error(
                "Collection mode {} not supported. Use 'list' or 'concat' instead."
                "Falling back to concat mode.".format(self.collection_mode))

        # 1. validate if all X are in the same space and have the same voxelsize and have the same orientation

        # 2. load sample data to get target affine and target shape to adapt the brain atlas

        self.affine, self.shape = BrainMask.get_format_info_from_first_image(X)

        # load all niftis to memory
        if isinstance(X, list):
            n_subjects = len(X)
            X = image.load_img(X)
        elif isinstance(X, str):
            n_subjects = 1
            X = image.load_img(X)
        elif isinstance(X, np.ndarray):
            n_subjects = X.shape[0]
            X = image.load_img(X)
        else:
            n_subjects = X.shape[-1]

        # get ROI mask
        atlas_obj = AtlasLibrary().get_atlas(self.atlas_name, self.affine,
                                             self.shape, self.mask_threshold)
        roi_objects = self._get_rois(atlas_obj,
                                     which_rois=self.rois,
                                     background_id=self.background_id)

        roi_data = [list() for i in range(n_subjects)]
        roi_data_concat = list()
        t1 = time.time()

        # convert to series and C ordering since this will speed up the masking process
        series = _utils.as_ndarray(_safe_get_data(X),
                                   dtype="float32",
                                   order="C",
                                   copy=True)
        mask_indices = list()

        for i, roi in enumerate(roi_objects):
            logger.debug("Extracting ROI {}".format(roi.label))
            # simply call apply_mask to extract one roi
            extraction = self.apply_mask(series, roi.mask)
            if collection_mode == "list":
                for sub_i in range(extraction.shape[0]):
                    roi_data[sub_i].append(extraction[sub_i])
                mask_indices.append(i)
            else:
                roi_data_concat.append(extraction)
                mask_indices.append(np.ones(extraction[0].size) * i)

        if self.collection_mode == "concat":
            roi_data = np.concatenate(roi_data_concat, axis=1)
            self.mask_indices = np.concatenate(mask_indices)
        else:
            self.mask_indices = mask_indices

        elapsed_time = time.time() - t1
        logger.debug(
            "Time for extracting {} ROIs in {} subjects: {} seconds".format(
                len(roi_objects), n_subjects, elapsed_time))
        return roi_data
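
A hypothetical usage sketch; the transformer class name and constructor arguments below are assumptions, only the transform() call corresponds to the method shown above.

# Assumed constructor; not taken from the source.
transformer = BrainAtlas(atlas_name="AAL", rois="all", collection_mode="concat")
features = transformer.transform(["sub-01_bold.nii.gz", "sub-02_bold.nii.gz"])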
Example #11
def fetch_masks_dorr_2008(image_format='nifti',
                          downsample='30',
                          data_dir=None,
                          resume=True,
                          verbose=1):
    """Downloads DORR 2008 atlas first, then uses its labels to produce tissue
    masks.

    Parameters
    ----------
    image_format : one of {'nifti', 'minc'}, optional
        Format to download

    downsample : one of {'30', '100'}, optional
        Downsampling resolution in microns.

    data_dir : str, optional
        Path of the data directory. Used to force data storage in a
        non-standard location. Default: None (meaning: default)

    resume : bool, optional
        Whether to resume the download of a partly-downloaded file.

    verbose : int, optional
        verbosity level (0 means no message).

    Returns
    -------
    mask_imgs: sklearn.datasets.base.Bunch
        dictionary-like object, contains:

        - 'brain' : nibabel.nifti1.Nifti1Image brain mask image.

        - 'gm' : nibabel.nifti1.Nifti1Image grey matter mask image.

        - 'cc' : nibabel.nifti1.Nifti1Image eroded corpus callosum mask image.

        - 'ventricles' : nibabel.nifti1.Nifti1Image eroded ventricles mask
                         image.

    Notes
    -----
    This function relies on the DORR 2008 atlas, from which the ventricles and
    corpus callosum regions are picked. Some post-processing is then applied:
    binary closing, to obtain more compact brain and grey matter masks, and
    binary erosion, to obtain corpus callosum and ventricles masks that are not
    contaminated by neighbouring tissue.
    Note: It is advised to check the mask images with your own data processing.

    See Also
    --------
    sammba.data_fetchers.fetch_atlas_dorr_2008: for details regarding
        the DORR 2008 atlas.
    """
    masks_dir = _get_dataset_dir('dorr_2008',
                                 data_dir=data_dir,
                                 verbose=verbose)
    if image_format == 'nifti':
        ext = '.nii.gz'
    elif image_format == 'minc':
        ext = '.minc'
    else:
        raise ValueError("Images format must be 'nifti' or 'minc', you "
                         "entered {0}".format(image_format))

    brain_mask_file = os.path.join(
        masks_dir, 'dorr_2008_brain_mask_{}{}'.format(downsample, ext))
    gm_mask_file = os.path.join(
        masks_dir, 'dorr_2008_gm_mask_{}{}'.format(downsample, ext))
    cc_mask_file = os.path.join(
        masks_dir, 'dorr_2008_cc_mask_{}{}'.format(downsample, ext))
    ventricles_mask_file = os.path.join(
        masks_dir, 'dorr_2008_ventricles_mask_{}{}'.format(downsample, ext))
    existing_mask_files = [
        os.path.isfile(f) for f in
        [brain_mask_file, gm_mask_file, cc_mask_file, ventricles_mask_file]
    ]
    if not np.all(existing_mask_files):
        # Fetching DORR 2008 atlas
        dorr = fetch_atlas_dorr_2008(image_format=image_format,
                                     downsample=downsample,
                                     data_dir=data_dir,
                                     resume=resume,
                                     verbose=verbose)
        atlas_img = check_niimg(dorr.maps)
        atlas_data = niimg._safe_get_data(atlas_img).astype(int)

    if not os.path.isfile(brain_mask_file):
        brain_mask = (atlas_data > 0)
        brain_mask = ndimage.binary_closing(brain_mask, iterations=2)
        brain_mask_img = image.new_img_like(atlas_img, brain_mask)
        brain_mask_img.to_filename(brain_mask_file)

    if not os.path.isfile(cc_mask_file):
        cc_labels = dorr.labels[np.in1d(
            dorr.names.astype(str),
            ['R corpus callosum', 'L corpus callosum'])]
        cc_mask = np.max([atlas_data == value for value in cc_labels], axis=0)
        eroded_cc_mask = ndimage.binary_erosion(cc_mask, iterations=2)
        cc_mask_img = image.new_img_like(atlas_img, eroded_cc_mask)
        cc_mask_img.to_filename(cc_mask_file)

    if not os.path.isfile(ventricles_mask_file):
        ventricles_names = [
            'R lateral ventricle', 'L lateral ventricle', 'third ventricle',
            'fourth ventricle'
        ]
        ventricles_labels = dorr.labels[np.in1d(dorr.names.astype(str),
                                                ventricles_names)]
        ventricles_mask = np.max(
            [atlas_data == value for value in ventricles_labels], axis=0)
        eroded_ventricles_mask = ndimage.binary_erosion(ventricles_mask,
                                                        iterations=2)
        ventricles_mask_img = image.new_img_like(atlas_img,
                                                 eroded_ventricles_mask)
        ventricles_mask_img.to_filename(ventricles_mask_file)

    if not os.path.isfile(gm_mask_file):
        gm_mask_img = image.math_img(
            'np.logical_not(np.logical_or(ventricles_mask_img, cc_mask_img))',
            ventricles_mask_img=ventricles_mask_img,
            cc_mask_img=cc_mask_img)
        gm_mask_img = image.math_img(
            'np.logical_and(brain_mask_img, gm_mask_img)',
            brain_mask_img=brain_mask_img,
            gm_mask_img=gm_mask_img)
        gm_mask_closed_data = ndimage.binary_closing(gm_mask_img.get_data(),
                                                     iterations=2)
        gm_mask_img = image.new_img_like(gm_mask_img, gm_mask_closed_data)
        gm_mask_img.to_filename(gm_mask_file)

    mask_files = {
        'brain': brain_mask_file,
        'gm': gm_mask_file,
        'cc': cc_mask_file,
        'ventricles': ventricles_mask_file
    }

    return Bunch(**mask_files)
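
A hypothetical call; the derived masks are written to (or reused from) the sammba data directory and returned as file paths in a Bunch.

masks = fetch_masks_dorr_2008(image_format='nifti', downsample='100')
print(masks.brain, masks.gm, masks.cc, masks.ventricles)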
Example #12
    def _raw_fit(self, data):
        """ Fits the parcellation method on this reduced data.
        Data are coming from a base decomposition estimator which computes
        the mask and reduces the dimensionality of images using
        randomized_svd.
        Parameters
        ----------
        data : ndarray
            Shape (n_samples, n_features)
        Returns
        -------
        labels_ : numpy.ndarray
            Labels to each cluster in the brain.
        connectivity_ : numpy.ndarray
            voxel-to-voxel connectivity matrix computed from a mask.
            Note that, this attribute is returned only for selected methods
            such as 'ward', 'complete', 'average'.
        """
        valid_methods = self.VALID_METHODS
        if self.method is None:
            raise ValueError("Parcellation method is specified as None. "
                             "Please select one of the method in "
                             "{0}".format(valid_methods))
        if self.method is not None and self.method not in valid_methods:
            raise ValueError("The method you have selected is not implemented "
                             "'{0}'. Valid methods are in {1}".format(
                                 self.method, valid_methods))

        # we delay importing Ward or AgglomerativeClustering and same
        # time import plotting module before that.

        # Because sklearn.cluster imports scipy hierarchy and hierarchy imports
        # matplotlib. So, we force import matplotlib first using our
        # plotting to avoid backend display error with matplotlib
        # happening in Travis
        try:
            from nilearn import plotting
        except:
            pass

        components = MultiPCA._raw_fit(self, data)

        mask_img_ = self.masker_.mask_img_
        if self.verbose:
            print("[{0}] computing {1}".format(self.__class__.__name__,
                                               self.method))

        if self.method == 'kmeans':
            from sklearn.cluster import MiniBatchKMeans
            kmeans = MiniBatchKMeans(n_clusters=self.n_parcels,
                                     init='k-means++',
                                     random_state=self.random_state,
                                     verbose=max(0, self.verbose - 1))
            labels = self._cache(_estimator_fit,
                                 func_memory_level=1)(components.T, kmeans)
        else:
            mask_ = _safe_get_data(mask_img_).astype(np.bool)
            shape = mask_.shape
            connectivity = image.grid_to_graph(n_x=shape[0],
                                               n_y=shape[1],
                                               n_z=shape[2],
                                               mask=mask_)

            # from data.new_agglo import NewAgglomerativeClustering as AgglomerativeClustering
            from sklearn.cluster import AgglomerativeClustering

            agglomerative = AgglomerativeClustering(n_clusters=self.n_parcels,
                                                    connectivity=connectivity,
                                                    linkage=self.method,
                                                    memory=self.memory,
                                                    compute_full_tree=True)

            labels = self._cache(_estimator_fit,
                                 func_memory_level=1)(components.T,
                                                      agglomerative)

            self.agglomerative = agglomerative
            self.connectivity_ = connectivity
            # Avoid 0 label
            labels = labels + 1
            self.labels_img_ = self.masker_.inverse_transform(labels)
            return self

        # Avoid 0 label
        labels = labels + 1
        self.labels_img_ = self.masker_.inverse_transform(labels)

        return self
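
This method is normally reached through nilearn's public Parcellations estimator; a hypothetical end-to-end sketch:

from nilearn.regions import Parcellations

ward = Parcellations(method='ward', n_parcels=100, verbose=1)
ward.fit(['func_run1.nii.gz', 'func_run2.nii.gz'])
labels_img = ward.labels_img_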
def connected_label_regions(labels_img, min_size=None, connect_diag=True,
                            labels=None):
    """ Extract connected regions from a brain atlas image defined by labels
    (integers)

    For each label in a parcellation, separates out connected
    components and assigns to each separated region a unique label.

    Parameters
    ----------
    labels_img : Nifti-like image
        A 3D image which contains regions denoted as labels. Each region
        is assigned with integers.

    min_size : float, in mm^3 optional (default None)
        Minimum region size in volume required to keep after extraction.
        Removes small or spurious regions.

    connect_diag : bool (default True)
        If 'connect_diag' is True, two voxels are considered in the same region
        if they are connected along the diagonal (26-connectivity). If it is
        False, two voxels are considered connected only if they are within the
        same x, y, or z direction.

    labels : 1D numpy array or list of str, (default None), optional
        Each string in a list or array denote the name of the brain atlas
        regions given in labels_img input. If provided, same names will be
        re-assigned corresponding to each connected component based extraction
        of regions relabelling. The total number of names should match with the
        number of labels assigned in the image.
        NOTE: The order of the names given in labels should be appropriately
        matched with the unique labels (integers) assigned to each region
        given in labels_img.

    Returns
    -------
    new_labels_img : Nifti-like image
        A new image comprising of regions extracted on an input labels_img.
    new_labels : list, optional
        If labels are provided, new labels assigned to region extracted will
        be returned. Otherwise, only new labels image will be returned.

    """
    labels_img = check_niimg_3d(labels_img)
    labels_data = _safe_get_data(labels_img)
    affine = labels_img.get_affine()

    check_unique_labels = np.unique(labels_data)

    if min_size is not None and not isinstance(min_size, numbers.Number):
        raise ValueError("Expected 'min_size' to be specified as integer. "
                         "You provided {0}".format(min_size))
    if not isinstance(connect_diag, bool):
        raise ValueError("'connect_diag' must be specified as True or False. "
                         "You provided {0}".format(connect_diag))
    if np.any(check_unique_labels < 0):
        raise ValueError("The 'labels_img' you provided has unknown/negative "
                         "integers as labels {0} assigned to regions. "
                         "All regions in an image should have positive "
                         "integers assigned as labels."
                         .format(check_unique_labels))

    unique_labels = set(check_unique_labels)

    # check for background label indicated as 0
    if np.any(check_unique_labels == 0):
        unique_labels.remove(0)

    if labels is not None:
        if (not isinstance(labels, collections.Iterable) or
                isinstance(labels, _basestring)):
            labels = [labels, ]
        if len(unique_labels) != len(labels):
            raise ValueError("The number of labels: {0} provided as input "
                             "in labels={1} does not match with the number "
                             "of unique labels in labels_img: {2}. "
                             "Please provide appropriate match with unique "
                             "number of labels in labels_img."
                             .format(len(labels), labels, len(unique_labels)))
        new_names = []

    if labels is None:
        this_labels = [None] * len(unique_labels)
    else:
        this_labels = labels

    new_labels_data = np.zeros(labels_data.shape, dtype=np.int)
    current_max_label = 0
    for label_id, name in zip(unique_labels, this_labels):
        this_label_mask = (labels_data == label_id)
        # Extract regions assigned to each label id
        if connect_diag:
            structure = np.ones((3, 3, 3), dtype=np.int)
            regions, this_n_labels = ndimage.label(
                this_label_mask.astype(np.int), structure=structure)
        else:
            regions, this_n_labels = ndimage.label(this_label_mask.astype(np.int))

        if min_size is not None:
            index = np.arange(this_n_labels + 1)
            this_label_mask = this_label_mask.astype(np.int)
            regions = _remove_small_regions(regions, this_label_mask,
                                            index, affine, min_size=min_size)
            this_n_labels = regions.max()

        cur_regions = regions[regions != 0] + current_max_label
        new_labels_data[regions != 0] = cur_regions
        current_max_label += this_n_labels
        if name is not None:
            new_names.extend([name] * this_n_labels)

    new_labels_img = new_img_like(labels_img, new_labels_data, affine=affine)
    if labels is not None:
        new_labels = new_names
        return new_labels_img, new_labels

    return new_labels_img
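
A usage sketch mirroring nilearn's public connected_label_regions; it assumes the atlas label list, minus its background entry, matches the unique integer labels in the image.

from nilearn import datasets

atlas = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
new_atlas_img, new_names = connected_label_regions(atlas.maps,
                                                   min_size=100,
                                                   labels=atlas.labels[1:])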
Example #14
def find_region_names_using_cut_coords(coords, atlas_img, labels=None):
    """Given list of MNI space coordinates, get names of the brain regions.

    Names of the brain regions are returned by getting nearest coordinates
    in the given `atlas_img` space iterated over the provided list of
    `coords`. These new image coordinates are then used to grab the label
    number (int) and name assigned to it. Last, these names are returned.

    Parameters
    ----------
    coords : Tuples of coordinates in a list
        MNI coordinates.

    atlas_img : Nifti-like image
        Path to or Nifti-like object. The labels (integers) ordered in
        this image should be sequential. Example: [0, 1, 2, 3, 4] but not
        [0, 5, 6, 7]. Helps in returning correct names without errors.

    labels : str in a list
        Names of the brain regions assigned to each label in atlas_img.
        NOTE: the label with index 0 is assumed to be the background, as in
        e.g. the Harvard-Oxford atlas, and should hence be removed.

    Returns
    -------
    new_labels : int in a list
        Labels in integers generated according to correspondence with
        given atlas image and provided coordinates.

    names : str in a list
        Names of the brain regions generated according to given inputs.
    """
    if not isinstance(coords, collections.Iterable):
        raise ValueError("coords given must be a list of triplets of "
                         "coordinates in native space [(1, 2, 3)]. "
                         "You provided {0}".format(type(coords)))

    if isinstance(atlas_img, _basestring):
        atlas_img = check_niimg(atlas_img)

    affine = get_affine(atlas_img)
    atlas_data = _safe_get_data(atlas_img, ensure_finite=True)
    check_labels_from_atlas = np.unique(atlas_data)

    if labels is not None:
        names = []
        if not isinstance(labels, collections.Iterable):
            labels = np.asarray(labels)

    if isinstance(labels, collections.Iterable) and \
            isinstance(check_labels_from_atlas, collections.Iterable):
        if len(check_labels_from_atlas) != len(labels):
            warnings.warn("The number of labels provided does not match "
                          "with number of unique labels with atlas image.",
                          stacklevel=2)

    coords = list(coords)
    nearest_coordinates = []

    for sx, sy, sz in coords:
        nearest = np.round(coord_transform(sx, sy, sz, np.linalg.inv(affine)))
        nearest = nearest.astype(int)
        nearest = (nearest[0], nearest[1], nearest[2])
        nearest_coordinates.append(nearest)

    assert(len(nearest_coordinates) == len(coords))

    new_labels = []
    for coord_ in nearest_coordinates:
        # Grab index of current coordinate
        index = atlas_data[coord_]
        new_labels.append(index)
        if labels is not None:
            names.append(labels[index])

    if labels is not None:
        return new_labels, names
    else:
        return new_labels
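
A hypothetical call mapping a few MNI coordinates onto Harvard-Oxford region names; here labels keeps the background entry at index 0, matching the indexing used above.

from nilearn import datasets

atlas = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
coords = [(30, -22, 50), (-42, 20, 0)]
label_ids, names = find_region_names_using_cut_coords(coords, atlas.maps,
                                                      labels=atlas.labels)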
Example #15
def fetch_masks_dorr_2008(image_format='nifti',
                          downsample='30',
                          data_dir=None,
                          resume=True,
                          verbose=1):
    """Downloads DORR 2008 atlas first, then uses its labels to produce tissue
    masks.

    Parameters
    ----------
    image_format : one of {'nifti', 'minc'}, optional
        Format to download

    downsample : one of {'30', '100'}, optional
        Downsampling resolution in microns.

    data_dir : str, optional
        Path of the data directory. Used to force data storage in a
        non-standard location. Default: None (meaning: default)

    resume : bool, optional
        Whether to resume the download of a partly-downloaded file.

    verbose : int, optional
        verbosity level (0 means no message).

    Returns
    -------
    mask_imgs: sklearn.datasets.base.Bunch
        dictionary-like object, contains:

        - 'brain' : nibabel.nifti1.Nifti1Image brain mask image.

        - 'gm' : nibabel.nifti1.Nifti1Image grey matter mask image.

        - 'cc' : nibabel.nifti1.Nifti1Image eroded corpus callosum image.

        - 'ventricles' : nibabel.nifti1.Nifti1Image eroded ventricles mask
        image.

    Notes
    -----
    This function relies on the DORR 2008 atlas, from which the ventricles and
    corpus callosum regions are picked. Some post-processing is then applied:
    binary closing, to obtain more compact brain and grey matter masks, and
    binary erosion, to obtain corpus callosum and ventricles masks that are not
    contaminated by neighbouring tissue.
    Note: It is advised to check the mask images with your own data processing.

    See Also
    --------
    sammba.data_fetchers.fetch_atlas_dorr_2008: for details regarding
        the DORR 2008 atlas.
    """
    # Fetching DORR 2008 atlas
    dorr = fetch_atlas_dorr_2008(image_format=image_format,
                                 downsample=downsample,
                                 data_dir=data_dir,
                                 resume=resume,
                                 verbose=verbose)
    maps, names, labels = dorr['maps'], dorr['names'], dorr['labels']
    atlas_img = check_niimg(maps)
    atlas_data = niimg._safe_get_data(atlas_img).astype(int)

    brain_mask = (atlas_data > 0)
    brain_mask = ndimage.binary_closing(brain_mask, iterations=2)
    brain_mask_img = image.new_img_like(atlas_img, brain_mask)

    corpus_callosum_labels = labels[np.in1d(
        names.astype(str), ['R corpus callosum', 'L corpus callosum'])]
    print(
        np.in1d(names.astype(str), ['R corpus callosum', 'L corpus callosum']))
    print(corpus_callosum_labels)
    print(np.unique(atlas_data))
    corpus_callosum_mask = np.max(
        [atlas_data == value for value in corpus_callosum_labels], axis=0)
    eroded_corpus_callosum_mask = ndimage.binary_erosion(corpus_callosum_mask,
                                                         iterations=2)
    corpus_callosum_mask_img = image.new_img_like(atlas_img,
                                                  eroded_corpus_callosum_mask)

    ventricles_names = [
        'R lateral ventricle', 'L lateral ventricle', 'third ventricle',
        'fourth ventricle'
    ]
    ventricles_labels = labels[np.in1d(names.astype(str), ventricles_names)]
    ventricles_mask = np.max(
        [atlas_data == value for value in ventricles_labels], axis=0)
    eroded_ventricles_mask = ndimage.binary_erosion(ventricles_mask,
                                                    iterations=2)
    ventricles_mask_img = image.new_img_like(atlas_img, eroded_ventricles_mask)

    gm_mask = (atlas_data > 0)
    gm_mask[ventricles_mask] = 0
    gm_mask[corpus_callosum_mask] = 0
    gm_mask = ndimage.binary_closing(gm_mask, iterations=2)
    gm_mask_img = image.new_img_like(atlas_img, gm_mask)

    mask_imgs = {
        'brain': brain_mask_img,
        'gm': gm_mask_img,
        'cc': corpus_callosum_mask_img,
        'ventricles': ventricles_mask_img
    }

    return Bunch(**mask_imgs)
Example #16
def plot_carpet(
    func,
    atlaslabels=None,
    detrend=True,
    nskip=0,
    size=(950, 800),
    subplot=None,
    title=None,
    output_file=None,
    legend=False,
    tr=None,
    lut=None,
):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        func : string
            Path to NIfTI or CIFTI BOLD image
        atlaslabels: ndarray, optional
            A 3D array of integer labels from an atlas, resampled into ``img`` space.
            Required if ``func`` is a NIfTI image.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int, optional
            Number of volumes at the beginning of the scan marked as nonsteady state.
            Not used.
        size : tuple, optional
            Size of figure.
        subplot : matplotlib Subplot, optional
            Subplot to plot figure on.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with ``atlaslabels`` as
            overlay.
        tr : float , optional
            Specify the TR, if specified it uses this value. If left as None,
            # of frames is plotted instead of time.
        lut : ndarray, optional
            Look up table for segmentations

    """
    epinii = None
    segnii = None
    nslices = None
    img = nb.load(func)

    if isinstance(img, nb.Cifti2Image):
        assert (
            img.nifti_header.get_intent()[0] == "ConnDenseSeries"
        ), "Not a dense timeseries"

        data = img.get_fdata().T
        matrix = img.header.matrix
        struct_map = {
            "LEFT_CORTEX": 1,
            "RIGHT_CORTEX": 2,
            "SUBCORTICAL": 3,
            "CEREBELLUM": 4,
        }
        seg = np.zeros((data.shape[0],), dtype="uint32")
        for bm in matrix.get_index_map(1).brain_models:
            if "CORTEX" in bm.brain_structure:
                lidx = (1, 2)["RIGHT" in bm.brain_structure]
            elif "CEREBELLUM" in bm.brain_structure:
                lidx = 4
            else:
                lidx = 3
            index_final = bm.index_offset + bm.index_count
            seg[bm.index_offset:index_final] = lidx
        assert len(seg[seg < 1]) == 0, "Unassigned labels"

        # Decimate data
        data, seg = _decimate_data(data, seg, size)
        # preserve as much continuity as possible
        order = seg.argsort(kind="stable")

        cmap = ListedColormap([cm.get_cmap("Paired").colors[i] for i in (1, 0, 7, 3)])
        assert len(cmap.colors) == len(
            struct_map
        ), "Mismatch between expected # of structures and colors"

        # ensure no legend for CIFTI
        legend = False

    else:  # Volumetric NIfTI
        img_nii = check_niimg_4d(img, dtype="auto",)
        func_data = _safe_get_data(img_nii, ensure_finite=True)
        ntsteps = func_data.shape[-1]
        data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
        oseg = atlaslabels[atlaslabels > 0].reshape(-1)

        # Map segmentation
        if lut is None:
            lut = np.zeros((256,), dtype="int")
            lut[1:11] = 1
            lut[255] = 2
            lut[30:99] = 3
            lut[100:201] = 4
        # Apply lookup table
        seg = lut[oseg.astype(int)]

        # Decimate data
        data, seg = _decimate_data(data, seg, size)
        # Order following segmentation labels
        order = np.argsort(seg)[::-1]
        # Set colormap
        cmap = ListedColormap(cm.get_cmap("tab10").colors[:4][::-1])

        if legend:
            epiavg = func_data.mean(3)
            epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
            segnii = nb.Nifti1Image(
                lut[atlaslabels.astype(int)], epinii.affine, epinii.header
            )
            segnii.set_data_dtype("uint8")
            nslices = epiavg.shape[-1]

    return _carpet(
        data,
        seg,
        order,
        cmap,
        epinii=epinii,
        segnii=segnii,
        nslices=nslices,
        tr=tr,
        subplot=subplot,
        title=title,
        output_file=output_file,
    )
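
A minimal usage sketch for the function above (an illustration, not part of the original source): the file names are placeholders, and the atlas is assumed to be already resampled into the BOLD grid. Passing ``output_file`` saves and closes the figure.

import nibabel as nb

# Placeholder paths; for a NIfTI input, atlaslabels must be a 3D integer array.
atlaslabels = nb.load("aseg_in_bold_space.nii.gz").get_fdata().astype(int)

plot_carpet(
    "sub-01_task-rest_bold.nii.gz",   # NIfTI path; a CIFTI dtseries would not need atlaslabels
    atlaslabels,
    detrend=True,
    tr=2.0,                           # label the x axis in seconds rather than frames
    output_file="carpet.png",
)
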
Exemplo n.º 17
0
def _draw_colorbar(stat_map_img, axes,
	threshold=.1,
	nb_ticks=5,
	edge_color="0.5",
	edge_alpha=1,
	aspect=40,
	fraction=0.025,
	anchor=(10.0,0.5),
	cut_coords=None,
	positive_only=False,
	negative_only=False,
	cmap=None,
	):
	if isinstance(stat_map_img, str):
		stat_map_img = path.abspath(path.expanduser(stat_map_img))
		stat_map_img = nib.load(stat_map_img)
	# Extract the data outside the branch so pre-loaded images also work.
	stat_map_img_dat = _safe_get_data(stat_map_img, ensure_finite=True)

	if cmap:
		cmap = plt.cm.get_cmap(cmap)
		colors = cmap(np.linspace(0,1,256))
		cmap_minus = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors[:128, :])
		cmap_plus = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors[128:, :])  # include the last color
	else:
		cmap_minus = MYMAP_MINUS
		cmap_plus = MYMAP_PLUS
		cmap = MYMAP

	cbar_vmin,cbar_vmax,vmin, vmax = _get_colorbar_and_data_ranges(stat_map_img_dat,None,"auto","")
	if cbar_vmin is not None or positive_only:
		vmin = 0
		colmap = cmap_plus
	elif cbar_vmax is not None or negative_only:
		vmax = 0
		colmap = cmap_minus
	else:
		colmap = cmap

	cbar_ax, p_ax = make_axes(axes,
		aspect=aspect,
		fraction=fraction,
		# pad=-0.5,
		anchor=anchor,
		# panchor=(-110.0, 0.5),
		)
	ticks = np.linspace(vmin, vmax, nb_ticks)
	bounds = np.linspace(vmin, vmax, colmap.N)
	norm = mcolors.Normalize(vmin=vmin, vmax=vmax)
	# some colormap hacking
	cmaplist = [colmap(i) for i in range(colmap.N)]
	istart = int(norm(-threshold, clip=True) * (colmap.N - 1))
	istop = int(norm(threshold, clip=True) * (colmap.N - 1))
	for i in range(istart, (istop+1)):
		# just an average gray color
		cmaplist[i] = (0.5, 0.5, 0.5, 1.)
	our_cmap = colmap.from_list('Custom cmap', cmaplist, colmap.N)

	cbar = ColorbarBase(
		cbar_ax,
		ticks=ticks,
		norm=norm,
		orientation="vertical",
		cmap=our_cmap,
		boundaries=bounds,
		spacing="proportional",
		format="%.2g",
		)

	cbar.outline.set_edgecolor(edge_color)
	cbar.outline.set_alpha(edge_alpha)

	cbar_ax.yaxis.tick_left()
	tick_color = 'k'
	for tick in cbar_ax.yaxis.get_ticklabels():
		tick.set_color(tick_color)
	cbar_ax.yaxis.set_tick_params(width=0)
	return cbar_ax, p_ax,vmin,vmax,colmap
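
A rough sketch of how this private helper might be driven (an assumption, not part of the source): a placeholder statistical map on disk and a named matplotlib colormap, so the module-level MYMAP fallbacks are not needed.

import matplotlib.pyplot as plt

fig, main_ax = plt.subplots(figsize=(6, 6))
# "zstat1.nii.gz" is a placeholder path; |values| below the threshold are greyed out.
cbar_ax, p_ax, vmin, vmax, colmap = _draw_colorbar(
    "zstat1.nii.gz",
    main_ax,
    threshold=2.3,
    cmap="coolwarm",
)
# vmin, vmax and colmap can then be reused for the matching stat-map display.
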
Exemplo n.º 19
0
def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(950, 800),
                subplot=None, title=None, output_file=None, legend=False,
                lut=None, tr=None):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power, NeuroImage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        atlaslabels: ndarray
            A 3D array of integer labels from an atlas, resampled into ``img`` space.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int
            Number of volumes at the beginning of the scan marked as nonsteady state.
        size : tuple, optional
            Approximate maximum (voxels, timepoints) dimensions of the carpet;
            larger data are decimated to save memory.
        subplot : matplotlib Subplot, optional
            Subplot to plot figure on. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with ``atlaslabels`` as
            overlay.
        tr : float, optional
            Repetition time (TR) in seconds. If provided, the x-axis is labeled
            in seconds; if None, frame numbers are plotted instead.
        lut : ndarray, optional
            Look-up table mapping atlas labels to carpet segmentation groups.
    """

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.

    img_nii = check_niimg_4d(img, dtype='auto',)
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis], interpolation='none', aspect='auto',
               cmap=mycolors, vmin=1, vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto', cmap='gray',
               vmin=v[0], vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max((int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
    labels = tr * (np.array(xticks)) * t_dec
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine, epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices), 5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii, bg_img=epinii, axes=ax2, display_mode='z',
                     annotate=False, cut_coords=[c], threshold=0.1, cmap=mycolors,
                     interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
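
The same plot with in-memory objects instead of file paths (a sketch with placeholder names), taking the repetition time from the NIfTI header:

import nibabel as nb

bold_img = nb.load("sub-01_task-rest_bold.nii.gz")        # placeholder path
atlaslabels = nb.load("segmentation.nii.gz").get_fdata()  # 3D label volume
tr = bold_img.header.get_zooms()[-1]                      # TR in seconds

axes, gs = plot_carpet(bold_img, atlaslabels, detrend=True, tr=tr)
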
Exemplo n.º 20
0
def plot_carpet(img, mask_img=None, detrend=True, output_file=None,
                figure=None, axes=None, title=None):
    """Plot an image representation of voxel intensities across time also know
        as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage
        2017 Jul 1; 154:150-158.
        Parameters
        ----------
        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        mask_img : Niimg-like object, optional
            See http://nilearn.github.io/manipulating_images/input_output.html
            Limit plotted voxels to those inside the provided mask. If not
            specified a new mask will be derived from data.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        figure : matplotlib figure, optional
            Matplotlib figure used. If None is given, a
            new figure is created.
        axes : matplotlib axes, optional
            The axes used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
    """
    img_nii = _utils.check_niimg_4d(img, dtype='auto')
    img_data = _safe_get_data(img_nii, ensure_finite=True)

    # Define TR and number of frames
    tr = img_nii.header.get_zooms()[-1]
    ntsteps = img_nii.shape[-1]

    if not mask_img:
        nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
        nifti_masker.fit(img_nii)
        mask_data = nifti_masker.mask_img_.get_data().astype(bool)
    else:
        mask_nii = _utils.check_niimg_3d(mask_img, dtype='auto')
        mask_data = _safe_get_data(mask_nii, ensure_finite=True)

    data = img_data[mask_data > 0].reshape(-1, ntsteps)
    # Detrend data
    if detrend:
        data = clean(data.T, t_r=tr).T

    if not figure:
        if not axes:
            figure = plt.figure()
        else:
            figure = axes.figure

    if not axes:
        axes = figure.add_subplot(1, 1, 1)
    else:
        assert axes.figure is figure, ("The axes passed are not "
                                       "in the figure")

    # Avoid segmentation faults for long acquisitions by decimating the input
    # data
    long_cutoff = 800
    if data.shape[1] > long_cutoff:
        data = data[:, ::2]
    else:
        data = data[:, :]

    axes.imshow(data, interpolation='nearest',
                aspect='auto', cmap='gray', vmin=-2, vmax=2)

    axes.grid(False)
    axes.set_yticks([])
    axes.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max(
        (int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    axes.set_xticks(xticks)

    axes.set_xlabel('time (s)')
    axes.set_ylabel('voxels')
    if title:
        axes.set_title(title)
    labels = tr * (np.array(xticks))
    if data.shape[1] > long_cutoff:
        labels *= 2
    axes.set_xticklabels(['%.02f' % t for t in labels.tolist()])

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        axes.spines[side].set_color('none')
        axes.spines[side].set_visible(False)

    axes.yaxis.set_ticks_position('left')
    axes.xaxis.set_ticks_position('bottom')
    axes.spines["bottom"].set_position(('outward', 20))
    axes.spines["left"].set_position(('outward', 20))

    if output_file is not None:
        figure.savefig(output_file)
        plt.close(figure)  # Figure has no close() method; close it via pyplot
        figure = None

    return figure
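
A short sketch of calling this mask-based variant (placeholder file names); omitting ``mask_img`` lets the NiftiMasker derive an EPI mask automatically:

fig = plot_carpet(
    "sub-01_task-rest_bold.nii.gz",
    mask_img="sub-01_task-rest_brainmask.nii.gz",
    detrend=True,
    title="Carpet plot",
)
fig.savefig("carpet_masked.png", bbox_inches="tight")
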
Exemplo n.º 21
0
def mask_and_reduce(masker, imgs,
                    confounds=None,
                    reduction_ratio='auto',
                    reduction_method=None,
                    n_components=None, random_state=None,
                    memory_level=0,
                    memory=Memory(cachedir=None),
                    as_shelved_list=False,
                    n_jobs=1,
                    verbose=0):
    """Mask and reduce provided 4D images with given masker.

    Uses a PCA (randomized for small reduction ratio) or a range finding matrix
    on time series to reduce data size in time direction. For multiple images,
    the concatenation of data is returned, either as an ndarray or a memorymap
    (useful for big datasets that do not fit in memory).

    Parameters
    ----------
    masker: NiftiMasker or MultiNiftiMasker
        Instance used to mask provided data.

    imgs: list of 4D Niimg-like objects
        See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
        List of subject data to mask, reduce and stack.

    confounds: CSV file path or 2D matrix, optional
        This parameter is passed to signal.clean. Please see the
        corresponding documentation for details.

    reduction_method: 'svd' | 'rf' | 'ss' | None

    reduction_ratio: 'auto' or float in [0., 1.], optional
        - Between 0. and 1.: controls the compression of the data; 1. means
          no compression.
        - If set to 'auto', the estimator will set the number of components
          per reduced session to n_components.

    n_components: integer, optional
        Number of components per subject to be extracted by dimension reduction

    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    memory_level: integer, optional
        Integer indicating the level of memorization. The higher, the more
        function calls are cached.

    memory: joblib.Memory
        Used to cache the function calls.

    Returns
    -------
    data: ndarray or memorymap
        Concatenation of reduced data.
    """
    if not hasattr(imgs, '__iter__'):
        imgs = [imgs]

    if reduction_ratio == 'auto':
        if n_components is None:
            # Reduction ratio is 1 if
            # neither n_components nor ratio is provided
            reduction_ratio = 1
    else:
        if reduction_ratio is None:
            reduction_ratio = 1
        else:
            reduction_ratio = float(reduction_ratio)
        if not 0 <= reduction_ratio <= 1:
            raise ValueError('Reduction ratio should be between 0. and 1., '
                             'got %.2f' % reduction_ratio)

    if confounds is None:
        confounds = itertools.repeat(confounds)

    if reduction_ratio == 'auto':
        n_samples = n_components
        reduction_ratio = None
    else:
        # We'll let _mask_and_reduce_single decide on the number of
        # samples based on the reduction_ratio
        n_samples = None

    data_list = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(cached_mask_and_reduce_single)(
            masker,
            img, confound,
            reduction_ratio=reduction_ratio,
            reduction_method=reduction_method,
            n_samples=n_samples,
            memory=memory,
            memory_level=memory_level,
            random_state=random_state,
            as_shelved_list=as_shelved_list
        ) for img, confound in zip(imgs, confounds))

    if as_shelved_list:
        return data_list
    else:
        subject_n_samples = [subject_data.shape[0]
                             for subject_data in data_list]

        n_samples = np.sum(subject_n_samples)
        n_voxels = np.sum(_safe_get_data(masker.mask_img_))
        data = np.empty((n_samples, n_voxels), order='F',
                        dtype='float64')

        current_position = 0
        for i, next_position in enumerate(np.cumsum(subject_n_samples)):
            data[current_position:next_position] = data_list[i]
            current_position = next_position
            # Clear memory as fast as possible: remove the reference on
            # the corresponding block of data
            data_list[i] = None
        return data
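
A hedged sketch of a typical call (the masker setup and file names are assumptions): with ``reduction_ratio='auto'`` each run is reduced to ``n_components`` time points before concatenation.

from nilearn.input_data import MultiNiftiMasker

runs = ["sub-01_bold.nii.gz", "sub-02_bold.nii.gz"]  # placeholder file names
masker = MultiNiftiMasker(smoothing_fwhm=6, standardize=True).fit(runs)

data = mask_and_reduce(
    masker,
    runs,
    reduction_ratio="auto",
    reduction_method="svd",
    n_components=20,
    n_jobs=1,
)
# data stacks the reduced runs: roughly (n_runs * n_components, n_voxels_in_mask)
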
Exemplo n.º 22
0
def mask_and_reduce(masker,
                    imgs,
                    confounds=None,
                    reduction_ratio='auto',
                    reduction_method=None,
                    n_components=None,
                    random_state=None,
                    memory_level=0,
                    memory=Memory(cachedir=None),
                    as_shelved_list=False,
                    n_jobs=1,
                    verbose=0):
    """Mask and reduce provided 4D images with given masker.

    Uses a PCA (randomized for small reduction ratio) or a range finding matrix
    on time series to reduce data size in time direction. For multiple images,
    the concatenation of data is returned, either as an ndarray or a memorymap
    (useful for big datasets that do not fit in memory).

    Parameters
    ----------
    masker: NiftiMasker or MultiNiftiMasker
        Instance used to mask provided data.

    imgs: list of 4D Niimg-like objects
        See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
        List of subject data to mask, reduce and stack.

    confounds: CSV file path or 2D matrix, optional
        This parameter is passed to signal.clean. Please see the
        corresponding documentation for details.

    reduction_method: 'svd' | 'rf' | 'ss' | None

    reduction_ratio: 'auto' or float in [0., 1.], optional
        - Between 0. and 1.: controls the compression of the data; 1. means
          no compression.
        - If set to 'auto', the estimator will set the number of components
          per reduced session to n_components.

    n_components: integer, optional
        Number of components per subject to be extracted by dimension reduction

    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    memory_level: integer, optional
        Integer indicating the level of memorization. The higher, the more
        function calls are cached.

    memory: joblib.Memory
        Used to cache the function calls.

    Returns
    -------
    data: ndarray or memorymap
        Concatenation of reduced data.
    """
    if not hasattr(imgs, '__iter__'):
        imgs = [imgs]

    if reduction_ratio == 'auto':
        if n_components is None:
            # Reduction ratio is 1 if
            # neither n_components nor ratio is provided
            reduction_ratio = 1
    else:
        if reduction_ratio is None:
            reduction_ratio = 1
        else:
            reduction_ratio = float(reduction_ratio)
        if not 0 <= reduction_ratio <= 1:
            raise ValueError('Reduction ratio should be between 0. and 1., '
                             'got %.2f' % reduction_ratio)

    if confounds is None:
        confounds = itertools.repeat(confounds)

    if reduction_ratio == 'auto':
        n_samples = n_components
        reduction_ratio = None
    else:
        # We'll let _mask_and_reduce_single decide on the number of
        # samples based on the reduction_ratio
        n_samples = None

    data_list = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(cached_mask_and_reduce_single)(
            masker,
            img,
            confound,
            reduction_ratio=reduction_ratio,
            reduction_method=reduction_method,
            n_samples=n_samples,
            memory=memory,
            memory_level=memory_level,
            random_state=random_state,
            as_shelved_list=as_shelved_list)
        for img, confound in zip(imgs, confounds))

    if as_shelved_list:
        return data_list
    else:
        subject_n_samples = [
            subject_data.shape[0] for subject_data in data_list
        ]

        n_samples = np.sum(subject_n_samples)
        n_voxels = np.sum(_safe_get_data(masker.mask_img_))
        data = np.empty((n_samples, n_voxels), order='F', dtype='float64')

        current_position = 0
        for i, next_position in enumerate(np.cumsum(subject_n_samples)):
            data[current_position:next_position] = data_list[i]
            current_position = next_position
            # Clear memory as fast as possible: remove the reference on
            # the corresponding block of data
            data_list[i] = None
        return data
Exemplo n.º 23
0
    def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(4000, 3000),
                    subplot=None, title=None, output_file=None, legend=False,
                    lut=None):
        """
        Adapted from: https://github.com/poldracklab/niworkflows

        Plot an image representation of voxel intensities across time, also known
        as the "carpet plot" or "Power plot". See Jonathan Power, NeuroImage
        2017 Jul 1; 154:150-158.
        Parameters
        ----------
            img : Niimg-like object
                See http://nilearn.github.io/manipulating_images/input_output.html
                4D input image
            atlaslabels: ndarray
                A 3D array of integer labels from an atlas, resampled into ``img`` space.
            detrend : boolean, optional
                Detrend and standardize the data prior to plotting.
            nskip : int
                Number of volumes at the beginning of the scan marked as nonsteady state.
            size : tuple, optional
                Approximate maximum (voxels, timepoints) dimensions of the carpet;
                larger data are decimated to save memory.
            subplot : matplotlib Subplot, optional
                Subplot to plot figure on. If None, the complete
                figure is used.
            title : string, optional
                The title displayed on the figure.
            output_file : string, or None, optional
                The name of an image file to export the plot to. Valid extensions
                are .png, .pdf, .svg. If output_file is not None, the plot
                is saved to a file, and the display is closed.
            legend : bool
                Whether to render the average functional series with ``atlaslabels`` as
                overlay.
        """
        import numpy as np
        import nibabel as nb

        import matplotlib.pyplot as plt
        from matplotlib import gridspec as mgs
        import matplotlib.cm as cm
        from matplotlib.colors import ListedColormap

        from nilearn.plotting import plot_img
        from nilearn.signal import clean
        from nilearn._utils import check_niimg_4d
        from nilearn._utils.niimg import _safe_get_data

        # actually load data
        img = nb.load(img)
        atlaslabels = nb.load(atlaslabels).get_data()

        img_nii = check_niimg_4d(img, dtype='auto')
        func_data = _safe_get_data(img_nii, ensure_finite=True)

        minimum = np.min(func_data)
        maximum = np.max(func_data)
        myrange = maximum - minimum

        # Define TR and number of frames
        tr = img_nii.header.get_zooms()[-1]
        ntsteps = func_data.shape[-1]

        data = func_data.reshape(-1, ntsteps)#[atlaslabels > 0].reshape(-1, ntsteps)
        seg = atlaslabels[atlaslabels > 0].reshape(-1)

        # Map segmentation
        if lut is None:
            lut = np.zeros((256,), dtype='int')
            #lut[1:11] = 1
            #lut[255] = 2
            #lut[30:99] = 3
            #lut[100:201] = 4

            lut[1] = 1
            lut[2] = 2
            lut[3] = 3
            lut[4] = 4
            lut[5] = 5
            lut[6] = 6
            lut[7] = 7

        # Apply lookup table
        newsegm = lut[seg.astype(int)]

        p_dec = 1 + data.shape[0] // size[0]
        #if p_dec:
        #    data = data[::p_dec, :]
        #    newsegm = newsegm[::p_dec]

        #t_dec = 1 + data.shape[1] // size[1]
        #if t_dec:
        #    data = data[:, ::t_dec]

        # Detrend data
        v = (None, None)
        if detrend:
            data = clean(data.T, t_r=tr).T
            v = (-2, 2)

        # Order following segmentation labels
        order = np.argsort(newsegm)[::-1]

        # If subplot is not defined
        if subplot is None:
            subplot = mgs.GridSpec(1, 1)[0]

        # Define nested GridSpec
        wratios = [1, 100, 20]
        gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
                                         width_ratios=wratios[:2 + int(legend)],
                                         wspace=0.0)

        mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

        # Segmentation colorbar
        ax0 = plt.subplot(gs[0])
        ax0.set_yticks([])
        ax0.set_xticks([])
        ax0.imshow(newsegm[order, np.newaxis], interpolation='none', aspect='auto',
                   cmap=mycolors, vmin=1, vmax=4)
        ax0.grid(False)
        ax0.spines["left"].set_visible(False)
        ax0.spines["bottom"].set_color('none')
        ax0.spines["bottom"].set_visible(False)

        # Carpet plot
        ax1 = plt.subplot(gs[1])
        print("*****************************************************************************************************")
        print(order)
        print(data.shape)
        ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto', cmap='gray',
                   vmin=v[0], vmax=v[1])

        ax1.grid(False)
        ax1.set_yticks([])
        ax1.set_yticklabels([])

        ax1.annotate(
            'intensity range: ' + str(myrange), xy=(0.0, 1.02), xytext=(0, 0), xycoords='axes fraction',
            textcoords='offset points', va='center', ha='left',
            color='r', size=6,
            bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none',
                  'color': 'none', 'lw': 0, 'alpha': 0.0})

        # Set 10 frame markers in X axis
        interval = max((int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
        xticks = list(range(0, data.shape[-1])[::interval])
        ax1.set_xticks(xticks)
        ax1.set_xlabel('time (s)')
        labels = tr * np.array(xticks)  # time decimation is disabled above, so no t_dec factor
        ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

        # Remove and redefine spines
        for side in ["top", "right"]:
            # Toggle the spine objects
            ax0.spines[side].set_color('none')
            ax0.spines[side].set_visible(False)
            ax1.spines[side].set_color('none')
            ax1.spines[side].set_visible(False)

        ax1.yaxis.set_ticks_position('left')
        ax1.xaxis.set_ticks_position('bottom')
        ax1.spines["bottom"].set_visible(False)
        ax1.spines["left"].set_color('none')
        ax1.spines["left"].set_visible(False)

        if legend:
            gslegend = mgs.GridSpecFromSubplotSpec(
                5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
            epiavg = func_data.mean(3)
            epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
            segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine, epinii.header)
            segnii.set_data_dtype('uint8')

            nslices = epiavg.shape[-1]
            coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices), 5).astype(np.uint8)
            for i, c in enumerate(coords.tolist()):
                ax2 = plt.subplot(gslegend[i])
                plot_img(segnii, axes=ax2, display_mode='z',
                         annotate=False, cut_coords=[c], threshold=0.1, cmap=mycolors,
                         interpolation='nearest')

        if output_file is not None:
            figure = plt.gcf()
            figure.savefig(output_file, bbox_inches='tight')
            plt.close(figure)
            figure = None
            return output_file

        return [ax0, ax1], gs
Exemplo n.º 24
0
def plot_carpet(img,
                atlaslabels,
                detrend=True,
                nskip=0,
                size=(950, 800),
                subplot=None,
                title=None,
                output_file=None,
                legend=False,
                lut=None,
                tr=None):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power, NeuroImage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        atlaslabels: ndarray
            A 3D array of integer labels from an atlas, resampled into ``img`` space.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int
            Number of volumes at the beginning of the scan marked as nonsteady state.
        size : tuple, optional
            Approximate maximum (voxels, timepoints) dimensions of the carpet;
            larger data are decimated to save memory.
        subplot : matplotlib Subplot, optional
            Subplot to plot figure on. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with ``atlaslabels`` as
            overlay.
        tr : float, optional
            Repetition time (TR) in seconds. If provided, the x-axis is labeled
            in seconds; if None, frame numbers are plotted instead.
        lut : ndarray, optional
            Look-up table mapping atlas labels to carpet segmentation groups.
    """

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.

    img_nii = check_niimg_4d(
        img,
        dtype='auto',
    )
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1,
                                     2 + int(legend),
                                     subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis],
               interpolation='none',
               aspect='auto',
               cmap=mycolors,
               vmin=1,
               vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...],
               interpolation='nearest',
               aspect='auto',
               cmap='gray',
               vmin=v[0],
               vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max(
        (int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
    labels = tr * (np.array(xticks)) * t_dec
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(5,
                                               1,
                                               subplot_spec=gs[2],
                                               wspace=0.0,
                                               hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine,
                                epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices),
                             5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii,
                     bg_img=epinii,
                     axes=ax2,
                     display_mode='z',
                     annotate=False,
                     cut_coords=[c],
                     threshold=0.1,
                     cmap=mycolors,
                     interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
Exemplo n.º 25
0
def get_clusters_table(stat_img,
                       stat_threshold,
                       cluster_threshold=None,
                       two_sided=False,
                       min_distance=8.):
    """Creates pandas dataframe with img cluster statistics.

    Parameters
    ----------
    stat_img : Niimg-like object,
       Statistical image (presumably in z- or p-scale).

    stat_threshold : `float`
        Cluster forming threshold in same scale as `stat_img` (either a
        p-value or z-scale value).

    cluster_threshold : `int` or `None`, optional
        Cluster size threshold, in voxels.

    two_sided : `bool`, optional
        Whether to employ two-sided thresholding or to evaluate positive values
        only. Default=False.

    min_distance : `float`, optional
        Minimum distance between subpeaks in mm. Default=8mm.

    Returns
    -------
    df : `pandas.DataFrame`
        Table with peaks and subpeaks from thresholded `stat_img`. For binary
        clusters (clusters with >1 voxel containing only one value), the table
        reports the center of mass of the cluster,
        rather than any peaks/subpeaks.

    """
    cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
    # Replace None with 0
    cluster_threshold = 0 if cluster_threshold is None else cluster_threshold

    # check that stat_img is niimg-like object and 3D
    stat_img = check_niimg_3d(stat_img)

    # Apply threshold(s) to image
    stat_img = threshold_img(
        img=stat_img,
        threshold=stat_threshold,
        cluster_threshold=cluster_threshold,
        two_sided=two_sided,
        mask_img=None,
        copy=True,
    )

    # If a cluster threshold is used, stat_map may have been modified in place,
    # so request a copy of the data.
    stat_map = _safe_get_data(stat_img,
                              ensure_finite=True,
                              copy_data=(cluster_threshold is not None))

    # Define array for 6-connectivity, aka NN1 or "faces"
    conn_mat = np.zeros((3, 3, 3), int)
    conn_mat[1, 1, :] = 1
    conn_mat[1, :, 1] = 1
    conn_mat[:, 1, 1] = 1
    voxel_size = np.prod(stat_img.header.get_zooms())

    signs = [1, -1] if two_sided else [1]
    no_clusters_found = True
    rows = []
    for sign in signs:
        # Flip map if necessary
        temp_stat_map = stat_map * sign

        # Binarize using CDT
        binarized = temp_stat_map > stat_threshold
        binarized = binarized.astype(int)

        # If the stat threshold is too high simply return an empty dataframe
        if np.sum(binarized) == 0:
            warnings.warn(
                'Attention: No clusters with stat {0} than {1}'.format(
                    'higher' if sign == 1 else 'lower',
                    stat_threshold * sign,
                ))
            continue

        # Now re-label and create table
        label_map = ndimage.measurements.label(binarized, conn_mat)[0]
        clust_ids = sorted(list(np.unique(label_map)[1:]))
        peak_vals = np.array(
            [np.max(temp_stat_map * (label_map == c)) for c in clust_ids])
        # Sort by descending max value
        clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]

        for c_id, c_val in enumerate(clust_ids):
            cluster_mask = label_map == c_val
            masked_data = temp_stat_map * cluster_mask

            cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)

            # Get peaks, subpeaks and associated statistics
            subpeak_ijk, subpeak_vals = _local_max(
                masked_data,
                stat_img.affine,
                min_distance=min_distance,
            )
            subpeak_vals *= sign  # flip signs if necessary
            subpeak_xyz = np.asarray(
                coord_transform(
                    subpeak_ijk[:, 0],
                    subpeak_ijk[:, 1],
                    subpeak_ijk[:, 2],
                    stat_img.affine,
                )).tolist()
            subpeak_xyz = np.array(subpeak_xyz).T

            # Only report peak and, at most, top 3 subpeaks.
            n_subpeaks = np.min((len(subpeak_vals), 4))
            for subpeak in range(n_subpeaks):
                if subpeak == 0:
                    row = [
                        c_id + 1,
                        subpeak_xyz[subpeak, 0],
                        subpeak_xyz[subpeak, 1],
                        subpeak_xyz[subpeak, 2],
                        subpeak_vals[subpeak],
                        cluster_size_mm,
                    ]
                else:
                    # Subpeak naming convention is cluster num+letter:
                    # 1a, 1b, etc
                    sp_id = '{0}{1}'.format(
                        c_id + 1,
                        ascii_lowercase[subpeak - 1],
                    )
                    row = [
                        sp_id,
                        subpeak_xyz[subpeak, 0],
                        subpeak_xyz[subpeak, 1],
                        subpeak_xyz[subpeak, 2],
                        subpeak_vals[subpeak],
                        '',
                    ]
                rows += [row]

        # If we reach this point, there are clusters in this sign
        no_clusters_found = False

    if no_clusters_found:
        df = pd.DataFrame(columns=cols)
    else:
        df = pd.DataFrame(columns=cols, data=rows)

    return df
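
Finally, a minimal usage sketch for the cluster table, using a placeholder z-map path and a conventional z > 3.1 cluster-forming threshold:

table = get_clusters_table(
    "zmap.nii.gz",          # placeholder path to a 3D z-statistic image
    stat_threshold=3.1,
    cluster_threshold=10,   # discard clusters smaller than 10 voxels
    two_sided=True,
)
print(table[["Cluster ID", "X", "Y", "Z", "Peak Stat"]])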