def save_brainmap(data, lh, rh, fname, **kwargs):
    """
    Plots parcellated `data` to the surface and saves to `fname`

    Parameters
    ----------
    data : array_like
        Parcellated data to be plotted to the surface. Should be in the order
        {left,right} hemisphere
    {lh,rh} : str or os.PathLike
        Annotation files for the {left,right} hemisphere, matching `data`.
        By default assumes these are 'fsaverage' resolution. Set `subject_id`
        kwarg if different.
    fname : str or os.PathLike
        Filepath where plotted figure should be saved
    """

    opts = dict(alpha=1.0,
                views=['lat'],
                colormap=PARULA,
                colorbar=True,
                surf='inflated',
                subject_id='fsaverage',
                size_per_view=500,
                offscreen=True,
                noplot=[
                    b'unknown', b'corpuscallosum',
                    b'Background+FreeSurfer_Defined_Medial_Wall'
                ])
    opts.update(kwargs)
    fig = nnplot.plot_fsaverage(data, lhannot=lh, rhannot=rh, **opts)
    from pathlib import Path  # local import; accepts str or os.PathLike fnames
    fname = Path(fname)
    fname.parent.mkdir(parents=True, exist_ok=True)
    fig.save_image(fname)
    fig.close()
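
# A minimal usage sketch for the function above, assuming parcellated data
# ordered left then right hemisphere and fsaverage-resolution .annot files.
# The annotation paths, output path, and data array below are placeholders,
# not part of the original example.
from pathlib import Path

import numpy as np

lh_annot = 'atlases/lh.myparcellation.annot'  # hypothetical left-hemisphere annotation
rh_annot = 'atlases/rh.myparcellation.annot'  # hypothetical right-hemisphere annotation
out_file = Path('figures/brainmap.png')       # hypothetical output location

values = np.random.rand(68)  # one value per parcel, left hemisphere first

save_brainmap(values, lh_annot, rh_annot, out_file, views=['lat'])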
Example #2
def save_brainmap(data, fname, lh=None, rh=None, **kwargs):
    """
    Plots `data` to the surface and saves to `fname`

    Parameters
    ----------
    data : array_like
        Parcellated data to be plotted to the surface. Should be in the order
        {left,right} hemisphere
    fname : str or os.PathLike
        Filepath where plotted figure should be saved
    {lh,rh} : str or os.PathLike, optional
        Annotation files for the {left,right} hemisphere if `data` are
        in parcellated format. By default assumes files are 'fsaverage'
        resolution; set `subject_id` kwarg if different. Default: None
    kwargs : key-value pairs
        Passed to :func:`netneurotools.plotting.plot_fsvertex` (default) or
        :func:`netneurotools.plotting.plot_fsaverage` (if data are parcellated)

    Returns
    -------
    fname : os.PathLike
        Same as provided `fname`
    """

    if (lh is not None and rh is None) or (lh is None and rh is not None):
        raise ValueError('Both lh and rh must be provided')

    opts = dict(
        alpha=1.0,
        views=['lat'],
        colormap='RdBu_r',
        colorbar=True,
        surf='inflated',
        subject_id='fsaverage',
        size_per_view=500,
        offscreen=True,
    )
    opts.update(kwargs)

    if lh is None and rh is None:
        fig = plot_fsvertex(data, **opts)
    else:
        fig = plot_fsaverage(data, lhannot=lh, rhannot=rh, **opts)

    pathify(fname).parent.mkdir(parents=True, exist_ok=True)
    fig.save_image(fname)
    fig.close()

    return fname
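
# `pathify` is referenced above but not defined in this example. A minimal
# sketch of what such a helper could look like, assuming it simply coerces
# str or os.PathLike inputs to a pathlib.Path (an assumption, not the
# original implementation):
import os
from pathlib import Path


def pathify(fname):
    """Coerce `fname` (str or os.PathLike) to a pathlib.Path."""
    return Path(os.fspath(fname))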
def plot_brain_surface(values,
                       network,
                       hemi=None,
                       cmap="viridis",
                       alpha=0.8,
                       colorbar=True,
                       centered=False,
                       vmin=None,
                       vmax=None,
                       representation='surface'):
    '''
    Function to plot data on the brain, on a surface parcellation.

    Parameters
    ----------
    values : ndarray (n,)
        Values to be plotted on the brain, where n is the number of nodes in
        the parcellation.
    network : dictionary
        Dictionary storing the network associated with the values (used to
        identify the appropriate surface parcellation)
    hemi : str, optional
        Hemisphere to plot ('L' or 'R'). If None, it is taken from
        network['info']['hemi']. Default: None
    '''

    cortical_hemi_mask = network['hemi_mask'][network['subcortex_mask'] == 0]
    n = len(cortical_hemi_mask)

    if hemi is None:
        hemi = network['info']['hemi']

    if hemi == "L":
        scores = np.zeros((n)) + np.mean(values)
        scores[cortical_hemi_mask == 1] = values
        values = scores
    elif hemi == "R":
        scores = np.zeros((n)) + np.mean(values)
        scores[cortical_hemi_mask == 0] = values
        values = scores

    order = network["order"]
    noplot = network["noplot"]
    lh = network["lhannot"]
    rh = network["rhannot"]

    if not os.path.isfile(lh) or not os.path.isfile(rh):
        fetch_cammoun2012(version='fsaverage')
        fetch_schaefer2018()

    # Adjust colormap based on parameters
    if centered is True:
        m = max(abs(np.amin(values)), np.amax(values))
        vmin = -m
        vmax = m
    else:
        if vmin is None:
            vmin = np.amin(values)
        if vmax is None:
            vmax = np.amax(values)

    # Plot the brain surface
    im = plot_fsaverage(values,
                        lhannot=lh,
                        rhannot=rh,
                        noplot=noplot,
                        order=order,
                        views=['lateral', 'm'],
                        vmin=vmin,
                        vmax=vmax,
                        colormap=cmap,
                        alpha=alpha,
                        colorbar=colorbar,
                        data_kws={'representation': representation},
                        show_toolbar=True)

    return im
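
# Sketch of the `network` dictionary expected by plot_brain_surface, inferred
# from the keys accessed above. The array sizes, file paths, and values below
# are placeholders, not part of the original example.
import numpy as np

example_network = {
    'hemi_mask': np.array([1] * 34 + [0] * 34),    # 1 = left, 0 = right hemisphere
    'subcortex_mask': np.zeros(68, dtype=int),     # 1 where a node is subcortical
    'info': {'hemi': None},                        # 'L', 'R', or None for both
    'order': 'LR',                                 # hemisphere order of the annotations
    'noplot': None,                                # parcel labels to leave unplotted
    'lhannot': 'atlases/lh.myparcellation.annot',  # left-hemisphere .annot file
    'rhannot': 'atlases/rh.myparcellation.annot',  # right-hemisphere .annot file
}

# One value per cortical node in the parcellation:
# im = plot_brain_surface(np.random.rand(68), example_network)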
Example #4
def figure_5(data, method='laplacian', panels='all',
             show=True, save=False, save_path=None):
    '''
    Function to create Figure 5

    Parameters
    ----------
    data : dict
        Dictionary of the data to be used to generate the figures. If the
        required data is not found in this dictionary, the panels of Figure
        5 that require this data will not be created and a message
        will be printed.
    method : str
        Method used to compute the transition probabilities. The purpose of
        this parameter is to choose whether the x_scale of our figures should
        be linear or logarithmic.
    panels : str or list
        List of the panels of Figure 5 that we want to create. If we want
        to create all of them, use 'all'. Otherwise, individual panels can
        be specified. For example, we could have panels=['a'] or
        panels=['a', 'b'].
    show : Boolean
        If True, the figures will be displayed. If not, the figures will
        not be displayed.
    save : Boolean
        If True, the figures will be saved in the folder specified by the
        save_path parameter.
    save_path : str
        Path of the folder in which the figures will be saved.
    '''

    if show is False:
        plt.ioff()

    if panels == 'all':
        panels = ['a', 'b', 'c', 'd', 'e', 'f']

    n = len(data['sc'])
    k = len(data['t_points'])

    # Colors for measures in panels (a) and (b)
    c = Spectral_4.hex_colors

    # Create a mask that ignores the diagonal and node pairs whose fc
    # score was bootstrapped to 0.
    mask = np.zeros((n, n), dtype="bool")
    mask[:] = True
    mask[np.diag_indices(n)] = False
    mask[data['fc'] <= 0] = False

    # Identify single-scale measures to be used
    communicability = communicability_wei(data["sc"])
    single_scale_measures = [data['sc'], -data['sp'], communicability]
    labels = ['SC weights', "(-)shortest path", "communicability"]

    # Compute correlations between FC and single-scale measures
    m = len(single_scale_measures)
    single_scale_corrs = np.zeros((m))
    for i, measure in enumerate(single_scale_measures):
        single_scale_corrs[i] = pearsonr(measure[mask], data['fc'][mask])[0]

    # Compute correlations between FC and neighborhood similarity
    nei_sim_corrs = np.zeros((k))
    for i in range(k):
        nei_sim_corrs[i] = pearsonr(data['nei_sim'][i, :, :][mask],
                                    data['fc'][mask])[0]
    best_n_sim_t = np.argmax(nei_sim_corrs)
    best_n_sim = data['nei_sim'][best_n_sim_t, :, :]

    # Local correlations between neighborhood similarity and fc
    # Compute only if not already computed
    if 'local_fc_rhos' not in data:
        rhos = np.zeros((k, n))
        for i in tqdm.trange(k, desc='local fc-similarity rho\'s'):
            for j in range(n):
                rhos[i, j] = spearmanr(np.delete(data['nei_sim'][i, j, :], j),
                                       np.delete(data["fc"][j, :], j))[0]
        data['local_fc_rhos'] = rhos.copy()
    else:
        rhos = data['local_fc_rhos']

    best_t_id = np.argmax(rhos, axis=0)
    rhos_sorted = rhos[:, np.argsort(best_t_id)]
    rhos_sorted_z = zscore(rhos_sorted, axis=0)

    if 'a' in panels:

        required_entries = ['t_points']
        requirements = check_requirements(data, required_entries)
        if requirements:

            fig = plt.figure(figsize=(4, 4))
            for i in range(m):
                plt.plot([data['t_points'][0], data['t_points'][-1]],
                         [single_scale_corrs[i], single_scale_corrs[i]],
                         c=c[i],
                         label=labels[i])
            plt.plot(data['t_points'],
                     nei_sim_corrs,
                     c=c[i+1],
                     linestyle="dashed",
                     label="neighborhood similarity")

            if method == 'laplacian':
                plt.xscale('log')

            plt.xlabel("t")
            plt.ylabel("Pearson's r")
            plt.legend()

            if save:
                figure_name = 'global_fc_correlations.png'
                fig.savefig(os.path.join(save_path, figure_name))

    if 'b' in panels:

        required_entries = ['fc']
        requirements = check_requirements(data, required_entries)
        if requirements:

            for i in range(m):
                plt.figure(figsize=(2, 2))
                plt.scatter(single_scale_measures[i][mask],
                            data["fc"][mask],
                            c=c[i],
                            s=3,
                            alpha=0.1,
                            rasterized=True)
                p = np.polyfit(single_scale_measures[i][mask],
                               data["fc"][mask],
                               1)
                x_fit = np.array([np.amin(single_scale_measures[i][mask]),
                                  np.amax(single_scale_measures[i][mask])])
                y_fit = x_fit * p[0] + p[1]
                plt.plot(x_fit, y_fit, c="black", linestyle="dashed")
                plt.title("r: "+str(round(single_scale_corrs[i], 4)))

                if save:
                    figure_name = "scatter_fc_" + labels[i] + ".png"
                    plt.savefig(os.path.join(save_path, figure_name))

            plt.figure(figsize=(2, 2))
            plt.scatter(best_n_sim[mask],
                        data["fc"][mask], c=c[i+1],
                        alpha=0.1,
                        s=3, rasterized=True)
            p = np.polyfit(best_n_sim[mask],
                           data["fc"][mask], 1)
            x_fit = np.array([np.amin(best_n_sim[mask]),
                              np.amax(best_n_sim[mask])])
            y_fit = x_fit * p[0] + p[1]
            plt.plot(x_fit, y_fit, c="black", linestyle="dashed")
            plt.title("r: "+str(round(np.amax(nei_sim_corrs), 4)))

            if save:
                figure_name = "scatter_fc_nei_similarity.png"
                plt.savefig(os.path.join(save_path, figure_name))

    if 'c' in panels:

        required_entries = ['sc', 'sp']
        requirements = check_requirements(data, required_entries)
        if requirements:

            hm_labels = ["S", "C", "A", r'$\Phi$']
            masked_measures = [best_n_sim[mask],
                               communicability[mask],
                               data['sc'][mask],
                               -data['sp'][mask]]

            correlations = np.corrcoef(np.array(masked_measures))

            fig, ax = plt.subplots()
            im, cbar = heatmap(correlations, hm_labels, hm_labels, ax=ax,
                               cmap=Spectral_4_r.mpl_colormap,
                               cbarlabel="Pearson's r", vmin=0, vmax=1,
                               aspect="equal", grid_width=3)
            annotate_heatmap(im,
                             valfmt="{x:.2f}",
                             textcolors=["black", "white"])

            if save:
                figure_name = "heatmap_fc_correlations.png"
                fig.savefig(os.path.join(save_path, figure_name))

    if 'd' in panels:

        # Heatmap of local Spearman correlations across time points
        # (nodes sorted by their best t)
        plt.figure()
        m = max(abs(np.percentile(rhos_sorted_z, 2.5)),
                np.percentile(rhos_sorted_z, 97.5)
                )
        plt.imshow(rhos_sorted_z.T,
                   aspect='auto',
                   cmap=RdBu_11_r.mpl_colormap,
                   vmin=-m,
                   vmax=m)
        cbar = plt.colorbar()
        cbar.set_label("spearman's rho (row-standardized)")

        if save is True:
            figure_name = 'heatmap_local_fc_correlations.png'
            plt.savefig(os.path.join(save_path, figure_name))

    if 'e' in panels:

        required_entries = ['t_points', 'lhannot', 'rhannot', 'noplot',
                            'order']
        requirements = check_requirements(data, required_entries)
        if requirements:

            log_best_t = np.log10(data['t_points'][best_t_id])
            im = plot_fsaverage(log_best_t,
                                lhannot=data['lhannot'],
                                rhannot=data['rhannot'],
                                noplot=data['noplot'],
                                order=data['order'],
                                views=['lateral', 'm'],
                                vmin=np.amin(log_best_t),
                                vmax=np.amax(log_best_t),
                                colormap=Spectral_4_r.mpl_colormap)

            if save:
                figure_name = "log_best_t_brain_surface.png"
                im.save_image(os.path.join(save_path, figure_name),
                              mode='rgba')

    if show is False:
        plt.close('all')
        plt.ion()
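
# `check_requirements` is used throughout these examples but not shown. A
# minimal sketch under the assumption that it only verifies that the required
# keys are present in `data`, prints a message for missing ones, and returns
# a boolean (an assumption, not the original implementation):
def check_requirements(data, required_entries):
    """Return True if every entry in `required_entries` is a key of `data`."""
    missing = [entry for entry in required_entries if entry not in data]
    if missing:
        print("Missing data, skipping panel: " + ", ".join(missing))
        return False
    return True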
Example #5
def figure_4(data, specify, t_ids_b, method='laplacian', panels='all',
             show=True, save=False, save_path=None):
    '''
    Function to create Figure 4

    Parameters
    ----------
    data : dict
        Dictionary of the data to be used to generate the figures. If the
        required data is not found in this dictionary, the panels of Figure
        4 that require this data will not be created and a message
        will be printed.
    specify : dict
        Dictionary containing information about the trajectories that will be
        shown in panel a (see main_script.py for an example).
    t_ids_b : list
        List of time points indices indicating the time points at which
        the centrality slope distributions will be plotted on the surface
        of the brain (panel b).
    method : str
        Method used to compute the transition probabilities. The purpose of
        this parameter is to choose whether the x_scale of our figures should
        be linear or logarithmic.
    panels : str or list
        List of the panels of Figure 4 that we want to create. If we want
        to create all of them, use 'all'. Otherwise, individual panels can
        be specified. For example, we could have panels=['a'] or
        panels=['a', 'b'].
    show : Boolean
        If True, the figures will be displayed. If not, the figures will
        not be displayed.
    save : Boolean
        If True, the figures will be saved in the folder specified by the
        save_path parameter.
    save_path : str
        Path of the folder in which the figures will be saved.
    '''

    if show is False:
        plt.ioff()

    if panels == 'all':
        panels = ['a', 'b', 'c']

    n = len(data['sc'])
    k = len(data['t_points'])

    # Slopes of the closeness centrality trajectories
    slopes = np.gradient(data['cmulti'], axis=0)

    if 'a' in panels:

        required_entries = ['t_points', 'cmulti']
        requirements = check_requirements(data, required_entries)
        if requirements:

            node_ids = specify['ID']

            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'],
                        data['cmulti'][:, i],
                        c='lightgray')

            abs_max_color = max(-1 * np.amin(slopes), np.amax(slopes))

            for i, node_id in enumerate(node_ids):
                norm = plt.Normalize(-abs_max_color, abs_max_color)
                slope_colors = RdBu_11_r.mpl_colormap(norm(slopes[:, node_id]))
                for ii in range(k-1):
                    plt.plot([data['t_points'][ii],
                              data['t_points'][ii+1]],
                             [data['cmulti'][ii, node_id],
                              data['cmulti'][ii+1, node_id]],
                             c=slope_colors[ii, :],
                             linewidth=3)

            if method == 'laplacian':
                plt.xscale('log')

            plt.xlabel('t')
            plt.ylabel("c_multi")

            if save:
                figure_name = 'cmulti_with_gradient.png'
                fig.savefig(os.path.join(save_path, figure_name))

    if 'b' in panels:

        required_entries = ['t_points', 'lhannot', 'rhannot', 'noplot',
                            'order']
        requirements = check_requirements(data, required_entries)
        if requirements:

            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'],
                        slopes[:, i],
                        c='lightgray',
                        zorder=0)
            for t_id in t_ids_b:
                plt.scatter(np.zeros((n))+data['t_points'][t_id],
                            slopes[t_id, :],
                            marker='s', c=slopes[t_id, :],
                            cmap=RdBu_11_r.mpl_colormap, rasterized=True,
                            zorder=1)

            if method == 'laplacian':
                plt.xscale('log')

            plt.xlabel('t')
            plt.ylabel("slope")

            if save:
                figure_name = 'slopes.png'
                fig.savefig(os.path.join(save_path, figure_name))

            for t_id in t_ids_b:
                im = plot_fsaverage(slopes[t_id, :],
                                    lhannot=data['lhannot'],
                                    rhannot=data['rhannot'],
                                    noplot=data['noplot'],
                                    order=data['order'],
                                    views=['lateral', 'm'],
                                    vmin=np.amin(slopes[t_id, :]),
                                    vmax=np.amax(slopes[t_id, :]),
                                    colormap=RdBu_11_r.mpl_colormap)

                if save:
                    figure_name = ('slopes_brain_surface_' +
                                   str(int(round(data['t_points'][t_id]))) +
                                   '.png')
                    im.save_image(os.path.join(save_path, figure_name),
                                  mode='rgba')

    if 'c' in panels:

        required_entries = ['sc', 't_points', 'ci']
        requirements = check_requirements(data, required_entries)
        if requirements:

            measures = []
            labels = []

            measures.append(np.sum(data['sc'], axis=0))
            labels.append("strength")

            measures.append(-bct.clustering_coef_wu(data['sc']))
            labels.append("clustering(-)")

            for ci in data['ci']:
                measures.append(bct.participation_coef(data['sc'], ci))
                labels.append(("participation (" +
                               str(int(round(ci.max()))) +
                               ")"))

            k = len(data['t_points'])
            m = len(measures)

            corrs = np.zeros((m, k))
            for i in range(m):
                for j in range(k):
                    corrs[i, j] = pearsonr(slopes[j, :], measures[i])[0]

            corr_min = np.amin(corrs)
            corr_max = np.amax(corrs)

            for i in range(m):
                plt.figure()
                plt.imshow(corrs[i, :][np.newaxis, :],
                           cmap=Spectral_4_r.mpl_colormap,
                           vmin=corr_min,
                           vmax=corr_max,
                           aspect=0.1 * k)
                plt.axis('off')
                plt.title(labels[i])

                if save is True:
                    figure_name = "correlations_" + labels[i] + ".png"
                    plt.savefig(os.path.join(save_path, figure_name))

    if show is False:
        plt.close('all')
        plt.ion()
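
# The centrality slopes in figure_4 come from np.gradient along the time axis
# (axis=0). A tiny self-contained illustration of that call with a toy
# (k time points x n nodes) array; the numbers are made up purely to show the
# shapes involved:
import numpy as np

toy_cmulti = np.array([[0.0, 1.0],
                       [1.0, 3.0],
                       [4.0, 5.0]])           # 3 time points, 2 nodes
toy_slopes = np.gradient(toy_cmulti, axis=0)  # same shape; per-node slope over t
# toy_slopes[1] == [2.0, 2.0]: central difference, (row 2 - row 0) / 2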
Example #6
def figure_1(data, specify, t_ids_a, t_ids_c, method='laplacian',
             panels='all', show=True, save=False, save_path=None):
    '''
    Function to create Figure 1

    Parameters
    ----------
    data : dict
        Dictionary of the data to be used to generate the figures. If the
        required data is not found in this dictionary, the panels of Figure
        1 that require this data will not be created and a message
        will be printed.
    specify : dict
        Dictionary containing information about the nodes that will be
        shown in panel a (see main_script.py for an example).
    t_ids_a : list
        List of time points indices indicating the time points at which
        the transition probabilities will be shown for our nodes of interest
        (panel a).
    t_ids_c : list
        List of time points indices indicating the time points at which
        the multiscale centrality distribution will be plotted on the surface
        of the brain (panel c).
    method : str
        Method used to compute the transition probabilities. Here, we specify
        either laplacian or pagerank. If transition probabilities were
        generated using a Laplacian matrix, the constrained walks in panel a
        will be generated as normal random walks regardless of the type of
        Laplacian matrix that was actually used.
    panels : str or list
        List of the panels of Figure 1 that we want to create. If we want
        to create all of them, use 'all'. Otherwise, individual panels can
        be specified. For example, we could have panels=['a'] or
        panels=['a', 'b'].
    show : Boolean
        If True, the figures will be displayed. If not, the figures will
        not be displayed.
    save : Boolean
        If True, the figures will be saved in the folder specified by the
        save_path parameter.
    save_path : str
        Path of the folder in which the figures will be saved.
    '''

    if show is False:
        plt.ioff()

    if panels == 'all':
        panels = ['a', 'b', 'c']

    if 'a' in panels:

        required_entries = ['sc', 'coords', 't_points']
        requirements = check_requirements(data, required_entries)

        if requirements is True:

            walk_type = method
            if method == 'laplacian':
                walk_type = 'normal'

            for i, seed_node in enumerate(specify['ID']):
                for t_id_a in t_ids_a:

                    fig = plot_constrained_walk(data,
                                                seed_node,
                                                data['t_points'][t_id_a],
                                                walk_type=walk_type,
                                                color=specify['colors'][i])
                    plt.title(specify['labels'][i])
                    if save:
                        fig_name = ("constrained_walk_" +
                                    str(seed_node) + "_" +
                                    str(round(data['t_points'][t_id_a], 0)) +
                                    ".png")
                        fig.savefig(os.path.join(save_path, fig_name))

    if 'b' in panels:

        required_entries = ['sc', 't_points', 'cmulti']
        requirements = check_requirements(data, required_entries)

        if requirements is True:

            n = len(data['sc'])
            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'], data['cmulti'][:, i], c='lightgray')
            for i, ID in enumerate(specify["ID"]):
                ax.plot(data['t_points'],
                        data['cmulti'][:, ID],
                        c=specify["colors"][i],
                        label=specify["labels"][i])
            ax.legend()
            ax.set_xlabel('t')
            ax.set_ylabel('c_multi')
            if method == 'laplacian':
                ax.set_xscale('log')

            if save:
                figure_name = 'c_multi_trajectory.png'
                fig.savefig(os.path.join(save_path, figure_name))

    if 'c' in panels:

        required_entries = ['sc', 't_points', 'cmulti', 'lhannot', 'rhannot',
                            'noplot', 'order']
        requirements = check_requirements(data, required_entries)

        if requirements is True:

            cmaps = [YlOrRd_9, YlOrBr_9, YlGn_9, YlGnBu_9]

            n = len(data['sc'])
            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'],
                        data['cmulti'][:, i],
                        c='lightgray',
                        zorder=0)
            for i, t in enumerate(t_ids_c):
                c = rankdata(data['cmulti'][t, :])
                plt.scatter(np.zeros((n))+data['t_points'][t],
                            data['cmulti'][t, :],
                            marker='s', c=c,
                            cmap=cmaps[i].mpl_colormap,
                            rasterized=True,
                            zorder=1)
            if method == 'laplacian':
                ax.set_xscale('log')
            ax.set_ylabel('c_multi')
            ax.set_xlabel('t')

            if save:
                figure_name = 'c_multi_trajectory_2.png'
                fig.savefig(os.path.join(save_path, figure_name))

            for i, t in enumerate(t_ids_c):

                scores = rankdata(data['cmulti'][t, :])
                im = plot_fsaverage(scores,
                                    lhannot=data['lhannot'],
                                    rhannot=data['rhannot'],
                                    noplot=data['noplot'],
                                    order=data['order'],
                                    views=['lateral', 'm'],
                                    vmin=np.amin(scores),
                                    vmax=np.amax(scores),
                                    colormap=cmaps[i].mpl_colormap)

                if save:
                    figure_name = 'cmulti_surface_'+str(t)+'.png'
                    im.save_image(os.path.join(save_path, figure_name),
                                  mode='rgba')

    if show is False:
        plt.close('all')
        plt.ion()
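
# Sketch of the `specify` dictionary and time-point indices expected by
# figure_1, inferred from how they are indexed above. The node IDs, colors,
# labels, and indices below are placeholders, not values from the original
# script.
example_specify = {
    'ID': [12, 57],                    # indices of the nodes of interest
    'colors': ['#1b9e77', '#d95f02'],  # one plot color per node of interest
    'labels': ['node A', 'node B'],    # one legend label per node of interest
}
example_t_ids_a = [0, 10]         # time-point indices for the constrained walks (panel a)
example_t_ids_c = [0, 5, 10, 15]  # time-point indices for the surface plots (panel c)

# figure_1(data, example_specify, example_t_ids_a, example_t_ids_c,
#          method='laplacian', panels=['b'])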
Example #7
def figure_2(data, method='laplacian', panels='all', show=True, save=False,
             save_path=None):
    '''
    Function to create Figure 2

    Parameters
    ----------
    data : dict
        Dictionary of the data to be used to generate the figures. If the
        required data is not found in this dictionary, the panels of Figure
        2 that require this data will not be created and a message
        will be printed.
    method : str
        Method used to compute the transition probabilities. The purpose of
        this parameter is to choose whether the x_scale of our figures should
        be linear or logarithmic.
    panels : str or list
        List of the panels of Figure 2 that we want to create. If we want
        to create all of them, use 'all'. Otherwise, individual panels can
        be specified. For example, we could have panels=['a'] or
        panels=['a', 'b'].
    show : Boolean
        If True, the figures will be displayed. If not, the figures will
        not be displayed.
    save : Boolean
        If True, the figures will be saved in the folder specified by the
        save_path parameter.
    save_path : str
        Path of the folder in which the figures will be saved.
    '''

    if show is False:
        plt.ioff()

    if panels == 'all':
        panels = ['a', 'b', 'c']

    n = len(data['sc'])
    k = len(data['t_points'])

    # Compute optimal centrality scale for individual nodes
    opti = np.argmax(data['cmulti'], axis=0)

    if 'a' in panels:

        required_entries = ['t_points', 'cmulti', 'sc', 'lhannot', 'rhannot',
                            'noplot', 'order']
        requirements = check_requirements(data, required_entries)
        if requirements is True:

            norm = plt.Normalize(np.amin(opti), np.amax(opti))
            optimal_colors = Spectral_4.mpl_colormap(norm(opti))

            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'],
                        data['cmulti'][:, i],
                        c=optimal_colors[i, :])

            ax.set_xlabel('t')
            ax.set_ylabel('c_multi')
            if method == 'laplacian':
                ax.set_xscale('log')

            if save:
                fig_name = "cmulti_trajectory_colored.png"
                plt.savefig(os.path.join(save_path, fig_name))

            log_topti = np.log10(data['t_points'][opti])
            im = plot_fsaverage(log_topti,
                                lhannot=data['lhannot'],
                                rhannot=data['rhannot'],
                                noplot=data['noplot'],
                                order=data['order'],
                                views=['lateral', 'm'],
                                vmin=np.amin(log_topti),
                                vmax=np.amax(log_topti),
                                colormap=Spectral_4.mpl_colormap)

            if save:
                figure_name = 'optimal_brain_surface.png'
                im.save_image(os.path.join(save_path, figure_name),
                              mode='rgba')

    if 'b' in panels:

        required_entries = ['rsn', 'rsn_names', 'cmulti', 't_points']
        requirements = check_requirements(data, required_entries)
        if requirements:

            opti_rsn = []
            median_rsn = np.zeros((7))
            for i in range(7):
                rsn_ids = np.where(data['rsn'] == i+1)[0]
                opti_rsn.append(data['t_points'][opti[rsn_ids]])
                median_rsn[i] = np.median(opti[rsn_ids])

            rsn_average = np.zeros((k, 7))
            for i in range(k):
                for j in range(7):
                    rsn_ids = np.where(data['rsn'] == j+1)[0]
                    rsn_average[i, j] = np.mean(data['cmulti'][i, rsn_ids])

            # Sort resting-state networks according to median optimal scores.
            rsn_order = np.argsort(median_rsn)
            opti_rsn_sorted = np.array(opti_rsn)[rsn_order].tolist()
            rsn_names_sorted = np.array(data['rsn_names'])[rsn_order].tolist()

            colormap = Spectral_7
            c_nb = [0, 1, 2, 3, 4, 5, 6]

            # optimal_rsn_violin
            plt.figure(figsize=(6, 3))
            sns.violinplot(data=opti_rsn_sorted,
                           palette=np.array(colormap.hex_colors)[c_nb],
                           orient='v')
            plt.ylabel("t_opti")
            plt.xticks(np.arange(0, 7), rsn_names_sorted)
            if method == "laplacian":
                y_min = np.amin(data['t_points'])
                y_max = np.amax(data['t_points'])
                plt.yticks([y_min, y_max],
                           [round(y_min, 0), round(y_max, 0)])

            if save:
                figure_name = 'optimal_rsn_violin'
                plt.savefig(os.path.join(save_path, figure_name))

            # optimal_rsn_average
            plt.figure(figsize=(6, 3))
            for j in range(7):
                plot_color = colormap.hex_colors[c_nb[j]]
                idx = np.argsort(median_rsn)[j]
                plt.plot(data['t_points'], rsn_average[:, idx],
                         label=data['rsn_names'][idx], c=plot_color,
                         linewidth=3)
            plt.xlabel("t")
            plt.ylabel('c_multi')
            if method == 'laplacian':
                plt.xscale('log')
            plt.legend()

            if save:
                figure_name = 'optimal_rsn_average'
                plt.savefig(os.path.join(save_path, figure_name))

    if 'c' in panels:

        required_entries = ['ve', 've_names', 't_points', 'cmulti']
        requirements = check_requirements(data, required_entries)
        if requirements:
            opti_ve = []
            median_ve = np.zeros((7))
            for i in range(7):
                ve_ids = np.where(data['ve'] == i+1)[0]
                opti_ve.append(data['t_points'][opti[ve_ids]])
                median_ve[i] = np.median(opti[ve_ids])

            ve_average = np.zeros((k, 7))
            for i in range(k):
                for j in range(7):
                    ve_ids = np.where(data['ve'] == j+1)[0]
                    ve_average[i, j] = np.mean(data['cmulti'][i, ve_ids])

            # Sort the 've' classes according to median optimal scores.
            ve_order = np.argsort(median_ve)
            opti_ve_sorted = np.array(opti_ve)[ve_order].tolist()
            ve_names_sorted = np.array(data['ve_names'])[ve_order].tolist()

            colormap = Spectral_11
            c_nb = [1, 2, 3, 4, 7, 8, 9]

            # optimal_ve_violin
            plt.figure(figsize=(6, 3))
            sns.violinplot(data=opti_ve_sorted,
                           palette=np.array(colormap.hex_colors)[c_nb],
                           orient='v')
            plt.ylabel("t_opti")
            plt.xticks(np.arange(0, 7), ve_names_sorted)
            if method == "laplacian":
                y_min = np.amin(data['t_points'])
                y_max = np.amax(data['t_points'])
                plt.yticks([y_min, y_max],
                           [round(y_min, 0), round(y_max, 0)])

            if save:
                figure_name = 'optimal_ve_violin'
                plt.savefig(os.path.join(save_path, figure_name))

            # optimal_ve_average
            plt.figure(figsize=(6, 3))
            for j in range(7):
                plot_color = colormap.hex_colors[c_nb[j]]
                idx = np.argsort(median_ve)[j]
                plt.plot(data['t_points'], ve_average[:, idx],
                         label=data['ve_names'][idx], c=plot_color,
                         linewidth=3)
            plt.xlabel("t")
            plt.ylabel('c_multi')
            if method == 'laplacian':
                plt.xscale('log')
            plt.legend()

            if save:
                figure_name = 'optimal_ve_average'
                plt.savefig(os.path.join(save_path, figure_name))

    if show is False:
        plt.close('all')
        plt.ion()
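
# Panel (a) of figure_2 maps each node to its optimal scale with np.argmax
# over the time axis, then plots log10 of the corresponding t values. A toy
# illustration of that indexing pattern (values are made up):
import numpy as np

toy_t_points = np.array([0.1, 1.0, 10.0])
toy_cmulti = np.array([[0.2, 0.9],
                       [0.8, 0.4],
                       [0.5, 0.1]])               # 3 time points, 2 nodes
toy_opti = np.argmax(toy_cmulti, axis=0)          # array([1, 0]): best scale per node
toy_log_topti = np.log10(toy_t_points[toy_opti])  # array([ 0., -1.])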