예제 #1
0
def parcellate_voronoi_vol(mask, nb_parcels, seeds=None):
    """
    Produce a parcellation from a Voronoi diagram built on random seeds.
    The number of seeds is equal to the number of parcels.
    Seeds are randomly placed within the mask, except on edge positions.

    Args:
        - mask (numpy.ndarray): binary 3D array of valid positions to
          parcellate
        - nb_parcels (int): the required number of parcels
        - seeds (numpy.ndarray or None): optional binary array marking seed
          positions; if None, seeds are drawn at random within each
          connected component

    Return:
        - the parcellation (numpy.ndarray): a 3D array of integers
          (parcel labels; 0 is background)
    """
    parcellation = np.zeros(mask.shape, dtype=int)
    nvox = (mask !=0).sum()
    # Parcellate each connected component (CC) independently.
    for cc_mask in mg.split_mask_into_cc_iter(mask!=0):
        # compute the required number of parcels within the current CC:
        size_cc = cc_mask.sum()
        cc_np = max(int(np.round(nb_parcels * size_cc / (nvox*1.))), 1)
        pyhrf.verbose(2, 'Treating a connected component (CC) of %d positions' \
                          %cc_mask.sum())
        if cc_mask.sum() < 6:
            # CC too small to parcellate; left as background
            continue
        if seeds is None:
            # perform voronoi on random seeds

            # erode the CC so seeds are not placed on its edge
            eroded_mask = peelVolume3D(cc_mask)
            eroded_mask_size = eroded_mask.sum()
            if eroded_mask_size < nb_parcels: #do no erode, mask too small
                # NOTE(review): falls back to the *whole* input mask, not the
                # current CC -- seeds may land outside this CC. TODO confirm.
                eroded_mask_size = nvox
                eroded_mask = mask.copy()
            # Seed draw is with replacement: duplicate draws can yield fewer
            # than cc_np distinct seeds.
            cc_seeds = np.random.randint(0,eroded_mask_size, cc_np)
            mask_for_seed = np.zeros(eroded_mask_size, dtype=int)
            mask_for_seed[cc_seeds] = 1
            mask_for_seed  = expand_array_in_mask(mask_for_seed, eroded_mask)
        else:
            # restrict user-provided seeds to the current CC
            mask_for_seed = seeds * cc_mask

        pyhrf.verbose(2, 'Nb of seeds in current CC: %d' \
                          %mask_for_seed.sum())
        # Voronoi diagram: assign each CC position to its nearest seed.
        cc_parcellation = voronoi(np.vstack(np.where(cc_mask)).T,
                                  np.vstack(np.where(mask_for_seed)).T) + 1
        pyhrf.verbose(3, 'CC parcellation labels: %s' \
                          %str(np.unique(cc_parcellation)))
        # Offset labels so they do not collide with previously treated CCs.
        maxp = parcellation.max()
        parcellation += expand_array_in_mask(cc_parcellation + maxp, cc_mask)
        pyhrf.verbose(3, 'Current parcellation labels: %s' \
                          %str(np.unique(parcellation)))
    pyhrf.verbose(1, 'voronoi parcellation: %s, %s' \
                      %(str(parcellation.shape), str(parcellation.dtype)))

    return parcellation
예제 #2
0
def parcellate_voronoi_vol(mask, nb_parcels, seeds=None):
    """
    Produce a parcellation from a Voronoi diagram built on random seeds.
    The number of seeds is equal to the number of parcels.
    Seeds are randomly placed within the mask, except on edge positions.

    Args:
        - mask (numpy.ndarray): binary 3D array of valid positions to
          parcellate
        - nb_parcels (int): the required number of parcels
        - seeds (numpy.ndarray or None): optional binary array marking seed
          positions; if None, seeds are drawn at random within each
          connected component

    Return:
        - the parcellation (numpy.ndarray): a 3D array of integers
          (parcel labels; 0 is background)
    """
    parcellation = np.zeros(mask.shape, dtype=int)
    nvox = (mask != 0).sum()
    # Parcellate each connected component (CC) independently.
    for cc_mask in mg.split_mask_into_cc_iter(mask != 0):
        # compute the required number of parcels within the current CC,
        # proportionally to its size (at least 1):
        size_cc = cc_mask.sum()
        cc_np = max(int(np.round(nb_parcels * size_cc / (nvox * 1.))), 1)
        logger.info('Treating a connected component (CC) of %d positions',
                    size_cc)
        if size_cc < 6:
            # CC too small to parcellate; left as background
            continue
        if seeds is None:
            # perform voronoi on random seeds

            # erode the CC so seeds are not placed on its edge
            eroded_mask = peelVolume3D(cc_mask)
            eroded_mask_size = eroded_mask.sum()
            if eroded_mask_size < nb_parcels:  # do not erode, mask too small
                # Fall back to the *current CC*, not the whole input mask, so
                # that seeds cannot land outside the CC being parcellated
                # (the previous code used mask.copy()/nvox here).
                eroded_mask_size = size_cc
                eroded_mask = cc_mask.copy()
            # Sample positions without replacement: randint-based sampling
            # could draw duplicates, yielding fewer than cc_np seeds.
            cc_seeds = np.random.permutation(eroded_mask_size)[:cc_np]
            mask_for_seed = np.zeros(eroded_mask_size, dtype=int)
            mask_for_seed[cc_seeds] = 1
            mask_for_seed = expand_array_in_mask(mask_for_seed, eroded_mask)
        else:
            # restrict user-provided seeds to the current CC
            mask_for_seed = seeds * cc_mask

        logger.info('Nb of seeds in current CC: %d', mask_for_seed.sum())
        # Voronoi diagram: assign each CC position to its nearest seed
        # (labels start at 1).
        cc_parcellation = voronoi(
            np.vstack(np.where(cc_mask)).T,
            np.vstack(np.where(mask_for_seed)).T) + 1
        logger.info('CC parcellation labels: %s',
                    str(np.unique(cc_parcellation)))
        # Offset labels so they do not collide with previously treated CCs.
        maxp = parcellation.max()
        parcellation += expand_array_in_mask(cc_parcellation + maxp, cc_mask)
        logger.info('Current parcellation labels: %s',
                    str(np.unique(parcellation)))
    logger.info('voronoi parcellation: %s, %s', str(parcellation.shape),
                str(parcellation.dtype))

    return parcellation
예제 #3
0
파일: core.py 프로젝트: rcherbonnier/pyhrf
    def save(self, output_dir):
        """
        Save paradigm to output_dir/paradigm.csv,
        BOLD to output_dir/bold.nii, mask to output_dir/mask.nii
        #TODO: handle multi-session

        Return: tuple of file names in this order: (paradigm, bold, mask).
        Entries not produced for the current data type are None.
        """
        from pyhrf.tools._io import write_volume, write_texture
        paradigm_file = op.join(output_dir, 'paradigm.csv')
        self.paradigm.save_csv(paradigm_file)
        # Pre-initialize so the return statement cannot raise NameError when
        # data_type is 'surface' (no mask file written) or unrecognized.
        bold_file, mask_file = None, None
        if self.data_type == 'volume':
            # unflatten bold: (time, voxel) -> 4D volume, time as last axis
            bold_vol = expand_array_in_mask(self.bold, self.roiMask, 1)
            bold_vol = np.rollaxis(bold_vol, 0, 4)
            bold_file = op.join(output_dir, 'bold.nii')
            write_volume(bold_vol, bold_file, self.meta_obj)

            mask_file = op.join(output_dir, 'mask.nii')
            write_volume(self.roiMask, mask_file, self.meta_obj)

        elif self.data_type == 'surface':  # TODO surface
            bold_file = op.join(output_dir, 'bold.gii')
            # NOTE(review): self.bold_vol is not set anywhere in this method;
            # verify the attribute exists for surface data.
            write_texture(self.bold_vol, bold_file, self.meta_obj)

        return paradigm_file, bold_file, mask_file
예제 #4
0
 def save_time_series(k):
     """Expand the flat time series simulation[k] to a 4D volume and save it
     as <output_dir>/<k>.nii (with optional file name prefix)."""
     # `k in simulation` instead of dict.has_key: equivalent on Python 2 and
     # still valid on Python 3 (has_key was removed).
     if k in simulation:
         fn_stim_induced = add_prefix(op.join(output_dir, k + '.nii'),
                                      prefix)
         logger.info('%s flat shape %s', k, str(simulation[k].shape))
         vol = expand_array_in_mask(simulation[k], mask_vol, flat_axis=1)
         write_volume(np.rollaxis(vol, 0, 4), fn_stim_induced)
예제 #5
0
    def test_ward_spatial_scikit_with_mask(self):
        from pyhrf.parcellation import parcellation_dist, parcellation_ward_spatial
        from pyhrf.graph import graph_from_lattice, kerMask2D_4n
        from pyhrf.ndarray import expand_array_in_mask

        if debug:
            print 'data:'
            print self.p1
            print ''

        mask = self.p1 != 0
        graph = graph_from_lattice(mask, kerMask2D_4n)

        X = self.p1[np.where(mask)].reshape(-1,1)

        labels = parcellation_ward_spatial(X, n_clusters=4, graph=graph)

        labels = expand_array_in_mask(labels, mask)



        # print 'labels:'
        # print labels

        #+1 because parcellation_dist sees 0 as background:
        dist = parcellation_dist(self.p1+1, labels+1)[0]
        self.assertEqual(dist, 0)
예제 #6
0
    def save(self, output_dir):
        """
        Save paradigm to output_dir/paradigm.csv,
        BOLD to output_dir/bold.nii, mask to output_dir/mask.nii
        #TODO: handle multi-session

        Return: tuple of file names in this order: (paradigm, bold, mask).
        Entries not produced for the current data type are None.
        """
        from pyhrf.tools._io import write_volume, write_texture
        paradigm_file = op.join(output_dir, 'paradigm.csv')
        self.paradigm.save_csv(paradigm_file)
        # Pre-initialize so the return statement cannot raise NameError when
        # data_type is 'surface' (no mask file written) or unrecognized.
        bold_file, mask_file = None, None
        if self.data_type == 'volume':
            # unflatten bold: (time, voxel) -> 4D volume, time as last axis
            bold_vol = expand_array_in_mask(self.bold, self.roiMask, 1)
            bold_vol = np.rollaxis(bold_vol, 0, 4)
            bold_file = op.join(output_dir, 'bold.nii')
            write_volume(bold_vol, bold_file, self.meta_obj)

            mask_file = op.join(output_dir, 'mask.nii')
            write_volume(self.roiMask, mask_file, self.meta_obj)

        elif self.data_type == 'surface':  # TODO surface
            bold_file = op.join(output_dir, 'bold.gii')
            # NOTE(review): self.bold_vol is not set anywhere in this method;
            # verify the attribute exists for surface data.
            write_texture(self.bold_vol, bold_file, self.meta_obj)

        return paradigm_file, bold_file, mask_file
예제 #7
0
파일: scenarios.py 프로젝트: pyhrf/pyhrf
 def save_time_series(k):
     """Expand the flat time series simulation[k] to a 4D volume and save it
     as <output_dir>/<k>.nii (with optional file name prefix)."""
     # `k in simulation` instead of dict.has_key: equivalent on Python 2 and
     # still valid on Python 3 (has_key was removed).
     if k in simulation:
         fn_stim_induced = add_prefix(
             op.join(output_dir, k + '.nii'), prefix)
         logger.info('%s flat shape %s', k, str(simulation[k].shape))
         vol = expand_array_in_mask(simulation[k], mask_vol, flat_axis=1)
         write_volume(np.rollaxis(vol, 0, 4), fn_stim_induced)
예제 #8
0
def parcellation_dist(p1, p2, mask=None):
    """
    Compute the distance between the two parcellations p1 and p2 as the
    minimum number of positions to remove in order to obtain equal partitions.
    "mask" may be a binary mask to limit the distance computation to some
    specific positions.
    Important convention: parcel label 0 is treated as background and
    corresponding positions are discarded if no mask is provided.

    Return:
        (distance value, parcellation overlap)
    """
    assert np.issubdtype(p1.dtype, np.int)
    assert np.issubdtype(p2.dtype, np.int)

    assert p1.shape == p2.shape

    from munkres import Munkres
    if mask is None:
        mask = (p1 != 0)


    m = np.where(mask)
    pyhrf.verbose(6,'Nb pos inside mask: %d' %len(m[0]))

    # flatten both parcellations over the in-mask positions
    fp1 = p1[m].astype(np.int32)
    fp2 = p2[m].astype(np.int32)

    # allocate cost matrix, assume that region labels are contiguous
    # ie all labels in [1, label_max] are represented
    cost_matrix = np.zeros((fp1.max()+1, fp2.max()+1), dtype=np.int32)

    pyhrf.verbose(6,'Cost matrix : %s' %str(cost_matrix.shape))
    compute_intersection_matrix(fp1, fp2, cost_matrix)

    # discard 0-labelled parcels (background)
    cost_matrix = cost_matrix[1:,1:]

    # solve the assignement problem (Hungarian algorithm; maximize overlap
    # by minimizing the negated intersection counts):
    indexes = np.array(Munkres().compute((cost_matrix*-1).tolist()))

    # dead debug branch (never taken)
    if 0:
        print 'assignement indexes:'
        print indexes
        print '->'
        print (indexes[:,0], indexes[:,1])
        print cost_matrix[(indexes[:,0], indexes[:,1])]

    # total overlap under the optimal label assignment
    assignement = cost_matrix[(indexes[:,0], indexes[:,1])].sum()

    # overlap map: 1 where the two parcellations agree under the assignment
    to_keep = np.zeros_like(fp1)
    for s1, s2 in indexes:
        to_keep += np.bitwise_and(fp1==(s1+1), fp2==(s2+1))

    return fp1.size - assignement, expand_array_in_mask(to_keep, mask)
예제 #9
0
def parcellation_dist(p1, p2, mask=None):
    """
    Compute the distance between the two parcellations p1 and p2 as the
    minimum number of positions to remove in order to obtain equal partitions.
    "mask" may be a binary mask to limit the distance computation to some
    specific positions.
    Important convention: parcel label 0 is treated as background and
    corresponding positions are discarded if no mask is provided.

    Return:
        (distance value, parcellation overlap)
    """
    # np.integer is the abstract integer dtype; unlike the np.int alias
    # (removed in numpy >= 1.24) it exists on every numpy version.
    assert np.issubdtype(p1.dtype, np.integer)
    assert np.issubdtype(p2.dtype, np.integer)

    assert p1.shape == p2.shape

    from munkres import Munkres
    if mask is None:
        mask = (p1 != 0)

    m = np.where(mask)
    logger.debug('Nb pos inside mask: %d', len(m[0]))

    # flatten both parcellations over the in-mask positions
    fp1 = p1[m].astype(np.int32)
    fp2 = p2[m].astype(np.int32)

    # allocate cost matrix, assume that region labels are contiguous
    # ie all labels in [1, label_max] are represented
    cost_matrix = np.zeros((fp1.max() + 1, fp2.max() + 1), dtype=np.int32)

    logger.debug('Cost matrix : %s', str(cost_matrix.shape))
    compute_intersection_matrix(fp1, fp2, cost_matrix)

    # discard 0-labelled parcels (background)
    cost_matrix = cost_matrix[1:, 1:]

    # solve the assignment problem (Hungarian algorithm; maximize overlap
    # by minimizing the negated intersection counts):
    indexes = np.array(Munkres().compute((cost_matrix * -1).tolist()))

    # total overlap under the optimal label assignment
    assignement = cost_matrix[(indexes[:, 0], indexes[:, 1])].sum()

    # overlap map: 1 where the two parcellations agree under the assignment
    to_keep = np.zeros_like(fp1)
    for s1, s2 in indexes:
        to_keep += np.bitwise_and(fp1 == (s1 + 1), fp2 == (s2 + 1))

    return fp1.size - assignement, expand_array_in_mask(to_keep, mask)
예제 #10
0
파일: parcellation.py 프로젝트: Solvi/pyhrf
def parcellation_hemodynamics(fmri_data, feature_extraction_method,
                              parcellation_method, nb_clusters):
    """
    Perform a hemodynamic-driven parcellation on masked fMRI data

    Args:
        - fmri_data (pyhrf.core.FmriData): input fMRI data
        - feature_extraction_method (str): one of
          'glm_hderiv', 'glm_hdisp' ...
        - parcellation_method (str): one of
          'spatial_ward', 'spatial_ward_uncertainty', ...
        - nb_clusters (int): the required number of clusters (parcels)

    Return:
         parcellation array (numpy array of integers) of the same shape
         as fmri_data.roiMask

    Examples
    >>> fd = pyhrf.core.FmriData.from_simu_ui()
    >>> parcellation_hemodynamics(fd, 'glm_hdisp', nb_clusters=2)
    array([[[1], [1]], [[2], [2]]]) #dummy result #TOCHECK
    """

    roi_ids = np.unique(fmri_data.roi_ids_in_mask)
    # NOTE(review): this branch runs the direct (non-ROI-split) parcellation
    # when there are *zero* roi ids -- confirm the test should not be == 1.
    if len(roi_ids) == 0:
        # glm
        #TODO
        features, uncertainty = feature_extraction(fmri_data,
                                                   feature_extraction_method)

        # parcellation process
        if parcellation_method == 'spatial_ward':
            parcellation = spatial_ward(features) #TODO
        else:
            parcellation = spatial_ward_with_uncertainty(features) #TODO

    else: #parcellate each ROI separately
        nb_voxels_all = fmri_data.nb_voxels_in_mask
        # NOTE(review): uses fmri_data.nb_voxels_all while the local
        # nb_voxels_all was just computed above -- verify which is intended.
        parcellation = np.zeros(fmri_data.nb_voxels_all, dtype=int)
        for rfd in fmri_data.roi_split():
            # multiply nb_clusters by the fraction of the roi size
            nb_clusters_roi = round(nb_clusters * rfd.nb_voxels_in_mask /   \
                                    nb_voxels_all)
            p_roi = parcellation_hemodynamics(rfd, feature_extraction_method,
                                              parcellation_method,
                                              nb_clusters_roi)
            # NOTE(review): the offset parcellation.max() is added to *every*
            # position of p_roi, including background zeros -- verify labels
            # of successive ROIs stay disjoint.
            parcellation += p_roi + parcellation.max()

    return expand_array_in_mask(parcellation, fmri_data.roiMask)
예제 #11
0
파일: graph.py 프로젝트: pyhrf/pyhrf
def split_mask_into_cc_iter(mask, min_size=0, kerMask=None):
    """ Return an iterator over all connected components (CC) within input mask. CC which are smaller than `min_size`
    are discarded. `kerMask` defines the connectivity, e.g., kerMask3D_6n for 6-neighbours in 3D.

    Each yielded item is a binary array with the same shape as `mask`,
    non-zero exactly on one connected component.

    Examples
    --------

    .. code::

        vol = np.array( [[1,1,0,1,1],
                        [1,1,0,1,1],
                        [0,0,0,0,0],
                        [1,0,1,1,0],
                        [0,0,1,1,0]], dtype=int )
        for cc in split_mask_into_cc_iter(vol):
            print cc


    Should output:

    .. code::

        np.array( [[1,1,0,0,0],
                   [1,1,0,0,0],
                   [0,0,0,0,0],
                   [0,0,0,0,0],
                   [0,0,0,0,0]]
        np.array( [[0,0,0,1,1],
                   [0,0,0,1,1],
                   [0,0,0,0,0],
                   [0,0,0,0,0],
                   [0,0,0,0,0]]

    """
    assert isinstance(min_size, int)
    # flat buffer over the non-zero positions of the mask, reused (zeroed and
    # refilled) for every connected component
    flat_mask = mask[np.where(mask)]
    # print 'flat_mask:', flat_mask.shape
    g = graph_from_lattice(mask, kerMask)
    for cc in connected_components_iter(g):
        # print 'cc:'
        # print cc
        if len(cc) >= min_size:
            flat_mask[:] = 0
            flat_mask[cc] = 1
            # print expand_array_in_mask(flat_mask, mask)
            yield expand_array_in_mask(flat_mask, mask)
예제 #12
0
def split_mask_into_cc_iter(mask, min_size=0, kerMask=None):
    """Iterate over the connected components (CC) of a binary mask.

    Each yielded item is a binary array with the same shape as `mask` that is
    non-zero exactly on one connected component. Components with fewer than
    `min_size` positions are skipped. `kerMask` defines the connectivity,
    e.g., kerMask3D_6n for 6-neighbours in 3D.

    Examples
    --------

    .. code::

        vol = np.array( [[1,1,0,1,1],
                        [1,1,0,1,1],
                        [0,0,0,0,0],
                        [1,0,1,1,0],
                        [0,0,1,1,0]], dtype=int )
        for cc in split_mask_into_cc_iter(vol):
            print cc

    yields first the top-left 2x2 component, then the top-right one, etc.
    """
    assert isinstance(min_size, int)
    # flat buffer over the non-zero positions of the mask, reused (zeroed
    # and refilled) for every component
    flat_cc = mask[np.where(mask)]
    lattice = graph_from_lattice(mask, kerMask)
    for component in connected_components_iter(lattice):
        if len(component) < min_size:
            continue
        flat_cc.fill(0)
        flat_cc[component] = 1
        yield expand_array_in_mask(flat_cc, mask)
예제 #13
0
def parcellate_balanced_vol(mask, nb_parcels):
    """
    Performs a balanced partitioning on the input mask using a balloon patroling
     algorithm [Eurol 2009]. Values with 0 are discarded position in the mask.

    Args:
        - mask (numpy.ndarray): binary 3D array of valid position to parcellate
        - nb_parcels (int): the required number of parcels

    Return:
        - the parcellation (numpy.ndarray): a 3D array of integers
          (parcel labels; 0 is background)
    """

    parcellation = np.zeros(mask.shape, dtype=int)
    nvox = (mask != 0).sum()

    # Iterate over connected components in the input mask
    for cc_mask in mg.split_mask_into_cc_iter(mask != 0):
        logger.info('Treating a connected component (CC) of %d positions',
                    cc_mask.sum())
        g = mg.graph_from_lattice(cc_mask)
        size_cc = cc_mask.sum()
        # compute the required number of parcels within the current CC,
        # proportionally to its size (at least 1):
        cc_np = max(int(np.round(nb_parcels * size_cc / (nvox * 1.))), 1)

        logger.info('Split (CC) into %d parcels', cc_np)

        # all CC positions start in a single parcel labelled 1; split_parcel
        # refines this labelling in place
        cc_labels = np.ones(cc_mask.sum(), dtype=int)
        if cc_np > 1:
            split_parcel(cc_labels, {1: g},
                         1,
                         cc_np,
                         inplace=True,
                         verbosity=2,
                         balance_tolerance='draft')
        else:  # only one parcel expected, CC must be too small to be splited
            cc_labels[:] = 1
        logger.info('Split done!')

        # accumulate parcellation result; offset labels so they do not
        # collide with previously treated CCs
        maxp = parcellation.max()
        parcellation += expand_array_in_mask(cc_labels + maxp, cc_mask > 0)

    return parcellation
예제 #14
0
    def test_ward_spatial_scikit_with_mask(self):
        # Spatially-constrained Ward clustering should exactly recover the
        # synthetic parcellation self.p1 (distance 0, up to label permutation).
        from pyhrf.parcellation import parcellation_dist, parcellation_ward_spatial
        from pyhrf.graph import graph_from_lattice, kerMask2D_4n
        from pyhrf.ndarray import expand_array_in_mask

        if debug:
            print 'data:'
            print self.p1
            print ''

        mask = self.p1 != 0
        graph = graph_from_lattice(mask, kerMask2D_4n)

        # one-column feature matrix over the in-mask positions
        X = self.p1[np.where(mask)].reshape(-1, 1)

        labels = parcellation_ward_spatial(X, n_clusters=4, graph=graph)

        labels = expand_array_in_mask(labels, mask)
        #+1 because parcellation_dist sees 0 as background:
        dist = parcellation_dist(self.p1 + 1, labels + 1)[0]
        self.assertEqual(dist, 0)
예제 #15
0
def parcellate_balanced_vol(mask, nb_parcels):
    """
    Performs a balanced partitioning on the input mask using a balloon patroling
     algorithm [Eurol 2009]. Values with 0 are discarded position in the mask.

    Args:
        - mask (numpy.ndarray): binary 3D array of valid position to parcellate
        - nb_parcels (int): the required number of parcels

    Return:
        - the parcellation (numpy.ndarray): a 3D array of integers
          (parcel labels; 0 is background)
    """

    parcellation = np.zeros(mask.shape, dtype=int)
    nvox = (mask != 0).sum()

    # Iterate over connected components in the input mask
    for cc_mask in mg.split_mask_into_cc_iter(mask != 0):
        pyhrf.verbose(2, 'Treating a connected component (CC) of %d positions' \
                          %cc_mask.sum())
        g = mg.graph_from_lattice(cc_mask)
        size_cc = cc_mask.sum()
        # compute the required number of parcels within the current CC,
        # proportionally to its size (at least 1):
        cc_np = max(int(np.round(nb_parcels * size_cc / (nvox*1.))), 1)

        pyhrf.verbose(2, 'Split (CC) into %d parcels' %cc_np)

        # all CC positions start in a single parcel labelled 1; split_parcel
        # refines this labelling in place
        cc_labels = np.ones(cc_mask.sum(), dtype=int)
        if cc_np > 1:
            split_parcel(cc_labels, {1:g}, 1, cc_np, inplace=True,
                         verbosity=2, balance_tolerance='draft')
        else: #only one parcel expected, CC must be too small to be splited
            cc_labels[:] = 1
        pyhrf.verbose(2, 'Split done!')

        # accumulate parcellation result; offset labels so they do not
        # collide with previously treated CCs
        maxp = parcellation.max()
        parcellation += expand_array_in_mask(cc_labels + maxp, cc_mask>0)

    return parcellation
예제 #16
0
def parcellation_for_jde(fmri_data, avg_parcel_size=250, output_dir=None,
                         method='gkm', glm_drift='Cosine', glm_hfcut=128):
    """
    Build a parcellation for JDE from GLM outputs computed on `fmri_data`.

    Args:
        - fmri_data: input fMRI data (volume or surface)
        - avg_parcel_size (int): target average number of voxels per parcel
        - output_dir (str or None): where to write intermediate and final
          files; a temporary directory is created if None
        - method (str): gkm, ward, ward_and_gkm
        - glm_drift (str): drift model passed to glm_nipy
        - glm_hfcut (int): high-pass cut-off passed to glm_nipy

    Return:
        (parcellation array, parcellation file name)
    """

    if output_dir is None:
        output_dir = tempfile.mkdtemp(prefix='pyhrf_JDE_parcellation_GLM',
                                      dir=pyhrf.cfg['global']['tmp_path'])
    glm_output_dir = op.join(output_dir, 'GLM_for_parcellation')
    if not op.exists(glm_output_dir): os.makedirs(glm_output_dir)

    pyhrf.verbose(1, 'GLM for parcellation')

    # if fmri_data.data_type == 'volume':
    #     paradigm_file, bold_file, mask_file = fmri_data.save(glm_output_dir)
    #     beta_files = glm_nipy_from_files(bold_file, fmri_data.tr, paradigm_file,
    #                                      glm_output_dir, mask_file,
    #                                      drift_model=glm_drift, hfcut=glm_hfcut)
    # elif fmri_data.data_type == 'surface':
    #     beta_files = glm_nipy(fmri_data, glm_output_dir,
    #                           drift_model=glm_drift, hfcut=glm_hfcut)

    g, dm, cons = glm_nipy(fmri_data, drift_model=glm_drift, hfcut=glm_hfcut)

    # Feature maps driving the parcellation: contrast p-values if contrasts
    # are defined, otherwise the effect betas (excluding constant/drift
    # regressors).
    pval_files = []
    if cons is not None:
        func_data = [('con_pval_%s' %cname, con.pvalue()) \
                         for cname, con in cons.iteritems()]
    else:
        reg_cst_drift = re.compile(".*constant.*|.*drift.*")
        func_data = [('beta_%s' %reg_name, g.beta[ir]) \
                         for ir,reg_name in enumerate(dm.names) \
                         if not reg_cst_drift.match(reg_name)]

    # Write each feature map as a nifti volume for the parcellation tools.
    for name, data in func_data:
        val_vol = expand_array_in_mask(data, fmri_data.roiMask>0)
        val_fn = op.join(glm_output_dir, '%s.nii' %name)
        write_volume(val_vol, val_fn, fmri_data.meta_obj)
        pval_files.append(val_fn)

    mask_file = op.join(glm_output_dir,'mask.nii')
    write_volume(fmri_data.roiMask>0, mask_file, fmri_data.meta_obj)

    # Number of parcels derived from the mask size and target parcel size.
    nvox = fmri_data.get_nb_vox_in_mask()
    nparcels = round_nb_parcels(nvox * 1. / avg_parcel_size)

    pyhrf.verbose(1, 'Parcellation from GLM outputs, method: %s, ' \
                      'nb parcels: %d' %(method, nparcels))

    if fmri_data.data_type == 'volume':
        parcellation_file = op.join(output_dir, 'parcellation_%s_np%d.nii'
                                    %(method, nparcels))

        make_parcellation_from_files(pval_files, mask_file, parcellation_file,
                                     nparcels, method)
        parcellation,_ = read_volume(parcellation_file)
    else:
        # surface data: parcellate on the mesh
        mesh_file = fmri_data.data_files[-1]
        parcellation_file = op.join(output_dir, 'parcellation_%s_np%d.gii'
                                    %(method, nparcels))
        make_parcellation_surf_from_files(pval_files, mesh_file,
                                          parcellation_file, nparcels, method,
                                          verbose=1)
        parcellation,_ = read_texture(parcellation_file)
    #print parcellation_file


    pyhrf.verbose(1, parcellation_report(parcellation))

    return parcellation, parcellation_file
예제 #17
0
파일: scenarios.py 프로젝트: pyhrf/pyhrf
def simulation_save_vol_outputs(
    simulation, output_dir, bold_3D_vols_dir=None, simulation_graph_output=None, prefix=None, vol_meta=None
):
    """Save every known quantity of the `simulation` dict as nifti volumes
    under `output_dir`, optionally prefixing file names with `prefix`.

    simulation_graph_output : None, 'simple', 'thumbnails' #TODO

    Flat (masked) arrays are expanded back to full volumes with
    expand_array_in_mask before being written.
    """

    # dict.has_key is Python-2-only; the `in` operator is equivalent and
    # also valid on Python 3, so it is used throughout this function.
    if "paradigm" in simulation:
        fn = add_prefix(op.join(output_dir, "paradigm.csv"), prefix)
        simulation["paradigm"].save_csv(fn)

    # Save all volumes in nifti format:
    if "labels_vol" in simulation:
        mask_vol = np.ones_like(simulation["labels_vol"][0])
    elif "mask" in simulation:
        mask_vol = simulation.get("mask", None)
    elif "labels" in simulation:
        mask_vol = np.ones_like(simulation["labels"][0])
    else:
        raise Exception("Dunno where to get mask")

    logger.info("Vol mask of shape %s", str(mask_vol.shape))

    fn_mask = add_prefix(op.join(output_dir, "mask.nii"), prefix)
    write_volume(mask_vol.astype(np.int32), fn_mask, vol_meta)

    if "hrf_territories" in simulation:
        fn_h_territories = add_prefix(op.join(output_dir, "hrf_territories.nii"), prefix)

        # +1 so territory labels do not collide with the 0 background
        ht = expand_array_in_mask(simulation["hrf_territories"] + 1, mask_vol)
        write_volume(ht, fn_h_territories, vol_meta)

    if "hrf" in simulation:
        from pyhrf.ndarray import MRI3Daxes

        fn_hrf = add_prefix(op.join(output_dir, "hrf.nii"), prefix)
        logger.info("hrf flat shape %s", str(simulation["hrf"].shape))
        if simulation["hrf"].ndim == 1:
            # single HRF -> duplicate it over every position of the mask
            hrf = np.ones(mask_vol.size) * simulation["hrf"][:, np.newaxis]
        else:
            hrf = simulation["hrf"]

        hrfs_vol = expand_array_in_mask(hrf, mask_vol, flat_axis=1)
        dt = simulation["dt"]
        chrfs = xndarray(
            hrfs_vol, axes_names=["time"] + MRI3Daxes, axes_domains={"time": np.arange(hrfs_vol.shape[0]) * dt}
        )
        chrfs.save(fn_hrf, vol_meta)

        # time-to-peak map (in time-sample units)
        ttp_vol = hrfs_vol.argmax(0)
        fn_ttp = add_prefix(op.join(output_dir, "ttp.nii"), prefix)
        write_volume(ttp_vol, fn_ttp, vol_meta)

    if "brf" in simulation:
        from pyhrf.ndarray import MRI3Daxes

        fn_brf = add_prefix(op.join(output_dir, "brf.nii"), prefix)
        logger.info("brf flat shape %s", str(simulation["brf"].shape))
        brfs_vol = expand_array_in_mask(simulation["brf"], mask_vol, flat_axis=1)
        dt = simulation["dt"]
        cbrfs = xndarray(
            brfs_vol, axes_names=["time"] + MRI3Daxes, axes_domains={"time": np.arange(brfs_vol.shape[0]) * dt}
        )
        cbrfs.save(fn_brf, vol_meta)

    if "prf" in simulation:
        from pyhrf.ndarray import MRI3Daxes

        fn_brf = add_prefix(op.join(output_dir, "prf.nii"), prefix)
        logger.info("prf flat shape %s", str(simulation["prf"].shape))
        brfs_vol = expand_array_in_mask(simulation["prf"], mask_vol, flat_axis=1)
        dt = simulation["dt"]
        cbrfs = xndarray(
            brfs_vol, axes_names=["time"] + MRI3Daxes, axes_domains={"time": np.arange(brfs_vol.shape[0]) * dt}
        )
        cbrfs.save(fn_brf, vol_meta)

    if "drift" in simulation:
        fn_drift = add_prefix(op.join(output_dir, "drift.nii"), prefix)
        logger.info("drift flat shape %s", str(simulation["drift"].shape))
        drift_vol = expand_array_in_mask(simulation["drift"], mask_vol, flat_axis=1)
        # NOTE(review): no vol_meta passed here (unlike most other writes);
        # confirm whether that is intentional.
        write_volume(np.rollaxis(drift_vol, 0, 4), fn_drift)

    if "drift_coeffs" in simulation:
        fn_drift = add_prefix(op.join(output_dir, "drift_coeffs.nii"), prefix)
        logger.info("drift flat shape %s", str(simulation["drift_coeffs"].shape))
        # FIX: expand the drift *coefficients*; the original expanded
        # simulation["drift"] here (copy-paste slip from the branch above).
        drift_vol = expand_array_in_mask(simulation["drift_coeffs"], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(drift_vol, 0, 4), fn_drift)

    if "noise" in simulation:
        fn_noise = add_prefix(op.join(output_dir, "noise.nii"), prefix)
        logger.info("noise flat shape %s", str(simulation["noise"].shape))
        noise_vol = expand_array_in_mask(simulation["noise"], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(noise_vol, 0, 4), fn_noise, vol_meta)

        # empirical noise variance over time, per voxel
        fn_noise = add_prefix(op.join(output_dir, "noise_emp_var.nii"), prefix)
        noise_vol = expand_array_in_mask(simulation["noise"].var(0), mask_vol)
        write_volume(noise_vol, fn_noise, vol_meta)

    if "noise_var" in simulation:
        fn_noise_var = add_prefix(op.join(output_dir, "noise_var.nii"), prefix)
        logger.info("noise_var flat shape %s", str(simulation["noise_var"].shape))
        noise_var_vol = expand_array_in_mask(simulation["noise_var"], mask_vol)
        write_volume(noise_var_vol, fn_noise_var, vol_meta)

    if "stim_induced_signal" in simulation:
        fn_stim_induced = add_prefix(op.join(output_dir, "stim_induced.nii"), prefix)
        logger.info("stim_induced flat shape %s", str(simulation["stim_induced_signal"].shape))
        stim_induced_vol = expand_array_in_mask(simulation["stim_induced_signal"], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    if "perf_stim_induced" in simulation:
        fn_stim_induced = add_prefix(op.join(output_dir, "perf_stim_induced.nii"), prefix)
        logger.info("asl_stim_induced flat shape %s", str(simulation["perf_stim_induced"].shape))
        stim_induced_vol = expand_array_in_mask(simulation["perf_stim_induced"], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

        fn_stim_induced = add_prefix(op.join(output_dir, "perf_stim_induced_ct.nii"), prefix)
        logger.info("asl_stim_induced flat shape %s", str(simulation["perf_stim_induced"].shape))

        # control/tag version: downsample by dsf then apply the ctrl/tag matrix
        dsf = simulation["dsf"]
        perf = np.dot(simulation["ctrl_tag_mat"], simulation["perf_stim_induced"][0:-1:dsf])
        stim_induced_vol = expand_array_in_mask(perf, mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    if "perf_baseline" in simulation:
        fn = add_prefix(op.join(output_dir, "perf_baseline.nii"), prefix)
        # broadcast the baseline to the bold shape, then save the first frame
        pb = np.zeros_like(simulation["bold"]) + simulation["perf_baseline"]
        write_volume(expand_array_in_mask(pb[0], mask_vol), fn, vol_meta)

    if "bold_stim_induced" in simulation:
        fn_stim_induced = add_prefix(op.join(output_dir, "bold_stim_induced.nii"), prefix)
        logger.info("asl_stim_induced flat shape %s", str(simulation["bold_stim_induced"].shape))
        stim_induced_vol = expand_array_in_mask(simulation["bold_stim_induced"], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    m = np.where(mask_vol)
    # combined map: 1 for in-mask background, ic+2 where condition ic is active
    labels_and_mask = mask_vol.copy()[m]

    # NOTE(review): simulation["labels"] is accessed unconditionally here,
    # but the mask may have come from "labels_vol"/"mask" only -- confirm
    # "labels" is always present when this function is called.
    for ic in xrange(simulation["labels"].shape[0]):
        if "condition_defs" in simulation:
            c_name = simulation["condition_defs"][ic].name
        else:
            c_name = "cond%d" % ic
        fn_labels = add_prefix(op.join(output_dir, "labels_%s.nii" % c_name), prefix)
        if "labels" in simulation:
            labels_c = simulation["labels"][ic]
            labels_and_mask[np.where(labels_c)] = ic + 2
            write_volume(expand_array_in_mask(labels_c, mask_vol).astype(np.int32), fn_labels, vol_meta)
        elif "labels_vol" in simulation:
            labels_c = simulation["labels_vol"][ic]
            labels_and_mask[np.where(labels_c[m])] = ic + 2
            write_volume(labels_c.astype(np.int32), fn_labels, vol_meta)

        if "nrls" in simulation:
            nrls_c = simulation["nrls"][ic]
            fn = add_prefix(op.join(output_dir, "nrls_%s.nii" % c_name), prefix)
            write_volume(expand_array_in_mask(nrls_c, mask_vol), fn, vol_meta)
        if "nrls_session" in simulation:
            nrls_session_c = simulation["nrls_session"][ic]
            fn = add_prefix(op.join(output_dir, "nrls_session_%s.nii" % (c_name)), prefix)
            write_volume(expand_array_in_mask(nrls_session_c, mask_vol), fn, vol_meta)

        if "brls" in simulation:
            brls_c = simulation["brls"][ic]
            fn = add_prefix(op.join(output_dir, "brls_%s.nii" % c_name), prefix)
            write_volume(expand_array_in_mask(brls_c, mask_vol), fn, vol_meta)
        if "prls" in simulation:
            prls_c = simulation["prls"][ic]
            fn = add_prefix(op.join(output_dir, "prls_%s.nii" % c_name), prefix)
            write_volume(expand_array_in_mask(prls_c, mask_vol), fn, vol_meta)

        if "neural_efficacies" in simulation:
            ne_c = simulation["neural_efficacies"][ic]
            fn = add_prefix(op.join(output_dir, "neural_efficacies_%s.nii" % c_name), prefix)
            write_volume(expand_array_in_mask(ne_c, mask_vol), fn, vol_meta)

    fn_labels_and_mask = add_prefix(op.join(output_dir, "mask_and_labels.nii"), prefix)

    write_volume(expand_array_in_mask(labels_and_mask, mask_vol).astype(int), fn_labels_and_mask, vol_meta)

    if "bold_full_vol" in simulation or "bold" in simulation:
        fn = add_prefix(op.join(output_dir, "bold.nii"), prefix)
        if "bold_full_vol" in simulation:
            bold4D = simulation["bold_full_vol"]
        else:
            bold = simulation["bold"]
            bold4D = expand_array_in_mask(bold, mask_vol, flat_axis=1)

        write_volume(np.rollaxis(bold4D, 0, 4), fn, vol_meta)

    def save_time_series(k):
        # expand the flat time series simulation[k] to 4D and save it
        if k in simulation:
            fn_stim_induced = add_prefix(op.join(output_dir, k + ".nii"), prefix)
            logger.info("%s flat shape %s", k, str(simulation[k].shape))
            vol = expand_array_in_mask(simulation[k], mask_vol, flat_axis=1)
            write_volume(np.rollaxis(vol, 0, 4), fn_stim_induced)

    save_time_series("flow_induction")
    save_time_series("cbv")
    save_time_series("hbr")
    save_time_series("bold_stim_induced_rescaled")

    if "asl" in simulation:
        fn = add_prefix(op.join(output_dir, "asl.nii"), prefix)
        asl4D = expand_array_in_mask(simulation["asl"], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(asl4D, 0, 4), fn, vol_meta)

    if "outliers" in simulation:
        fn = add_prefix(op.join(output_dir, "outliers.nii"), prefix)
        outliers = expand_array_in_mask(simulation["outliers"], mask_vol, flat_axis=1)

        write_volume(np.rollaxis(outliers, 0, 4), fn, vol_meta)

    if "hrf_group" in simulation:
        hrfgroup = simulation["hrf_group"]
        nb_vox = mask_vol.size
        fn_hrf = add_prefix(op.join(output_dir, "hrf_group.nii"), prefix)
        logger.info("hrf group shape %s", str(simulation["hrf_group"].shape))
        hrfGd = duplicate_hrf(nb_vox, hrfgroup)
        hrfs_vol = expand_array_in_mask(hrfGd, mask_vol, flat_axis=1)
        dt = simulation["dt"]
        chrfs = xndarray(
            hrfs_vol, axes_names=["time"] + MRI3Daxes, axes_domains={"time": np.arange(hrfs_vol.shape[0]) * dt}
        )
        chrfs.save(fn_hrf, vol_meta)

    if bold_3D_vols_dir is not None:
        assert op.exists(bold_3D_vols_dir)
        # NOTE(review): bold4D is only bound if a bold key was present above;
        # confirm callers always provide bold when bold_3D_vols_dir is set.
        for iscan, bscan in enumerate(bold4D):
            fnout = add_prefix("bold_%06d.nii" % (iscan), prefix)
            write_volume(bscan, op.join(bold_3D_vols_dir, fnout), vol_meta)
예제 #18
0
파일: scenarios.py 프로젝트: pyhrf/pyhrf
def simulation_save_vol_outputs(simulation, output_dir, bold_3D_vols_dir=None,
                                simulation_graph_output=None, prefix=None,
                                vol_meta=None):
    """Save the quantities of a simulation as nifti volumes in *output_dir*.

    Args:
        simulation (dict): maps quantity names ('bold', 'hrf', 'labels', ...)
            to flat arrays (voxel axis) or full volumes.
        output_dir (str): existing directory where the nifti files are written.
        bold_3D_vols_dir (str or None): if given, also write one 3D volume per
            scan of the 4D bold series into this directory.
        simulation_graph_output: None, 'simple', 'thumbnails' #TODO
        prefix (str or None): optional prefix prepended to output file names.
        vol_meta: volume meta data (affine/header) forwarded to write_volume.
    """

    if simulation.has_key('paradigm'):
        fn = add_prefix(op.join(output_dir, 'paradigm.csv'), prefix)
        simulation['paradigm'].save_csv(fn)

    # Save all volumes in nifti format.
    # The spatial mask is taken from the first available source:
    if simulation.has_key('labels_vol'):
        mask_vol = np.ones_like(simulation['labels_vol'][0])
    elif simulation.has_key('mask'):
        mask_vol = simulation.get('mask', None)
    elif simulation.has_key('labels'):
        mask_vol = np.ones_like(simulation['labels'][0])
    else:
        raise Exception('Dunno where to get mask')

    logger.info('Vol mask of shape %s', str(mask_vol.shape))

    fn_mask = add_prefix(op.join(output_dir, 'mask.nii'), prefix)
    write_volume(mask_vol.astype(np.int32), fn_mask, vol_meta)

    if simulation.has_key('hrf_territories'):
        fn_h_territories = add_prefix(op.join(output_dir, 'hrf_territories.nii'),
                                      prefix)

        # +1 so that territory labels do not collide with the 0 background
        ht = expand_array_in_mask(simulation['hrf_territories'] + 1, mask_vol)
        write_volume(ht, fn_h_territories, vol_meta)

    if simulation.has_key('hrf'):
        from pyhrf.ndarray import MRI3Daxes
        fn_hrf = add_prefix(op.join(output_dir, 'hrf.nii'), prefix)
        logger.info('hrf flat shape %s', str(simulation['hrf'].shape))
        if simulation['hrf'].ndim == 1:
            # single HRF -> duplicate it over all voxels of the mask
            hrf = (np.ones(mask_vol.size) * simulation['hrf'][:, np.newaxis])
        else:
            hrf = simulation['hrf']

        hrfs_vol = expand_array_in_mask(hrf, mask_vol, flat_axis=1)
        dt = simulation['dt']
        chrfs = xndarray(hrfs_vol, axes_names=['time', ] + MRI3Daxes,
                         axes_domains={'time': np.arange(hrfs_vol.shape[0]) * dt})
        chrfs.save(fn_hrf, vol_meta)

        # time-to-peak map, in number of time samples
        ttp_vol = hrfs_vol.argmax(0)
        fn_ttp = add_prefix(op.join(output_dir, 'ttp.nii'), prefix)
        write_volume(ttp_vol, fn_ttp, vol_meta)

    if simulation.has_key('brf'):
        from pyhrf.ndarray import MRI3Daxes
        fn_brf = add_prefix(op.join(output_dir, 'brf.nii'), prefix)
        logger.info('brf flat shape %s', str(simulation['brf'].shape))
        brfs_vol = expand_array_in_mask(
            simulation['brf'], mask_vol, flat_axis=1)
        dt = simulation['dt']
        cbrfs = xndarray(brfs_vol, axes_names=['time', ] + MRI3Daxes,
                         axes_domains={'time': np.arange(brfs_vol.shape[0]) * dt})
        cbrfs.save(fn_brf, vol_meta)

    if simulation.has_key('prf'):
        from pyhrf.ndarray import MRI3Daxes
        fn_brf = add_prefix(op.join(output_dir, 'prf.nii'), prefix)
        logger.info('prf flat shape %s', str(simulation['prf'].shape))
        brfs_vol = expand_array_in_mask(
            simulation['prf'], mask_vol, flat_axis=1)
        dt = simulation['dt']
        cbrfs = xndarray(brfs_vol, axes_names=['time', ] + MRI3Daxes,
                         axes_domains={'time': np.arange(brfs_vol.shape[0]) * dt})
        cbrfs.save(fn_brf, vol_meta)

    if simulation.has_key('drift'):
        fn_drift = add_prefix(op.join(output_dir, 'drift.nii'), prefix)
        logger.info('drift flat shape %s', str(simulation['drift'].shape))
        drift_vol = expand_array_in_mask(simulation['drift'], mask_vol,
                                         flat_axis=1)
        # NOTE(review): vol_meta is not passed here (nor for the time series
        # below) -- confirm whether this omission is intentional.
        write_volume(np.rollaxis(drift_vol, 0, 4), fn_drift)

    if simulation.has_key('drift_coeffs'):
        fn_drift = add_prefix(op.join(output_dir, 'drift_coeffs.nii'), prefix)
        logger.info(
            'drift_coeffs flat shape %s', str(simulation['drift_coeffs'].shape))
        # Fixed: expand the drift coefficients themselves (previously expanded
        # simulation['drift'], which duplicated the 'drift' output above).
        drift_vol = expand_array_in_mask(simulation['drift_coeffs'], mask_vol,
                                         flat_axis=1)
        write_volume(np.rollaxis(drift_vol, 0, 4), fn_drift)

    if simulation.has_key('noise'):
        fn_noise = add_prefix(op.join(output_dir, 'noise.nii'), prefix)
        logger.info('noise flat shape %s', str(simulation['noise'].shape))
        noise_vol = expand_array_in_mask(simulation['noise'], mask_vol,
                                         flat_axis=1)
        write_volume(np.rollaxis(noise_vol, 0, 4), fn_noise, vol_meta)

        # empirical noise variance over time, per voxel
        fn_noise = add_prefix(op.join(output_dir, 'noise_emp_var.nii'), prefix)
        noise_vol = expand_array_in_mask(simulation['noise'].var(0), mask_vol)
        write_volume(noise_vol, fn_noise, vol_meta)

    if simulation.has_key('noise_var'):
        fn_noise_var = add_prefix(op.join(output_dir, 'noise_var.nii'), prefix)
        logger.info(
            'noise_var flat shape %s', str(simulation['noise_var'].shape))
        noise_var_vol = expand_array_in_mask(simulation['noise_var'], mask_vol)
        write_volume(noise_var_vol, fn_noise_var, vol_meta)

    if simulation.has_key('stim_induced_signal'):
        fn_stim_induced = add_prefix(op.join(output_dir, 'stim_induced.nii'),
                                     prefix)
        logger.info('stim_induced flat shape %s',
                    str(simulation['stim_induced_signal'].shape))
        stim_induced_vol = expand_array_in_mask(simulation['stim_induced_signal'],
                                                mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    if simulation.has_key('perf_stim_induced'):
        fn_stim_induced = add_prefix(op.join(output_dir, 'perf_stim_induced.nii'),
                                     prefix)
        logger.info('asl_stim_induced flat shape %s',
                    str(simulation['perf_stim_induced'].shape))
        stim_induced_vol = expand_array_in_mask(simulation['perf_stim_induced'],
                                                mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

        fn_stim_induced = add_prefix(op.join(output_dir,
                                             'perf_stim_induced_ct.nii'),
                                     prefix)
        logger.info('asl_stim_induced flat shape %s',
                    str(simulation['perf_stim_induced'].shape))

        # downsample to scan times then apply the control/tag modulation
        dsf = simulation['dsf']
        perf = np.dot(simulation['ctrl_tag_mat'],
                      simulation['perf_stim_induced'][0:-1:dsf])
        stim_induced_vol = expand_array_in_mask(perf, mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    if simulation.has_key('perf_baseline'):
        fn = add_prefix(op.join(output_dir, 'perf_baseline.nii'), prefix)
        # NOTE(review): assumes 'bold' is present whenever 'perf_baseline' is
        pb = np.zeros_like(simulation['bold']) + simulation['perf_baseline']
        write_volume(expand_array_in_mask(pb[0], mask_vol), fn, vol_meta)

    if simulation.has_key('bold_stim_induced'):
        fn_stim_induced = add_prefix(op.join(output_dir, 'bold_stim_induced.nii'),
                                     prefix)
        logger.info('asl_stim_induced flat shape %s',
                    str(simulation['bold_stim_induced'].shape))
        stim_induced_vol = expand_array_in_mask(simulation['bold_stim_induced'],
                                                mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    # Flat map over the mask positions; activated positions of condition ic
    # get label ic+2 (0 is background, mask values start at 1).
    m = np.where(mask_vol)
    labels_and_mask = mask_vol.copy()[m]

    # NOTE(review): assumes 'labels' is present -- even when the mask came
    # from 'labels_vol'. Confirm with callers.
    for ic in xrange(simulation['labels'].shape[0]):
        if simulation.has_key('condition_defs'):
            c_name = simulation['condition_defs'][ic].name
        else:
            c_name = 'cond%d' % ic
        fn_labels = add_prefix(op.join(output_dir, 'labels_%s.nii' % c_name),
                               prefix)
        if simulation.has_key('labels'):
            labels_c = simulation['labels'][ic]
            labels_and_mask[np.where(labels_c)] = ic + 2
            write_volume(expand_array_in_mask(labels_c, mask_vol).astype(np.int32),
                         fn_labels, vol_meta)
        elif simulation.has_key('labels_vol'):
            labels_c = simulation['labels_vol'][ic]
            labels_and_mask[np.where(labels_c[m])] = ic + 2
            write_volume(labels_c.astype(np.int32), fn_labels, vol_meta)

        if simulation.has_key('nrls'):
            nrls_c = simulation['nrls'][ic]
            fn = add_prefix(
                op.join(output_dir, 'nrls_%s.nii' % c_name), prefix)
            write_volume(expand_array_in_mask(nrls_c, mask_vol), fn, vol_meta)
        if simulation.has_key('nrls_session'):
            nrls_session_c = simulation['nrls_session'][ic]
            fn = add_prefix(op.join(output_dir, 'nrls_session_%s.nii'
                                    % (c_name)), prefix)
            write_volume(expand_array_in_mask(nrls_session_c, mask_vol),
                         fn, vol_meta)

        if simulation.has_key('brls'):
            brls_c = simulation['brls'][ic]
            fn = add_prefix(
                op.join(output_dir, 'brls_%s.nii' % c_name), prefix)
            write_volume(expand_array_in_mask(brls_c, mask_vol), fn, vol_meta)
        if simulation.has_key('prls'):
            prls_c = simulation['prls'][ic]
            fn = add_prefix(
                op.join(output_dir, 'prls_%s.nii' % c_name), prefix)
            write_volume(expand_array_in_mask(prls_c, mask_vol), fn, vol_meta)

        if simulation.has_key('neural_efficacies'):
            ne_c = simulation['neural_efficacies'][ic]
            fn = add_prefix(op.join(output_dir, 'neural_efficacies_%s.nii'
                                    % c_name), prefix)
            write_volume(expand_array_in_mask(ne_c, mask_vol), fn, vol_meta)

    fn_labels_and_mask = add_prefix(op.join(output_dir, 'mask_and_labels.nii'),
                                    prefix)

    write_volume(expand_array_in_mask(labels_and_mask, mask_vol).astype(int),
                 fn_labels_and_mask, vol_meta)

    if simulation.has_key('bold_full_vol') or simulation.has_key('bold'):
        fn = add_prefix(op.join(output_dir, 'bold.nii'), prefix)
        if simulation.has_key('bold_full_vol'):
            bold4D = simulation['bold_full_vol']
        else:
            bold = simulation['bold']
            bold4D = expand_array_in_mask(bold, mask_vol, flat_axis=1)

        write_volume(np.rollaxis(bold4D, 0, 4), fn, vol_meta)

    def save_time_series(k):
        # Helper: save time series quantity *k* as a 4D volume, if present.
        if simulation.has_key(k):
            fn_stim_induced = add_prefix(op.join(output_dir, k + '.nii'),
                                         prefix)
            logger.info('%s flat shape %s', k, str(simulation[k].shape))
            vol = expand_array_in_mask(simulation[k], mask_vol, flat_axis=1)
            write_volume(np.rollaxis(vol, 0, 4), fn_stim_induced)

    save_time_series('flow_induction')
    save_time_series('cbv')
    save_time_series('hbr')
    save_time_series('bold_stim_induced_rescaled')

    if simulation.has_key('asl'):
        fn = add_prefix(op.join(output_dir, 'asl.nii'), prefix)
        asl4D = expand_array_in_mask(simulation['asl'], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(asl4D, 0, 4), fn, vol_meta)

    if simulation.has_key('outliers'):
        fn = add_prefix(op.join(output_dir, 'outliers.nii'), prefix)
        outliers = expand_array_in_mask(simulation['outliers'], mask_vol,
                                        flat_axis=1)

        write_volume(np.rollaxis(outliers, 0, 4), fn, vol_meta)

    if simulation.has_key('hrf_group'):
        # Fixed: MRI3Daxes was previously only imported in the hrf/brf/prf
        # branches, raising NameError when only 'hrf_group' is present.
        from pyhrf.ndarray import MRI3Daxes
        hrfgroup = simulation['hrf_group']
        nb_vox = mask_vol.size
        fn_hrf = add_prefix(op.join(output_dir, 'hrf_group.nii'), prefix)
        logger.info('hrf group shape %s', str(simulation['hrf_group'].shape))
        hrfGd = duplicate_hrf(nb_vox, hrfgroup)
        hrfs_vol = expand_array_in_mask(hrfGd, mask_vol, flat_axis=1)
        dt = simulation['dt']
        chrfs = xndarray(hrfs_vol, axes_names=['time', ] + MRI3Daxes,
                         axes_domains={'time': np.arange(hrfs_vol.shape[0]) * dt})
        chrfs.save(fn_hrf, vol_meta)

    if bold_3D_vols_dir is not None:
        # NOTE(review): bold4D is only defined if 'bold_full_vol' or 'bold'
        # was present above -- confirm callers always provide one of them.
        assert op.exists(bold_3D_vols_dir)
        for iscan, bscan in enumerate(bold4D):
            fnout = add_prefix('bold_%06d.nii' % (iscan), prefix)
            write_volume(bscan, op.join(bold_3D_vols_dir, fnout), vol_meta)
Example #19
0
def simulation_save_vol_outputs(simulation,
                                output_dir,
                                bold_3D_vols_dir=None,
                                simulation_graph_output=None,
                                prefix=None,
                                vol_meta=None):
    """Save the quantities of a simulation as nifti volumes in *output_dir*.

    Args:
        simulation (dict): maps quantity names ('bold', 'hrf', 'labels', ...)
            to flat arrays (voxel axis) or full volumes.
        output_dir (str): existing directory where the nifti files are written.
        bold_3D_vols_dir (str or None): if given, also write one 3D volume per
            scan of the 4D bold series into this directory.
        simulation_graph_output: None, 'simple', 'thumbnails' #TODO
        prefix (str or None): optional prefix prepended to output file names.
        vol_meta: volume meta data (affine/header) forwarded to write_volume.
    """

    if simulation.has_key('paradigm'):
        fn = add_prefix(op.join(output_dir, 'paradigm.csv'), prefix)
        simulation['paradigm'].save_csv(fn)

    # Save all volumes in nifti format.
    # The spatial mask is taken from the first available source:
    if simulation.has_key('labels_vol'):
        mask_vol = np.ones_like(simulation['labels_vol'][0])
    elif simulation.has_key('mask'):
        mask_vol = simulation.get('mask', None)
    elif simulation.has_key('labels'):
        mask_vol = np.ones_like(simulation['labels'][0])
    else:
        raise Exception('Dunno where to get mask')

    logger.info('Vol mask of shape %s', str(mask_vol.shape))

    fn_mask = add_prefix(op.join(output_dir, 'mask.nii'), prefix)
    write_volume(mask_vol.astype(np.int32), fn_mask, vol_meta)

    if simulation.has_key('hrf_territories'):
        fn_h_territories = add_prefix(
            op.join(output_dir, 'hrf_territories.nii'), prefix)

        # +1 so that territory labels do not collide with the 0 background
        ht = expand_array_in_mask(simulation['hrf_territories'] + 1, mask_vol)
        write_volume(ht, fn_h_territories, vol_meta)

    if simulation.has_key('hrf'):
        from pyhrf.ndarray import MRI3Daxes
        fn_hrf = add_prefix(op.join(output_dir, 'hrf.nii'), prefix)
        logger.info('hrf flat shape %s', str(simulation['hrf'].shape))
        if simulation['hrf'].ndim == 1:
            # single HRF -> duplicate it over all voxels of the mask
            hrf = (np.ones(mask_vol.size) * simulation['hrf'][:, np.newaxis])
        else:
            hrf = simulation['hrf']

        hrfs_vol = expand_array_in_mask(hrf, mask_vol, flat_axis=1)
        dt = simulation['dt']
        chrfs = xndarray(
            hrfs_vol,
            axes_names=['time', ] + MRI3Daxes,
            axes_domains={'time': np.arange(hrfs_vol.shape[0]) * dt})
        chrfs.save(fn_hrf, vol_meta)

        # time-to-peak map, in number of time samples
        ttp_vol = hrfs_vol.argmax(0)
        fn_ttp = add_prefix(op.join(output_dir, 'ttp.nii'), prefix)
        write_volume(ttp_vol, fn_ttp, vol_meta)

    if simulation.has_key('brf'):
        from pyhrf.ndarray import MRI3Daxes
        fn_brf = add_prefix(op.join(output_dir, 'brf.nii'), prefix)
        logger.info('brf flat shape %s', str(simulation['brf'].shape))
        brfs_vol = expand_array_in_mask(simulation['brf'],
                                        mask_vol,
                                        flat_axis=1)
        dt = simulation['dt']
        cbrfs = xndarray(
            brfs_vol,
            axes_names=['time', ] + MRI3Daxes,
            axes_domains={'time': np.arange(brfs_vol.shape[0]) * dt})
        cbrfs.save(fn_brf, vol_meta)

    if simulation.has_key('prf'):
        from pyhrf.ndarray import MRI3Daxes
        fn_brf = add_prefix(op.join(output_dir, 'prf.nii'), prefix)
        logger.info('prf flat shape %s', str(simulation['prf'].shape))
        brfs_vol = expand_array_in_mask(simulation['prf'],
                                        mask_vol,
                                        flat_axis=1)
        dt = simulation['dt']
        cbrfs = xndarray(
            brfs_vol,
            axes_names=['time', ] + MRI3Daxes,
            axes_domains={'time': np.arange(brfs_vol.shape[0]) * dt})
        cbrfs.save(fn_brf, vol_meta)

    if simulation.has_key('drift'):
        fn_drift = add_prefix(op.join(output_dir, 'drift.nii'), prefix)
        logger.info('drift flat shape %s', str(simulation['drift'].shape))
        drift_vol = expand_array_in_mask(simulation['drift'],
                                         mask_vol,
                                         flat_axis=1)
        # NOTE(review): vol_meta is not passed here (nor for the time series
        # below) -- confirm whether this omission is intentional.
        write_volume(np.rollaxis(drift_vol, 0, 4), fn_drift)

    if simulation.has_key('drift_coeffs'):
        fn_drift = add_prefix(op.join(output_dir, 'drift_coeffs.nii'), prefix)
        logger.info('drift_coeffs flat shape %s',
                    str(simulation['drift_coeffs'].shape))
        # Fixed: expand the drift coefficients themselves (previously expanded
        # simulation['drift'], which duplicated the 'drift' output above).
        drift_vol = expand_array_in_mask(simulation['drift_coeffs'],
                                         mask_vol,
                                         flat_axis=1)
        write_volume(np.rollaxis(drift_vol, 0, 4), fn_drift)

    if simulation.has_key('noise'):
        fn_noise = add_prefix(op.join(output_dir, 'noise.nii'), prefix)
        logger.info('noise flat shape %s', str(simulation['noise'].shape))
        noise_vol = expand_array_in_mask(simulation['noise'],
                                         mask_vol,
                                         flat_axis=1)
        write_volume(np.rollaxis(noise_vol, 0, 4), fn_noise, vol_meta)

        # empirical noise variance over time, per voxel
        fn_noise = add_prefix(op.join(output_dir, 'noise_emp_var.nii'), prefix)
        noise_vol = expand_array_in_mask(simulation['noise'].var(0), mask_vol)
        write_volume(noise_vol, fn_noise, vol_meta)

    if simulation.has_key('noise_var'):
        fn_noise_var = add_prefix(op.join(output_dir, 'noise_var.nii'), prefix)
        logger.info('noise_var flat shape %s',
                    str(simulation['noise_var'].shape))
        noise_var_vol = expand_array_in_mask(simulation['noise_var'], mask_vol)
        write_volume(noise_var_vol, fn_noise_var, vol_meta)

    if simulation.has_key('stim_induced_signal'):
        fn_stim_induced = add_prefix(op.join(output_dir, 'stim_induced.nii'),
                                     prefix)
        logger.info('stim_induced flat shape %s',
                    str(simulation['stim_induced_signal'].shape))
        stim_induced_vol = expand_array_in_mask(
            simulation['stim_induced_signal'], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    if simulation.has_key('perf_stim_induced'):
        fn_stim_induced = add_prefix(
            op.join(output_dir, 'perf_stim_induced.nii'), prefix)
        logger.info('asl_stim_induced flat shape %s',
                    str(simulation['perf_stim_induced'].shape))
        stim_induced_vol = expand_array_in_mask(
            simulation['perf_stim_induced'], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

        fn_stim_induced = add_prefix(
            op.join(output_dir, 'perf_stim_induced_ct.nii'), prefix)
        logger.info('asl_stim_induced flat shape %s',
                    str(simulation['perf_stim_induced'].shape))

        # downsample to scan times then apply the control/tag modulation
        dsf = simulation['dsf']
        perf = np.dot(simulation['ctrl_tag_mat'],
                      simulation['perf_stim_induced'][0:-1:dsf])
        stim_induced_vol = expand_array_in_mask(perf, mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    if simulation.has_key('perf_baseline'):
        fn = add_prefix(op.join(output_dir, 'perf_baseline.nii'), prefix)
        # NOTE(review): assumes 'bold' is present whenever 'perf_baseline' is
        pb = np.zeros_like(simulation['bold']) + simulation['perf_baseline']
        write_volume(expand_array_in_mask(pb[0], mask_vol), fn, vol_meta)

    if simulation.has_key('bold_stim_induced'):
        fn_stim_induced = add_prefix(
            op.join(output_dir, 'bold_stim_induced.nii'), prefix)
        logger.info('asl_stim_induced flat shape %s',
                    str(simulation['bold_stim_induced'].shape))
        stim_induced_vol = expand_array_in_mask(
            simulation['bold_stim_induced'], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(stim_induced_vol, 0, 4), fn_stim_induced)

    # Flat map over the mask positions; activated positions of condition ic
    # get label ic+2 (0 is background, mask values start at 1).
    m = np.where(mask_vol)
    labels_and_mask = mask_vol.copy()[m]

    # NOTE(review): assumes 'labels' is present -- even when the mask came
    # from 'labels_vol'. Confirm with callers.
    for ic in xrange(simulation['labels'].shape[0]):
        if simulation.has_key('condition_defs'):
            c_name = simulation['condition_defs'][ic].name
        else:
            c_name = 'cond%d' % ic
        fn_labels = add_prefix(op.join(output_dir, 'labels_%s.nii' % c_name),
                               prefix)
        if simulation.has_key('labels'):
            labels_c = simulation['labels'][ic]
            labels_and_mask[np.where(labels_c)] = ic + 2
            write_volume(
                expand_array_in_mask(labels_c, mask_vol).astype(np.int32),
                fn_labels, vol_meta)
        elif simulation.has_key('labels_vol'):
            labels_c = simulation['labels_vol'][ic]
            labels_and_mask[np.where(labels_c[m])] = ic + 2
            write_volume(labels_c.astype(np.int32), fn_labels, vol_meta)

        if simulation.has_key('nrls'):
            nrls_c = simulation['nrls'][ic]
            fn = add_prefix(op.join(output_dir, 'nrls_%s.nii' % c_name),
                            prefix)
            write_volume(expand_array_in_mask(nrls_c, mask_vol), fn, vol_meta)
        if simulation.has_key('nrls_session'):
            nrls_session_c = simulation['nrls_session'][ic]
            fn = add_prefix(
                op.join(output_dir, 'nrls_session_%s.nii' % (c_name)), prefix)
            write_volume(expand_array_in_mask(nrls_session_c, mask_vol), fn,
                         vol_meta)

        if simulation.has_key('brls'):
            brls_c = simulation['brls'][ic]
            fn = add_prefix(op.join(output_dir, 'brls_%s.nii' % c_name),
                            prefix)
            write_volume(expand_array_in_mask(brls_c, mask_vol), fn, vol_meta)
        if simulation.has_key('prls'):
            prls_c = simulation['prls'][ic]
            fn = add_prefix(op.join(output_dir, 'prls_%s.nii' % c_name),
                            prefix)
            write_volume(expand_array_in_mask(prls_c, mask_vol), fn, vol_meta)

        if simulation.has_key('neural_efficacies'):
            ne_c = simulation['neural_efficacies'][ic]
            fn = add_prefix(
                op.join(output_dir, 'neural_efficacies_%s.nii' % c_name),
                prefix)
            write_volume(expand_array_in_mask(ne_c, mask_vol), fn, vol_meta)

    fn_labels_and_mask = add_prefix(op.join(output_dir, 'mask_and_labels.nii'),
                                    prefix)

    write_volume(
        expand_array_in_mask(labels_and_mask, mask_vol).astype(int),
        fn_labels_and_mask, vol_meta)

    if simulation.has_key('bold_full_vol') or simulation.has_key('bold'):
        fn = add_prefix(op.join(output_dir, 'bold.nii'), prefix)
        if simulation.has_key('bold_full_vol'):
            bold4D = simulation['bold_full_vol']
        else:
            bold = simulation['bold']
            bold4D = expand_array_in_mask(bold, mask_vol, flat_axis=1)

        write_volume(np.rollaxis(bold4D, 0, 4), fn, vol_meta)

    def save_time_series(k):
        # Helper: save time series quantity *k* as a 4D volume, if present.
        if simulation.has_key(k):
            fn_stim_induced = add_prefix(op.join(output_dir, k + '.nii'),
                                         prefix)
            logger.info('%s flat shape %s', k, str(simulation[k].shape))
            vol = expand_array_in_mask(simulation[k], mask_vol, flat_axis=1)
            write_volume(np.rollaxis(vol, 0, 4), fn_stim_induced)

    save_time_series('flow_induction')
    save_time_series('cbv')
    save_time_series('hbr')
    save_time_series('bold_stim_induced_rescaled')

    if simulation.has_key('asl'):
        fn = add_prefix(op.join(output_dir, 'asl.nii'), prefix)
        asl4D = expand_array_in_mask(simulation['asl'], mask_vol, flat_axis=1)
        write_volume(np.rollaxis(asl4D, 0, 4), fn, vol_meta)

    if simulation.has_key('outliers'):
        fn = add_prefix(op.join(output_dir, 'outliers.nii'), prefix)
        outliers = expand_array_in_mask(simulation['outliers'],
                                        mask_vol,
                                        flat_axis=1)

        write_volume(np.rollaxis(outliers, 0, 4), fn, vol_meta)

    if simulation.has_key('hrf_group'):
        # Fixed: MRI3Daxes was previously only imported in the hrf/brf/prf
        # branches, raising NameError when only 'hrf_group' is present.
        from pyhrf.ndarray import MRI3Daxes
        hrfgroup = simulation['hrf_group']
        nb_vox = mask_vol.size
        fn_hrf = add_prefix(op.join(output_dir, 'hrf_group.nii'), prefix)
        logger.info('hrf group shape %s', str(simulation['hrf_group'].shape))
        hrfGd = duplicate_hrf(nb_vox, hrfgroup)
        hrfs_vol = expand_array_in_mask(hrfGd, mask_vol, flat_axis=1)
        dt = simulation['dt']
        chrfs = xndarray(
            hrfs_vol,
            axes_names=['time', ] + MRI3Daxes,
            axes_domains={'time': np.arange(hrfs_vol.shape[0]) * dt})
        chrfs.save(fn_hrf, vol_meta)

    if bold_3D_vols_dir is not None:
        # NOTE(review): bold4D is only defined if 'bold_full_vol' or 'bold'
        # was present above -- confirm callers always provide one of them.
        assert op.exists(bold_3D_vols_dir)
        for iscan, bscan in enumerate(bold4D):
            fnout = add_prefix('bold_%06d.nii' % (iscan), prefix)
            write_volume(bscan, op.join(bold_3D_vols_dir, fnout), vol_meta)
Example #20
0
File: glm.py  Project: thomas-vincent/pyhrf
def glm_nipy_from_files(bold_file, tr,  paradigm_csv_file, output_dir,
                        mask_file, session=0, contrasts=None,
                        con_test_baseline=0.0,
                        hrf_model='Canonical',
                        drift_model='Cosine', hfcut=128,
                        residuals_model='spherical', fit_method='ols',
                        fir_delays=None):
    """Fit a nipy GLM from volume files and save beta / contrast maps.

    #TODO: handle surface data
    hrf_model : Canonical | Canonical with Derivative | FIR

    Args:
        bold_file (str): path to the 4D bold nifti file.
        tr (float): repetition time in seconds.
        paradigm_csv_file (str): path to the CSV paradigm definition.
        output_dir (str): directory where output maps are written.
        mask_file (str): path to the binary mask volume.
        session (int): session index (not used in this function's body).
        contrasts: contrast definitions forwarded to glm_nipy.
        con_test_baseline (float): baseline value for contrast p-values.
        hrf_model, drift_model, hfcut, residuals_model, fit_method:
            forwarded to glm_nipy.
        fir_delays (list of int or None): FIR delays; defaults to [0].
            (None sentinel instead of a mutable default argument.)

    Returns:
        tuple: (beta_files, beta_values, beta_vars_voxels, dof)
    """
    # Fixed: avoid the shared mutable default argument [0]
    if fir_delays is None:
        fir_delays = [0]

    fdata = FmriData.from_vol_files(mask_file, paradigm_csv_file,
                                    [bold_file], tr)
    g, dm, cons = glm_nipy(fdata, contrasts=contrasts, hrf_model=hrf_model,
                           hfcut=hfcut, drift_model=drift_model,
                           residuals_model=residuals_model,
                           fit_method=fit_method, fir_delays=fir_delays)

    ns, nr = dm.matrix.shape
    cdesign_matrix = xndarray(dm.matrix, axes_names=['time', 'regressor'],
                              axes_domains={'time': np.arange(ns) * tr,
                                            'regressor': dm.names})

    cdesign_matrix.save(op.join(output_dir, 'design_matrix.nii'))

    beta_files = []
    beta_values = dict.fromkeys(dm.names)
    beta_vars = dict.fromkeys(dm.names)
    beta_vars_voxels = dict.fromkeys(dm.names)
    # diagonal of the normalized beta covariance matrix (hoisted out of loop)
    nvbeta_diag = sp.diag(g.nvbeta)
    for ib, bname in enumerate(dm.names):
        # beta values
        beta_vol = expand_array_in_mask(g.beta[ib], fdata.roiMask > 0)
        beta_fn = op.join(output_dir, 'beta_%s.nii' % bname)
        write_volume(beta_vol, beta_fn, fdata.meta_obj)
        beta_files.append(beta_fn)
        beta_values[bname] = beta_vol

        # normalized variance of betas: diag of cov matrix
        beta_vars[bname] = nvbeta_diag[ib]
        # variance for all voxels, condition ib (g.s2 is the residual ResMS)
        beta_vars_voxels[bname] = nvbeta_diag[ib] * g.s2

    if cons is not None:
        con_files = []
        pval_files = []
        for cname, con in cons.iteritems():
            con_vol = expand_array_in_mask(con.effect, fdata.roiMask > 0)
            con_fn = op.join(output_dir, 'con_effect_%s.nii' % cname)
            write_volume(con_vol, con_fn, fdata.meta_obj)
            con_files.append(con_fn)

            pval_vol = expand_array_in_mask(con.pvalue(con_test_baseline),
                                            fdata.roiMask > 0)
            pval_fn = op.join(output_dir, 'con_pvalue_%s.nii' % cname)
            write_volume(pval_vol, pval_fn, fdata.meta_obj)
            pval_files.append(pval_fn)
    else:
        con_files = None
        pval_files = None

    dof = g.dof

    # TODO: FIR stuffs
    return beta_files, beta_values, beta_vars_voxels, dof  # , con_files, pval_files
Example #21
0
File: glm.py  Project: Solvi/pyhrf
    # ax.set_title('Design matrix')
    # plt.savefig(op.join(output_dir, 'design_matrix.png'))
    # design_matrix.save(...)

    # GLM fit
    my_glm = nipy.labs.glm.glm()
    pyhrf.verbose(1, 'Fit GLM - method: %s, residual model: %s' \
                      %(fit_method,residuals_model))
    glm = my_glm.fit(Y.T, design_matrix.matrix, method=fit_method,
                     model=residuals_model)

    # Beta outputs
    beta_files = []
    affine = fmri_image.get_affine()
    for ib, bname in enumerate(design_matrix.names):
        beta_vol = expand_array_in_mask(my_glm.beta[ib], mask_array)
        beta_image = Nifti1Image(beta_vol, affine)
        beta_file = op.join(output_dir, 'beta_%s.nii' %bname)
        save(beta_image, beta_file)
        beta_files.append(beta_file)


    from nipy.modalities.fmri.hemodynamic_models import _regressor_names
    from pyhrf.ndarray import MRI3Daxes
    if hrf_model == 'FIR':
        drnames = design_matrix.names
        nvox = mask_array.sum()
        print 'nvox:', nvox
        for cn in set(paradigm.con_id):
            fir_rnames = _regressor_names(cn, 'FIR', fir_delays)
            #lfir = len(fir_rnames)
Example #22
0
def parcellation_for_jde(fmri_data,
                         avg_parcel_size=250,
                         output_dir=None,
                         method='gkm',
                         glm_drift='Cosine',
                         glm_hfcut=128):
    """Compute a functional parcellation of *fmri_data* for JDE analysis.

    A GLM is fitted first; the parcellation is then computed from the
    resulting feature maps (contrast p-values when contrasts are defined,
    otherwise the betas of the non-constant, non-drift regressors).

    method: gkm, ward, ward_and_gkm

    Returns:
        tuple: (parcellation array, path of the parcellation file)
    """
    if output_dir is None:
        output_dir = tempfile.mkdtemp(prefix='pyhrf_JDE_parcellation_GLM',
                                      dir=pyhrf.cfg['global']['tmp_path'])

    glm_dir = op.join(output_dir, 'GLM_for_parcellation')
    if not op.exists(glm_dir):
        os.makedirs(glm_dir)

    logger.info('GLM for parcellation')

    glm_fit, design, contrast_res = glm_nipy(fmri_data, drift_model=glm_drift,
                                             hfcut=glm_hfcut)

    # Gather (name, flat map) feature pairs to drive the parcellation.
    if contrast_res is not None:
        features = [('con_pval_%s' % cname, con.pvalue())
                    for cname, con in contrast_res.iteritems()]
    else:
        cst_or_drift = re.compile(".*constant.*|.*drift.*")
        features = []
        for ir, reg_name in enumerate(design.names):
            if cst_or_drift.match(reg_name):
                continue
            features.append(('beta_%s' % reg_name, glm_fit.beta[ir]))

    # Write each feature map as a volume in the GLM output directory.
    feature_files = []
    for feat_name, feat_map in features:
        feat_vol = expand_array_in_mask(feat_map, fmri_data.roiMask > 0)
        feat_fn = op.join(glm_dir, '%s.nii' % feat_name)
        write_volume(feat_vol, feat_fn, fmri_data.meta_obj)
        feature_files.append(feat_fn)

    mask_file = op.join(glm_dir, 'mask.nii')
    write_volume(fmri_data.roiMask > 0, mask_file, fmri_data.meta_obj)

    nb_vox = fmri_data.get_nb_vox_in_mask()
    nb_parcels = round_nb_parcels(nb_vox * 1. / avg_parcel_size)

    logger.info('Parcellation from GLM outputs, method: %s, nb parcels: %d',
                method, nb_parcels)

    if fmri_data.data_type == 'volume':
        parcellation_file = op.join(
            output_dir, 'parcellation_%s_np%d.nii' % (method, nb_parcels))
        make_parcellation_from_files(feature_files, mask_file,
                                     parcellation_file, nb_parcels, method)
        parcellation, _ = read_volume(parcellation_file)
    else:
        # surface data: last data file is the mesh
        mesh_file = fmri_data.data_files[-1]
        parcellation_file = op.join(
            output_dir, 'parcellation_%s_np%d.gii' % (method, nb_parcels))
        make_parcellation_surf_from_files(feature_files, mesh_file,
                                          parcellation_file, nb_parcels,
                                          method, verbose=1)
        parcellation, _ = read_texture(parcellation_file)

    logger.info(parcellation_report(parcellation))

    return parcellation, parcellation_file
Example #23
0
def glm_nipy_from_files(bold_file,
                        tr,
                        paradigm_csv_file,
                        output_dir,
                        mask_file,
                        session=0,
                        contrasts=None,
                        con_test_baseline=0.0,
                        hrf_model='Canonical',
                        drift_model='Cosine',
                        hfcut=128,
                        residuals_model='spherical',
                        fit_method='ols',
                        fir_delays=None):
    """
    Fit a nipy GLM on volumic fMRI data loaded from files and save the
    outputs (design matrix, beta maps, contrast effect and p-value maps)
    as nifti volumes in ``output_dir``.

    #TODO: handle surface data
    hrf_model : Canonical | Canonical with Derivative | FIR

    Args:
        - bold_file (str): path to the 4D BOLD nifti file
        - tr (float): repetition time, in seconds
        - paradigm_csv_file (str): path to the CSV paradigm definition
        - output_dir (str): directory where all output volumes are written
        - mask_file (str): path to the binary mask volume
        - session (int): session index
          (NOTE(review): not used in this function body — verify intent)
        - contrasts: contrast definitions forwarded to glm_nipy
        - con_test_baseline (float): baseline value used for contrast
          p-value tests
        - hrf_model (str): HRF model name (see above)
        - drift_model (str): low-frequency drift model
        - hfcut (float): high-pass cut-off period
        - residuals_model (str): noise model for GLM fitting
        - fit_method (str): estimation method (e.g. 'ols')
        - fir_delays (list of int or None): FIR delays; defaults to [0]
          when None

    Return:
        tuple (beta_files, beta_values, beta_vars_voxels, dof):
        - beta_files: list of paths to the saved beta volumes
        - beta_values: dict mapping regressor name -> expanded beta volume
        - beta_vars_voxels: dict mapping regressor name -> voxel-wise
          normalized beta variance
        - dof: degrees of freedom of the fitted GLM
    """
    # Use a None sentinel instead of a mutable default argument
    # (the previous default fir_delays=[0] was shared across calls).
    if fir_delays is None:
        fir_delays = [0]

    fdata = FmriData.from_vol_files(mask_file, paradigm_csv_file, [bold_file],
                                    tr)
    g, dm, cons = glm_nipy(fdata,
                           contrasts=contrasts,
                           hrf_model=hrf_model,
                           hfcut=hfcut,
                           drift_model=drift_model,
                           residuals_model=residuals_model,
                           fit_method=fit_method,
                           fir_delays=fir_delays)

    ns, nr = dm.matrix.shape
    cdesign_matrix = xndarray(dm.matrix,
                              axes_names=['time', 'regressor'],
                              axes_domains={
                                  'time': np.arange(ns) * tr,
                                  'regressor': dm.names
                              })

    cdesign_matrix.save(op.join(output_dir, 'design_matrix.nii'))

    beta_files = []
    beta_values = dict.fromkeys(dm.names)
    beta_vars = dict.fromkeys(dm.names)
    beta_vars_voxels = dict.fromkeys(dm.names)
    # Hoist the loop-invariant diagonal of the normalized beta covariance
    # matrix (was recomputed twice per iteration).
    nvbeta_diag = sp.diag(g.nvbeta)
    for ib, bname in enumerate(dm.names):
        # beta values, expanded from the flat in-mask array to a volume
        beta_vol = expand_array_in_mask(g.beta[ib], fdata.roiMask > 0)
        beta_fn = op.join(output_dir, 'beta_%s.nii' % bname)
        write_volume(beta_vol, beta_fn, fdata.meta_obj)
        beta_files.append(beta_fn)
        beta_values[bname] = beta_vol

        # normalized variance of betas
        # variance: diag of cov matrix
        # NOTE: beta_vars is filled but not returned; kept for parity with
        # the original implementation.
        beta_vars[bname] = nvbeta_diag[ib]
        # sig2 = g.s2 #ResMS
        # variance for all voxels, condition ib
        var_cond = nvbeta_diag[ib] * g.s2
        beta_vars_voxels[bname] = var_cond
        #beta_var_fn = op.join(output_dir, 'var_beta_%s.nii' %bname)
        #write_volume(beta_var, beta_var_fn, fdata.meta_obj)
        # beta_var_files.append(beta_var_fn)

    if cons is not None:
        con_files = []
        pval_files = []
        for cname, con in cons.iteritems():
            # contrast effect map
            con_vol = expand_array_in_mask(con.effect, fdata.roiMask > 0)
            con_fn = op.join(output_dir, 'con_effect_%s.nii' % cname)
            write_volume(con_vol, con_fn, fdata.meta_obj)
            con_files.append(con_fn)

            # p-value map of the contrast test against the given baseline
            pval_vol = expand_array_in_mask(con.pvalue(con_test_baseline),
                                            fdata.roiMask > 0)
            pval_fn = op.join(output_dir, 'con_pvalue_%s.nii' % cname)
            write_volume(pval_vol, pval_fn, fdata.meta_obj)
            pval_files.append(pval_fn)
    else:
        con_files = None
        pval_files = None

    dof = g.dof
    # if do_ppm:
    # for

    # TODO: FIR stuffs
    # , con_files, pval_files
    return beta_files, beta_values, beta_vars_voxels, dof