Example #1
    def glm_matlab_from_files(bold_file, tr, paradigm_csv_file, output_dir,
                              mask_file, hf_cut=128, hack_mask=False):
        """
        Only mono-session
        #TODO: compute mask if mask_file does not exist
        #TODO: handle contrasts
        """

        # Functional mask
        # if not op.exists(mask_file):
        #     pyhrf.verbose(1, 'Mask file does not exist. Computing mask from '\
        #                       'BOLD data')
        #     compute_mask_files(bold_files, mask_file, False, 0.4, 0.9)


        #split BOLD into 3D vols:
        bold, hbold = read_volume(bold_file)
        bold_files = []
        tmp_path = tempfile.mkdtemp(dir=pyhrf.cfg['global']['tmp_path'])
        for iscan, bscan in enumerate(bold):
            f = op.join(tmp_path, 'bold_%06d.nii' %iscan)
            write_volume(bscan, f, hbold)
            bold_files.append(f)
        bold_files = ';'.join(bold_files)

        script_path = op.join(op.dirname(pyhrf.__file__),'../../script/SPM')
        spm_path = pyhrf.cfg['global']['spm_path']
        matlab_code = "cd %s;paradigm_file='%s';TR=%f;mask_file='%s';" \
            "bold_files='%s';output_path='%s';" \
            "HF_cut=%f;spm_path='%s';api=1;hack_mask=%d;glm_intra_subj;exit" \
            %(script_path,paradigm_csv_file,tr,mask_file,bold_files,output_dir,
              hf_cut,spm_path,hack_mask)

        matlab_cmd = 'matlab -nosplash -nodesktop -r "%s"'%matlab_code

        if op.exists(op.join(output_dir,'SPM.mat')):
            #remove SPM.mat so that SPM won't ask about overwriting
            os.remove(op.join(output_dir,'SPM.mat'))

        #print 'matlab cmd:'
        #print matlab_cmd
        os.system(matlab_cmd)

        # Fix shape of outputs if necessary
        # eg if input data has shape (n,m,1) then SPM will write outputs of
        # shape (n,m) so that they are not consistent with their QForm
        input_shape = bscan.shape
        for foutput in glob.glob(op.join(output_dir, '*.img')):
            data, h = read_volume(foutput)
            if data.ndim < 3:
                sm = ','.join([ [':','np.newaxis'][d==1] \
                                    for d in input_shape ] )
                exec('data = data[%s]' %sm)
                assert data.shape == input_shape
                write_volume(data, foutput, h)

        shutil.rmtree(tmp_path)
        #TODO: maybe find a better way to grab beta file names
        beta_files = sorted(glob.glob(op.join(output_dir,'beta_*.img')))
        return beta_files
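A minimal usage sketch for glm_matlab_from_files; the paths, TR and output directory below are illustrative assumptions, and MATLAB plus SPM must be configured in pyhrf.cfg for the call to actually run:

# Hypothetical inputs -- adjust the paths to your own data.
beta_files = glm_matlab_from_files('bold_session0.nii', tr=2.4,
                                   paradigm_csv_file='paradigm.csv',
                                   output_dir='./spm_glm',
                                   mask_file='mask.nii', hf_cut=128)
# beta_files is the sorted list of SPM beta_*.img files in output_dir.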
Example #2
    def glm_matlab_from_files(bold_file, tr, paradigm_csv_file, output_dir,
                              mask_file, hf_cut=128, hack_mask=False):
        """
        Only mono-session
        #TODO: compute mask if mask_file does not exist
        #TODO: handle contrasts
        """

        # Functional mask
        # if not op.exists(mask_file):
        #     logger.info('Mask file does not exist. Computing mask from '
        #                 'BOLD data')
        #     compute_mask_files(bold_files, mask_file, False, 0.4, 0.9)

        # split BOLD into 3D vols:
        bold, hbold = read_volume(bold_file)
        bold_files = []
        tmp_path = tempfile.mkdtemp(dir=pyhrf.cfg['global']['tmp_path'])
        for iscan, bscan in enumerate(bold):
            f = op.join(tmp_path, 'bold_%06d.nii' % iscan)
            write_volume(bscan, f, hbold)
            bold_files.append(f)
        bold_files = ';'.join(bold_files)

        script_path = op.join(op.dirname(pyhrf.__file__), '../../script/SPM')
        spm_path = pyhrf.cfg['global']['spm_path']
        matlab_code = "cd %s;paradigm_file='%s';TR=%f;mask_file='%s';" \
            "bold_files='%s';output_path='%s';" \
            "HF_cut=%f;spm_path='%s';api=1;hack_mask=%d;glm_intra_subj;exit" \
            % (script_path, paradigm_csv_file, tr, mask_file, bold_files, output_dir,
               hf_cut, spm_path, hack_mask)

        matlab_cmd = 'matlab -nosplash -nodesktop -r "%s"' % matlab_code

        if op.exists(op.join(output_dir, 'SPM.mat')):
            # remove SPM.mat so that SPM won't ask about overwriting
            os.remove(op.join(output_dir, 'SPM.mat'))

        # print 'matlab cmd:'
        # print matlab_cmd
        os.system(matlab_cmd)

        # Fix shape of outputs if necessary
        # eg if input data has shape (n,m,1) then SPM will write outputs of
        # shape (n,m) so that they are not consistent with their QForm
        input_shape = bscan.shape
        for foutput in glob.glob(op.join(output_dir, '*.img')):
            data, h = read_volume(foutput)
            if data.ndim < 3:
                sm = ','.join([[':', 'np.newaxis'][d == 1]
                               for d in input_shape])
                exec('data = data[%s]' % sm)
                assert data.shape == input_shape
                write_volume(data, foutput, h)

        shutil.rmtree(tmp_path)
        # TODO: maybe find a better way to grab beta file names
        beta_files = sorted(glob.glob(op.join(output_dir, 'beta_*.img')))
        return beta_files
Example #3
def split_big_parcels(parcel_file, output_file, max_size=400):
    print 'split_big_parcels ...'
    roiMask, roiHeader = read_volume(parcel_file)
    roiIds = np.unique(roiMask)
    background = roiIds.min()
    labels = roiMask[np.where(roiMask > background)].astype(int)
    if (np.bincount(labels) <= max_size).all():
        logger.info('no parcel to split')
        return

    graphs = parcels_to_graphs(roiMask, kerMask3D_6n)
    for roiId in roiIds:
        if roiId != background:
            roi_size = (roiMask == roiId).sum()
            if roi_size > max_size:
                print 'roi %d, size = %d' % (roiId, roi_size)
                nparcels = int(np.ceil(roi_size * 1. / max_size))
                print 'split into %d parcels ...' % (nparcels)
                split_parcel(labels,
                             graphs,
                             roiId,
                             nparcels,
                             inplace=True,
                             verbosity=1)

    final_roi_mask = np.zeros_like(roiMask)
    final_roi_mask[np.where(roiMask > background)] = labels
    assert (np.bincount(labels) <= max_size).all()
    write_volume(final_roi_mask, output_file, roiHeader)
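A short call sketch, assuming an existing parcellation volume on disk (the file names are placeholders); any parcel larger than max_size voxels is split and the relabelled mask is written to output_file:

# Hypothetical file names; parcels above 300 voxels get subdivided.
split_big_parcels('parcellation.nii', 'parcellation_split.nii', max_size=300)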
Example #4
def split_big_parcels(parcel_file, output_file, max_size=400):
    print 'split_big_parcels ...'
    roiMask, roiHeader = read_volume(parcel_file)
    roiIds = np.unique(roiMask)
    background = roiIds.min()
    labels = roiMask[np.where(roiMask>background)].astype(int)
    if (np.bincount(labels) <= max_size).all():
        pyhrf.verbose(1, 'no parcel to split')
        return

    graphs = parcels_to_graphs(roiMask, kerMask3D_6n)
    for roiId in roiIds:
        if roiId != background:
            roi_size = (roiMask==roiId).sum()
            if roi_size > max_size:
                print 'roi %d, size = %d' %(roiId, roi_size)
                nparcels = int(np.ceil(roi_size*1./max_size))
                print 'split into %d parcels ...' %(nparcels)
                split_parcel(labels, graphs, roiId, nparcels, inplace=True,
                             verbosity=1)

    final_roi_mask = np.zeros_like(roiMask)
    final_roi_mask[np.where(roiMask>background)] = labels
    #print np.bincount(labels)
    assert (np.bincount(labels) <= max_size).all()
    write_volume(final_roi_mask, output_file, roiHeader)
Example #5
def load_many_hrf_territories(nb_hrf_territories):
    from pyhrf.tools._io import read_volume
    fn = pyhrf.get_data_file_name('simu_hrf_%d_territories.nii' %
                                  nb_hrf_territories)
    assert op.exists(fn)
    territories = read_volume(fn)[0]
    return territories[np.where(np.ones_like(territories))]
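Note that territories[np.where(np.ones_like(territories))] simply flattens the volume into a 1D array of voxel values; a self-contained sketch of the equivalent NumPy behaviour:

import numpy as np

vol = np.arange(8).reshape(2, 2, 2)       # stand-in for the territory volume
flat = vol[np.where(np.ones_like(vol))]   # same indexing trick as above
assert np.array_equal(flat, vol.ravel())  # i.e. equivalent to a plain ravel()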
Example #6
def load_many_hrf_territories(nb_hrf_territories):
    from pyhrf.tools._io import read_volume

    fn = pyhrf.get_data_file_name("simu_hrf_%d_territories.nii" % nb_hrf_territories)
    assert op.exists(fn)
    territories = read_volume(fn)[0]
    return territories[np.where(np.ones_like(territories))]
Example #7
    def test_split4DVol(self):
        s = 'subj0_bold_session0.nii.gz'
        bfn = pyhrf.get_data_file_name(s)
        bold_files = pio.split4DVol(bfn, output_dir=self.tmp_dir)
        i, meta = pio.read_volume(bold_files[0])

        for bf in bold_files:
            os.remove(bf)
Example #9
    def from_simulation_dict(self, simulation, mask=None):
        from pyhrf.tools._io import read_volume

        bold = simulation['bold']

        if isinstance(bold, xndarray):
            bold = bold.reorient(['time', 'voxel'])
            nvox = bold.data.shape[1]
            ss = [range(bold.data.shape[0])]  # one session
            print 'BOLD SHAPE=', bold.data.shape
        else:
            nvox = bold.shape[1]
            ss = [range(bold.shape[0])]  # one session

        onsets = simulation['paradigm'].stimOnsets
        # onsets = dict(zip(ons.keys(),ons.values()]))
        durations = simulation['paradigm'].stimDurations
        #durations = dict(zip(dur.keys(),[o for o in dur.values()]))

        # print ''
        # print 'onsets:'
        # print onsets
        # print ''
        tr = simulation['tr']

        s = simulation
        labelsVol = s.get('labels_vol', np.ones((
            1,
            nvox,
        )))

        if mask is None:
            if isinstance(labelsVol, xndarray):
                default_mshape = labelsVol.data.shape[1:]
            else:
                default_mshape = labelsVol.shape[1:]
                # print 'default_mask_shape:', default_mshape
                roiMask = simulation.get(
                    'mask', np.ones(default_mshape, dtype=np.int32))
        else:
            roiMask = read_volume(mask)[0]

        # print 'roiMask:', roiMask.shape
        if len(roiMask.shape) == 3:
            data_type = 'volume'
        else:
            data_type = 'surface'  # simulation ??

        return FmriData(onsets,
                        bold,
                        tr,
                        ss,
                        roiMask,
                        stimDurations=durations,
                        simulation=[simulation],
                        data_type=data_type)
Example #10
    def test_split_vol_cc_3D(self):

        fn = 'subj0_parcellation.nii.gz'
        mask_file = pyhrf.get_data_file_name(fn)
        mask = read_volume(mask_file)[0].astype(int)
        mask[np.where(mask == 2)] = 1
        km = kerMask3D_6n
        for i, mcc in enumerate(split_mask_into_cc_iter(mask, kerMask=km)):
            pass
        assert i == 1  # 2 rois
Example #12
    def from_simulation_dict(self, simulation, mask=None):
        from pyhrf.tools._io import read_volume

        bold = simulation['bold']

        if isinstance(bold, xndarray):
            bold = bold.reorient(['time', 'voxel'])
            nvox = bold.data.shape[1]
            ss = [range(bold.data.shape[0])]  # one session
            print 'BOLD SHAPE=', bold.data.shape
        else:
            nvox = bold.shape[1]
            ss = [range(bold.shape[0])]  # one session

        onsets = simulation['paradigm'].stimOnsets
        # onsets = dict(zip(ons.keys(),ons.values()]))
        durations = simulation['paradigm'].stimDurations
        #durations = dict(zip(dur.keys(),[o for o in dur.values()]))

        # print ''
        # print 'onsets:'
        # print onsets
        # print ''
        tr = simulation['tr']

        s = simulation
        labelsVol = s.get('labels_vol', np.ones((1, nvox,)))

        if mask is None:
            if isinstance(labelsVol, xndarray):
                default_mshape = labelsVol.data.shape[1:]
            else:
                default_mshape = labelsVol.shape[1:]
                # print 'default_mask_shape:', default_mshape
                roiMask = simulation.get('mask', np.ones(default_mshape,
                                                         dtype=np.int32))
        else:
            roiMask = read_volume(mask)[0]

        # print 'roiMask:', roiMask.shape
        if len(roiMask.shape) == 3:
            data_type = 'volume'
        else:
            data_type = 'surface'  # simulation ??

        return FmriData(onsets, bold, tr, ss, roiMask, stimDurations=durations,
                        simulation=[simulation], data_type=data_type)
Example #13
    def test_split4DVol(self):
        s = 'subj0_bold_session0.nii.gz'
        bfn = pyhrf.get_data_file_name(s)
        bold_files = pio.split4DVol(bfn, output_dir=self.tmp_dir)
        #print bold_files
        i,meta = pio.read_volume(bold_files[0])

        if 0:
            from pprint import pprint
            affine, header = meta
            print ''
            print 'one vol shape:'
            print i.shape
            print 'header:'
            pprint(dict(header))

        for bf in bold_files:
            os.remove(bf)
Example #14
        def handle_mask(self, mask_file, bold_files, tr, onsets, durations, output_dir, mesh_file=None):

            if mesh_file is None:  # Volumic
                if self.force_input_parcellation:
                    if not op.exists(mask_file):
                        raise IOError("Input parcellation is forced but " "mask file %s not found" % mask_file)
                    else:
                        # TODO: check if n-ary
                        return mask_file

                FMRIAnalyser.handle_mask(self, mask_file, bold_files, onsets, durations, mesh_file)

                mask, mask_obj = read_volume(mask_file)
                roi_ids = np.unique(mask)
                if len(roi_ids) <= 2:
                    glm_output_dir = op.join(output_dir, "GLM")
                    if not op.exists(glm_output_dir):
                        os.makedirs(glm_output_dir)
                    return glm_parcellation(bold_file, tr)
Example #15
    def test_process_history_extension(self):
        nii_fn = pyhrf.get_data_file_name(
            'real_data_vol_4_regions_mask.nii.gz')

        nii_fn_out = op.join(self.tmp_dir, 'proc_ext_test.nii')
        input_pname = 'dummy_proc_test'
        input_pparams = {'my_param': 5.5, 'input_file': '/home/blh'}

        pio.append_process_info(nii_fn, input_pname, input_pparams,
                                img_output_fn=nii_fn_out)

        i2, (aff, header) = pio.read_volume(nii_fn_out)

        reloaded_pinfo = pio.get_process_info(nii_fn_out)
        self.assertNotEqual(reloaded_pinfo, None)
        self.assertEqual(reloaded_pinfo[0]['process_name'], input_pname)
        self.assertEqual(reloaded_pinfo[0]['process_inputs'], input_pparams)
        self.assertEqual(reloaded_pinfo[0]['process_version'], None)
        self.assertEqual(reloaded_pinfo[0]['process_id'], None)
Example #16
def compute_consensus_clusters_parallel(K_clus, consensus_matrices, clustcount_matrices, \
        totalcount_matrices, num_voxels, remote_mask_fn, clusters_consensi):
    '''
    Build the consensus matrix for K_clus clusters, cluster it and compute
    the per-cluster consensus.
    '''
    import nisl.io as ionisl
    import os
    import sys
    sys.path.append("/home/pc174679/pyhrf/pyhrf-tree_trunk/script/WIP/Scripts_IRMf_BB/Parcellations/")
    sys.path.append("/home/pc174679/pyhrf/pyhrf-tree_trunk/script/WIP/Scripts_IRMf_Adultes_Solv/")
    sys.path.append("/home/pc174679/pyhrf/pyhrf-tree_trunk/script/WIP/Scripts_IRMf_Adultes_Solv/Scripts_divers_utiles/Scripts_utiles/")
    sys.path.append('/home/pc174679/local/installations/consensus-cluster-0.6')
    
    from Random_parcellations import random_parcellations, subsample_data_on_time
    from Divers_parcellations_test import *
    from pyhrf.tools._io import read_volume
    
    print '  * Consensus with ', K_clus, ' clusters' 
    c_mat = np.array(clustcount_matrices[int(K_clus)])
    t_mat = np.array(totalcount_matrices[int(K_clus)])
    consensus_mat = gen_consensus_matrix(num_voxels, 0, c_mat, t_mat)
    
    parc_name = 'Subsampled_data_with_' + str(K_clus) + 'clusters' 
    output_sub = os.sep.join((output_path, parc_name)) 
    
    nifti_masker = ionisl.NiftiMasker(remote_mask_fn)
    mask_shape=read_volume(remote_mask_fn)[0].shape
    Nm = nifti_masker.fit(remote_mask_fn)
    #Nm = nifti_masker.fit(np.ones((mask_shape)))
 
    #clusterize the consensus matrix
    if clusterize_cons_mat:
        labels_cons_mat = hcluster_consensus(consensus_mat, num_voxels, K_clus, linkage='average')
        labels_cons_mat_inv = Nm.inverse_transform(labels_cons_mat).get_data()
      
    clusters_consensi[int(K_clus)] = np.zeros((K_clus))
    
    #compute cluster consensus, for each clustering
    clusters_consensi[int(K_clus)] = compute_cc_mat(K_clus, consensus_mat, labels_cons_mat, num_voxels)
            
            
    return clusters_consensi.astype('int32'), consensus_mat.astype('int32'), labels_cons_mat.astype('int16'), labels_cons_mat_inv.astype('int16')
Example #17
    def test_process_history_extension(self):
        nii_fn = pyhrf.get_data_file_name(
            'real_data_vol_4_regions_mask.nii.gz')

        nii_fn_out = op.join(self.tmp_dir, 'proc_ext_test.nii')
        input_pname = 'dummy_proc_test'
        input_pparams = {'my_param': 5.5, 'input_file': '/home/blh'}

        pio.append_process_info(nii_fn,
                                input_pname,
                                input_pparams,
                                img_output_fn=nii_fn_out)

        i2, (aff, header) = pio.read_volume(nii_fn_out)

        reloaded_pinfo = pio.get_process_info(nii_fn_out)
        self.assertNotEqual(reloaded_pinfo, None)
        self.assertEqual(reloaded_pinfo[0]['process_name'], input_pname)
        self.assertEqual(reloaded_pinfo[0]['process_inputs'], input_pparams)
        self.assertEqual(reloaded_pinfo[0]['process_version'], None)
        self.assertEqual(reloaded_pinfo[0]['process_id'], None)
Example #18
        def handle_mask(self, mask_file, bold_files, tr,
                        onsets, durations, output_dir, mesh_file=None):

            if mesh_file is None:  # Volumic
                if self.force_input_parcellation:
                    if not op.exists(mask_file):
                        raise IOError("Input parcellation is forced but "
                                      "mask file %s not found" % mask_file)
                    else:
                        # TODO: check if n-ary
                        return mask_file

                FMRIAnalyser.handle_mask(self, mask_file, bold_files, onsets,
                                         durations, mesh_file)

                mask, mask_obj = read_volume(mask_file)
                roi_ids = np.unique(mask)
                if len(roi_ids) <= 2:
                    glm_output_dir = op.join(output_dir, 'GLM')
                    if not op.exists(glm_output_dir):
                        os.makedirs(glm_output_dir)
                    return glm_parcellation(bold_file, tr)
Example #19
def make_parcellation_cubed_blobs_from_file(parcellation_file,
                                            output_path,
                                            roi_ids=None,
                                            bg_parcel=0,
                                            skip_existing=False):

    p, mp = read_volume(parcellation_file)
    p = p.astype(np.int32)
    if bg_parcel == 0 and p.min() == -1:
        p += 1  # set background to 0

    if roi_ids is None:
        roi_ids = np.unique(p)

    logger.info('%d rois to extract', (len(roi_ids) - 1))

    tmp_dir = pyhrf.get_tmp_path('blob_parcellation')
    tmp_parcel_mask_file = op.join(tmp_dir, 'parcel_for_blob.nii')

    out_files = []
    for roi_id in roi_ids:
        if roi_id != bg_parcel:  # discard background
            output_blob_file = op.join(output_path,
                                       'parcel_%d_cubed_blob.arg' % roi_id)
            out_files.append(output_blob_file)
            if skip_existing and os.path.exists(output_blob_file):
                continue
            parcel_mask = (p == roi_id).astype(np.int32)
            write_volume(parcel_mask, tmp_parcel_mask_file, mp)
            logger.info('Extract ROI %d -> %s', roi_id, output_blob_file)
            cmd = 'AimsGraphConvert -i %s -o %s --bucket' \
                % (tmp_parcel_mask_file, output_blob_file)
            logger.info('Cmd: %s', cmd)
            os.system(cmd)
    if op.exists(tmp_parcel_mask_file):
        os.remove(tmp_parcel_mask_file)

    return out_files
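A usage sketch, assuming the BrainVISA AimsGraphConvert command is available on the PATH (the paths below are placeholders):

# Hypothetical paths; one .arg blob graph is written per non-background parcel.
blob_files = make_parcellation_cubed_blobs_from_file('parcellation.nii',
                                                     './blobs',
                                                     skip_existing=True)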
Example #20
def make_parcellation_cubed_blobs_from_file(parcellation_file, output_path,
                                            roi_ids=None, bg_parcel=0,
                                            skip_existing=False):


    p,mp = read_volume(parcellation_file)
    p = p.astype(np.int32)
    if bg_parcel==0 and p.min() == -1:
        p += 1 #set background to 0

    if roi_ids is None:
        roi_ids = np.unique(p)

    pyhrf.verbose(1,'%d rois to extract' %(len(roi_ids)-1))

    tmp_dir = pyhrf.get_tmp_path('blob_parcellation')
    tmp_parcel_mask_file = op.join(tmp_dir, 'parcel_for_blob.nii')

    out_files = []
    for roi_id in roi_ids:
        if roi_id != bg_parcel: #discard background
            output_blob_file = op.join(output_path, 'parcel_%d_cubed_blob.arg'\
                                           %roi_id)
            out_files.append(output_blob_file)
            if skip_existing and os.path.exists(output_blob_file):
                continue
            parcel_mask = (p==roi_id).astype(np.int32)
            write_volume(parcel_mask, tmp_parcel_mask_file, mp)
            pyhrf.verbose(3,'Extract ROI %d -> %s' %(roi_id,output_blob_file))
            cmd = 'AimsGraphConvert -i %s -o %s --bucket' \
                %(tmp_parcel_mask_file, output_blob_file)
            pyhrf.verbose(3,'Cmd: %s' %(cmd))
            os.system(cmd)
    if op.exists(tmp_parcel_mask_file):
        os.remove(tmp_parcel_mask_file)

    return out_files
Example #21
def project_fmri_from_kernels(
    input_mesh,
    kernels_file,
    fmri_data_file,
    output_tex,
    bin_threshold=None,
):

    logger.info('Project data onto mesh using kernels ...')

    if 0:
        print 'Projecting ...'
        print 'func data:', fmri_data_file
        print 'Mesh file:', input_mesh
        print 'Save as:', output_tex

    logger.info('Call AimsFunctionProjection -op 1 ...')

    data_files = []
    output_texs = []
    p_ids = None
    if bin_threshold is not None:
        d, h = read_volume(fmri_data_file)
        if np.allclose(d.astype(int), d):
            tmp_dir = pyhrf.get_tmp_path()
            p_ids = np.unique(d)
            logger.info('bin threshold: %f', bin_threshold)
            logger.info('pids(n=%d): %d...%d', len(p_ids), min(p_ids),
                        max(p_ids))
            for i, p_id in enumerate(p_ids):
                if p_id != 0:
                    new_p = np.zeros_like(d)
                    new_p[np.where(d == p_id)] = i + 1  # 0 is background
                    ifn = op.join(tmp_dir, 'pmask_%d.nii' % p_id)
                    write_volume(new_p, ifn, h)
                    data_files.append(ifn)
                    ofn = op.join(tmp_dir, 'ptex_%d.gii' % p_id)
                    output_texs.append(ofn)
        else:
            data_files.append(fmri_data_file)
            output_texs.append(output_tex)
    else:
        data_files.append(fmri_data_file)
        output_texs.append(output_tex)

    logger.info('input data files: %s', str(data_files))
    logger.info('output data files: %s', str(output_texs))

    for data_file, o_tex in zip(data_files, output_texs):
        projection = [
            'AimsFunctionProjection', '-op', '1', '-d', kernels_file, '-d1',
            data_file, '-m', input_mesh, '-o', o_tex
        ]

        cmd = ' '.join(map(str, projection))
        logger.info('cmd: %s', cmd)
        os.system(cmd)

    if bin_threshold is not None:
        logger.info('Binary threshold of texture at %f', bin_threshold)
        o_tex = output_texs[0]
        data, data_gii = read_texture(o_tex)
        data = (data > bin_threshold).astype(np.int32)
        print 'data:', data.dtype
        if p_ids is not None:
            for pid, o_tex in zip(p_ids[1:], output_texs[1:]):
                pdata, pdata_gii = read_texture(o_tex)
                data += (pdata > bin_threshold).astype(np.int32) * pid

        #assert (np.unique(data) == p_ids).all()
        write_texture(data, output_tex, intent='NIFTI_INTENT_LABEL')
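A call sketch under the assumption that AimsFunctionProjection is installed and the projection kernels were computed beforehand (file names are illustrative only):

# Hypothetical inputs; projects a functional volume onto a mesh texture.
project_fmri_from_kernels('cortex_mesh.gii', 'proj_kernels.ima',
                          'func_data.nii', 'func_data.gii',
                          bin_threshold=None)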
Example #22
def load_vol_bold_and_mask(bold_files, mask_file):

    from pyhrf.tools._io import read_volume, discard_bad_data

    # Handle mask
    if not op.exists(mask_file):
        logger.warning(
            'Mask file %s does not exist. Mask is '
            'computed from BOLD ...', mask_file)

        bold_file = 'subj0_bold_session0.nii.gz'
        # HACK
        if bold_files[0] == pyhrf.get_data_file_name(bold_file):
            max_frac = .99999  # be sure to keep non zero voxels
            connect_component = False  # default BOLD vol has 2 ROIs
        else:
            max_frac = .9
            connect_component = True
        compute_mask_files(bold_files[0],
                           mask_file,
                           False,
                           .4,
                           max_frac,
                           cc=connect_component)
        mask_loaded_from_file = False
    else:
        mask_loaded_from_file = True
        logger.info('Assuming orientation for mask file: ' +
                    string.join(MRI3Daxes, ','))

    logger.info('Read mask from: %s', mask_file)
    mask, mask_meta_obj = read_volume(mask_file)

    if not np.allclose(np.round(mask), mask):
        raise Exception("Mask is not n-ary (%s)" % mask_file)
    mask = np.round(mask).astype(np.int32)

    logger.info(
        'Mask has shape %s\nMask min value: %d\n'
        'Mask max value: %d\nMask has %d parcels', str(mask.shape), mask.min(),
        mask.max(), len(np.unique(mask)))

    if mask.min() == -1:
        mask += 1

    # Load BOLD:
    last_scan = 0
    session_scans = []
    bolds = []
    logger.info('Assuming orientation for BOLD files: ' +
                string.join(MRI4Daxes, ','))

    if type(bold_files[0]) is list:
        bold_files = bold_files[0]

    for bold_file in bold_files:
        if not op.exists(bold_file):
            raise Exception('File not found: ' + bold_file)

        bold, _ = read_volume(bold_file)
        bolds.append(bold)
        session_scans.append(
            np.arange(last_scan, last_scan + bold.shape[TIME_AXIS], dtype=int))
        last_scan += bold.shape[TIME_AXIS]

    bold = np.concatenate(tuple(bolds), axis=TIME_AXIS)

    logger.info('BOLD has shape %s', str(bold.shape))
    discard_bad_data(bold, mask)

    return mask, mask_meta_obj, mask_loaded_from_file, bold, session_scans
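A minimal sketch of calling the loader above; the file names are assumptions, and the mask is computed from the BOLD data whenever mask_file does not exist:

# Hypothetical session files; returns the n-ary mask, its metadata, a flag
# telling whether the mask was read from disk, the concatenated 4D BOLD
# array and the per-session scan indices.
mask, mask_meta, from_file, bold, scans = load_vol_bold_and_mask(
    ['bold_session0.nii.gz'], 'mask.nii.gz')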
Example #23
def load_vol_bold_and_mask(bold_files, mask_file):

    from pyhrf.tools._io import read_volume, discard_bad_data

    # Handle mask
    if not op.exists(mask_file):
        logger.warning('Mask file %s does not exist. Mask is '
                       'computed from BOLD ...', mask_file)

        bold_file = 'subj0_bold_session0.nii.gz'
        # HACK
        if bold_files[0] == pyhrf.get_data_file_name(bold_file):
            max_frac = .99999  # be sure to keep non zero voxels
            connect_component = False  # default BOLD vol has 2 ROIs
        else:
            max_frac = .9
            connect_component = True
        compute_mask_files(bold_files[0], mask_file, False, .4,
                           max_frac, cc=connect_component)
        mask_loaded_from_file = False
    else:
        mask_loaded_from_file = True
        logger.info('Assuming orientation for mask file: ' +
                    string.join(MRI3Daxes, ','))

    logger.info('Read mask from: %s', mask_file)
    mask, mask_meta_obj = read_volume(mask_file)

    if not np.allclose(np.round(mask), mask):
        raise Exception("Mask is not n-ary (%s)" % mask_file)
    mask = np.round(mask).astype(np.int32)

    logger.info('Mask has shape %s\nMask min value: %d\n'
                'Mask max value: %d\nMask has %d parcels',
                str(mask.shape), mask.min(), mask.max(), len(np.unique(mask)))

    if mask.min() == -1:
        mask += 1

    # Load BOLD:
    last_scan = 0
    session_scans = []
    bolds = []
    logger.info('Assuming orientation for BOLD files: ' +
                string.join(MRI4Daxes, ','))

    if type(bold_files[0]) is list:
        bold_files = bold_files[0]

    for bold_file in bold_files:
        if not op.exists(bold_file):
            raise Exception('File not found: ' + bold_file)

        bold, _ = read_volume(bold_file)
        bolds.append(bold)
        session_scans.append(np.arange(last_scan,
                                       last_scan + bold.shape[TIME_AXIS],
                                       dtype=int))
        last_scan += bold.shape[TIME_AXIS]

    bold = np.concatenate(tuple(bolds), axis=TIME_AXIS)

    logger.info('BOLD has shape %s', str(bold.shape))
    discard_bad_data(bold, mask)

    return mask, mask_meta_obj, mask_loaded_from_file, bold, session_scans
Example #24
def parcellation_for_jde(fmri_data,
                         avg_parcel_size=250,
                         output_dir=None,
                         method='gkm',
                         glm_drift='Cosine',
                         glm_hfcut=128):
    """
    method: gkm, ward, ward_and_gkm
    """

    if output_dir is None:
        output_dir = tempfile.mkdtemp(prefix='pyhrf_JDE_parcellation_GLM',
                                      dir=pyhrf.cfg['global']['tmp_path'])
    glm_output_dir = op.join(output_dir, 'GLM_for_parcellation')
    if not op.exists(glm_output_dir):
        os.makedirs(glm_output_dir)

    logger.info('GLM for parcellation')

    g, dm, cons = glm_nipy(fmri_data, drift_model=glm_drift, hfcut=glm_hfcut)

    pval_files = []
    if cons is not None:
        func_data = [('con_pval_%s' % cname, con.pvalue())
                     for cname, con in cons.iteritems()]
    else:
        reg_cst_drift = re.compile(".*constant.*|.*drift.*")
        func_data = [('beta_%s' % reg_name, g.beta[ir])
                     for ir, reg_name in enumerate(dm.names)
                     if not reg_cst_drift.match(reg_name)]

    for name, data in func_data:
        val_vol = expand_array_in_mask(data, fmri_data.roiMask > 0)
        val_fn = op.join(glm_output_dir, '%s.nii' % name)
        write_volume(val_vol, val_fn, fmri_data.meta_obj)
        pval_files.append(val_fn)

    mask_file = op.join(glm_output_dir, 'mask.nii')
    write_volume(fmri_data.roiMask > 0, mask_file, fmri_data.meta_obj)

    nvox = fmri_data.get_nb_vox_in_mask()
    nparcels = round_nb_parcels(nvox * 1. / avg_parcel_size)

    logger.info('Parcellation from GLM outputs, method: %s, nb parcels: %d',
                method, nparcels)

    if fmri_data.data_type == 'volume':
        parcellation_file = op.join(
            output_dir, 'parcellation_%s_np%d.nii' % (method, nparcels))

        make_parcellation_from_files(pval_files, mask_file, parcellation_file,
                                     nparcels, method)
        parcellation, _ = read_volume(parcellation_file)
    else:
        mesh_file = fmri_data.data_files[-1]
        parcellation_file = op.join(
            output_dir, 'parcellation_%s_np%d.gii' % (method, nparcels))
        make_parcellation_surf_from_files(pval_files,
                                          mesh_file,
                                          parcellation_file,
                                          nparcels,
                                          method,
                                          verbose=1)
        parcellation, _ = read_texture(parcellation_file)

    logger.info(parcellation_report(parcellation))

    return parcellation, parcellation_file
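A sketch of driving the pipeline above; constructing the FmriData instance is assumed to happen elsewhere, and the method string picks one of the clustering backends listed in the docstring:

# Hypothetical: fmri_data is an existing pyhrf FmriData object.
parcellation, parcellation_file = parcellation_for_jde(
    fmri_data, avg_parcel_size=200, method='ward_and_gkm')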
Example #25
def project_fmri_from_kernels(input_mesh, kernels_file, fmri_data_file, output_tex, bin_threshold=None):

    logger.info("Project data onto mesh using kernels ...")

    if 0:
        print "Projecting ..."
        print "func data:", fmri_data_file
        print "Mesh file:", input_mesh
        print "Save as:", output_tex

    logger.info("Call AimsFunctionProjection -op 1 ...")

    data_files = []
    output_texs = []
    p_ids = None
    if bin_threshold is not None:
        d, h = read_volume(fmri_data_file)
        if np.allclose(d.astype(int), d):
            tmp_dir = pyhrf.get_tmp_path()
            p_ids = np.unique(d)
            logger.info("bin threshold: %f", bin_threshold)
            logger.info("pids(n=%d): %d...%d", len(p_ids), min(p_ids), max(p_ids))
            for i, p_id in enumerate(p_ids):
                if p_id != 0:
                    new_p = np.zeros_like(d)
                    new_p[np.where(d == p_id)] = i + 1  # 0 is background
                    ifn = op.join(tmp_dir, "pmask_%d.nii" % p_id)
                    write_volume(new_p, ifn, h)
                    data_files.append(ifn)
                    ofn = op.join(tmp_dir, "ptex_%d.gii" % p_id)
                    output_texs.append(ofn)
        else:
            data_files.append(fmri_data_file)
            output_texs.append(output_tex)
    else:
        data_files.append(fmri_data_file)
        output_texs.append(output_tex)

    logger.info("input data files: %s", str(data_files))
    logger.info("output data files: %s", str(output_texs))

    for data_file, o_tex in zip(data_files, output_texs):
        projection = [
            "AimsFunctionProjection",
            "-op",
            "1",
            "-d",
            kernels_file,
            "-d1",
            data_file,
            "-m",
            input_mesh,
            "-o",
            o_tex,
        ]

        cmd = " ".join(map(str, projection))
        logger.info("cmd: %s", cmd)
        os.system(cmd)

    if bin_threshold is not None:
        logger.info("Binary threshold of texture at %f", bin_threshold)
        o_tex = output_texs[0]
        data, data_gii = read_texture(o_tex)
        data = (data > bin_threshold).astype(np.int32)
        print "data:", data.dtype
        if p_ids is not None:
            for pid, o_tex in zip(p_ids[1:], output_texs[1:]):
                pdata, pdata_gii = read_texture(o_tex)
                data += (pdata > bin_threshold).astype(np.int32) * pid

        # assert (np.unique(data) == p_ids).all()
        write_texture(data, output_tex, intent="NIFTI_INTENT_LABEL")
Example #26
    def setUp(self):
        pf = 'subj0_parcellation.nii.gz'
        fnm = pyhrf.get_data_file_name(pf)
        m, mh = read_volume(fnm)
        self.graph = parcels_to_graphs(m.astype(int), kerMask3D_6n,
                                       toDiscard=[0])[1]
Example #27
def Main_vbjde_Extension_constrained(graph, Y, Onsets, Thrf, K, TR, beta,
                                     dt, scale=1, estimateSigmaH=True,
                                     sigmaH=0.05, NitMax=-1,
                                     NitMin=1, estimateBeta=True,
                                     PLOT=False, contrasts=[],
                                     computeContrast=False,
                                     gamma_h=0, estimateHRF=True,
                                     TrueHrfFlag=False,
                                     HrfFilename='hrf.nii',
                                     estimateLabels=True,
                                     LabelsFilename='labels.nii',
                                     MFapprox=False, InitVar=0.5,
                                     InitMean=2.0, MiniVEMFlag=False,
                                     NbItMiniVem=5):
    # VBJDE Function for BOLD with constraints

    logger.info("Fast EM with C extension started ...")
    np.random.seed(6537546)

    ##########################################################################
    # INITIALIZATIONS
    # Initialize parameters
    tau1 = 0.0
    tau2 = 0.0
    S = 100
    Init_sigmaH = sigmaH
    Nb2Norm = 1
    NormFlag = False
    if NitMax < 0:
        NitMax = 100
    gamma = 7.5
    #gamma_h = 1000
    gradientStep = 0.003
    MaxItGrad = 200
    Thresh = 1e-5
    Thresh_FreeEnergy = 1e-5
    estimateLabels = True  # WARNING!! They should be estimated

    # Initialize sizes vectors
    D = int(np.ceil(Thrf / dt)) + 1  # D = int(np.ceil(Thrf/dt))
    M = len(Onsets)
    N = Y.shape[0]
    J = Y.shape[1]
    l = int(np.sqrt(J))
    condition_names = []

    # Neighbours
    maxNeighbours = max([len(nl) for nl in graph])
    neighboursIndexes = np.zeros((J, maxNeighbours), dtype=np.int32)
    neighboursIndexes -= 1
    for i in xrange(J):
        neighboursIndexes[i, :len(graph[i])] = graph[i]
    # Conditions
    X = OrderedDict([])
    for condition, Ons in Onsets.iteritems():
        X[condition] = vt.compute_mat_X_2(N, TR, D, dt, Ons)
        condition_names += [condition]
    XX = np.zeros((M, N, D), dtype=np.int32)
    nc = 0
    for condition, Ons in Onsets.iteritems():
        XX[nc, :, :] = X[condition]
        nc += 1
    # Covariance matrix
    order = 2
    D2 = vt.buildFiniteDiffMatrix(order, D)
    R = np.dot(D2, D2) / pow(dt, 2 * order)
    invR = np.linalg.inv(R)
    Det_invR = np.linalg.det(invR)

    Gamma = np.identity(N)
    Det_Gamma = np.linalg.det(Gamma)

    p_Wtilde = np.zeros((M, K), dtype=np.float64)
    p_Wtilde1 = np.zeros((M, K), dtype=np.float64)
    p_Wtilde[:, 1] = 1

    Crit_H = 1
    Crit_Z = 1
    Crit_A = 1
    Crit_AH = 1
    AH = np.zeros((J, M, D), dtype=np.float64)
    AH1 = np.zeros((J, M, D), dtype=np.float64)
    Crit_FreeEnergy = 1

    cA = []
    cH = []
    cZ = []
    cAH = []
    FreeEnergy_Iter = []
    cTime = []
    cFE = []

    SUM_q_Z = [[] for m in xrange(M)]
    mu1 = [[] for m in xrange(M)]
    h_norm = []
    h_norm2 = []

    CONTRAST = np.zeros((J, len(contrasts)), dtype=np.float64)
    CONTRASTVAR = np.zeros((J, len(contrasts)), dtype=np.float64)
    Q_barnCond = np.zeros((M, M, D, D), dtype=np.float64)
    XGamma = np.zeros((M, D, N), dtype=np.float64)
    m1 = 0
    for k1 in X:  # Loop over the M conditions
        m2 = 0
        for k2 in X:
            Q_barnCond[m1, m2, :, :] = np.dot(
                np.dot(X[k1].transpose(), Gamma), X[k2])
            m2 += 1
        XGamma[m1, :, :] = np.dot(X[k1].transpose(), Gamma)
        m1 += 1

    if MiniVEMFlag:
        logger.info("MiniVEM to choose the best initialisation...")
        """InitVar, InitMean, gamma_h = MiniVEM_CompMod(Thrf,TR,dt,beta,Y,K,
                                                     gamma,gradientStep,
                                                     MaxItGrad,D,M,N,J,S,
                                                     maxNeighbours,
                                                     neighboursIndexes,
                                                     XX,X,R,Det_invR,Gamma,
                                                     Det_Gamma,
                                                     scale,Q_barnCond,XGamma,
                                                     NbItMiniVem,
                                                     sigmaH,estimateHRF)"""

        InitVar, InitMean, gamma_h = vt.MiniVEM_CompMod(Thrf, TR, dt, beta, Y, K, gamma, gradientStep, MaxItGrad, D, M, N, J, S, maxNeighbours,
                                                        neighboursIndexes, XX, X, R, Det_invR, Gamma, Det_Gamma, p_Wtilde, scale, Q_barnCond, XGamma, tau1, tau2, NbItMiniVem, sigmaH, estimateHRF)

    sigmaH = Init_sigmaH
    sigma_epsilone = np.ones(J)
    logger.info(
        "Labels are initialized by setting active probabilities to ones ...")
    q_Z = np.zeros((M, K, J), dtype=np.float64)
    q_Z[:, 1, :] = 1
    q_Z1 = np.zeros((M, K, J), dtype=np.float64)
    Z_tilde = q_Z.copy()

    # TT,m_h = getCanoHRF(Thrf-dt,dt) #TODO: check
    TT, m_h = getCanoHRF(Thrf, dt)  # TODO: check
    m_h = m_h[:D]
    m_H = np.array(m_h).astype(np.float64)
    m_H1 = np.array(m_h)
    sigmaH1 = sigmaH
    if estimateHRF:
        Sigma_H = np.ones((D, D), dtype=np.float64)
    else:
        Sigma_H = np.zeros((D, D), dtype=np.float64)

    Beta = beta * np.ones((M), dtype=np.float64)
    P = vt.PolyMat(N, 4, TR)
    L = vt.polyFit(Y, TR, 4, P)
    PL = np.dot(P, L)
    y_tilde = Y - PL
    Ndrift = L.shape[0]

    sigma_M = np.ones((M, K), dtype=np.float64)
    sigma_M[:, 0] = 0.5
    sigma_M[:, 1] = 0.6
    mu_M = np.zeros((M, K), dtype=np.float64)
    for k in xrange(1, K):
        mu_M[:, k] = InitMean
    Sigma_A = np.zeros((M, M, J), np.float64)
    for j in xrange(0, J):
        Sigma_A[:, :, j] = 0.01 * np.identity(M)
    m_A = np.zeros((J, M), dtype=np.float64)
    m_A1 = np.zeros((J, M), dtype=np.float64)
    for j in xrange(0, J):
        for m in xrange(0, M):
            for k in xrange(0, K):
                m_A[j, m] += np.random.normal(mu_M[m, k],
                                              np.sqrt(sigma_M[m, k])) * q_Z[m, k, j]
    m_A1 = m_A

    t1 = time.time()

    ##########################################################################
    # VBJDE num. iter. minimum

    ni = 0

    while ((ni < NitMin) or (((Crit_FreeEnergy > Thresh_FreeEnergy) or (Crit_AH > Thresh)) and (ni < NitMax))):

        logger.info("------------------------------ Iteration n° " +
                    str(ni + 1) + " ------------------------------")

        #####################
        # EXPECTATION
        #####################

        # A
        logger.info("E A step ...")
        UtilsC.expectation_A(q_Z, mu_M, sigma_M, PL, sigma_epsilone, Gamma,
                             Sigma_H, Y, y_tilde, m_A, m_H, Sigma_A, XX.astype(np.int32), J, D, M, N, K)
        val = np.reshape(m_A, (M * J))
        val[np.where((val <= 1e-50) & (val > 0.0))] = 0.0
        val[np.where((val >= -1e-50) & (val < 0.0))] = 0.0

        # crit. A
        DIFF = np.reshape(m_A - m_A1, (M * J))
        # To avoid numerical problems
        DIFF[np.where((DIFF < 1e-50) & (DIFF > 0.0))] = 0.0
        # To avoid numerical problems
        DIFF[np.where((DIFF > -1e-50) & (DIFF < 0.0))] = 0.0
        Crit_A = (
            np.linalg.norm(DIFF) / np.linalg.norm(np.reshape(m_A1, (M * J)))) ** 2
        cA += [Crit_A]
        m_A1[:, :] = m_A[:, :]

        # HRF h
        if estimateHRF:
            ################################
            #  HRF ESTIMATION
            ################################
            UtilsC.expectation_H(XGamma, Q_barnCond, sigma_epsilone, Gamma, R, Sigma_H, Y,
                                 y_tilde, m_A, m_H, Sigma_A, XX.astype(np.int32), J, D, M, N, scale, sigmaH)

            import cvxpy as cvx
            m, n = Sigma_H.shape
            Sigma_H_inv = np.linalg.inv(Sigma_H)
            zeros_H = np.zeros_like(m_H[:, np.newaxis])

            # Construct the problem. PRIMAL
            h = cvx.Variable(n)
            expression = cvx.quad_form(h - m_H[:, np.newaxis], Sigma_H_inv)
            objective = cvx.Minimize(expression)
            #constraints = [h[0] == 0, h[-1]==0, h >= zeros_H, cvx.square(cvx.norm(h,2))<=1]
            constraints = [
                h[0] == 0, h[-1] == 0, cvx.square(cvx.norm(h, 2)) <= 1]
            prob = cvx.Problem(objective, constraints)
            result = prob.solve(verbose=0, solver=cvx.CVXOPT)

            # Now we update the mean of h
            m_H_old = m_H
            Sigma_H_old = Sigma_H
            m_H = np.squeeze(np.array((h.value)))
            Sigma_H = np.zeros_like(Sigma_H)

            h_norm += [np.linalg.norm(m_H)]
            # print 'h_norm = ', h_norm

            # Plotting HRF
            if PLOT and ni >= 0:
                import matplotlib.pyplot as plt
                plt.figure(M + 1)
                plt.plot(m_H)
                plt.hold(True)
        else:
            if TrueHrfFlag:
                TrueVal, head = read_volume(HrfFilename)
                TrueVal = TrueVal[:, 0, 0, 0]
                print TrueVal
                print TrueVal.shape
                m_H = TrueVal

        # crit. h
        Crit_H = (np.linalg.norm(m_H - m_H1) / np.linalg.norm(m_H1)) ** 2
        cH += [Crit_H]
        m_H1[:] = m_H[:]

        # crit. AH
        for d in xrange(0, D):
            AH[:, :, d] = m_A[:, :] * m_H[d]
        DIFF = np.reshape(AH - AH1, (M * J * D))
        # To avoid numerical problems
        DIFF[np.where((DIFF < 1e-50) & (DIFF > 0.0))] = 0.0
        # To avoid numerical problems
        DIFF[np.where((DIFF > -1e-50) & (DIFF < 0.0))] = 0.0
        if np.linalg.norm(np.reshape(AH1, (M * J * D))) == 0:
            Crit_AH = 1000000000.
        else:
            Crit_AH = (
                np.linalg.norm(DIFF) / np.linalg.norm(np.reshape(AH1, (M * J * D)))) ** 2
        cAH += [Crit_AH]
        AH1[:, :, :] = AH[:, :, :]

        # Z labels
        if estimateLabels:
            logger.info("E Z step ...")
            # WARNING!!! ParsiMod gives better results, but we need the other
            # one.
            if MFapprox:
                UtilsC.expectation_Z(Sigma_A, m_A, sigma_M, Beta, Z_tilde, mu_M, q_Z, neighboursIndexes.astype(
                    np.int32), M, J, K, maxNeighbours)
            if not MFapprox:
                UtilsC.expectation_Z_ParsiMod_RVM_and_CompMod(
                    Sigma_A, m_A, sigma_M, Beta, mu_M, q_Z, neighboursIndexes.astype(np.int32), M, J, K, maxNeighbours)
        else:
            logger.info("Using True Z ...")
            TrueZ = read_volume(LabelsFilename)
            for m in xrange(M):
                q_Z[m, 1, :] = np.reshape(TrueZ[0][:, :, :, m], J)
                q_Z[m, 0, :] = 1 - q_Z[m, 1, :]

        # crit. Z
        val = np.reshape(q_Z, (M * K * J))
        val[np.where((val <= 1e-50) & (val > 0.0))] = 0.0

        DIFF = np.reshape(q_Z - q_Z1, (M * K * J))
        # To avoid numerical problems
        DIFF[np.where((DIFF < 1e-50) & (DIFF > 0.0))] = 0.0
        # To avoid numerical problems
        DIFF[np.where((DIFF > -1e-50) & (DIFF < 0.0))] = 0.0
        if np.linalg.norm(np.reshape(q_Z1, (M * K * J))) == 0:
            Crit_Z = 1000000000.
        else:
            Crit_Z = (
                np.linalg.norm(DIFF) / np.linalg.norm(np.reshape(q_Z1, (M * K * J)))) ** 2
        cZ += [Crit_Z]
        q_Z1 = q_Z

        #####################
        # MAXIMIZATION
        #####################

        # HRF: Sigma_h
        if estimateHRF:
            if estimateSigmaH:
                logger.info("M sigma_H step ...")
                if gamma_h > 0:
                    sigmaH = vt.maximization_sigmaH_prior(
                        D, Sigma_H_old, R, m_H_old, gamma_h)
                else:
                    sigmaH = vt.maximization_sigmaH(D, Sigma_H, R, m_H)
                logger.info('sigmaH = %s', str(sigmaH))

        # (mu,sigma)
        logger.info("M (mu,sigma) step ...")
        mu_M, sigma_M = vt.maximization_mu_sigma(
            mu_M, sigma_M, q_Z, m_A, K, M, Sigma_A)
        for m in xrange(M):
            SUM_q_Z[m] += [sum(q_Z[m, 1, :])]
            mu1[m] += [mu_M[m, 1]]

        # Drift L
        UtilsC.maximization_L(
            Y, m_A, m_H, L, P, XX.astype(np.int32), J, D, M, Ndrift, N)
        PL = np.dot(P, L)
        y_tilde = Y - PL

        # Beta
        if estimateBeta:
            logger.info("estimating beta")
            for m in xrange(0, M):
                if MFapprox:
                    Beta[m] = UtilsC.maximization_beta(beta, q_Z[m, :, :].astype(np.float64), Z_tilde[m, :, :].astype(
                        np.float64), J, K, neighboursIndexes.astype(np.int32), gamma, maxNeighbours, MaxItGrad, gradientStep)
                if not MFapprox:
                    #Beta[m] = UtilsC.maximization_beta(beta,q_Z[m,:,:].astype(np.float64),q_Z[m,:,:].astype(np.float64),J,K,neighboursIndexes.astype(int32),gamma,maxNeighbours,MaxItGrad,gradientStep)
                    Beta[m] = UtilsC.maximization_beta_CB(beta, q_Z[m, :, :].astype(
                        np.float64), J, K, neighboursIndexes.astype(np.int32), gamma, maxNeighbours, MaxItGrad, gradientStep)
            logger.info("End estimating beta")
            logger.info(Beta)

        # Sigma noise
        logger.info("M sigma noise step ...")
        UtilsC.maximization_sigma_noise(
            Gamma, PL, sigma_epsilone, Sigma_H, Y, m_A, m_H, Sigma_A, XX.astype(np.int32), J, D, M, N)

        #### Computing Free Energy ####
        if ni > 0:
            FreeEnergy1 = FreeEnergy

        """FreeEnergy = vt.Compute_FreeEnergy(y_tilde,m_A,Sigma_A,mu_M,sigma_M,
                                           m_H,Sigma_H,R,Det_invR,sigmaH,
                                           p_Wtilde,q_Z,neighboursIndexes,
                                           maxNeighbours,Beta,sigma_epsilone,
                                           XX,Gamma,Det_Gamma,XGamma,J,D,M,
                                           N,K,S,"CompMod")"""
        FreeEnergy = vt.Compute_FreeEnergy(y_tilde, m_A, Sigma_A, mu_M, sigma_M, m_H, Sigma_H, R, Det_invR, sigmaH, p_Wtilde, tau1,
                                           tau2, q_Z, neighboursIndexes, maxNeighbours, Beta, sigma_epsilone, XX, Gamma, Det_Gamma, XGamma, J, D, M, N, K, S, "CompMod")

        if ni > 0:
            Crit_FreeEnergy = (FreeEnergy1 - FreeEnergy) / FreeEnergy1
        FreeEnergy_Iter += [FreeEnergy]
        cFE += [Crit_FreeEnergy]

        # Update index
        ni += 1

        t02 = time.time()
        cTime += [t02 - t1]

    t2 = time.time()

    ##########################################################################
    # PLOTS and SNR computation

    FreeEnergyArray = np.zeros((ni), dtype=np.float64)
    for i in xrange(ni):
        FreeEnergyArray[i] = FreeEnergy_Iter[i]

    SUM_q_Z_array = np.zeros((M, ni), dtype=np.float64)
    mu1_array = np.zeros((M, ni), dtype=np.float64)
    h_norm_array = np.zeros((ni), dtype=np.float64)
    for m in xrange(M):
        for i in xrange(ni):
            SUM_q_Z_array[m, i] = SUM_q_Z[m][i]
            mu1_array[m, i] = mu1[m][i]
            h_norm_array[i] = h_norm[i]

    if PLOT and 0:
        import matplotlib.pyplot as plt
        import matplotlib
        font = {'size': 15}
        matplotlib.rc('font', **font)
        plt.savefig('./HRF_Iter_CompMod.png')
        plt.hold(False)
        plt.figure(2)
        plt.plot(cAH[1:-1], 'lightblue')
        plt.hold(True)
        plt.plot(cFE[1:-1], 'm')
        plt.hold(False)
        #plt.legend( ('CA','CH', 'CZ', 'CAH', 'CFE') )
        plt.legend(('CAH', 'CFE'))
        plt.grid(True)
        plt.savefig('./Crit_CompMod.png')
        plt.figure(3)
        plt.plot(FreeEnergyArray)
        plt.grid(True)
        plt.savefig('./FreeEnergy_CompMod.png')

        plt.figure(4)
        for m in xrange(M):
            plt.plot(SUM_q_Z_array[m])
            plt.hold(True)
        plt.hold(False)
        #plt.legend( ('m=0','m=1', 'm=2', 'm=3') )
        #plt.legend( ('m=0','m=1') )
        plt.savefig('./Sum_q_Z_Iter_CompMod.png')

        plt.figure(5)
        for m in xrange(M):
            plt.plot(mu1_array[m])
            plt.hold(True)
        plt.hold(False)
        plt.savefig('./mu1_Iter_CompMod.png')

        plt.figure(6)
        plt.plot(h_norm_array)
        plt.savefig('./HRF_Norm_CompMod.png')

        Data_save = xndarray(h_norm_array, ['Iteration'])
        Data_save.save('./HRF_Norm_Comp.nii')

    CompTime = t2 - t1
    cTimeMean = CompTime / ni

    sigma_M = np.sqrt(np.sqrt(sigma_M))
    logger.info("Nb iterations to reach criterion: %d", ni)
    logger.info("Computational time = %s min %s s", str(
        int(CompTime // 60)), str(int(CompTime % 60)))
    # print "Computational time = " + str(int( CompTime//60 ) ) + " min " + str(int(CompTime%60)) + " s"
    # print "sigma_H = " + str(sigmaH)
    logger.info('mu_M: %s', mu_M)
    logger.info('sigma_M: %s', sigma_M)
    logger.info('sigma_H = %s', str(sigmaH))
    logger.info('Beta = %s', str(Beta))

    StimulusInducedSignal = vt.computeFit(m_H, m_A, X, J, N)
    SNR = 20 * \
        np.log(
            np.linalg.norm(Y) / np.linalg.norm(Y - StimulusInducedSignal - PL))
    SNR /= np.log(10.)
    logger.info("SNR = %d", SNR)
    return ni, m_A, m_H, q_Z, sigma_epsilone, mu_M, sigma_M, Beta, L, PL, CONTRAST, CONTRASTVAR, cA[2:], cH[2:], cZ[2:], cAH[2:], cTime[2:], cTimeMean, Sigma_A, StimulusInducedSignal, FreeEnergyArray
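A heavily abridged invocation sketch; every input is assumed to be prepared elsewhere (graph from parcels_to_graphs, Y as an (n scans, n voxels) array, Onsets as a condition-to-onset-times dict), and most keyword arguments keep their defaults:

# Hypothetical minimal call; returns a long tuple of estimates.
results = Main_vbjde_Extension_constrained(graph, Y, Onsets,
                                           Thrf=25., K=2, TR=2.4,
                                           beta=1., dt=0.6)
ni, m_A, m_H = results[0], results[1], results[2]  # nb iters, NRLs, HRF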
Example #28
def load_vol_bold_and_mask(bold_files, mask_file):

    from pyhrf.tools._io import read_volume, discard_bad_data

    # Handle mask
    if not op.exists(mask_file):
        pyhrf.verbose(1,'Mask file %s does not exist. Mask is '\
                          'computed from BOLD ...' %mask_file)
        bf = 'subj0_bold_session0.nii.gz'
        # HACK
        if bold_files[0] == pyhrf.get_data_file_name(bf):
            max_frac = .99999 #be sure to keep non zero voxels
            cc = 0 # default BOLD vol has 2 ROIs
        else:
            max_frac = .9
            cc = 1
        compute_mask_files(bold_files[0], mask_file, False, .4,
                           max_frac, cc=cc)
        mask_loaded_from_file = False
    else:
        mask_loaded_from_file = True
        pyhrf.verbose(1,'Assuming orientation for mask file: ' + \
                          string.join(MRI3Daxes, ','))

    pyhrf.verbose(2,'Read mask from: %s' %mask_file)
    mask, mask_meta_obj = read_volume(mask_file)

    if not np.allclose(np.round(mask),mask):
        raise Exception("Mask is not n-ary (%s)" %mask_file)
    mask = mask.astype(np.int32)

    pyhrf.verbose(1,'Mask has shape %s' %str(mask.shape))
    pyhrf.verbose(1,'Mask min value: %d' %mask.min())
    pyhrf.verbose(1,'Mask max value: %d' %mask.max())
    pyhrf.verbose(1,'Mask has %d parcels' %len(np.unique(mask)))

    mshape = mask.shape
    if mask.min() == -1:
        mask += 1

    #Load BOLD:
    lastScan = 0
    sessionScans = []
    bolds = []
    pyhrf.verbose(1,'Assuming orientation for BOLD files: ' + \
                      string.join(MRI4Daxes, ','))

    #print 'type of bold_files[0]:', type(bold_files[0])
    #print 'bold_files:', bold_files
    if type(bold_files[0]) is list:
        bold_files = bold_files[0]

    for bold_file in bold_files:
        if not op.exists(bold_file):
            raise Exception('File not found: ' + bold_file)

        b, bold_meta = read_volume(bold_file)
        bolds.append(b)
        sessionScans.append(np.arange(lastScan,
                                      lastScan+b.shape[TIME_AXIS],
                                      dtype=int))
        lastScan += b.shape[TIME_AXIS]

    bold = np.concatenate(tuple(bolds), axis=TIME_AXIS)

    pyhrf.verbose(1,'BOLD has shape %s' %str(bold.shape))
    discard_bad_data(bold, mask)

    # #HACK
    # if mask_file != DEFAULT_MASK_VOL_FILE:
    #     write_volume(mask,'./treated_mask.nii')
    #     sys.exit(0)


    return mask, mask_meta_obj, mask_loaded_from_file, bold, sessionScans
Example #29
def Main_vbjde_Extension_constrained(graph, Y, Onsets, Thrf, K, TR, beta,
                                     dt, scale=1, estimateSigmaH=True,
                                     sigmaH=0.05, NitMax=-1,
                                     NitMin=1, estimateBeta=True,
                                     PLOT=False, contrasts=[],
                                     computeContrast=False,
                                     gamma_h=0, estimateHRF=True,
                                     TrueHrfFlag=False,
                                     HrfFilename='hrf.nii',
                                     estimateLabels=True,
                                     LabelsFilename='labels.nii',
                                     MFapprox=False, InitVar=0.5,
                                     InitMean=2.0, MiniVEMFlag=False,
                                     NbItMiniVem=5):
    # VBJDE Function for BOLD with constraints

    logger.info("Fast EM with C extension started ...")
    np.random.seed(6537546)

    ##########################################################################
    # INITIALIZATIONS
    # Initialize parameters
    tau1 = 0.0
    tau2 = 0.0
    S = 100
    Init_sigmaH = sigmaH
    Nb2Norm = 1
    NormFlag = False
    if NitMax < 0:
        NitMax = 100
    gamma = 7.5
    #gamma_h = 1000
    gradientStep = 0.003
    MaxItGrad = 200
    Thresh = 1e-5
    Thresh_FreeEnergy = 1e-5
    estimateLabels = True  # WARNING: overrides the estimateLabels argument

    # Initialize sizes vectors
    D = int(np.ceil(Thrf / dt)) + 1  # D = int(np.ceil(Thrf/dt))
    M = len(Onsets)
    N = Y.shape[0]
    J = Y.shape[1]
    l = int(np.sqrt(J))
    condition_names = []

    # Neighbours
    maxNeighbours = max([len(nl) for nl in graph])
    neighboursIndexes = np.zeros((J, maxNeighbours), dtype=np.int32)
    neighboursIndexes -= 1
    for i in xrange(J):
        neighboursIndexes[i, :len(graph[i])] = graph[i]
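    # neighboursIndexes: (J, maxNeighbours) padded adjacency table; row i
    # lists the neighbours of voxel i, right-padded with -1 sentinels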
    # Conditions
    X = OrderedDict([])
    for condition, Ons in Onsets.iteritems():
        X[condition] = vt.compute_mat_X_2(N, TR, D, dt, Ons)
        condition_names += [condition]
    XX = np.zeros((M, N, D), dtype=np.int32)
    nc = 0
    for condition, Ons in Onsets.iteritems():
        XX[nc, :, :] = X[condition]
        nc += 1
    # Covariance matrix
    order = 2
    D2 = vt.buildFiniteDiffMatrix(order, D)
    R = np.dot(D2, D2) / pow(dt, 2 * order)
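    # R, the squared 2nd-order finite-difference operator scaled by
    # dt**(2*order), is the precision of the HRF smoothness prior
    # (invR below is the corresponding covariance)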
    invR = np.linalg.inv(R)
    Det_invR = np.linalg.det(invR)

    Gamma = np.identity(N)
    Det_Gamma = np.linalg.det(Gamma)

    p_Wtilde = np.zeros((M, K), dtype=np.float64)
    p_Wtilde1 = np.zeros((M, K), dtype=np.float64)
    p_Wtilde[:, 1] = 1

    Crit_H = 1
    Crit_Z = 1
    Crit_A = 1
    Crit_AH = 1
    AH = np.zeros((J, M, D), dtype=np.float64)
    AH1 = np.zeros((J, M, D), dtype=np.float64)
    Crit_FreeEnergy = 1

    cA = []
    cH = []
    cZ = []
    cAH = []
    FreeEnergy_Iter = []
    cTime = []
    cFE = []

    SUM_q_Z = [[] for m in xrange(M)]
    mu1 = [[] for m in xrange(M)]
    h_norm = []
    h_norm2 = []

    CONTRAST = np.zeros((J, len(contrasts)), dtype=np.float64)
    CONTRASTVAR = np.zeros((J, len(contrasts)), dtype=np.float64)
    Q_barnCond = np.zeros((M, M, D, D), dtype=np.float64)
    XGamma = np.zeros((M, D, N), dtype=np.float64)
    m1 = 0
    for k1 in X:  # Loop over the M conditions
        m2 = 0
        for k2 in X:
            Q_barnCond[m1, m2, :, :] = np.dot(
                np.dot(X[k1].transpose(), Gamma), X[k2])
            m2 += 1
        XGamma[m1, :, :] = np.dot(X[k1].transpose(), Gamma)
        m1 += 1

    if MiniVEMFlag:
        logger.info("MiniVEM to choose the best initialisation...")
        """InitVar, InitMean, gamma_h = MiniVEM_CompMod(Thrf,TR,dt,beta,Y,K,
                                                     gamma,gradientStep,
                                                     MaxItGrad,D,M,N,J,S,
                                                     maxNeighbours,
                                                     neighboursIndexes,
                                                     XX,X,R,Det_invR,Gamma,
                                                     Det_Gamma,
                                                     scale,Q_barnCond,XGamma,
                                                     NbItMiniVem,
                                                     sigmaH,estimateHRF)"""

        InitVar, InitMean, gamma_h = vt.MiniVEM_CompMod(
            Thrf, TR, dt, beta, Y, K, gamma, gradientStep, MaxItGrad, D, M,
            N, J, S, maxNeighbours, neighboursIndexes, XX, X, R, Det_invR,
            Gamma, Det_Gamma, p_Wtilde, scale, Q_barnCond, XGamma, tau1,
            tau2, NbItMiniVem, sigmaH, estimateHRF)

    sigmaH = Init_sigmaH
    sigma_epsilone = np.ones(J)
    logger.info(
        "Labels are initialized by setting active probabilities to ones ...")
    q_Z = np.zeros((M, K, J), dtype=np.float64)
    q_Z[:, 1, :] = 1
    q_Z1 = np.zeros((M, K, J), dtype=np.float64)
    Z_tilde = q_Z.copy()

    # TT,m_h = getCanoHRF(Thrf-dt,dt) #TODO: check
    TT, m_h = getCanoHRF(Thrf, dt)  # TODO: check
    m_h = m_h[:D]
    m_H = np.array(m_h).astype(np.float64)
    m_H1 = np.array(m_h)
    sigmaH1 = sigmaH
    if estimateHRF:
        Sigma_H = np.ones((D, D), dtype=np.float64)
    else:
        Sigma_H = np.zeros((D, D), dtype=np.float64)

    Beta = beta * np.ones((M), dtype=np.float64)
    P = vt.PolyMat(N, 4, TR)
    L = vt.polyFit(Y, TR, 4, P)
    PL = np.dot(P, L)
    y_tilde = Y - PL
    Ndrift = L.shape[0]

    sigma_M = np.ones((M, K), dtype=np.float64)
    sigma_M[:, 0] = 0.5
    sigma_M[:, 1] = 0.6
    mu_M = np.zeros((M, K), dtype=np.float64)
    for k in xrange(1, K):
        mu_M[:, k] = InitMean
    Sigma_A = np.zeros((M, M, J), np.float64)
    for j in xrange(0, J):
        Sigma_A[:, :, j] = 0.01 * np.identity(M)
    m_A = np.zeros((J, M), dtype=np.float64)
    m_A1 = np.zeros((J, M), dtype=np.float64)
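    # initialise each amplitude m_A[j, m] as a q_Z-weighted draw from the
    # per-class Gaussians N(mu_M[m, k], sigma_M[m, k])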
    for j in xrange(0, J):
        for m in xrange(0, M):
            for k in xrange(0, K):
                m_A[j, m] += np.random.normal(mu_M[m, k],
                                              np.sqrt(sigma_M[m, k])) * q_Z[m, k, j]
    m_A1[:, :] = m_A[:, :]  # copy, not alias: m_A is updated in place later

    t1 = time.time()

    ##########################################################################
    # VBJDE num. iter. minimum

    ni = 0

    while ((ni < NitMin) or (((Crit_FreeEnergy > Thresh_FreeEnergy) or (Crit_AH > Thresh)) and (ni < NitMax))):

        logger.info("------------------------------ Iteration n° " +
                    str(ni + 1) + " ------------------------------")

        #####################
        # EXPECTATION
        #####################

        # A
        logger.info("E A step ...")
        UtilsC.expectation_A(q_Z, mu_M, sigma_M, PL, sigma_epsilone, Gamma,
                             Sigma_H, Y, y_tilde, m_A, m_H, Sigma_A, XX.astype(np.int32), J, D, M, N, K)
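        # val is a flat view on m_A: entries in the denormal range are
        # flushed to exactly zero to avoid numerical noise in the criteria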
        val = np.reshape(m_A, (M * J))
        val[np.where((val <= 1e-50) & (val > 0.0))] = 0.0
        val[np.where((val >= -1e-50) & (val < 0.0))] = 0.0

        # crit. A
        DIFF = np.reshape(m_A - m_A1, (M * J))
        # To avoid numerical problems
        DIFF[np.where((DIFF < 1e-50) & (DIFF > 0.0))] = 0.0
        # To avoid numerical problems
        DIFF[np.where((DIFF > -1e-50) & (DIFF < 0.0))] = 0.0
        Crit_A = (
            np.linalg.norm(DIFF) / np.linalg.norm(np.reshape(m_A1, (M * J)))) ** 2
        cA += [Crit_A]
        m_A1[:, :] = m_A[:, :]

        # HRF h
        if estimateHRF:
            ################################
            #  HRF ESTIMATION
            ################################
            UtilsC.expectation_H(XGamma, Q_barnCond, sigma_epsilone, Gamma, R, Sigma_H, Y,
                                 y_tilde, m_A, m_H, Sigma_A, XX.astype(np.int32), J, D, M, N, scale, sigmaH)

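            # Constrained HRF update: project the unconstrained posterior
            # mean m_H onto {h : h[0] = h[-1] = 0, ||h||_2 <= 1} in the
            # Sigma_H^-1 metric, by solving a small QP with cvxpy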
            import cvxpy as cvx
            m, n = Sigma_H.shape
            Sigma_H_inv = np.linalg.inv(Sigma_H)
            zeros_H = np.zeros_like(m_H[:, np.newaxis])

            # Construct the problem. PRIMAL
            h = cvx.Variable(n)
            expression = cvx.quad_form(h - m_H[:, np.newaxis], Sigma_H_inv)
            objective = cvx.Minimize(expression)
            #constraints = [h[0] == 0, h[-1]==0, h >= zeros_H, cvx.square(cvx.norm(h,2))<=1]
            constraints = [
                h[0] == 0, h[-1] == 0, cvx.square(cvx.norm(h, 2)) <= 1]
            prob = cvx.Problem(objective, constraints)
            result = prob.solve(verbose=0, solver=cvx.CVXOPT)

            # Now we update the mean of h
            m_H_old = m_H
            Sigma_H_old = Sigma_H
            m_H = np.squeeze(np.array((h.value)))
            Sigma_H = np.zeros_like(Sigma_H)

            h_norm += [np.linalg.norm(m_H)]
            # print 'h_norm = ', h_norm

            # Plotting HRF
            if PLOT and ni >= 0:
                import matplotlib.pyplot as plt
                plt.figure(M + 1)
                plt.plot(m_H)
                plt.hold(True)
        else:
            if TrueHrfFlag:
                TrueVal, head = read_volume(HrfFilename)
                TrueVal = TrueVal[:, 0, 0, 0]  # HRF time course at voxel (0, 0, 0)
                logger.info('True HRF loaded, shape: %s', str(TrueVal.shape))
                m_H = TrueVal

        # crit. h
        Crit_H = (np.linalg.norm(m_H - m_H1) / np.linalg.norm(m_H1)) ** 2
        cH += [Crit_H]
        m_H1[:] = m_H[:]

        # crit. AH
        for d in xrange(0, D):
            AH[:, :, d] = m_A[:, :] * m_H[d]
        DIFF = np.reshape(AH - AH1, (M * J * D))
        # To avoid numerical problems
        DIFF[np.where((DIFF < 1e-50) & (DIFF > 0.0))] = 0.0
        # To avoid numerical problems
        DIFF[np.where((DIFF > -1e-50) & (DIFF < 0.0))] = 0.0
        if np.linalg.norm(np.reshape(AH1, (M * J * D))) == 0:
            Crit_AH = 1000000000.
        else:
            Crit_AH = (
                np.linalg.norm(DIFF) / np.linalg.norm(np.reshape(AH1, (M * J * D)))) ** 2
        cAH += [Crit_AH]
        AH1[:, :, :] = AH[:, :, :]

        # Z labels
        if estimateLabels:
            logger.info("E Z step ...")
            # WARNING!!! ParsiMod gives better results, but we need the other
            # one.
            if MFapprox:
                UtilsC.expectation_Z(Sigma_A, m_A, sigma_M, Beta, Z_tilde, mu_M, q_Z, neighboursIndexes.astype(
                    np.int32), M, J, K, maxNeighbours)
            if not MFapprox:
                UtilsC.expectation_Z_ParsiMod_RVM_and_CompMod(
                    Sigma_A, m_A, sigma_M, Beta, mu_M, q_Z, neighboursIndexes.astype(np.int32), M, J, K, maxNeighbours)
        else:
            logger.info("Using True Z ...")
            TrueZ = read_volume(LabelsFilename)
            for m in xrange(M):
                q_Z[m, 1, :] = np.reshape(TrueZ[0][:, :, :, m], J)
                q_Z[m, 0, :] = 1 - q_Z[m, 1, :]

        # crit. Z
        val = np.reshape(q_Z, (M * K * J))
        val[np.where((val <= 1e-50) & (val > 0.0))] = 0.0

        DIFF = np.reshape(q_Z - q_Z1, (M * K * J))
        # To avoid numerical problems
        DIFF[np.where((DIFF < 1e-50) & (DIFF > 0.0))] = 0.0
        # To avoid numerical problems
        DIFF[np.where((DIFF > -1e-50) & (DIFF < 0.0))] = 0.0
        if np.linalg.norm(np.reshape(q_Z1, (M * K * J))) == 0:
            Crit_Z = 1000000000.
        else:
            Crit_Z = (
                np.linalg.norm(DIFF) / np.linalg.norm(np.reshape(q_Z1, (M * K * J)))) ** 2
        cZ += [Crit_Z]
        q_Z1[:, :, :] = q_Z[:, :, :]  # copy, not alias: q_Z is updated in place

        #####################
        # MAXIMIZATION
        #####################

        # HRF: Sigma_h
        if estimateHRF:
            if estimateSigmaH:
                logger.info("M sigma_H step ...")
                if gamma_h > 0:
                    sigmaH = vt.maximization_sigmaH_prior(
                        D, Sigma_H_old, R, m_H_old, gamma_h)
                else:
                    sigmaH = vt.maximization_sigmaH(D, Sigma_H, R, m_H)
                logger.info('sigmaH = %s', str(sigmaH))

        # (mu,sigma)
        logger.info("M (mu,sigma) step ...")
        mu_M, sigma_M = vt.maximization_mu_sigma(
            mu_M, sigma_M, q_Z, m_A, K, M, Sigma_A)
        for m in xrange(M):
            SUM_q_Z[m] += [sum(q_Z[m, 1, :])]
            mu1[m] += [mu_M[m, 1]]

        # Drift L
        UtilsC.maximization_L(
            Y, m_A, m_H, L, P, XX.astype(np.int32), J, D, M, Ndrift, N)
        PL = np.dot(P, L)
        y_tilde = Y - PL

        # Beta
        if estimateBeta:
            logger.info("estimating beta")
            for m in xrange(0, M):
                if MFapprox:
                    Beta[m] = UtilsC.maximization_beta(beta, q_Z[m, :, :].astype(np.float64), Z_tilde[m, :, :].astype(
                        np.float64), J, K, neighboursIndexes.astype(np.int32), gamma, maxNeighbours, MaxItGrad, gradientStep)
                if not MFapprox:
                    #Beta[m] = UtilsC.maximization_beta(beta,q_Z[m,:,:].astype(np.float64),q_Z[m,:,:].astype(np.float64),J,K,neighboursIndexes.astype(int32),gamma,maxNeighbours,MaxItGrad,gradientStep)
                    Beta[m] = UtilsC.maximization_beta_CB(beta, q_Z[m, :, :].astype(
                        np.float64), J, K, neighboursIndexes.astype(np.int32), gamma, maxNeighbours, MaxItGrad, gradientStep)
            logger.info("End estimating beta")
            logger.info(Beta)

        # Sigma noise
        logger.info("M sigma noise step ...")
        UtilsC.maximization_sigma_noise(
            Gamma, PL, sigma_epsilone, Sigma_H, Y, m_A, m_H, Sigma_A, XX.astype(np.int32), J, D, M, N)

        #### Computing Free Energy ####
        if ni > 0:
            FreeEnergy1 = FreeEnergy

        """FreeEnergy = vt.Compute_FreeEnergy(y_tilde,m_A,Sigma_A,mu_M,sigma_M,
                                           m_H,Sigma_H,R,Det_invR,sigmaH,
                                           p_Wtilde,q_Z,neighboursIndexes,
                                           maxNeighbours,Beta,sigma_epsilone,
                                           XX,Gamma,Det_Gamma,XGamma,J,D,M,
                                           N,K,S,"CompMod")"""
        FreeEnergy = vt.Compute_FreeEnergy(
            y_tilde, m_A, Sigma_A, mu_M, sigma_M, m_H, Sigma_H, R, Det_invR,
            sigmaH, p_Wtilde, tau1, tau2, q_Z, neighboursIndexes,
            maxNeighbours, Beta, sigma_epsilone, XX, Gamma, Det_Gamma,
            XGamma, J, D, M, N, K, S, "CompMod")

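        # convergence check: relative decrease of the free energy between
        # two successive iterations, compared against Thresh_FreeEnergy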
        if ni > 0:
            Crit_FreeEnergy = (FreeEnergy1 - FreeEnergy) / FreeEnergy1
        FreeEnergy_Iter += [FreeEnergy]
        cFE += [Crit_FreeEnergy]

        # Update index
        ni += 1

        t02 = time.time()
        cTime += [t02 - t1]

    t2 = time.time()

    ##########################################################################
    # PLOTS and SNR computation

    FreeEnergyArray = np.zeros((ni), dtype=np.float64)
    for i in xrange(ni):
        FreeEnergyArray[i] = FreeEnergy_Iter[i]

    SUM_q_Z_array = np.zeros((M, ni), dtype=np.float64)
    mu1_array = np.zeros((M, ni), dtype=np.float64)
    h_norm_array = np.zeros((ni), dtype=np.float64)
    for m in xrange(M):
        for i in xrange(ni):
            SUM_q_Z_array[m, i] = SUM_q_Z[m][i]
            mu1_array[m, i] = mu1[m][i]
    if h_norm:  # only filled when the HRF is estimated
        for i in xrange(ni):
            h_norm_array[i] = h_norm[i]

    if PLOT and 0:  # note: 'and 0' keeps this plotting block disabled
        import matplotlib.pyplot as plt
        import matplotlib
        font = {'size': 15}
        matplotlib.rc('font', **font)
        plt.savefig('./HRF_Iter_CompMod.png')
        plt.hold(False)
        plt.figure(2)
        plt.plot(cAH[1:-1], 'lightblue')
        plt.hold(True)
        plt.plot(cFE[1:-1], 'm')
        plt.hold(False)
        #plt.legend( ('CA','CH', 'CZ', 'CAH', 'CFE') )
        plt.legend(('CAH', 'CFE'))
        plt.grid(True)
        plt.savefig('./Crit_CompMod.png')
        plt.figure(3)
        plt.plot(FreeEnergyArray)
        plt.grid(True)
        plt.savefig('./FreeEnergy_CompMod.png')

        plt.figure(4)
        for m in xrange(M):
            plt.plot(SUM_q_Z_array[m])
            plt.hold(True)
        plt.hold(False)
        #plt.legend( ('m=0','m=1', 'm=2', 'm=3') )
        #plt.legend( ('m=0','m=1') )
        plt.savefig('./Sum_q_Z_Iter_CompMod.png')

        plt.figure(5)
        for m in xrange(M):
            plt.plot(mu1_array[m])
            plt.hold(True)
        plt.hold(False)
        plt.savefig('./mu1_Iter_CompMod.png')

        plt.figure(6)
        plt.plot(h_norm_array)
        plt.savefig('./HRF_Norm_CompMod.png')

        Data_save = xndarray(h_norm_array, ['Iteration'])
        Data_save.save('./HRF_Norm_Comp.nii')

    CompTime = t2 - t1
    cTimeMean = CompTime / ni

    sigma_M = np.sqrt(np.sqrt(sigma_M))
    logger.info("Nb iterations to reach criterion: %d", ni)
    logger.info("Computational time = %s min %s s", str(
        int(CompTime // 60)), str(int(CompTime % 60)))
    # print "Computational time = " + str(int( CompTime//60 ) ) + " min " + str(int(CompTime%60)) + " s"
    # print "sigma_H = " + str(sigmaH)
    logger.info('mu_M: %s', str(mu_M))
    logger.info('sigma_M: %s', str(sigma_M))
    logger.info("sigma_H = %s", str(sigmaH))
    logger.info("Beta = %s", str(Beta))

    StimulusInducedSignal = vt.computeFit(m_H, m_A, X, J, N)
    # SNR in dB: 20 * log10(||Y|| / ||Y - fit - drift||)
    SNR = 20 * np.log10(np.linalg.norm(Y) /
                        np.linalg.norm(Y - StimulusInducedSignal - PL))
    logger.info("SNR = %f", SNR)
    return ni, m_A, m_H, q_Z, sigma_epsilone, mu_M, sigma_M, Beta, L, PL, CONTRAST, CONTRASTVAR, cA[2:], cH[2:], cZ[2:], cAH[2:], cTime[2:], cTimeMean, Sigma_A, StimulusInducedSignal, FreeEnergyArray
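
# Hedged toy illustration (not from pyhrf) of the stopping rule used in the
# loop above: the squared relative change (||x - x_prev|| / ||x_prev||)**2,
# with the same large sentinel when the previous iterate has zero norm.
import numpy as np

def rel_change_sq(x, x_prev):
    denom = np.linalg.norm(x_prev.ravel())
    if denom == 0:
        return 1000000000.
    return (np.linalg.norm((x - x_prev).ravel()) / denom) ** 2

x1 = np.ones((3, 4))
x2 = x1 + 1e-4
print rel_change_sq(x2, x1)  # 1e-08, below Thresh = 1e-5 -> converged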
Beispiel #30
0
def BMA_consensus_cluster_parallel(cfg, remote_path, remote_BOLD_fn,
                                   remote_mask_fn, Y, nifti_masker,
                                   num_vox, K_clus, K_clusters,
                                   parc, alpha, prop, nbItRFIR, onsets,
                                   durations, output_sub_parc,
                                   rescale=True, averg_bold=False):
    '''
    Perform all steps for one clustering case (K_clus given, parcellation
    number parc given).
    remote_path: path on the cluster where results will be stored
    '''
    import os
    import sys
    sys.path.append("/home/pc174679/pyhrf/pyhrf-tree_trunk/script/WIP/Scripts_IRMf_BB/Parcellations/")
    sys.path.append("/home/pc174679/pyhrf/pyhrf-tree_trunk/script/WIP/Scripts_IRMf_Adultes_Solv/")
    sys.path.append("/home/pc174679/pyhrf/pyhrf-tree_trunk/script/WIP/Scripts_IRMf_Adultes_Solv/Scripts_divers_utiles/Scripts_utiles/")
    sys.path.append('/home/pc174679/local/installations/consensus-cluster-0.6')
    
    from Random_parcellations import random_parcellations, subsample_data_on_time
    from Divers_parcellations_test import *
    
    from RFIR_evaluation_parcellations import JDE_estim, RFIR_estim, clustering_from_RFIR
    
    from Random_parcellations import hrf_roi_to_vox
    from pyhrf.tools._io import remote_copy, remote_mkdir
    from nisl import io
    
    #nifti_masker.mask=remote_mask_fn
    
    # Creation of the necessary paths --> do not do here
    parc_name = 'Subsampled_data_with_' + str(K_clus) + 'clusters' 
    parc_name_clus = parc_name + 'rnd_number_' + str(parc+1)
    
    remote_sub = os.sep.join((remote_path, parc_name))   
    #if not os.path.exists(remote_sub):
        #os.path.exists(remote_sub)
        #print 'remote_sub:', remote_sub
        #os.makedirs(remote_sub)
    remote_sub_parc = os.sep.join((remote_sub,parc_name_clus))   
    #if not os.path.exists(remote_sub_parc):
        #os.makedirs(remote_sub_parc)
    
    output_RFIR_parc = os.sep.join((output_sub_parc,'RFIR_estim'))
    
    ###################################
    ## 1st STEP: SUBSAMPLING
    print '--- Subsample data ---'
    Ysub = subsample_data_on_time(Y, remote_mask_fn, K_clus, alpha, prop, \
                    nifti_masker, rescale=rescale)
    print 'Ysub:', Ysub
    print 'remote_sub_parc:', remote_sub_parc
    Ysub_name = 'Y_sub_'+ str(K_clus) + 'clusters_' + 'rnd_number_' + str(parc+1) +'.nii'
    Ysub_fn = os.sep.join((remote_sub_parc, Ysub_name))
    Ysub_masked = nifti_masker.inverse_transform(Ysub).get_data()
    write_volume(Ysub_masked, Ysub_fn)                        
    
    
    
    ###################################
    ## 2nd STEP: RFIR
    print '--- Perform RFIR estimation ---'

    
    remote_RFIR_parc_clus = os.sep.join((remote_sub_parc, 'RFIR_estim'))
    #if not os.path.exists(remote_RFIR_parc):os.makedirs(remote_RFIR_parc)
    #remote_RFIR_parc_clus = os.sep.join((remote_RFIR_parc, parc_name_clus))
    #if not os.path.exists(remote_RFIR_parc_clus):os.makedirs(remote_RFIR_parc_clus)
    
    print '  * output path for RFIR ', remote_RFIR_parc_clus
    print '  * RFIR for subsampling nb ', str(parc+1), ' with ', K_clus, ' clusters' 
    RFIR_estim(nbItRFIR, onsets, durations, Ysub_fn, remote_mask_fn,
               remote_RFIR_parc_clus, avg_bold=averg_bold)
                  
    hrf_fn = os.sep.join((remote_RFIR_parc_clus, 'rfir_ehrf.nii'))
    #remote_copy([hrf_fn], remote_host, 
                #remote_user, remote_path)[0]
    
    ###################################
    ## 3rd STEP: CLUSTERING FROM RFIR RESULTS
    name_hrf = 'rfir_ehrf.nii'
    
    from pyhrf.tools._io import read_volume, write_volume
    import nisl.io as ionisl
    from sklearn.feature_extraction import image
    from sklearn.cluster import WardAgglomeration
    from scipy.spatial.distance import cdist, pdist
    
    hrf_fn = os.sep.join((remote_RFIR_parc_clus, name_hrf))
    hrf = read_volume(hrf_fn)[0]
    hrf_t_fn = add_suffix(hrf_fn, 'transpose')
    # keep only the 1st condition to drive the parcellation
    write_volume(hrf[:, :, :, :, 0], hrf_t_fn)
    
    nifti_masker = ionisl.NiftiMasker(remote_mask_fn)
    Nm = nifti_masker.fit(hrf_t_fn)
    
    #features: coeff of the HRF
    HRF = Nm.fit_transform(hrf_t_fn)
    
    mask, meta_data = read_volume(remote_mask_fn)
    shape = mask.shape
    connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
            n_z=shape[2], mask=mask)
            
    # note: ward.fit below clusters the voxel axis of HRF directly; this
    # transposed copy is left unused
    features = HRF.transpose()

    ward = WardAgglomeration(n_clusters=K_clus, connectivity=connectivity,
                                memory='nisl_cache')
    ward.fit(HRF)
    labels_tot = ward.labels_ + 1  # shift labels to start at 1 (0 = background)
        
        
    #Kelbow, Perc_WSS, all_parc_from_RFIR_fns, all_parc_RFIR = \
    #clustering_from_RFIR(K_clusters, remote_RFIR_parc_clus, remote_mask_fn, name_hrf, plots=False)
    #labels_tot = all_parc_RFIR[str(Kelbow)]
    
    #to retrieve clustering with as many clusters as determined in K_clusters
    #labels_tot = all_parc_RFIR[str(K_clus)]
    #Parcellation retrieved: for K=Kelbow
    #clusters_RFIR_fn = all_parc_from_RFIR[str(Kelbow)]
    #clustering_rfir_fn = os.path.join(remote_RFIR_parc_clus, 'output_clustering_elbow.nii')
    #write_volume(read_volume(clusters_RFIR_fn)[0], clustering_rfir_fn, meta_bold)

    #labels_tot = nifti_masker.fit_transform([clusters_RFIR_fn])[0]
    #labels_tot = read_volume(clusters_RFIR_fn)[0]
    
    #labels_name='labels_' + str(int(K_clus)) + '_' + str(parc+1) + '.pck'
    #name_f = os.sep.join((remote_sub_parc, labels_name))
    #pickle_labels=open(name_f, 'w')
    #cPickle.dump(labels_tot,f)
    #pickle_labels.close()
    
    #remote_copy(pickle_labels, remote_user, 
            #remote_host, output_sub_parc)
    
    #################################
    ## Prepare consensus clustering
    print 'Prepare consensus clustering'
    clustcount, totalcount = upd_similarity_matrix(labels_tot)
    print 'results:', clustcount
    
    return clustcount.astype(np.bool)
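
# Hedged sketch (illustrative only): upd_similarity_matrix presumably counts,
# per voxel pair, how often both voxels receive the same cluster label. The
# co-assignment indicator of a single labelling looks like this:
import numpy as np

def co_assignment(labels):
    labels = np.asarray(labels)
    # True where voxels i and j share a cluster label
    return labels[:, None] == labels[None, :]

print co_assignment([1, 1, 2, 3, 2]).astype(int)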
Beispiel #31
0
def parcellation_for_jde(fmri_data, avg_parcel_size=250, output_dir=None,
                         method='gkm', glm_drift='Cosine', glm_hfcut=128):
    """
    method: gkm, ward, ward_and_gkm
    """

    if output_dir is None:
        output_dir = tempfile.mkdtemp(prefix='pyhrf_JDE_parcellation_GLM',
                                      dir=pyhrf.cfg['global']['tmp_path'])
    glm_output_dir = op.join(output_dir, 'GLM_for_parcellation')
    if not op.exists(glm_output_dir): os.makedirs(glm_output_dir)

    pyhrf.verbose(1, 'GLM for parcellation')

    # if fmri_data.data_type == 'volume':
    #     paradigm_file, bold_file, mask_file = fmri_data.save(glm_output_dir)
    #     beta_files = glm_nipy_from_files(bold_file, fmri_data.tr, paradigm_file,
    #                                      glm_output_dir, mask_file,
    #                                      drift_model=glm_drift, hfcut=glm_hfcut)
    # elif fmri_data.data_type == 'surface':
    #     beta_files = glm_nipy(fmri_data, glm_output_dir,
    #                           drift_model=glm_drift, hfcut=glm_hfcut)

    g, dm, cons = glm_nipy(fmri_data, drift_model=glm_drift, hfcut=glm_hfcut)

    pval_files = []
    if cons is not None:
        func_data = [('con_pval_%s' %cname, con.pvalue()) \
                         for cname, con in cons.iteritems()]
    else:
        reg_cst_drift = re.compile(".*constant.*|.*drift.*")
        func_data = [('beta_%s' %reg_name, g.beta[ir]) \
                         for ir,reg_name in enumerate(dm.names) \
                         if not reg_cst_drift.match(reg_name)]

    for name, data in func_data:
        val_vol = expand_array_in_mask(data, fmri_data.roiMask>0)
        val_fn = op.join(glm_output_dir, '%s.nii' %name)
        write_volume(val_vol, val_fn, fmri_data.meta_obj)
        pval_files.append(val_fn)

    mask_file = op.join(glm_output_dir,'mask.nii')
    write_volume(fmri_data.roiMask>0, mask_file, fmri_data.meta_obj)

    nvox = fmri_data.get_nb_vox_in_mask()
    nparcels = round_nb_parcels(nvox * 1. / avg_parcel_size)

    pyhrf.verbose(1, 'Parcellation from GLM outputs, method: %s, ' \
                      'nb parcels: %d' %(method, nparcels))

    if fmri_data.data_type == 'volume':
        parcellation_file = op.join(output_dir, 'parcellation_%s_np%d.nii'
                                    %(method, nparcels))

        make_parcellation_from_files(pval_files, mask_file, parcellation_file,
                                     nparcels, method)
        parcellation,_ = read_volume(parcellation_file)
    else:
        mesh_file = fmri_data.data_files[-1]
        parcellation_file = op.join(output_dir, 'parcellation_%s_np%d.gii'
                                    %(method, nparcels))
        make_parcellation_surf_from_files(pval_files, mesh_file,
                                          parcellation_file, nparcels, method,
                                          verbose=1)
        parcellation,_ = read_texture(parcellation_file)
    #print parcellation_file


    pyhrf.verbose(1, parcellation_report(parcellation))

    return parcellation, parcellation_file
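
# Hedged toy check (not from pyhrf) of the parcel-count heuristic above;
# a plain round stands in for pyhrf's round_nb_parcels:
nvox = 24000
avg_parcel_size = 250
nparcels = int(round(nvox * 1. / avg_parcel_size))
print nparcels  # -> 96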
Beispiel #32
0
def project_fmri_from_kernels(input_mesh, kernels_file, fmri_data_file,
                              output_tex, bin_threshold=None):

    pyhrf.verbose(2,'Project data onto mesh using kernels ...')

    if 0:
        print 'Projecting ...'
        print 'func data:', fmri_data_file
        print 'Mesh file:', input_mesh
        print 'Save as:', output_tex

    pyhrf.verbose(2,'Call AimsFunctionProjection -op 1 ...')    

    data_files = []
    output_texs = []
    p_ids = None
    if bin_threshold is not None:
        d,h = read_volume(fmri_data_file)
        if np.allclose(d.astype(int), d):
            tmp_dir = pyhrf.get_tmp_path()
            p_ids = np.unique(d)
            pyhrf.verbose(2, 'bin threshold: %f' %bin_threshold)
            pyhrf.verbose(2, 'pids(n=%d): %d...%d' \
                              %(len(p_ids),min(p_ids),max(p_ids)))
            for i,p_id in enumerate(p_ids):
                if p_id != 0:
                    new_p = np.zeros_like(d)
                    new_p[np.where(d==p_id)] = i + 1 #0 is background
                    ifn = op.join(tmp_dir,'pmask_%d.nii'%p_id)
                    write_volume(new_p, ifn, h)
                    data_files.append(ifn)
                    ofn = op.join(tmp_dir,'ptex_%d.gii'%p_id)
                    output_texs.append(ofn)
        else:
            data_files.append(fmri_data_file)
            output_texs.append(output_tex)
    else:
        data_files.append(fmri_data_file)
        output_texs.append(output_tex)

    pyhrf.verbose(3, 'input data files: %s' %str(data_files))
    pyhrf.verbose(3, 'output data files: %s' %str(output_texs))

    for data_file, o_tex in zip(data_files, output_texs):
        projection = [ 
            'AimsFunctionProjection', 
            '-op', '1',
            '-d', kernels_file,
            '-d1', data_file,
            '-m', input_mesh,
            '-o', o_tex
            ]

        cmd = ' '.join(map(str,projection))
        pyhrf.verbose(3, 'cmd: %s' %cmd)
        os.system(cmd)

    if bin_threshold is not None:
        pyhrf.verbose(2, 'Binary threshold of texture at %f' %bin_threshold)
        o_tex = output_texs[0]
        data,data_gii = read_texture(o_tex)
        data = (data>bin_threshold).astype(np.int32)
        print 'data:', data.dtype
        if p_ids is not None:
            # output_texs[k] was written for p_ids[k + 1] (0 is background),
            # so scale the first texture and pair the rest accordingly
            data *= int(p_ids[1])
            for pid, o_tex in zip(p_ids[2:], output_texs[1:]):
                pdata, pdata_gii = read_texture(o_tex)
                data += (pdata > bin_threshold).astype(np.int32) * int(pid)

        #assert (np.unique(data) == p_ids).all()
        write_texture(data, output_tex, intent='NIFTI_INTENT_LABEL')
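
# Hedged sketch (runnable without AIMS installed) of the command assembly
# used above; the file names are placeholders:
projection = ['AimsFunctionProjection', '-op', '1',
              '-d', 'kernels.ima', '-d1', 'fmri_data.nii',
              '-m', 'mesh.gii', '-o', 'proj.gii']
print ' '.join(map(str, projection))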