Example #1
        def run_fast_ecm(nuisance_file, gm_mask, string):
            if not os.path.isfile(os.path.join(subject_dir, 'FAST_ECM',string, 'zscore_fastECM.nii.gz')):
                #create output dir
                dir =  os.path.join(subject_dir,'FAST_ECM', string)
                mkdir_path(dir)
                os.chdir(dir)

                #copy nuisance file locally
                shutil.copy(nuisance_file, './residual.nii.gz')

                #gunzip
                if not os.path.isfile('residual.nii'):
                    os.system('gunzip residual.nii.gz')
                    os.system('rm -rf residual.nii.gz')

                # run Fast ECM
                pproc = os.path.join(dir, 'residual.nii')
                matlab_cmd = ['matlab',  '-version', '8.2', '-nodesktop' ,'-nosplash'  ,'-nojvm' ,'-r "fastECM(\'%s\', \'1\', \'1\', \'1\', \'20\', \'%s\') ; quit;"' %(pproc, gm_mask)]
                print '    ... Running ECM'
                subprocess.call(matlab_cmd)

                def z_score_centrality(image, outname):

                    print '    ... z-scoring %s'%outname
                    # zscore fastECM image
                    std  = commands.getoutput('fslstats %s -k %s -s | awk \'{print $1}\''%(image, group_gm_mask))
                    mean = commands.getoutput('fslstats %s -k %s -m | awk \'{print $1}\''%(image, group_gm_mask))
                    os.system('fslmaths %s -sub %s -div %s -mas %s %s'%(image, mean, std, group_gm_mask, outname))

                z_score_centrality('residual_fastECM.nii', 'zscore_fastECM')
                z_score_centrality('residual_degCM.nii'  , 'zscore_degCM')
                z_score_centrality('residual_normECM.nii', 'zscore_normECM')
                z_score_centrality('residual_rankECM.nii', 'zscore_rankECM')
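For sanity-checking the FSL-based z-scoring above, here is a pure numpy/nibabel sketch of the same within-mask standardisation. It assumes numpy is imported as np and nibabel as nb, and that the mask is a binary NIfTI; it is illustrative only, not part of the pipeline.

def zscore_in_mask(image, mask_file, outname):
    # load the centrality map and the GM mask
    img  = nb.load(image)
    data = img.get_data().astype(np.float32)
    gm   = nb.load(mask_file).get_data() > 0
    # z-score only the voxels inside the mask, leave the rest at zero
    vals = data[gm]
    z    = np.zeros(data.shape, dtype=np.float32)
    z[gm] = (vals - vals.mean()) / vals.std()
    nb.save(nb.Nifti1Image(z, img.affine, img.header), outname)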
Example #2
        def create_svs_masks(voxel_name):
            #matlab related definitions.... check RDA2NII.m file
            anatomical_dir     = os.path.join(subject_workspace, 'anatomical_original')
            T1Path     = os.path.join(subject_workspace, 'anatomical_original' + '/')
            T1Image    = 'ANATOMICAL.nii'
            svs_path   = os.path.join(subject_workspace, 'svs_rda', voxel_name, 'met' + '/')
            svs_file   = '%s%s_%s_SUPPRESSED.rda' %(subject ,workspace_dir[-10:-9], voxel_name)

            #  output dir
            mkdir_path(os.path.join(subject_workspace, 'svs_voxel_mask'))
            mask_dir = os.path.join(subject_workspace, 'svs_voxel_mask')

            #run matlab code to create registered mask from rda file
            matlab_command = ['matlab', '-version', '8.2', '-nodesktop', '-nosplash', '-nojvm', '-r "RDA_TO_NIFTI(\'%s\', \'%s\', \'%s\', \'%s\') ; quit;"'
                               %(T1Path, T1Image, svs_path, svs_file)]

            if not os.path.isfile(os.path.join(mask_dir, '%s%s_%s_RDA_MASK.nii' %(subject, workspace_dir[-10:-9], voxel_name))):
                print '..... extracting geometry from RDA and creating mask for %s'%voxel_name
                subprocess.call(matlab_command)

                # move the mask and coordinate files written by RDA_TO_NIFTI into the output dir
                for file in os.listdir(anatomical_dir):
                    if 'rda' in file and '%s'%voxel_name in file:
                        shutil.move(os.path.join(anatomical_dir, file),
                                    os.path.join(mask_dir, '%s%s_%s_RDA_MASK.nii' %(subject, workspace_dir[-10:-9], voxel_name)))
                    elif 'coord' in file:
                        shutil.move(os.path.join(anatomical_dir, file),
                                    os.path.join(mask_dir, '%s%s_%s_RDA_coord.txt' %(subject, workspace_dir[-10:-9], voxel_name)))
            else:
                print '%s SVS mask already created..... moving on'%voxel_name

            print '========================================================================================'
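A minimal usage sketch, assuming the three MRS voxels used elsewhere in this pipeline (ACC, THA, STR):

for voxel_name in ['ACC', 'THA', 'STR']:
    create_svs_masks(voxel_name)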
def dicom_convert(population, data_dir, workspace_dir):

    print '#############################################################################'
    print ''
    print '                 RUNNING PROJECT %s %s' %(data_dir[12:19], workspace_dir[-8:])
    print ''
    print '#############################################################################'

    count=0
    for subject in population:
        count +=1
        print '====================================================================='
        print '%s- DICOM CONVERSION for %s' %(count,subject)

        # define dicom directory for each subject
        dicom_dir  = os.path.join(data_dir, subject, 'DICOM')

        # define destination directory for NIFTI outputs
        mkdir_path(os.path.join(workspace_dir, subject, 'anatomical_original'))
        out_nifti_dir  = str(os.path.join(workspace_dir, subject, 'anatomical_original'))

        if not os.path.isfile(os.path.join(out_nifti_dir, 'ANATOMICAL.nii')):
            # create a list of all dicoms with absolute paths for each file
            dicom_list = []
            for dicom in os.listdir(dicom_dir):
                dicomstr = os.path.join(dicom_dir, dicom)
                dicom_list.append(dicomstr)

            # grab SeriesDescription and append T1 files to list
            T1_list = []
            print 'Reading dicom series descriptions'
            for dicom in dicom_list:
                try:
                    dcm_read = pydicom.read_file(dicom, force = True)
                    sequence = dcm_read.SeriesDescription
                except AttributeError:
                    continue

                if 'mp2rage_p3_602B_UNI_Images' in sequence:
                    T1_list.append(dicom)

            # convert T1 anatomical to NIFTI with SPM
            print 'Converting Dicom to Nifti for %s' %subject
            spm_dicom_convert                   = spmu.DicomImport()
            spm_dicom_convert.inputs.format     = 'nii'
            spm_dicom_convert.inputs.in_files   = T1_list
            spm_dicom_convert.inputs.output_dir = out_nifti_dir
            spm_dicom_convert.run()

            #rename output file
            for file in os.listdir(out_nifti_dir):
                if file.endswith('nii'):
                    os.rename(str(os.path.join(out_nifti_dir, file)),
                              str(os.path.join(out_nifti_dir, 'ANATOMICAL.nii')))
        else:
            print 'subject already processed.......moving on'

        print '====================================================================='
        print ''
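To find the exact SeriesDescription string to match against (here 'mp2rage_p3_602B_UNI_Images'), a small helper along the lines of the loop above can list the unique series names in a DICOM folder. This is an illustrative sketch only; it assumes nothing beyond pydicom and os, which the code above already uses.

def list_series_descriptions(dicom_dir):
    series = set()
    for name in os.listdir(dicom_dir):
        try:
            dcm = pydicom.read_file(os.path.join(dicom_dir, name), force=True)
            series.add(dcm.SeriesDescription)
        except AttributeError:
            # files without a SeriesDescription tag are skipped
            continue
    return sorted(series)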
def calc_ecm(population, workspace_dir):

    # all_gm = []
    #
    # if not os.path.isfile(os.path.join(workspace_dir, 'GluConnectivity', 'COMBINED_GM_MASK.nii.gz')):
    #     print 'Creating Group GM mask'
    #     for subject in population:
    #
    #         # input and output folders
    #         subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
    #         out_mask        = os.path.join(workspace_dir, 'GluConnectivity', 'COMBINED_GM_MASK.nii.gz')
    #         MNI2mm_gm   = os.path.join(subject_dir , 'anatomical_MNI2mm_tissue_gm/TISSUE_CLASS_1_GM_OPTIMIZED_resample_warp_thresh.nii.gz')
    #
    #         if os.path.isfile(MNI2mm_gm):
    #             all_gm.append(MNI2mm_gm)
    #
    #     maths_input = []
    #     for i in all_gm:
    #         x = '-add %s'%i
    #         maths_input.append(x)
    #
    #     maths_string = ' '.join(maths_input)[5:]
    #     os.system('fslmaths %s -mul /usr/share/fsl/data/standard/MNI152_T1_2mm_brain_mask.nii.gz %s'%(maths_string, out_mask))
    #
    #     out_mask_4mm = os.path.join(workspace_dir, 'GluConnectivity', 'COMBINED_GM_MASK_4mm.nii.gz')
    #     mni_4mm = '/SCR/ROI/brain_4mm.nii.gz'
    #     os.system('flirt -in %s -ref %s -out %s -applyisoxfm 4' %(out_mask, mni_4mm, out_mask_4mm ))

    for subject in population:
        print 'Running Subject %s'%subject

        # input and output folders
        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'ECM_LIPSIA')
        mkdir_path(outdir)
        os.chdir(outdir)

        # TRANSFORM NATIVE IMAGE TO MNI2mm
        mkdir_path( os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp'))
        pproc = os.path.join(subject_dir , 'functional_native_brain_preproc_FWHM_AROMA_residual_bp/bandpassed_demeaned_filtered.nii.gz')
        pproc_2mm  = os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/pproc.nii.gz')
        anat2mni  = os.path.join(subject_dir , 'anatomical_MNI2mm_xfm/MP2RAGE_DESKULL_RPI_resample_ero_fieldwarp.nii.gz')
        func2anat = os.path.join(subject_dir , 'functional_ANAT2mm_xfm/REST_calc_resample_corrected_volreg_maths_tstat_flirt.mat')
        if not os.path.isfile(pproc_2mm):
            if os.path.isfile(pproc):
                print '... Warping to MNI'
                os.system(' '.join([ 'applywarp',
                                 '--in='     +  pproc,
                                 '--ref='    +  '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz',
                                 '--out='    +  pproc_2mm,
                                 '--warp='   +  anat2mni,
                                 '--premat=' +  func2anat]))


        # Convert NIFTI to VISTA
        pproc_2mmv = os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/pproc.v')
        if not os.path.isfile(pproc_2mmv):
            print '...Converting NIFTI to VISTA.. make sure you are running this on telemann'
            os.system('isisconv -in %s -out %s' %(pproc_2mm, pproc_2mmv))
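The print above notes that isisconv is only available on the host 'telemann' in this setup; one could guard the conversion step with a hostname check. This is an illustrative sketch, not part of the original pipeline:

import socket

if socket.gethostname() != 'telemann':
    # isisconv (the NIFTI-to-VISTA converter) is assumed to live only on telemann
    raise RuntimeError('NIFTI to VISTA conversion requires isisconv; run this step on telemann')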
def calc_ecm(population, workspace_dir):

    for subject in population:
        print '####################################'
        print 'Running fast ECM Subject %s' % subject

        #############################################################
        #################### Input and output folders
        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'FAST_ECM_SCRUBBED')
        mkdir_path(outdir)
        os.chdir(outdir)

        pproc_2mm_scrubbed = os.path.join(
            subject_dir,
            'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_scrubbed.nii'
        )

        #############################################################
        #################### Run Fast ECM

        mask = '/SCR4/workspace/project_GluRest/OUT_DIR_A/GluConnectivity/GM2mm_bin.nii'

        if os.path.isfile(pproc_2mm_scrubbed):
            if not os.path.isfile(os.path.join(outdir, 'FAST_ECM.nii')):
                matlab_cmd = [
                    'matlab', '-version', '8.2', '-nodesktop', '-nosplash',
                    '-nojvm',
                    '-r "fastECM(\'%s\', \'1\', \'1\', \'1\', \'20\', \'%s\') ; quit;"'
                    % (pproc_2mm_scrubbed, mask)
                ]
                subprocess.call(matlab_cmd)

                shutil.move(
                    os.path.join(
                        subject_dir,
                        'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_fastECM.nii'
                    ), os.path.join(outdir, 'FAST_ECM.nii'))
                shutil.move(
                    os.path.join(
                        subject_dir,
                        'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_rankECM.nii'
                    ), os.path.join(outdir, 'RANK_ECM.nii'))
                shutil.move(
                    os.path.join(
                        subject_dir,
                        'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_normECM.nii'
                    ), os.path.join(outdir, 'NORM_ECM.nii'))
                shutil.move(
                    os.path.join(
                        subject_dir,
                        'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_degCM.nii'
                    ), os.path.join(outdir, 'DCM.nii'))
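Note that when subprocess.call is given an argument list, no shell is involved, so folding -r and the quoted fastECM(...) command into a single list element passes the literal quote characters on to MATLAB. A sketch of an unambiguous way to build the call, reusing pproc_2mm_scrubbed and mask from the function above and assuming the same fastECM function is on the MATLAB path:

matlab_script = "fastECM('%s', '1', '1', '1', '20', '%s'); quit;" % (pproc_2mm_scrubbed, mask)
matlab_cmd = ['matlab', '-version', '8.2', '-nodesktop', '-nosplash', '-nojvm',
              '-r', matlab_script]
subprocess.call(matlab_cmd)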
Example #6
def run_randomise_baseline(population_1, population_2, workspace_dir):

    print "#######################################"

    for string in ecm_strings:
        print "Running FSL-randomise on  ECM maps = %s" % string[16:]
        print ""
        ecm_list_1 = []
        ecm_list_2 = []
        subs_1 = []
        subs_2 = []

        for subject in population_1:
            ecm = os.path.join(workspace_dir, subject, "FAST_ECM/%s/zscore_fastECM.nii.gz" % string)
            if os.path.isfile(ecm):
                ecm_list_1.append(ecm)
                subs_1.append(subject)

        for subject in population_2:
            ecm = os.path.join(workspace_dir, subject, "FAST_ECM/%s/zscore_fastECM.nii.gz" % string)
            if os.path.isfile(ecm):
                ecm_list_2.append(ecm)
                subs_2.append(subject)

        print "...Concatenating ECM maps into 4D"
        # calculate group mean
        out_dir = os.path.join(workspace_dir, "STATISTICS/ECM/%s" % string)
        mkdir_path(out_dir)
        os.chdir(out_dir)
        if not os.path.isfile("ECM_all_concat.nii.gz"):
            os.system("fslmerge -t ECM_controls_concat.nii.gz %s" % " ".join(ecm_list_1))
            print "Controls n=%s" % len(subs_1), subs_1
            os.system("fslmerge -t ECM_patients_concat.nii.gz %s" % " ".join(ecm_list_2))
            print "Patients n=%s" % len(subs_2), subs_2
            print ""
            os.system("fslmaths ECM_controls_concat.nii.gz -Tmean ECM_controls_mean.nii.gz")
            os.system("fslmaths ECM_patients_concat.nii.gz -Tmean ECM_patients_mean.nii.gz")
            os.system("fslmerge -t ECM_all_concat.nii.gz ECM_controls_concat.nii.gz ECM_patients_concat.nii.gz")

        print "...Running Non-paramteric permuation tests"
        # Runs FSL Randomise - nonparametric permutation inference
        # Two-Sample Unpaired T-test

        group_gm_mask = "/SCR4/workspace/project_GluRest/OUT_DIR_A/GluConnectivity/GM2mm_bin.nii"

        glm_mat = os.path.join(workspace_dir, "STATISTICS/ECM/GLM_baseline/randomise_baseline.mat")
        glm_con = os.path.join(workspace_dir, "STATISTICS/ECM/GLM_baseline/randomise_baseline.con")
        if not os.path.isfile("randomise_baseline_tfce_corrp_tstat2.nii.gz"):
            os.system(
                "randomise -i ECM_all_concat.nii.gz -o randomise_baseline -d %s -t %s -T -R -P -N --uncorrp -n 5000 -m %s "
                % (glm_mat, glm_con, group_gm_mask)
            )
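The randomise call expects a two-group unpaired t-test design. For reference, a sketch of how a design.mat / design.con pair in FSL's VEST text format could be written for n1 controls and n2 patients; this mirrors the GLM_baseline files referenced above but is an assumption about how they were made, not the code that produced them:

def write_unpaired_glm(n1, n2, mat_file, con_file):
    # design matrix: one EV per group, one row per subject in concatenation order
    with open(mat_file, 'w') as f:
        f.write('/NumWaves 2\n/NumPoints %d\n/Matrix\n' % (n1 + n2))
        f.write('1 0\n' * n1)
        f.write('0 1\n' * n2)
    # contrasts: controls > patients and patients > controls
    with open(con_file, 'w') as f:
        f.write('/NumWaves 2\n/NumContrasts 2\n/Matrix\n')
        f.write('1 -1\n-1 1\n')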
def return_fd_tsnr_dist(population, out_dir, pipeline_name):

    fd_means=[]
    tsnr_files = []
    mask_files =[]
    missing_subjects = []
    for subject in population:

        subject_dir = os.path.join(out_dir, pipeline_name, subject)
        mkdir_path(os.path.join(subject_dir, 'quality_control'))
        qc_dir = os.path.join(subject_dir, 'quality_control')

        fd1d = os.path.join(subject_dir, 'functional_motion_FDPower/FD.1D')
        if os.path.isfile(fd1d):
            fd_means.append(np.mean(np.genfromtxt(fd1d)))

        else:
            print subject,'has no fd1d'
            missing_subjects.append(subject)

        os.chdir(qc_dir)
        pp_file = os.path.join(subject_dir, 'functional_native_brain_preproc/REST_calc_resample_corrected_volreg_maths_brain.nii.gz')

        tsnr_file = os.path.join(qc_dir,'REST_calc_resample_corrected_volreg_maths_brain_tsnr.nii.gz')
        mask_file = os.path.join(subject_dir, 'functional_native_brain_preproc_mask/REST_calc_resample_corrected_volreg_maths_brain_mask.nii.gz')

        if os.path.isfile(tsnr_file):
            tsnr_files.append(tsnr_file)
            mask_files.append(mask_file)
        else:
            if os.path.isfile(pp_file):
                tsnr = TSNR()
                tsnr.inputs.in_file =  pp_file
                res = tsnr.run()
                tsnr_files.append(res.outputs.tsnr_file)
            else:
                print subject,'has no functional_native_preproc'



    tsnr_distributions = volumes.get_median_distribution(tsnr_files, mask_files)
    population_fd_means = fd_means


    np.savetxt(os.path.join(out_dir, 'GluConnectivity', 'population_fd_distributions.txt'), population_fd_means)
    np.savetxt(os.path.join(out_dir, 'GluConnectivity', 'population_tsnr_distributions.txt'), tsnr_distributions)

    print 'FD mean=', population_fd_means
    print 'TSNR_distribution=', tsnr_distributions
    print ''
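For reference, what volumes.get_median_distribution is assumed to compute per subject can be sketched directly with nibabel and numpy; this is an assumption about that helper, not its actual implementation:

def median_in_mask(tsnr_file, mask_file):
    # median TSNR value within the brain mask for one subject
    tsnr = nb.load(tsnr_file).get_data()
    mask = nb.load(mask_file).get_data() > 0
    return float(np.median(tsnr[mask]))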
Example #8
def calc_ecm(population, workspace_dir):

    for subject in population:

        print 'Running Subject %s' % subject

        # input and output folders
        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'ECM')
        mkdir_path(outdir)
        os.chdir(outdir)

        #inputdata
        pproc_4mm = os.path.join(
            subject_dir,
            'functional_MNI4mm_preproc_FWHM_AROMA_residual_high_pass/bandpassed_demeaned_filtered.nii.gz'
        )
        if os.path.isfile(pproc_4mm):
            # bp 4mm pproc data
            bp_file = os.path.join(subject_dir,
                                   'ECM/bandpassed_demeaned_filtered.nii.gz')
            if not os.path.isfile(bp_file):
                # bandpass the 4mm preprocessed data (writes the file checked for above)
                bp_file = bandpass_voxels(pproc_4mm, (0.01, 0.1))

            MNI2mm_gm = os.path.join(
                subject_dir,
                'anatomical_MNI2mm_tissue_gm/TISSUE_CLASS_1_GM_OPTIMIZED_resample_warp_thresh.nii.gz'
            )
            MNI4mm_gm = os.path.join(subject_dir, 'ECM/mask_4mm.nii.gz')
            MNI4mm_gm_bin = os.path.join(subject_dir,
                                         'ECM/mask_4mm_bin.nii.gz')
            os.system('flirt -in %s -ref %s -out %s -applyxfm' %
                      (MNI2mm_gm, pproc_4mm, MNI4mm_gm))
            os.system('fslmaths %s -thr 0.3 -bin %s' %
                      (MNI4mm_gm, MNI4mm_gm_bin))

            if not os.path.isfile(
                    os.path.join(
                        subject_dir,
                        'ECM/resting_state_graph/calculate_centrality/eigenvector_centrality_binarize.nii.gz'
                    )):
                print '...computing ecm'
                ecm_graph = create_resting_state_graphs()
                ecm_graph.inputs.inputspec.method_option = 1
                ecm_graph.inputs.inputspec.subject = bp_file
                ecm_graph.inputs.inputspec.template = mask
                ecm_graph.inputs.inputspec.threshold_option = 0
                ecm_graph.inputs.inputspec.threshold = 0.001
                ecm_graph.inputs.inputspec.weight_options = [True, True]
                ecm_graph.base_dir = outdir
                ecm_graph.run()
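Conceptually, the eigenvector centrality map produced here assigns each voxel the corresponding entry of the dominant eigenvector of a non-negatively weighted voxel-by-voxel similarity matrix. A small numpy sketch of that idea via power iteration, for illustration only (the workflow above handles this at whole-brain scale rather than forming the matrix this naively):

def eigenvector_centrality(ts):
    # ts: timepoints x voxels array of (bandpassed) signals
    corr = np.corrcoef(ts.T)
    corr[corr < 0] = 0                    # keep non-negative weights only
    v = np.ones(corr.shape[0]) / np.sqrt(corr.shape[0])
    for _ in range(100):                  # power iteration
        v_new = corr.dot(v)
        v_new /= np.linalg.norm(v_new)
        if np.abs(v_new - v).max() < 1e-6:
            break
        v = v_new
    return v_new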
def make_r1_surf(population, workspace, freesurfer_dir):

    for subject in population:
        print '========================================================================================'
        print '%s - Preprocessing T1MAPS for %s' %(subject, subject)

        # I/O
        T1MAPS = os.path.join(workspace, subject, 'RAW', 'T1MAPS.nii.gz')
        T1MGZ  = os.path.join(freesurfer_dir, subject, 'mri', 'T1.mgz')
        mask   = os.path.join(workspace, subject, 'ANATOMICAL','ANATOMICAL_BRAIN_MASK.nii.gz')
        t1_dir = os.path.join(workspace, subject, 'T1MAPS')
        mkdir_path(t1_dir)

        os.chdir(t1_dir)

        if not os.path.isfile(os.path.join(t1_dir, 'R1.mgz')):
            # Deskull
            os.system('fslmaths %s -mul %s T1MAPS_brain.nii.gz '%(T1MAPS, mask))
            # Swapdim
            os.system('fslswapdim T1MAPS_brain LR SI PA T1MAPS_brain_rsp')
            # get fs t1
            os.system('mri_convert %s T1mgz.nii.gz'%T1MGZ)
            # reg
            os.system('flirt -in T1MAPS_brain_rsp -ref T1mgz -dof 6 -cost mutualinfo -out T1MAPS_fs -omat NATIVE2FS.mat')

            #get reciprocal
            os.system('fslmaths T1MAPS_fs -recip -mul 10000 R1')
            os.system('mri_convert R1.nii.gz R1.mgz')

        #mri_vol2surf --mov R1.mgz --regheader LZ050 --projfrac 0.2 0.4 0.1 --hemi --interp nearest --out depth2.mgh
        #mri_surf2surf --s LZ050 --sval depth2.mgh --trgsubject fsaverage5 --tval depth2_fs5.mgh --fwhm 6 --hemi lh --cortex


        proj_fracs = {'depth1': '0.0 0.2 0.1', 'depth2': '0.2 0.4 0.1','depth3': '0.4 0.6 0.1',
                      'depth4': '0.6 0.8 0.1', 'depth5': '0.8 1.0 0.1'}
        fwhm = 6

        # vol2surf iterate of five laminar layers
        if not os.path.isfile(os.path.join(t1_dir, '%s_depth5_rh_R1.mgh' % subject)):
            for hemi in ['lh', 'rh']:
                for depth in proj_fracs.keys():
                    print hemi, depth, proj_fracs[depth]

                    os.system('mri_vol2surf --mov R1.mgz --regheader %s --projfrac-avg %s --interp nearest --hemi %s '
                              '--out %s_%s_%s_R1.mgh '
                              %(subject, proj_fracs[depth], hemi,
                                subject, depth, hemi,
                                ))

                    os.system('mri_surf2surf --s %s --sval  %s_%s_%s_R1.mgh --trgsubject fsaverage5 '
                              '--tval %s_%s_%s_fs5_R1.mgh --hemi %s --noreshape --cortex --fwhm %s '
                              %(subject,
                                subject, depth, hemi,
                                subject, depth, hemi,
                                hemi,
                                fwhm,
                                ))
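The fslmaths -recip -mul 10000 step above computes R1 = 10000 / T1. An equivalent nibabel/numpy sketch, masking out zero T1 values to avoid division by zero (illustrative only; assumes nb and np imports as elsewhere in this listing):

def t1_to_r1(t1_file, out_file):
    img = nb.load(t1_file)
    t1  = img.get_data().astype(np.float32)
    r1  = np.zeros_like(t1)
    nz  = t1 > 0
    r1[nz] = 10000.0 / t1[nz]
    nb.save(nb.Nifti1Image(r1, img.affine, img.header), out_file)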
Example #10
        def get_spectra(voxel_name, string1, string2, string3):

            rda_met = []
            rda_h2o = []
            twix_met= []
            twix_h2o= []

            for root, dirs, files in os.walk(subject_afs, topdown=False):
                for name in files:
                    if 'SUPP' in name:
                        if string1 in name or string2 in name or string3 in name:
                            rda_met.append(os.path.join(root, name))

                    if 'HEAD' in name and 'REF' in name and 'SUPP' not in name and 'meas' not in name:
                        if string1 in name or string2 in name or string3 in name:
                            rda_h2o.append(os.path.join(root, name))

                    if 'meas' in name and 'rda' not in name:
                        if string1 in name or string2 in name or string3 in name:
                            if 'ref' not in name and 'head' not in name and 'body' not in name and 'HEAD' not in name:
                                twix_met.append(os.path.join(root, name))
                            elif 'ref' in name or 'REF' in name:
                                if 'body' not in name:
                                    twix_h2o.append(os.path.join(root, name))

            print rda_met

            if not rda_met:
                print 'RDA metabolite data does not exist for subject %s' %subject
            elif not rda_h2o:
                print 'RDA water data does not exist for subject %s' %subject
            elif not twix_met:
                print 'TWIX metabolite data does not exist for subject %s' %subject
            elif not twix_h2o:
                print 'TWIX water data does not exist for subject %s' %subject

            mkdir_path(os.path.join(subject_workspace, 'svs_rda', voxel_name, 'met'))
            mkdir_path(os.path.join(subject_workspace, 'svs_rda', voxel_name, 'h2o'))
            mkdir_path(os.path.join(subject_workspace, 'svs_twix', voxel_name, voxel_name))
            mkdir_path(os.path.join(subject_workspace, 'svs_twix', voxel_name, '%s_w'%voxel_name))

            rda_met_dir = os.path.join(workspace_dir, subject, 'svs_rda', voxel_name, 'met')
            rda_h2o_dir = os.path.join(workspace_dir, subject, 'svs_rda', voxel_name, 'h2o')
            twx_met_dir = os.path.join(workspace_dir, subject, 'svs_twix', voxel_name, voxel_name)
            twx_h2o_dir = os.path.join(workspace_dir, subject, 'svs_twix', voxel_name, '%s_w'%voxel_name)

            shutil.copy(rda_met[0], os.path.join(rda_met_dir, '%s%s_%s_SUPPRESSED.rda' %(subject,  workspace_dir[-10:-9], voxel_name)))
            shutil.copy(rda_h2o[0], os.path.join(rda_h2o_dir, '%s%s_%s_WATER.rda' %(subject, workspace_dir[-10:-9], voxel_name)))
            shutil.copy(twix_met[0], os.path.join(twx_met_dir, '%s%s_%s_SUPPRESSED_TWIX.dat' %(subject,  workspace_dir[-10:-9], voxel_name)))
            shutil.copy(twix_h2o[0], os.path.join(twx_h2o_dir, '%s%s_%s_WATER_TWIX.dat' %(subject,workspace_dir[-10:-9], voxel_name)))

            print '.....done voxel %s'%voxel_name
def run_CanICA_NILEAN(output_dir, working_dir, population, n_components=20):

    #create group ica outputdir
    mkdir_path(os.path.join(working_dir, 'CANICA_GROUP_ICA'))
    canica_dir = os.path.join(working_dir, 'CANICA_GROUP_ICA')

    # grab subjects
    preprocessed_all = []
    for subject in population:
        preprocessed_subject = os.path.join(output_dir, subject, 'xxxx.nii.gz')
        preprocessed_all.append(preprocessed_subject)

    canica = CanICA(n_components=n_components,
                    smoothing_fwhm=0.,
                    memory='nilearn_cache',
                    memory_level=5,
                    threshold=3.,
                    verbose=10,
                    random_state=10)
    canica.fit(preprocessed_all)

    # save data
    components_img = canica.masker_.inverse_transform(canica.components_)
    components_img.to_filename(os.path.join(canica_dir, 'canica_IC.nii.gz'))
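A follow-up sketch for inspecting the saved components with nilearn's plotting tools; canica_dir is reused from the function above and the PNG file names are hypothetical:

from nilearn import image, plotting

ic_file = os.path.join(canica_dir, 'canica_IC.nii.gz')
for i, comp in enumerate(image.iter_img(ic_file)):
    plotting.plot_stat_map(comp, title='IC %d' % i,
                           output_file=os.path.join(canica_dir, 'canica_IC_%02d.png' % i))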
def calc_ecm(population, workspace_dir):

    for subject in population:

        print 'Running Subject %s'%subject

        # input and output folders
        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'ECM')
        mkdir_path(outdir)
        os.chdir(outdir)

        #inputdata
        pproc_4mm = os.path.join(subject_dir , 'functional_MNI4mm_preproc_FWHM_AROMA_residual_high_pass/bandpassed_demeaned_filtered.nii.gz')
        if os.path.isfile(pproc_4mm):
            # bp 4mm pproc data
            bp_file = os.path.join(subject_dir, 'ECM/bandpassed_demeaned_filtered.nii.gz')
            if not os.path.isfile(bp_file):
                # bandpass the 4mm preprocessed data (writes the file checked for above)
                bp_file = bandpass_voxels(pproc_4mm, (0.01,0.1))

            MNI2mm_gm =  os.path.join(subject_dir , 'anatomical_MNI2mm_tissue_gm/TISSUE_CLASS_1_GM_OPTIMIZED_resample_warp_thresh.nii.gz')
            MNI4mm_gm =  os.path.join(subject_dir , 'ECM/mask_4mm.nii.gz')
            MNI4mm_gm_bin = os.path.join(subject_dir , 'ECM/mask_4mm_bin.nii.gz')
            os.system('flirt -in %s -ref %s -out %s -applyxfm'%(MNI2mm_gm, pproc_4mm, MNI4mm_gm))
            os.system('fslmaths %s -thr 0.3 -bin %s' %(MNI4mm_gm,MNI4mm_gm_bin))

            if not os.path.isfile(os.path.join(subject_dir, 'ECM/resting_state_graph/calculate_centrality/eigenvector_centrality_binarize.nii.gz')):
                print '...computing ecm'
                ecm_graph = create_resting_state_graphs()
                ecm_graph.inputs.inputspec.method_option     = 1
                ecm_graph.inputs.inputspec.subject           = bp_file
                ecm_graph.inputs.inputspec.template          = mask
                ecm_graph.inputs.inputspec.threshold_option  = 0
                ecm_graph.inputs.inputspec.threshold         = 0.001
                ecm_graph.inputs.inputspec.weight_options    = [True, True]
                ecm_graph.base_dir                           = outdir
                ecm_graph.run()
def run_group_ica(output_dir, working_dir, population, pipeline_name):

    print '#############################################################################'
    print ''
    print '                          RUNNING GROUP ICA'
    print ''
    print '#############################################################################'

    #define ica outputdir
    group_ica_dir = os.path.join(working_dir, 'MELODIC_GROUP_ICA')
    mkdir_path(group_ica_dir)

    # concatenate fmri preprocessed and high-pass filtered data (0.001hz) for group ICA
    preprocessed_all = []
    for subject in population:
        preprocessed_subject = os.path.join(output_dir, pipeline_name, subject, 'functional_MNI4mm_preproc_FWHM_AROMA_residual_high_pass/bandpassed_demeaned_filtered.nii.gz')
        if os.path.isfile(preprocessed_subject):
            preprocessed_all.append(preprocessed_subject)
        else:
            print 'subject with missing data ', subject
    data_4_melodic = ','.join(preprocessed_all)

    #print data_4_melodic

    def run_melodic(func, brain_mask, TR = 1.4, melodic_dir= group_ica_dir):

        # Run MELODIC
        #os.system('melodic --in=%s --outdir=%s --mask=%s -Ostats --nobet --mmthresh=0.5 --report --tr=%s' %(func, melodic_dir, brain_mask, str(TR)))
        os.system(' '.join([ 'melodic',
                             '--in=' + func,
                             '--mask=' + brain_mask,
                             '-v',
                             '-d 25',
                             '--outdir='  + melodic_dir,
                             '--Ostats --nobet --mmthresh=0.5 --report',
                             '--tr=' + str(TR)]))
        #if os.path.isfile(os.path.join(melodic_dir, 'melodic_IC.nii.gz')):
        # Get number of components
        melodic_4d = nb.load(os.path.join(melodic_dir, 'melodic_IC.nii.gz'))
        n_components = melodic_4d.shape[3]

        # components are 1-indexed (thresh_zstat1 ... thresh_zstatN)
        for n_component in range(1, n_components + 1):
            z_thresh = os.path.join(melodic_dir, 'stats/thresh_zstat%s.nii.gz'%n_component)

            cmd = ' '.join(['fslinfo', z_thresh, '| grep dim4 | head -n1 | awk \'{print $2}\''])
            z_thresh_dim4 = int(float(commands.getoutput(cmd)))

            # Zero-pad the IC number and extract the 3D data........
            # For cases where the mixture modeling does not converge, the 2nd image in the 4th dimension will be the result of the null hypothesis test.
            cmd = ' '.join(['zeropad', str(n_component), '4'])
            z_thresh_zeropad = os.path.join(melodic_dir, 'thr_zstat' + commands.getoutput(cmd))

            # Extract the last spatial map within the thresh_zstat file
            os.system('fslroi %s %s %s 1' %(z_thresh, z_thresh_zeropad, str(z_thresh_dim4-1)))

        # Merge and subsequently remove all mixture modeled Z-maps within the output directory
        z_thresh_zeropad_all = os.path.join(melodic_dir, 'thr_zstat????.nii.gz')
        z_thresh_merged = os.path.join(melodic_dir, 'melodic_IC_thr.nii.gz')

        os.system('fslmerge -t %s %s ' %(z_thresh_merged , z_thresh_zeropad_all))
        os.system('rm ' + z_thresh_zeropad_all)

        # Apply the mask to the merged file (in case a melodic-directory was predefined and run with a different mask)
        os.system('fslmaths %s -mas %s %s' %(z_thresh_merged, brain_mask, z_thresh_merged))

    run_melodic(data_4_melodic, mni_brain_mask_4mm, TR = 1.4, melodic_dir = group_ica_dir)
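The zeropad / fslroi logic above extracts the last 3D volume of each thresh_zstat file (the mixture-modelled map, or the null-hypothesis map when mixture modelling does not converge). The same extraction can be sketched with nibabel, for illustration only:

def last_volume(fourd_file, out_file):
    # keep only the final volume of a 4D thresh_zstat image
    img  = nb.load(fourd_file)
    data = img.get_data()
    nb.save(nb.Nifti1Image(data[..., -1], img.affine, img.header), out_file)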
def quantitation_correction(population, workspace_dir, analysis_type):

    print '#############################################################################'
    print ''
    print '                 RUNNING PROJECT NMR-093%s %s' % (
        workspace_dir[-10:-9], workspace_dir[-8:])
    print ''
    print '#############################################################################'

    # output dir
    mkdir_path(os.path.join(workspace_dir[:-8], 'group_statistics'))
    results_dir = os.path.join(workspace_dir[:-8], 'group_statistics')

    def save_reliable_concentrations(population, workspace_dir, voxel_name,
                                     analysis_type):

        csv_list = []
        for subject in population:
            # get metabolite data for each subject and append to a list
            csv = os.path.join(workspace_dir, subject,
                               'lcmodel_%s' % analysis_type, voxel_name,
                               'spreadsheet.csv')
            if os.path.isfile(csv):
                reader = pd.read_csv(csv)
                reader.insert(0, 'Subject', subject)
                csv_list.append(reader)

        # create a dataframe and place reliable metabolite data for every subject
        df = pd.concat(csv_list, ignore_index=True)
        reliable = df.loc[:, [
            'Subject', ' Cre', ' Cre %SD', ' GPC+PCh', ' GPC+PCh %SD',
            ' NAA+NAAG', ' NAA+NAAG %SD', ' mI', ' mI %SD', ' Glu', ' Glu %SD',
            ' Gln', ' Gln %SD', ' Glu+Gln', ' Glu+Gln %SD'
        ]]

        # sort subjects alphabetically and reset index....
        reliable.sort(columns='Subject', inplace=True)
        reliable.reset_index(drop=True, inplace=True)

        # save reliable dataframe
        reliable.to_csv(
            os.path.join(
                results_dir, 'lcmodel_%s_%s_%s_%s.csv' %
                (analysis_type, voxel_name, workspace_dir[-8:],
                 workspace_dir[-10:-9])))

        return reliable

    print '1. Creating new dataframe with reliable LC-Model concentrations for ACC,THA,STR'
    acc_reliable = save_reliable_concentrations(population, workspace_dir,
                                                'ACC', analysis_type)
    tha_reliable = save_reliable_concentrations(population, workspace_dir,
                                                'THA', analysis_type)
    str_reliable = save_reliable_concentrations(population, workspace_dir,
                                                'STR', analysis_type)

    def save_tissue_proportions(population, workspace_dir, voxel_name):
        list_spm = []

        for subject in population:
            # grab tissue proportion data for all subjects and dump into list
            spm = pd.read_csv(os.path.join(
                workspace_dir, subject, 'svs_voxel_stats',
                '%s_voxel_statistics_spm.txt' % voxel_name),
                              header=None)
            spm.insert(0, 'SUBJECT', subject)
            list_spm.append(spm)

        # create concatenated dataframe for all tissue data in list
        df_spm = pd.concat(list_spm, ignore_index=True)
        df_spm.columns = [
            'SUBJECT',
            '%s_GM' % voxel_name,
            '%s_WM' % voxel_name,
            '%s_CSF' % voxel_name,
            '%s_SUM' % voxel_name
        ]

        df_spm.to_csv(
            os.path.join(
                results_dir, 'proportions_%s_%s_%s.csv' %
                (voxel_name, workspace_dir[-8:], workspace_dir[-10:-9])))
        return df_spm

    print '2. Creating new dataframe with SPM tissue proportions for ACC,THA,STR'
    acc_props = save_tissue_proportions(population, workspace_dir, 'ACC')
    tha_props = save_tissue_proportions(population, workspace_dir, 'THA')
    str_props = save_tissue_proportions(population, workspace_dir, 'STR')

    def calc_asbolute(lcmodel, frac_gm, frac_wm, frac_csf):

        import math

        #lcmodel correction factor
        factor = (55.55 / (35.88 * 0.7))

        # relative water content in tissue.. determined experimentally.
        alpha_gm = 0.81  # 0.78
        alpha_wm = 0.71  # 0.65
        alpha_csf = 1.0  # 1.0

        #attenuation factor for water
        R_H2O_GM = (1.0 - math.e**(-3000.0 / 1820.0)) * math.e**(-30.0 / 99.0)
        R_H2O_WM = (1.0 - math.e**(-3000.0 / 1084.0)) * math.e**(-30.0 / 69.0)
        R_H2O_CSF = (1.0 - math.e**(-3000.0 / 4163.0)) * math.e**(-30.0 /
                                                                  503.0)

        #########  Correction Equations  #######
        # tissel equation
        Cmet1 = (lcmodel * (((frac_csf * 1. * (1. - frac_csf)) +
                             (frac_gm * 0.81 + frac_wm * 0.71)) /
                            (1. - frac_csf)))

        # Gussew equation
        Cmet2 = (lcmodel) * (
            ((frac_gm * alpha_gm * R_H2O_GM + frac_wm * alpha_wm * R_H2O_WM +
              frac_csf * alpha_csf * R_H2O_CSF) /
             (frac_gm * 1.0 + frac_wm * 1.0))) * factor
        # Gussew CSF equation
        Cmet3 = (lcmodel) * (1 / (1 - frac_csf))

        return Cmet2

    def create_absolute_df(reliable, proportions, voxel_name):
        cre = calc_asbolute(reliable[' Cre'],
                            proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])
        cho = calc_asbolute(reliable[' GPC+PCh'],
                            proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])
        naa = calc_asbolute(reliable[' NAA+NAAG'],
                            proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])
        ino = calc_asbolute(reliable[' mI'], proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])
        glu = calc_asbolute(reliable[' Glu'],
                            proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])
        gln = calc_asbolute(reliable[' Gln'],
                            proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])
        glx = calc_asbolute(reliable[' Glu+Gln'],
                            proportions['%s_GM' % voxel_name],
                            proportions['%s_WM' % voxel_name],
                            proportions['%s_CSF' % voxel_name])

        absolute = pd.DataFrame({
            'Subjects': reliable['Subject'],
            'Cre': cre,
            'GPC+PCh': cho,
            'NAA+NAAG': naa,
            'mI': ino,
            'Glu': glu,
            'Gln': gln,
            'Glu+Gln': glx,
        })

        column_order = [
            'Subjects', 'Cre', 'GPC+PCh', 'NAA+NAAG', 'mI', 'Glu', 'Gln',
            'Glu+Gln'
        ]
        absolute = absolute.reindex(columns=column_order)

        absolute.to_csv(
            os.path.join(
                results_dir, 'absolute_%s_%s_%s_%s.csv' %
                (analysis_type, voxel_name, workspace_dir[-8:],
                 workspace_dir[-10:-9])))

    print '3. Creating new dataframe with Absolute Concentrations for ACC,THA,STR'
    create_absolute_df(acc_reliable, acc_props, 'ACC')
    create_absolute_df(tha_reliable, tha_props, 'THA')
    create_absolute_df(str_reliable, str_props, 'STR')
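As a quick numerical check of calc_asbolute (Cmet2, the water-scaled estimate that is returned): with hypothetical inputs lcmodel = 10.0 and tissue fractions GM = 0.6, WM = 0.3, CSF = 0.1, the attenuation terms come out near R_H2O_GM ~ 0.597, R_H2O_WM ~ 0.607, R_H2O_CSF ~ 0.484, and the corrected value is roughly 11.5. A few lines reproducing that arithmetic; the numbers are illustrative, not study data:

import math

factor = 55.55 / (35.88 * 0.7)                                      # ~2.212
R_GM   = (1.0 - math.e**(-3000.0/1820.0)) * math.e**(-30.0/99.0)    # ~0.597
R_WM   = (1.0 - math.e**(-3000.0/1084.0)) * math.e**(-30.0/69.0)    # ~0.607
R_CSF  = (1.0 - math.e**(-3000.0/4163.0)) * math.e**(-30.0/503.0)   # ~0.484

lcmodel, gm, wm, csf = 10.0, 0.6, 0.3, 0.1                          # hypothetical values
cmet2 = lcmodel * ((gm*0.81*R_GM + wm*0.71*R_WM + csf*1.0*R_CSF) / (gm + wm)) * factor
print cmet2                                                         # ~11.5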
def run_freesurfer_mask_connectivity(pop_name, population, freesurfer_dir, workspace_dir, mrs_datadir, ):

    df = pd.DataFrame(index=population, columns=columnx)

    for subject in population:

        print '####################### Subject %s' %subject

        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'RSFC_CONNECTIVITY')
        mkdir_path(outdir)

        func_pproc = os.path.join(subject_dir, 'functional_native_brain_preproc_FWHM_AROMA_residual_bp/bandpassed_demeaned_filtered.nii.gz') # 2.3 mm
        func_mean  = os.path.join(subject_dir, 'functional_native_brain_preproc_mean/REST_calc_resample_corrected_volreg_maths_tstat.nii')
        func_aroma = os.path.join(subject_dir, 'functional_native_brain_preproc_FWHM_AROMA/denoised_func_data_nonaggr.nii.gz')
        func_gm    = os.path.join(subject_dir, 'functional_native_gm/TISSUE_CLASS_1_GM_OPTIMIZED_resample_flirt_thresh_maths.nii.gz')
        anat_func  = os.path.join(subject_dir, 'anatomical_FUNC2mm_brain/MP2RAGE_DESKULL_RPI_resample_ero_flirt_flirt.nii.gz') # 2.3mm
        anat_func_xfm = os.path.join(subject_dir, 'anatomical_FUNC2mm_xfm/REST_calc_resample_corrected_volreg_maths_tstat_flirt_inv.mat')
        mni2natwarp    =  os.path.join(subject_dir, 'MNI2mm_ANAT_xfm/MP2RAGE_DESKULL_RPI_resample_ero_fieldwarp_inverse.nii.gz')

       #######################################    Grab ATAG masks   #######################################

        STN_LEFT       =  os.path.join(outdir, 'ATAG_STN_LEFT.nii.gz')
        SN_LEFT        =  os.path.join(outdir, 'ATAG_SN_LEFT.nii.gz')
        GPe_LEFT       =  os.path.join(outdir, 'ATAG_GPE_left.nii.gz')
        GPi_LEFT       =  os.path.join(outdir, 'ATAG_GPi_left.nii.gz')

        os.system('applywarp -i %s -r %s -w %s --postmat=%s -o %s' %(mni_stn_left_1mm, anat_func,mni2natwarp, anat_func_xfm, STN_LEFT ))
        os.system('applywarp -i %s -r %s -w %s --postmat=%s -o %s' %(mni_sn_left_1mm, anat_func,mni2natwarp, anat_func_xfm, SN_LEFT ))

        os.system('fslmaths %s -bin %s' %(STN_LEFT, STN_LEFT ))
        os.system('fslmaths %s -bin %s' %(SN_LEFT, SN_LEFT ))

        #######################################    Grab Subcortical masks   #######################################
        print '1. grabbing FIRST Subcortical masks'

        STR =  os.path.join(subject_dir,  'functional_subcortical', 'left_str.nii.gz')
        CAUx=  os.path.join(subject_dir,  'functional_subcortical', 'left_caudate.nii.gz')
        PUT =  os.path.join(subject_dir,  'functional_subcortical', 'left_putamen.nii.gz')
        PAL =  os.path.join(subject_dir,  'functional_subcortical', 'left_pallidum.nii.gz')
        NAC =  os.path.join(subject_dir,  'functional_subcortical', 'left_nacc.nii.gz')
        HIP =  os.path.join(subject_dir,  'functional_subcortical', 'left_hipoocampus.nii.gz')
        AMG =  os.path.join(subject_dir,  'functional_subcortical', 'left_amygdala.nii.gz')

        THA = os.path.join(subject_dir,  'functional_subcortical', 'thalamus.nii.gz')
        lTHA = os.path.join(subject_dir, 'functional_subcortical', 'left_thalamus.nii.gz')
        rTHA = os.path.join(subject_dir, 'functional_subcortical', 'right_thalamus.nii.gz')

        #######################################    Fill Caudate Holes   #######################################

        CAU = os.path.join(subject_dir,  'functional_subcortical', 'left_caudate_fill.nii.gz')

        if not os.path.isfile(CAU):
            os.system('fslmaths %s -fillh %s' %(CAUx, CAU))

        #######################################    Grab svs masks   #######################################
        print '2. grabbing SVS masks'

        svs_acc_src = os.path.join(mrs_datadir, pop_name, subject, 'svs_voxel_mask', '%s%s_ACC_RDA_MASK.nii' %(subject, mrs_datadir[-1]))
        svs_tha_src = os.path.join(mrs_datadir, pop_name, subject, 'svs_voxel_mask', '%s%s_THA_RDA_MASK.nii' %(subject, mrs_datadir[-1]))
        svs_str_src = os.path.join(mrs_datadir, pop_name, subject, 'svs_voxel_mask', '%s%s_STR_RDA_MASK.nii' %(subject, mrs_datadir[-1]))

        svs_acc = os.path.join(outdir, 'svs_acc.nii.gz')
        svs_tha = os.path.join(outdir, 'svs_tha.nii.gz')
        svs_str = os.path.join(outdir, 'svs_str.nii.gz')

        svs_acc_func = os.path.join(outdir, 'svs_acc_func.nii.gz')
        svs_tha_func = os.path.join(outdir, 'svs_tha_func.nii.gz')
        svs_str_func = os.path.join(outdir, 'svs_str_func.nii.gz')

        if not os.path.isfile(svs_acc_func):
            os.system('fslswapdim %s RL PA IS %s' %(svs_acc_src, svs_acc))
            os.system('fslswapdim %s RL PA IS %s' %(svs_tha_src, svs_tha))
            os.system('fslswapdim %s RL PA IS %s' %(svs_str_src, svs_str))

            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(svs_acc, anat_func, anat_func_xfm, svs_acc_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(svs_tha, anat_func, anat_func_xfm, svs_tha_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(svs_str, anat_func, anat_func_xfm, svs_str_func))
            os.system('fslmaths %s -thr 0.5 -bin %s' %(svs_acc_func, svs_acc_func))
            os.system('fslmaths %s -thr 0.5 -bin %s' %(svs_tha_func, svs_tha_func))
            os.system('fslmaths %s -thr 0.5 -bin %s' %(svs_str_func, svs_str_func))

        #######################################   Grab freesurfer masks  #######################################
        print '3. grabbing Freesurfer masks'

        os.system('export SUBJECTS_DIR=%s'%(freesurfer_dir))

        t1mgz  = os.path.join(freesurfer_dir, subject, 'mri', 'T1.mgz')
        segmgz = os.path.join(freesurfer_dir, subject, 'mri', 'aparc.a2009s+aseg.mgz')
        t1nii  = os.path.join(outdir, 'freesurfer_T1.nii.gz')
        segnii = os.path.join(outdir, 'freesurfer_seg.nii.gz')

        fs_la_acc       =  os.path.join(outdir, 'freesurfer_seg_la_MCC_11107.nii.gz')  # 11107  ctx_lh_G_and_S_cingul-Mid-Ant
        fs_ra_acc       =  os.path.join(outdir, 'freesurfer_seg_ra_MCC_12107.nii.gz')  # 12107  ctx_rh_G_and_S_cingul-Mid-Ant
        fs_acc          =  os.path.join(outdir, 'freesurfer_seg_aMCC_11107_12107.nii.gz')

        fs_la_insula =  os.path.join(outdir, 'freesurfer_seg_la_INS_11148.nii.gz') # 11148  ctx_lh_S_circular_insula_ant
        fs_ra_insula =  os.path.join(outdir, 'freesurfer_seg_ra_INS_12148.nii.gz') # 12148  ctx_rh_S_circular_insula_ant

        if not os.path.isfile(fs_acc):
            os.system('mri_convert %s %s' %(t1mgz, t1nii))
            os.system('mri_convert %s %s' %(segmgz, segnii))

            os.system('fslmaths %s -thr 11107 -uthr 11107 %s ' %(segnii, fs_la_acc))
            os.system('fslmaths %s -thr 12107 -uthr 12107 %s ' %(segnii, fs_ra_acc))
            os.system('fslmaths %s -add %s -dilM -bin %s' %(fs_la_acc, fs_ra_acc, fs_acc))

            os.system('fslmaths %s -thr 11148 -uthr 11148 -dilM -bin %s' %(segnii, fs_la_insula))
            os.system('fslmaths %s -thr 12148 -uthr 12148 -dilM -bin %s' %(segnii, fs_ra_insula))

        labels_dir = os.path.join(freesurfer_dir, subject, 'label')
        fs_ba6_rh = os.path.join(outdir, 'freesurfer_seg_SMA_BA6_rh.nii.gz')
        fs_ba6_lh = os.path.join(outdir, 'freesurfer_seg_SMA_BA6_lh.nii.gz')
        fs_sma = os.path.join(outdir, 'freesurfer_seg_SMA_BA6.nii.gz')

        if not os.path.isfile(fs_sma):
            os.system('mri_label2vol --label %s/rh.BA6.thresh.label --subject %s --temp %s --regheader %s --o %s' %(labels_dir, subject,t1mgz, t1mgz,fs_ba6_rh))
            os.system('mri_label2vol --label %s/lh.BA6.thresh.label --subject %s --temp %s --regheader %s --o %s' %(labels_dir, subject,t1mgz, t1mgz,fs_ba6_lh))
            os.system('fslmaths  %s -add %s -dilM -dilM %s' %(fs_ba6_rh,fs_ba6_lh, fs_sma))

        #######################################   TRANSFORM Freesurfer masks to native func space   #######################################
        print '4. Transforming Freesurfer masks to native func space'
        t1nii_rpi         =  os.path.join(outdir, 'freesurfer_T1_RPI.nii.gz')
        fs_acc_rpi        =  os.path.join(outdir, 'freesurfer_seg_aMCC_11107_12107_RPI.nii.gz')
        fs_la_insula_rpi  =  os.path.join(outdir, 'freesurfer_seg_la_INS_11148_RPI.nii.gz')
        fs_ra_insula_rpi  =  os.path.join(outdir, 'freesurfer_seg_ra_INS_12148_RPI.nii.gz')
        fs_sma_rpi        =  os.path.join(outdir, 'freesurfer_seg_SMA_BA6_RPI.nii.gz')

        fst1omat          =  os.path.join(outdir, 'freesurfer2func.mat')
        fst1func          =  os.path.join(outdir, 'freesurfer_T1_func.nii.gz')
        fs_acc_func       =  os.path.join(outdir, 'freesurfer_seg_aMCC_11107_12107_func.nii.gz')
        fs_la_insula_func =  os.path.join(outdir, 'freesurfer_seg_la_INS_11148_func.nii.gz')
        fs_ra_insula_func =  os.path.join(outdir, 'freesurfer_seg_ra_INS_12148_func.nii.gz')
        fs_sma_func       =  os.path.join(outdir, 'freesurfer_seg_SMA_BA6_func.nii.gz')

        if not os.path.isfile(t1nii_rpi):
            os.system('fslswapdim %s RL PA IS %s' %(t1nii, t1nii_rpi))
            os.system('fslswapdim %s RL PA IS %s' %(fs_acc, fs_acc_rpi))
            os.system('fslswapdim %s RL PA IS %s' %(fs_la_insula, fs_la_insula_rpi))
            os.system('fslswapdim %s RL PA IS %s' %(fs_ra_insula, fs_ra_insula_rpi))
            os.system('fslswapdim %s RL PA IS %s' %(fs_sma, fs_sma_rpi))
            os.system('flirt -in %s -ref %s -omat %s -dof 6 -out %s -cost mutualinfo' %(t1nii_rpi, anat_func, fst1omat, fst1func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(fs_acc_rpi, anat_func, fst1omat, fs_acc_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(fs_la_insula_rpi, anat_func, fst1omat, fs_la_insula_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(fs_ra_insula_rpi, anat_func, fst1omat, fs_ra_insula_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %(fs_sma_rpi, anat_func, fst1omat, fs_sma_func))

            os.system('fslmaths  %s -thr 0.5 -bin %s' %(fs_acc_func,fs_acc_func))
            os.system('fslmaths  %s -thr 0.5 -bin %s' %(fs_la_insula_func,fs_la_insula_func))
            os.system('fslmaths  %s -thr 0.5 -bin %s' %(fs_ra_insula_func,fs_ra_insula_func))
            os.system('fslmaths  %s -thr 0.5 -bin %s' %(fs_sma_func,fs_sma_func))

        if os.path.isfile(fs_sma_func):
            sma_load = nb.load(fs_sma_func).get_data()
            # fslmaths -roi expects integer voxel indices
            x, y, z = [int(round(i)) for i in center_of_mass(sma_load)]
            sma_point = os.path.join(outdir, 'sma_point.nii.gz')
            fs_sma_optimized = os.path.join(outdir, 'freesurfer_seg_SMA_BA6_func_opt.nii.gz')

            os.system('fslmaths %s -mul 0 -add 1 -roi %s 1 %s 1 %s 1 0 1 %s -odt float'%(func_mean, x,y,z, sma_point))
            os.system('fslmaths %s -kernel sphere 10 -fmean -dilM -dilM -ero -ero %s -odt float'%(sma_point, fs_sma_optimized))

        #######################################   GET MOTION PARAMS   #######################################
        print '5. Grabbing motion parameters'

        motion = os.path.join(subject_dir, 'functional_motion_statistics/motion_power_params.txt')
        if os.path.isfile(motion):
            power   =  pd.read_csv(motion)
            exclude =  power.loc[subject][' FD_exclude']
            fd      =  power.loc[subject]['Subject']
        if os.path.isfile(func_aroma) and os.path.isfile(func_gm):
            dvars   =  np.mean(return_DVARS(func_aroma, func_gm))
            print dvars

        #######################################   GEN TIMESERIES OF ROIs   #######################################
        print '6. Extracting timeseries and calculating connectivity'

        if os.path.isfile(func_pproc):

            stn_timeseries = input_data.NiftiLabelsMasker(labels_img= STN_LEFT, standardize=True).fit_transform(func_pproc)
            sn_timeseries = input_data.NiftiLabelsMasker(labels_img= SN_LEFT, standardize=True).fit_transform(func_pproc)

            str_timeseries = input_data.NiftiLabelsMasker(labels_img= STR,   standardize=True).fit_transform(func_pproc)
            tha_timeseries = input_data.NiftiLabelsMasker(labels_img= THA,   standardize=True).fit_transform(func_pproc)
            thaL_timeseries = input_data.NiftiLabelsMasker(labels_img= lTHA, standardize=True).fit_transform(func_pproc)
            thaR_timeseries = input_data.NiftiLabelsMasker(labels_img= rTHA, standardize=True).fit_transform(func_pproc)

            cau_timeseries = input_data.NiftiLabelsMasker(labels_img= CAU, standardize=True).fit_transform(func_pproc)
            put_timeseries = input_data.NiftiLabelsMasker(labels_img= PUT, standardize=True).fit_transform(func_pproc)
            pal_timeseries = input_data.NiftiLabelsMasker(labels_img= PAL, standardize=True).fit_transform(func_pproc)
            nac_timeseries = input_data.NiftiLabelsMasker(labels_img= NAC, standardize=True).fit_transform(func_pproc)
            hip_timeseries = input_data.NiftiLabelsMasker(labels_img= HIP, standardize=True).fit_transform(func_pproc)
            amg_timeseries = input_data.NiftiLabelsMasker(labels_img= AMG, standardize=True).fit_transform(func_pproc)

            mACC_timeseries = input_data.NiftiLabelsMasker(labels_img= fs_acc_func, standardize=True).fit_transform(func_pproc)
            lINS_timeseries = input_data.NiftiLabelsMasker(labels_img= fs_la_insula_func, standardize=True).fit_transform(func_pproc)
            rINS_timeseries = input_data.NiftiLabelsMasker(labels_img= fs_ra_insula_func, standardize=True).fit_transform(func_pproc)
            SMA_timeseries = input_data.NiftiLabelsMasker(labels_img= fs_sma_optimized, standardize=True).fit_transform(func_pproc)

            mACCX_timeseries = input_data.NiftiLabelsMasker(labels_img= svs_acc_func, standardize=True).fit_transform(func_pproc)
            strX_timeseries = input_data.NiftiLabelsMasker(labels_img= svs_str_func, standardize=True).fit_transform(func_pproc)
            thaX_timeseries = input_data.NiftiLabelsMasker(labels_img= svs_tha_func, standardize=True).fit_transform(func_pproc)
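            # Note: each timeseries above is a (timepoints x 1) array, so the per-pair
            # pearsonr calls below are equivalent to the off-diagonal entries of
            # np.corrcoef() applied to the stacked signals. The explicit pairwise form
            # is kept because only a named subset of region pairs is written to the dataframe.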


            print '......calculating Subthalamic Nucleus connectivity'
            df.loc[subject]['stn_pal']  = float(pearsonr(stn_timeseries, pal_timeseries)[0])
            df.loc[subject]['stn_acc']  = float(pearsonr(stn_timeseries, mACC_timeseries)[0])
            df.loc[subject]['stn_tha']  = float(pearsonr(stn_timeseries, tha_timeseries)[0])
            df.loc[subject]['stn_thaX'] = float(pearsonr(stn_timeseries, thaX_timeseries)[0])
            df.loc[subject]['stn_thaL'] = float(pearsonr(stn_timeseries, thaL_timeseries)[0])
            df.loc[subject]['stn_thaR'] = float(pearsonr(stn_timeseries, thaR_timeseries)[0])
            df.loc[subject]['stn_hip']  = float(pearsonr(stn_timeseries, hip_timeseries)[0])
            df.loc[subject]['stn_amg']  = float(pearsonr(stn_timeseries, amg_timeseries)[0])
            df.loc[subject]['stn_accX'] = float(pearsonr(stn_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['stn_lins'] = float(pearsonr(stn_timeseries, lINS_timeseries)[0])
            df.loc[subject]['stn_rins'] = float(pearsonr(stn_timeseries, rINS_timeseries)[0])
            df.loc[subject]['stn_sma']  = float(pearsonr(stn_timeseries, SMA_timeseries)[0])
            df.loc[subject]['stn_strX'] = float(pearsonr(stn_timeseries, strX_timeseries)[0])
            df.loc[subject]['stn_str']  = float(pearsonr(stn_timeseries, str_timeseries)[0])
            df.loc[subject]['stn_cau']  = float(pearsonr(stn_timeseries, cau_timeseries)[0])
            df.loc[subject]['stn_put']  = float(pearsonr(stn_timeseries, put_timeseries)[0])
            df.loc[subject]['stn_nac']  = float(pearsonr(stn_timeseries, nac_timeseries)[0])

            print '......calculating Substantia Nigra connectivity'
            df.loc[subject]['sn_pal']  = float(pearsonr(sn_timeseries, pal_timeseries)[0])
            df.loc[subject]['sn_acc']  = float(pearsonr(sn_timeseries, mACC_timeseries)[0])
            df.loc[subject]['sn_tha']  = float(pearsonr(sn_timeseries, tha_timeseries)[0])
            df.loc[subject]['sn_thaX'] = float(pearsonr(sn_timeseries, thaX_timeseries)[0])
            df.loc[subject]['sn_thaL'] = float(pearsonr(sn_timeseries, thaL_timeseries)[0])
            df.loc[subject]['sn_thaR'] = float(pearsonr(sn_timeseries, thaR_timeseries)[0])
            df.loc[subject]['sn_hip']  = float(pearsonr(sn_timeseries, hip_timeseries)[0])
            df.loc[subject]['sn_amg']  = float(pearsonr(sn_timeseries, amg_timeseries)[0])
            df.loc[subject]['sn_accX'] = float(pearsonr(sn_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['sn_lins'] = float(pearsonr(sn_timeseries, lINS_timeseries)[0])
            df.loc[subject]['sn_rins'] = float(pearsonr(sn_timeseries, rINS_timeseries)[0])
            df.loc[subject]['sn_sma']  = float(pearsonr(sn_timeseries, SMA_timeseries)[0])
            df.loc[subject]['sn_strX'] = float(pearsonr(sn_timeseries, strX_timeseries)[0])
            df.loc[subject]['sn_str']  = float(pearsonr(sn_timeseries, str_timeseries)[0])
            df.loc[subject]['sn_cau']  = float(pearsonr(sn_timeseries, cau_timeseries)[0])
            df.loc[subject]['sn_put']  = float(pearsonr(sn_timeseries, put_timeseries)[0])
            df.loc[subject]['sn_nac']  = float(pearsonr(sn_timeseries, nac_timeseries)[0])

            print '......calculating STR_SVS connectivity'
            df.loc[subject]['strX_acc']  = float(pearsonr(strX_timeseries, mACC_timeseries)[0])
            df.loc[subject]['strX_tha']  = float(pearsonr(strX_timeseries, tha_timeseries)[0])
            df.loc[subject]['strX_thaX'] = float(pearsonr(strX_timeseries, thaX_timeseries)[0])
            df.loc[subject]['strX_thaL'] = float(pearsonr(strX_timeseries, thaL_timeseries)[0])
            df.loc[subject]['strX_thaR'] = float(pearsonr(strX_timeseries, thaR_timeseries)[0])
            df.loc[subject]['strX_hip']  = float(pearsonr(strX_timeseries, hip_timeseries)[0])
            df.loc[subject]['strX_amg']  = float(pearsonr(strX_timeseries, amg_timeseries)[0])
            df.loc[subject]['strX_accX'] = float(pearsonr(strX_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['strX_lins'] = float(pearsonr(strX_timeseries, lINS_timeseries)[0])
            df.loc[subject]['strX_rins'] = float(pearsonr(strX_timeseries, rINS_timeseries)[0])
            df.loc[subject]['strX_sma'] = float(pearsonr(strX_timeseries, SMA_timeseries)[0])

            print '......calculating STR connectivity'
            df.loc[subject]['str_acc']  = float(pearsonr(str_timeseries, mACC_timeseries)[0])
            df.loc[subject]['str_tha']  = float(pearsonr(str_timeseries, tha_timeseries)[0])
            df.loc[subject]['str_thaX'] = float(pearsonr(str_timeseries, thaX_timeseries)[0])
            df.loc[subject]['str_thaL'] = float(pearsonr(str_timeseries, thaL_timeseries)[0])
            df.loc[subject]['str_thaR'] = float(pearsonr(str_timeseries, thaR_timeseries)[0])
            df.loc[subject]['str_hip']  = float(pearsonr(str_timeseries, hip_timeseries)[0])
            df.loc[subject]['str_amg']  = float(pearsonr(str_timeseries, amg_timeseries)[0])
            df.loc[subject]['str_accX'] = float(pearsonr(str_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['str_lins'] = float(pearsonr(str_timeseries, lINS_timeseries)[0])
            df.loc[subject]['str_rins'] = float(pearsonr(str_timeseries, rINS_timeseries)[0])
            df.loc[subject]['str_sma'] = float(pearsonr(str_timeseries, SMA_timeseries)[0])

            print '......calculating CAUDATE connectivity'
            df.loc[subject]['cau_acc']  = float(pearsonr(cau_timeseries, mACC_timeseries)[0])
            df.loc[subject]['cau_tha']  = float(pearsonr(cau_timeseries, tha_timeseries)[0])
            df.loc[subject]['cau_thaX'] = float(pearsonr(cau_timeseries, thaX_timeseries)[0])
            df.loc[subject]['cau_thaL'] = float(pearsonr(cau_timeseries, thaL_timeseries)[0])
            df.loc[subject]['cau_thaR'] = float(pearsonr(cau_timeseries, thaR_timeseries)[0])
            df.loc[subject]['cau_pal']  = float(pearsonr(cau_timeseries, pal_timeseries)[0])
            df.loc[subject]['cau_hip']  = float(pearsonr(cau_timeseries, hip_timeseries)[0])
            df.loc[subject]['cau_amg']  = float(pearsonr(cau_timeseries, amg_timeseries)[0])
            df.loc[subject]['cau_accX'] = float(pearsonr(cau_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['cau_lins'] = float(pearsonr(cau_timeseries, lINS_timeseries)[0])
            df.loc[subject]['cau_rins'] = float(pearsonr(cau_timeseries, rINS_timeseries)[0])
            df.loc[subject]['cau_sma'] = float(pearsonr(cau_timeseries, SMA_timeseries)[0])

            print '......calculating PUTAMEN connectivity'
            df.loc[subject]['put_tha']  = float(pearsonr(put_timeseries, tha_timeseries)[0])
            df.loc[subject]['put_thaX'] = float(pearsonr(put_timeseries, thaX_timeseries)[0])
            df.loc[subject]['put_thaL'] = float(pearsonr(put_timeseries, thaL_timeseries)[0])
            df.loc[subject]['put_thaR'] = float(pearsonr(put_timeseries, thaR_timeseries)[0])
            df.loc[subject]['put_pal']  = float(pearsonr(put_timeseries, pal_timeseries)[0])
            df.loc[subject]['put_hip']  = float(pearsonr(put_timeseries, hip_timeseries)[0])
            df.loc[subject]['put_amg']  = float(pearsonr(put_timeseries, amg_timeseries)[0])
            df.loc[subject]['put_acc']  = float(pearsonr(put_timeseries, mACC_timeseries)[0])
            df.loc[subject]['put_accX'] = float(pearsonr(put_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['put_lins'] = float(pearsonr(put_timeseries, lINS_timeseries)[0])
            df.loc[subject]['put_rins'] = float(pearsonr(put_timeseries, rINS_timeseries)[0])
            df.loc[subject]['put_sma'] = float(pearsonr(put_timeseries, SMA_timeseries)[0])

            print '......calculating NUCLEUS ACCUMBENS connectivity'
            df.loc[subject]['nac_tha']  = float(pearsonr(nac_timeseries, tha_timeseries)[0])
            df.loc[subject]['nac_thaX'] = float(pearsonr(nac_timeseries, thaX_timeseries)[0])
            df.loc[subject]['nac_thaL'] = float(pearsonr(nac_timeseries, thaL_timeseries)[0])
            df.loc[subject]['nac_thaR'] = float(pearsonr(nac_timeseries, thaR_timeseries)[0])
            df.loc[subject]['nac_pal']  = float(pearsonr(nac_timeseries, pal_timeseries)[0])
            df.loc[subject]['nac_hip']  = float(pearsonr(nac_timeseries, hip_timeseries)[0])
            df.loc[subject]['nac_amg']  = float(pearsonr(nac_timeseries, amg_timeseries)[0])
            df.loc[subject]['nac_acc']  = float(pearsonr(nac_timeseries, mACC_timeseries)[0])
            df.loc[subject]['nac_accX'] = float(pearsonr(nac_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['nac_lins'] = float(pearsonr(nac_timeseries, lINS_timeseries)[0])
            df.loc[subject]['nac_rins'] = float(pearsonr(nac_timeseries, rINS_timeseries)[0])
            df.loc[subject]['nac_sma'] = float(pearsonr(nac_timeseries, SMA_timeseries)[0])

            print '......calculating PALLIDUM connectivity'
            df.loc[subject]['pal_tha']  = float(pearsonr(pal_timeseries, tha_timeseries)[0])
            df.loc[subject]['pal_thaX'] = float(pearsonr(pal_timeseries, thaX_timeseries)[0])
            df.loc[subject]['pal_thaL'] = float(pearsonr(pal_timeseries, thaL_timeseries)[0])
            df.loc[subject]['pal_thaR'] = float(pearsonr(pal_timeseries, thaR_timeseries)[0])
            df.loc[subject]['pal_hip']  = float(pearsonr(pal_timeseries, hip_timeseries)[0])
            df.loc[subject]['pal_amg']  = float(pearsonr(pal_timeseries, amg_timeseries)[0])
            df.loc[subject]['pal_acc']  = float(pearsonr(pal_timeseries, mACC_timeseries)[0])
            df.loc[subject]['pal_accX'] = float(pearsonr(pal_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['pal_lins'] = float(pearsonr(pal_timeseries, lINS_timeseries)[0])
            df.loc[subject]['pal_rins'] = float(pearsonr(pal_timeseries, rINS_timeseries)[0])
            df.loc[subject]['pal_sma'] = float(pearsonr(pal_timeseries, SMA_timeseries)[0])

            print '......calculating THA_SVS connectivity'
            df.loc[subject]['thaX_cau']  = float(pearsonr(thaX_timeseries,  cau_timeseries)[0])
            df.loc[subject]['thaX_put']  = float(pearsonr(thaX_timeseries,  put_timeseries)[0])
            df.loc[subject]['thaX_pal']  = float(pearsonr(thaX_timeseries,  pal_timeseries)[0])
            df.loc[subject]['thaX_nac']  = float(pearsonr(thaX_timeseries,  nac_timeseries)[0])
            df.loc[subject]['thaX_hip']  = float(pearsonr(thaX_timeseries,  hip_timeseries)[0])
            df.loc[subject]['thaX_amg']  = float(pearsonr(thaX_timeseries,  amg_timeseries)[0])
            df.loc[subject]['thaX_acc']  = float(pearsonr(thaX_timeseries,  mACC_timeseries)[0])
            df.loc[subject]['thaX_accX'] = float(pearsonr(thaX_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['thaX_lins'] = float(pearsonr(thaX_timeseries, lINS_timeseries)[0])
            df.loc[subject]['thaX_rins'] = float(pearsonr(thaX_timeseries, rINS_timeseries)[0])
            df.loc[subject]['thaX_sma']  = float(pearsonr(thaX_timeseries, SMA_timeseries)[0])

            print '......calculating THALAMUS FULL connectivity'
            df.loc[subject]['tha_cau']  = float(pearsonr(tha_timeseries,  cau_timeseries)[0])
            df.loc[subject]['tha_put']  = float(pearsonr(tha_timeseries,  put_timeseries)[0])
            df.loc[subject]['tha_pal']  = float(pearsonr(tha_timeseries,  pal_timeseries)[0])
            df.loc[subject]['tha_nac']  = float(pearsonr(tha_timeseries,  nac_timeseries)[0])
            df.loc[subject]['tha_hip']  = float(pearsonr(tha_timeseries,  hip_timeseries)[0])
            df.loc[subject]['tha_amg']  = float(pearsonr(tha_timeseries,  amg_timeseries)[0])
            df.loc[subject]['tha_acc']  = float(pearsonr(tha_timeseries,  mACC_timeseries)[0])
            df.loc[subject]['tha_accX'] = float(pearsonr(tha_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['tha_lins'] = float(pearsonr(tha_timeseries, lINS_timeseries)[0])
            df.loc[subject]['tha_rins'] = float(pearsonr(tha_timeseries, rINS_timeseries)[0])
            df.loc[subject]['tha_sma']  = float(pearsonr(tha_timeseries, SMA_timeseries)[0])

            print '......calculating THALAMUS RIGHT connectivity'
            df.loc[subject]['thaR_cau']  = float(pearsonr(thaR_timeseries,  cau_timeseries)[0])
            df.loc[subject]['thaR_put']  = float(pearsonr(thaR_timeseries,  put_timeseries)[0])
            df.loc[subject]['thaR_pal']  = float(pearsonr(thaR_timeseries,  pal_timeseries)[0])
            df.loc[subject]['thaR_nac']  = float(pearsonr(thaR_timeseries,  nac_timeseries)[0])
            df.loc[subject]['thaR_hip']  = float(pearsonr(thaR_timeseries,  hip_timeseries)[0])
            df.loc[subject]['thaR_amg']  = float(pearsonr(thaR_timeseries,  amg_timeseries)[0])
            df.loc[subject]['thaR_acc']  = float(pearsonr(thaR_timeseries,  mACC_timeseries)[0])
            df.loc[subject]['thaR_accX'] = float(pearsonr(thaR_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['thaR_lins'] = float(pearsonr(thaR_timeseries, lINS_timeseries)[0])
            df.loc[subject]['thaR_rins'] = float(pearsonr(thaR_timeseries, rINS_timeseries)[0])
            df.loc[subject]['thaR_sma']  = float(pearsonr(thaR_timeseries, SMA_timeseries)[0])

            print '......calculating THALAMUS LEFT connectivity'
            df.loc[subject]['thaL_cau']  = float(pearsonr(thaL_timeseries,  cau_timeseries)[0])
            df.loc[subject]['thaL_put']  = float(pearsonr(thaL_timeseries,  put_timeseries)[0])
            df.loc[subject]['thaL_pal']  = float(pearsonr(thaL_timeseries,  pal_timeseries)[0])
            df.loc[subject]['thaL_nac']  = float(pearsonr(thaL_timeseries,  nac_timeseries)[0])
            df.loc[subject]['thaL_hip']  = float(pearsonr(thaL_timeseries,  hip_timeseries)[0])
            df.loc[subject]['thaL_amg']  = float(pearsonr(thaL_timeseries,  amg_timeseries)[0])
            df.loc[subject]['thaL_acc']  = float(pearsonr(thaL_timeseries,  mACC_timeseries)[0])
            df.loc[subject]['thaL_accX'] = float(pearsonr(thaL_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['thaL_lins'] = float(pearsonr(thaL_timeseries, lINS_timeseries)[0])
            df.loc[subject]['thaL_rins'] = float(pearsonr(thaL_timeseries, rINS_timeseries)[0])
            df.loc[subject]['thaL_sma']  = float(pearsonr(thaL_timeseries, SMA_timeseries)[0])

            print '......calculating ACC connectivity'
            df.loc[subject]['acc_lins']  = float(pearsonr(mACC_timeseries, lINS_timeseries)[0])
            df.loc[subject]['acc_rins']  = float(pearsonr(mACC_timeseries, rINS_timeseries)[0])
            df.loc[subject]['acc_sma']  = float(pearsonr(mACC_timeseries, SMA_timeseries)[0])
            df.loc[subject]['accX_lins'] = float(pearsonr(mACCX_timeseries, lINS_timeseries)[0])
            df.loc[subject]['accX_rins'] = float(pearsonr(mACCX_timeseries, rINS_timeseries)[0])
            df.loc[subject]['accX_sma']  = float(pearsonr(mACCX_timeseries, SMA_timeseries)[0])

            print '......calculating SMA connectivity'
            df.loc[subject]['sma_lins']  = float(pearsonr(SMA_timeseries, lINS_timeseries)[0])
            df.loc[subject]['sma_rins']  = float(pearsonr(SMA_timeseries, rINS_timeseries)[0])

            df.loc[subject]['fd']       = fd
            df.loc[subject]['exclude']  = exclude
            df.loc[subject]['dvars']    = dvars

    df.to_csv(os.path.join(workspace_dir,'GluConnectivity' ,'x4_RSFC_df_%s_%s.csv'%(pop_name, mrs_datadir[-1])))
    print 'done'
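
    # Aggregate per-subject LCModel output (demographics from the RDA header, spectral quality,
    # tissue fractions and metabolite concentrations) into one group dataframe and save it to CSV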
    def create_group_dataframe(voxel_name, analysis_type, ppmst):

        df_group =[]
        for subject in population:
            print subject
            header = open(os.path.join(workspace_dir, subject, 'svs_rda', 'ACC', 'h2o', '%s%s_ACC_WATER.rda'%(subject, workspace_dir[-10:-9]))).read().splitlines()
            gender = [i[11:15] for i in header if 'PatientSex' in i][0]
            age    = [i[13:15] for i in header if 'PatientAge' in i][0]

            analysis_file = os.path.join(workspace_dir, subject, 'lcmodel_%s'%analysis_type, voxel_name, 'ppm_%s'%ppmst, 'table')

            if not os.path.isfile(analysis_file):
                print 'Subject %s has no %s data for %s' %(subject, analysis_type, voxel_name)

            else:
                print os.path.join(workspace_dir, subject, 'lcmodel_%s'%analysis_type ,  voxel_name, 'ppm_%s'%ppmst, 'snr.txt')
                quality = np.genfromtxt(os.path.join(workspace_dir, subject, 'lcmodel_%s'%analysis_type ,  voxel_name, 'ppm_%s'%ppmst, 'snr.txt'), delimiter = ',')
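                # quality metrics parsed from snr.txt; indices 1-5 are used below as FWHM, SNR, frequency shift, Ph0 and Ph1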

                # grab tissue proportion data
                prop_gm, prop_wm, prop_csf, prop_all  = np.genfromtxt(os.path.join(workspace_dir,subject,'svs_voxel_stats', '%s_voxel_statistics_spm_opt.txt'%voxel_name), delimiter = ',')

                # get lcmodel metabolites from spreadsheet csv
                csv = pd.read_csv(os.path.join(workspace_dir, subject, 'lcmodel_%s'%analysis_type,voxel_name, 'ppm_%s'%ppmst, 'spreadsheet.csv'))

                # create dataframe with subject demographics, frequency data and reliable metabolite concentrations
                columns =  ['Age' ,  'Gender', 'FWHM'  , 'SNR'  , 'Shift', 'Ph0', 'Ph1', 'GM', 'WM', 'CSF', 'AllTissue',
                            'Cre'    ,  'Cre%'      ,
                            'tCho'   , 'tCho%'      ,
                            'NAA'    ,  'NAA%'      ,
                            'NAAG'   ,  'NAAG%'     ,
                            'tNAA'   ,  'tNAA%'     ,
                            'mIno'   ,  'mIno%'     ,
                            'Glu'    ,  'Glu%'      ,
                            'Gln'    ,  'Gln%'      ,
                            'Glx'    ,  'Glx%'      ,
                            'Glu_Cre',  'Gln_Cre'   ,   'Glx_Cre'   ,
                            'GABA'   ,  'GABA%'     ,
                            'Asp'    ,  'Asp%'      ,
                            'Tau'    ,  'Tau%'      ,
                            'Lac'    ,  'Lac%'      ,
                            'Ala'    ,  'Ala%'      ,
                            'Scy'    ,  'Scy%'      ,
                            'Glc'    ,  'Glc%'      ,
                            'Gua'    ,  'Gua%'      ,]

                df_subject = pd.DataFrame(columns = columns, index = ['%s'%subject])
                df_subject.loc['%s'%subject] = pd.Series({'Age'        : age,
                                                         'Gender'      : gender,
                                                         'FWHM'        : quality[1],
                                                         'SNR'         : quality[2],
                                                         'Shift'       : quality[3],
                                                         'Ph0'         : quality[4],
                                                         'Ph1'         : quality[5],
                                                         'GM'          : prop_gm  * 100.,
                                                         'WM'          : prop_wm  * 100.,
                                                         'CSF'         : prop_csf * 100.,
                                                         'AllTissue'   : prop_all * 100.,
                                                         'Cre'         : float(csv[' Cre']),         'Cre%'        : float(csv[' Cre %SD']),
                                                         'tCho'        : float(csv[' GPC+PCh']),     'tCho%'       : float(csv[' GPC+PCh %SD']),
                                                         'tNAA'        : float(csv[' NAA+NAAG']),    'tNAA%'       : float(csv[' NAA+NAAG %SD']),
                                                         'NAA'         : float(csv[' NAA']),         'NAA%'        : float(csv[' NAA %SD']),
                                                         'NAAG'        : float(csv[' NAAG']),        'NAAG%'       : float(csv[' NAAG %SD']),
                                                         'mIno'        : float(csv[' mI']),          'mIno%'       : float(csv[' mI %SD']),
                                                         'Glu'         : float(csv[' Glu']),         'Glu%'        : float(csv[' Glu %SD']),
                                                         'Gln'         : float(csv[' Gln']),         'Gln%'        : float(csv[' Gln %SD']),
                                                         'Glx'         : float(csv[' Glu+Gln']),     'Glx%'        : float(csv[' Glu+Gln %SD']),
                                                         'Glu_Cre'     : float(csv[' Glu/Cre']),
                                                         'Gln_Cre'     : float(csv[' Gln/Cre']),
                                                         'Glx_Cre'     : float(csv[' Glu+Gln/Cre']),
                                                         'GABA'        : float(csv[' GABA']),        'GABA%'       : float(csv[' GABA %SD']),
                                                         'Asp'         : float(csv[' Asp']),         'Asp%'        : float(csv[' Asp %SD']),
                                                         'Ala'         : float(csv[' Ala']),         'Ala%'        : float(csv[' Ala %SD']),
                                                         'Lac'         : float(csv[' Lac']),         'Lac%'        : float(csv[' Lac %SD']),
                                                         'Tau'         : float(csv[' Tau']),         'Tau%'        : float(csv[' Tau %SD']),
                                                         #'Gua'         : float(csv[' Gua']),         'Gua%'        : float(csv[' Gua %SD']),
                                                         #'Glc'         : float(csv[' Glc']),         'Glc%'        : float(csv[' Glc %SD']),
                                                         'Scy'         : float(csv[' Scyllo']),      'Scy%'         : float(csv[' Scyllo %SD']),
                                                         })
                # append subject data to list
                df_group.append(df_subject)

        group_dataframe = pd.concat(df_group, ignore_index = False).sort(columns='Age')


        # create results directory and save group dataframe
        mkdir_path(os.path.join(results_dir, voxel_name))
        #group_dataframe.to_csv(os.path.join(results_dir, voxel_name, 'lcmodel_%s_%s_ppmst_%s_%s_%s.csv'%(voxel_name, analysis_type, ppmst, workspace_dir[-8:],workspace_dir[-10:-9])))
        group_dataframe.to_csv(os.path.join(results_dir, voxel_name, 'v2_lcmodel_%s_%s_ppmst_%s_%s_%s.csv'%(voxel_name, analysis_type, ppmst, workspace_dir[-8:],workspace_dir[-10:-9])))

        print 'NMR-093%s_ %s %s_%s Results here: %s'%( workspace_dir[-10:-9],workspace_dir[-8:], voxel_name,analysis_type, results_dir)
def scrub_data(population, workspace_dir):

    count = 0
    for subject in population:
        count +=1
        print '####################################'
        print '%s. Running SCRUBBING FOR SUBJECT %s'%(count,subject)

        #############################################################
        #################### Input and output folders
        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)

        mkdir_path( os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp'))
        pproc      = os.path.join(subject_dir , 'functional_native_brain_preproc_FWHM_AROMA_residual_bp/bandpassed_demeaned_filtered.nii.gz')
        pproc_2mm  = os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc.nii')
        anat2mni   = os.path.join(subject_dir , 'anatomical_MNI2mm_xfm/MP2RAGE_DESKULL_RPI_resample_ero_fieldwarp.nii.gz')
        func2anat  = os.path.join(subject_dir , 'functional_ANAT2mm_xfm/REST_calc_resample_corrected_volreg_maths_tstat_flirt.mat')


        #############################################################
        #################### WARPING PPROC TO MNI
        print '.... warping pproc to MNI SPACE'
        if not os.path.isfile(pproc_2mm):
            if os.path.isfile(pproc):
                print '... Warping to MNI'
                os.environ['FSLOUTPUTTYPE'] = 'NIFTI'  # set in os.environ so it propagates to the FSL call below; os.system would not persist it
                os.system(' '.join([ 'applywarp',
                                 '--in='     +  pproc,
                                 '--ref='    +  '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz',
                                 '--out='    +  pproc_2mm,
                                 '--warp='   +  anat2mni,
                                 '--premat=' +  func2anat]))

        #############################################################
        #################### MAKE SCRUB

        print '....scrubbing '

        if os.path.isfile(os.path.join(subject_dir, 'functional_motion_FDPower/FD.1D' )):

            FDs = np.loadtxt(os.path.join(subject_dir, 'functional_motion_FDPower/FD.1D' ))

            # GET LIST OF GOOD FRAMES
            in_frames = []
            for frame, fd in enumerate(FDs):
                if fd < 0.2:
                    in_frames.append(frame)
            #print subject,'-----> GOOD FRAMES =', in_frames


            if len(in_frames) > 130:
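                # only subjects with more than 130 low-motion frames are scrubbed; the series is truncated to the first 130 of them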
                print '..........Scrubbing frames above FD=0.2 for subject [ %s ]' %subject
                print '..........Subject has %s good frames' % len(in_frames)
                print '...........taking first 130 good frames'
                frames = str(in_frames[0:130]).replace(" ","")

                # SCRUB DATA
                pproc_2mm  = os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc.nii')
                scrubbed   = os.path.join(subject_dir , 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_scrubbed.nii')

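                # AFNI sub-brick selector: appending the bracketed frame list to the input keeps only the listed volumes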
                os.system("3dcalc -a %s%s -expr 'a' -prefix %s" %(pproc_2mm, frames, scrubbed))

            else:
                print '**** Subject [ %s ] has 130 or fewer frames with FD below the 0.2mm threshold'%subject

    # collect the subjects that have a scrubbed time series (run once, after the subject loop)
    scrub_subs = []
    for subject in population:
        if os.path.isfile(os.path.join(workspace_dir, 'GluConnectivity', subject, 'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/rest_pproc_scrubbed.nii')):
            scrub_subs.append(subject)
    print scrub_subs
Exemple #20
0
def calc_overlap(population, population_name):


    for subject in population:

        print 'Calculating DICE METRIC for subject', subject

        subdir_a = os.path.join(project_dir, 'study_a', population_name, subject)
        subdir_b = os.path.join(project_dir, 'study_b', population_name, subject)

        dice_dir = os.path.join(subdir_b, 'dice_metric')
        mkdir_path(dice_dir)
        os.chdir(dice_dir)

        anat_a = os.path.join(subdir_a, 'anatomical_original/ANATOMICAL.nii')
        anat_b = os.path.join(subdir_b, 'anatomical_original/ANATOMICAL.nii')

        acc_a = os.path.join(subdir_a, 'svs_voxel_mask/%sa_ACC_RDA_MASK.nii'%subject)
        tha_a = os.path.join(subdir_a, 'svs_voxel_mask/%sa_THA_RDA_MASK.nii'%subject)
        str_a = os.path.join(subdir_a, 'svs_voxel_mask/%sa_STR_RDA_MASK.nii'%subject)

        acc_b = os.path.join(subdir_b, 'svs_voxel_mask/%sb_ACC_RDA_MASK.nii'%subject)
        tha_b = os.path.join(subdir_b, 'svs_voxel_mask/%sb_THA_RDA_MASK.nii'%subject)
        str_b = os.path.join(subdir_b, 'svs_voxel_mask/%sb_STR_RDA_MASK.nii'%subject)

        print '....running anat registration'
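        # rigid-body (6 DOF) FLIRT registration of the session-A anatomical to the session-B anatomical, mutual-information cost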
        if not os.path.isfile('anat_a2b.mat'):
            os.system('flirt -in %s -ref %s -omat anat_a2b.mat -out anat_a2b.nii.gz '
                      '-dof 6 -cost mutualinfo -finesearch 18'
                      %(anat_a, anat_b))

        print '....applying transform to SVS'
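        # resample the session-A MRS voxel masks into session-B space using the transform estimated above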
        if not os.path.isfile('overlap_ACC_a2b.nii.gz'):
            os.system('flirt -in %s -ref %s -applyxfm -init anat_a2b.mat -out overlap_ACC_a2b.nii.gz' %(acc_a, anat_b))
            os.system('flirt -in %s -ref %s -applyxfm -init anat_a2b.mat -out overlap_THA_a2b.nii.gz' %(tha_a, anat_b))
            os.system('flirt -in %s -ref %s -applyxfm -init anat_a2b.mat -out overlap_STR_a2b.nii.gz' %(str_a, anat_b))

        print '....calculating dice'
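        # Dice coefficient (2*|A∩B| / (|A|+|B|)) between each co-registered session-A mask and its session-B counterpart; calc_dice_metric is defined elsewhere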
        if not os.path.isfile('dice_metric_ACC.txt'):
            calc_dice_metric(acc_b, os.path.join(dice_dir, 'overlap_ACC_a2b.nii.gz'), 'ACC')
            calc_dice_metric(tha_b, os.path.join(dice_dir, 'overlap_THA_a2b.nii.gz'), 'THA')
            calc_dice_metric(str_b, os.path.join(dice_dir, 'overlap_STR_a2b.nii.gz'), 'STR')

        print '....plotting'

        def plot_svs(svs_a, svs_b, anatomical, fname):
            import os
            import numpy as np
            import nibabel as nb
            import matplotlib
            import matplotlib.pyplot as plt
            import seaborn as sns
            from matplotlib import colors

            #get data into matrix
            anat_load = nb.load(anatomical)
            anat_data = anat_load.get_data()

            svs_load_a  = nb.load(svs_a)
            svs_data_a  = svs_load_a.get_data().astype(float)

            svs_load_b  = nb.load(svs_b)
            svs_data_b  = svs_load_b.get_data().astype(float)

            # get svs cut coords
            coords = find_cut_coords(svs_load_b)

            # convert zeros to nans for visualization purposes
            svs_data_a[svs_data_a==0]=np.nan
            svs_data_b[svs_data_b==0]=np.nan

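            # the element-wise product is non-NaN only where the two voxel masks overlap; this overlap is plotted in purple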
            svs_x = svs_data_a * svs_data_b

            # plot voxel on anat
            fig =plt.figure()
            fig.set_size_inches(15, 15)
            fig.subplots_adjust(wspace=0.005)


            red = colors.ListedColormap(['red'])
            blue = colors.ListedColormap(['blue'])
            mix = colors.ListedColormap(['purple'])


            #1
            ax1 = plt.subplot2grid((1,3), (0,0),  colspan = 1, rowspan =1)
            ax1.imshow(anat_data[coords[0],:,:], matplotlib.cm.bone_r)
            ax1.imshow(svs_data_b[coords[0],:,:] , red, alpha = 0.7)
            ax1.imshow(svs_data_a[coords[0],:,:] , blue, alpha = 0.7)
            ax1.imshow(svs_x[coords[0],:,:]      ,  mix , alpha = 1, interpolation='nearest')
            ax1.set_xlim(23, 157)
            ax1.set_ylim(101, 230)
            ax1.axes.get_yaxis().set_visible(False)
            ax1.axes.get_xaxis().set_visible(False)
            #2
            ax2 = plt.subplot2grid((1,3), (0,1),  colspan = 1, rowspan =1)
            ax2.imshow(np.rot90(anat_data[:,:,coords[2]]), matplotlib.cm.bone_r )
            ax2.imshow(np.rot90(svs_data_b[:,:,coords[2]]) , red, alpha = 0.7 )
            ax2.imshow(np.rot90(svs_data_a[:,:,coords[2]]) , blue, alpha = 0.7 )
            ax2.imshow(np.rot90(svs_x[:,:,coords[2]]) , mix, alpha = 0.7 )

            ax2.set_xlim(230, 20)
            ax2.set_ylim(207, 4)
            ax2.axes.get_yaxis().set_visible(False)
            ax2.axes.get_xaxis().set_visible(False)
            #3
            ax3 = plt.subplot2grid((1,3), (0,2),  colspan = 1, rowspan =1)
            ax3.imshow(anat_data[:,coords[1],:], matplotlib.cm.bone_r, origin='lower')
            ax3.imshow(svs_data_b[:,coords[1],:] , red, alpha = 0.7, origin='lower')
            ax3.imshow(svs_data_a[:,coords[1],:] , blue, alpha = 0.7, origin='lower')
            ax3.imshow(svs_x[:,coords[1],:]      , mix, alpha = 0.7, origin='lower')

            ax3.set_xlim(37, 140)
            ax3.set_ylim(160, 61)
            ax3.axes.get_yaxis().set_visible(False)
            ax3.axes.get_xaxis().set_visible(False)

            for ax in [ax1, ax2, ax3]:
                for axis in ['top','bottom','left','right']:
                    ax.spines[axis].set_color('black')
                    ax.spines[axis].set_linewidth(3)
            fig.tight_layout()
            fig.savefig('plot_overlap_%s.png'%fname, dpi=200, bbox_inches='tight')

        plot_svs('overlap_ACC_a2b.nii.gz', acc_b, anat_b, 'ACC')
        plot_svs('overlap_THA_a2b.nii.gz', tha_b, anat_b, 'THA')
        plot_svs('overlap_STR_a2b.nii.gz', str_b, anat_b, 'STR')
def preprocess_functional(population, workspace):

    count = 0
    for subject in population:
        count +=1
        #subject = population[subject_index]
        print '========================================================================================'
        print '%s. Preprocessing functional data for %s' %(count,subject)


        raw_dir   = mkdir_path(os.path.join(workspace, subject, 'RAW'))
        func_dir  = mkdir_path(os.path.join(workspace, subject, 'FUNCTIONAL'))
        moco_dir  = mkdir_path(os.path.join(func_dir, 'moco'))
        edit_dir  = mkdir_path(os.path.join(func_dir, 'edit'))

        ##### Minimal pre-processing

        if not os.path.isfile(os.path.join(func_dir, 'REST_EDIT.nii.gz')):

            print '.....Edit Functional Image (Slice-time-corr/Deoblique/Drop-TRs/Reorient) '

            os.chdir(func_dir)

            # get data
            os.system('fslchfiletype NIFTI_GZ  %s/REST.nii.gz %s/REST.nii.gz' %(raw_dir, func_dir))
            # shutil.copy(os.path.join(raw_dir, 'REST.nii.gz'), os.path.join(func_dir, 'REST.nii.gz'))

            # get params
            img_hdr = nb.load('REST.nii.gz').header
            TR      = img_hdr['pixdim'][4]
            nvols   = img_hdr['dim'][4]
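            # AFNI sub-brick selector keeping volumes 4..nvols-1, i.e. dropping the first four TRs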
            frames  = '[%s..%s]' % (4, nvols -1)

            print 'TR =', TR
            print 'N-Vols=', nvols

            # Deoblique
            os.system('3drefit -deoblique REST.nii.gz')

            # Slice time correction
            os.chdir(edit_dir)
            os.system('3dTshift -TR %s -tzero 0 -tpattern alt+z -prefix REST_slc.nii.gz ../REST.nii.gz' %(TR))

            # Dropping TRs
            os.system('3dcalc -a REST_slc.nii.gz%s -expr "a" -prefix REST_slc_drop.nii.gz' % frames)

            # Reorient to RPI
            os.system('3dresample -orient RPI  -prefix ../REST_EDIT.nii.gz -inset REST_slc_drop.nii.gz')


        ##### Generate Motion Parameters

        if not os.path.isfile(os.path.join(moco_dir, 'REST_EDIT_moco2.nii.gz')):

            print '.... Running two-step motion correction'

            os.chdir(moco_dir)
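            # pass 1 aligns to the mean of the raw series; pass 2 re-aligns the data to the pass-1 mean volume for a more stable reference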
            # run No.1
            os.system('mcflirt -in ../REST_EDIT -out REST_EDIT_moco1 -mats -plots -stats -meanvol ')

            # run No.2
            os.system('mcflirt -in ../REST_EDIT -out REST_EDIT_moco2 -refvol REST_EDIT_moco1_meanvol -mats -plots -stats')

        ###### BET and Intensity normalization

        if not os.path.isfile(os.path.join(func_dir, 'REST_EDIT_BRAIN_MEAN.nii.gz')):

            print '....Brain extraction and intensity normalization'

            os.chdir(func_dir)

            # Create mask
            os.system('bet moco/REST_EDIT_moco2_meanvol.nii.gz moco/REST_EDIT_moco2_meanvol_brain -m -R -f 0.35' )
            os.system('cp moco/REST_EDIT_moco2_meanvol_brain_mask.nii.gz REST_BRAIN_MASK.nii.gz')

            # Extract Brain
            os.system('fslmaths REST_EDIT -mul REST_BRAIN_MASK REST_EDIT_BRAIN_nonorm')

            # Intensity Normalization
            os.system('fslmaths REST_EDIT_BRAIN_nonorm -ing 1000 REST_EDIT_BRAIN -odt float')
            os.system('rm -rf REST_EDIT_BRAIN_nonorm.nii.gz')

            # Get Mean
            os.system('fslmaths REST_EDIT_BRAIN -Tmean REST_EDIT_BRAIN_MEAN.nii' )
            os.system('cp moco/REST_EDIT_moco2_meanvol_brain.nii.gz REST_EDIT_MOCO_BRAIN_MEAN.nii.gz' )
Exemple #22
0
def calc_overlap(population, population_name):

    for subject in population:

        print 'Calculating DICE METRIC for subject', subject

        subdir_a = os.path.join(project_dir, 'study_a', population_name,
                                subject)
        subdir_b = os.path.join(project_dir, 'study_b', population_name,
                                subject)

        dice_dir = os.path.join(subdir_b, 'dice_metric')
        mkdir_path(dice_dir)
        os.chdir(dice_dir)

        anat_a = os.path.join(subdir_a, 'anatomical_original/ANATOMICAL.nii')
        anat_b = os.path.join(subdir_b, 'anatomical_original/ANATOMICAL.nii')

        acc_a = os.path.join(subdir_a,
                             'svs_voxel_mask/%sa_ACC_RDA_MASK.nii' % subject)
        tha_a = os.path.join(subdir_a,
                             'svs_voxel_mask/%sa_THA_RDA_MASK.nii' % subject)
        str_a = os.path.join(subdir_a,
                             'svs_voxel_mask/%sa_STR_RDA_MASK.nii' % subject)

        acc_b = os.path.join(subdir_b,
                             'svs_voxel_mask/%sb_ACC_RDA_MASK.nii' % subject)
        tha_b = os.path.join(subdir_b,
                             'svs_voxel_mask/%sb_THA_RDA_MASK.nii' % subject)
        str_b = os.path.join(subdir_b,
                             'svs_voxel_mask/%sb_STR_RDA_MASK.nii' % subject)

        print '....running anat registration'
        if not os.path.isfile('anat_a2b.mat'):
            os.system(
                'flirt -in %s -ref %s -omat anat_a2b.mat -out anat_a2b.nii.gz '
                '-dof 6 -cost mutualinfo -finesearch 18' % (anat_a, anat_b))

        print '....applying transform to SVS'
        if not os.path.isfile('overlap_ACC_a2b.nii.gz'):
            os.system(
                'flirt -in %s -ref %s -applyxfm -init anat_a2b.mat -out overlap_ACC_a2b.nii.gz'
                % (acc_a, anat_b))
            os.system(
                'flirt -in %s -ref %s -applyxfm -init anat_a2b.mat -out overlap_THA_a2b.nii.gz'
                % (tha_a, anat_b))
            os.system(
                'flirt -in %s -ref %s -applyxfm -init anat_a2b.mat -out overlap_STR_a2b.nii.gz'
                % (str_a, anat_b))

        print '....calculating dice'
        if not os.path.isfile('dice_metric_ACC.txt'):
            calc_dice_metric(acc_b,
                             os.path.join(dice_dir, 'overlap_ACC_a2b.nii.gz'),
                             'ACC')
            calc_dice_metric(tha_b,
                             os.path.join(dice_dir, 'overlap_THA_a2b.nii.gz'),
                             'THA')
            calc_dice_metric(str_b,
                             os.path.join(dice_dir, 'overlap_STR_a2b.nii.gz'),
                             'STR')

        print '....plotting'

        def plot_svs(svs_a, svs_b, anatomical, fname):
            import os
            import numpy as np
            import nibabel as nb
            import matplotlib
            import matplotlib.pyplot as plt
            import seaborn as sns
            from matplotlib import colors

            #get data into matrix
            anat_load = nb.load(anatomical)
            anat_data = anat_load.get_data()

            svs_load_a = nb.load(svs_a)
            svs_data_a = svs_load_a.get_data().astype(float)

            svs_load_b = nb.load(svs_b)
            svs_data_b = svs_load_b.get_data().astype(float)

            # get svs cut coords
            coords = find_cut_coords(svs_load_b)

            # convert zeros to nans for visualization purposes
            svs_data_a[svs_data_a == 0] = np.nan
            svs_data_b[svs_data_b == 0] = np.nan

            svs_x = svs_data_a * svs_data_b

            # plot voxel on anat
            fig = plt.figure()
            fig.set_size_inches(15, 15)
            fig.subplots_adjust(wspace=0.005)

            red = colors.ListedColormap(['red'])
            blue = colors.ListedColormap(['blue'])
            mix = colors.ListedColormap(['purple'])

            #1
            ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=1, rowspan=1)
            ax1.imshow(anat_data[coords[0], :, :], matplotlib.cm.bone_r)
            ax1.imshow(svs_data_b[coords[0], :, :], red, alpha=0.7)
            ax1.imshow(svs_data_a[coords[0], :, :], blue, alpha=0.7)
            ax1.imshow(svs_x[coords[0], :, :],
                       mix,
                       alpha=1,
                       interpolation='nearest')
            ax1.set_xlim(23, 157)
            ax1.set_ylim(101, 230)
            ax1.axes.get_yaxis().set_visible(False)
            ax1.axes.get_xaxis().set_visible(False)
            #2
            ax2 = plt.subplot2grid((1, 3), (0, 1), colspan=1, rowspan=1)
            ax2.imshow(np.rot90(anat_data[:, :, coords[2]]),
                       matplotlib.cm.bone_r)
            ax2.imshow(np.rot90(svs_data_b[:, :, coords[2]]), red, alpha=0.7)
            ax2.imshow(np.rot90(svs_data_a[:, :, coords[2]]), blue, alpha=0.7)
            ax2.imshow(np.rot90(svs_x[:, :, coords[2]]), mix, alpha=0.7)

            ax2.set_xlim(230, 20)
            ax2.set_ylim(207, 4)
            ax2.axes.get_yaxis().set_visible(False)
            ax2.axes.get_xaxis().set_visible(False)
            #3
            ax3 = plt.subplot2grid((1, 3), (0, 2), colspan=1, rowspan=1)
            ax3.imshow(anat_data[:, coords[1], :],
                       matplotlib.cm.bone_r,
                       origin='lower')
            ax3.imshow(svs_data_b[:, coords[1], :],
                       red,
                       alpha=0.7,
                       origin='lower')
            ax3.imshow(svs_data_a[:, coords[1], :],
                       blue,
                       alpha=0.7,
                       origin='lower')
            ax3.imshow(svs_x[:, coords[1], :], mix, alpha=0.7, origin='lower')

            ax3.set_xlim(37, 140)
            ax3.set_ylim(160, 61)
            ax3.axes.get_yaxis().set_visible(False)
            ax3.axes.get_xaxis().set_visible(False)

            for ax in [ax1, ax2, ax3]:
                for axis in ['top', 'bottom', 'left', 'right']:
                    ax.spines[axis].set_color('black')
                    ax.spines[axis].set_linewidth(3)
            fig.tight_layout()
            fig.savefig('plot_overlap_%s.png' % fname,
                        dpi=200,
                        bbox_inches='tight')

        plot_svs('overlap_ACC_a2b.nii.gz', acc_b, anat_b, 'ACC')
        plot_svs('overlap_THA_a2b.nii.gz', tha_b, anat_b, 'THA')
        plot_svs('overlap_STR_a2b.nii.gz', str_b, anat_b, 'STR')
Exemple #23
0
def segment_spm(population, workspace_dir):
    print '#############################################################################'
    print ''
    print '                 RUNNING PROJECT NMR-093%s %s' %(workspace_dir[-10:-9], workspace_dir[-8:])
    print ''
    print '#############################################################################'

    count= 0

    for subject in population:
        count +=1
        print '========================================================================================'
        print '%s- Running SPM12 NewSegment on subject %s_%s' %(count, subject, workspace_dir[-10:-9])
        print ''

        # define subject directory and anatomical file path
        subject_dir     = os.path.join(workspace_dir ,  subject)
        anatomical_dir  = os.path.join(subject_dir   , 'anatomical_original')
        anatomical_file = os.path.join(anatomical_dir, 'ANATOMICAL.nii')

        # check if the file exists
        if os.path.isfile(os.path.join(workspace_dir, subject, 'segmentation_spm', 'TISSUE_CLASS_1_GM_prob.nii')):
            print 'Brain already segmented......... moving on'

        else:
            print '..... Segmenting Brain with SPM12-NewSegment'

            # define destination directory for spm segmentation outputs
            mkdir_path(os.path.join(subject_dir, 'segmentation_spm'))
            out_seg_dir  = str(os.path.join(subject_dir, 'segmentation_spm'))

            # run SPM segmentation
            print '..... Starting matlab no splash to run segmentation'
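            # SPM12 NewSegment via nipype; channel_info sets the bias regularisation, bias FWHM, and whether the bias field / bias-corrected image are saved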
            seg                      = spm.NewSegment()
            seg.inputs.channel_files = anatomical_file
            seg.inputs.channel_info  = (0.0001, 60, (True, True))
            seg.out_dir              = out_seg_dir
            seg.run()

            # rename output files
            print '..... Renaming outputs and dumping into SPM segmentation dir'

            shutil.move(str(os.path.join(anatomical_dir, 'c1ANATOMICAL.nii')),
                        str(os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_prob.nii')))

            shutil.move(str(os.path.join(anatomical_dir, 'c2ANATOMICAL.nii')),
                        str(os.path.join(out_seg_dir, 'TISSUE_CLASS_2_WM_prob.nii')))

            shutil.move(str(os.path.join(anatomical_dir, 'c3ANATOMICAL.nii')),
                        str(os.path.join(out_seg_dir, 'TISSUE_CLASS_3_CSF_prob.nii')))

            shutil.move(str(os.path.join(anatomical_dir, 'c4ANATOMICAL.nii')),
                        str(os.path.join(out_seg_dir, '___Skull.nii')))

            shutil.move(str(os.path.join(anatomical_dir, 'c5ANATOMICAL.nii')),
                        str(os.path.join(out_seg_dir, '___SoftTissue.nii')))

            shutil.move((os.path.join(anatomical_dir, 'BiasField_ANATOMICAL.nii')),
                        (os.path.join(out_seg_dir, '___BiasFieldMap.nii')))

            shutil.move((os.path.join(anatomical_dir, 'mANATOMICAL.nii')),
                        (os.path.join(out_seg_dir, '___mFile.nii')))

            shutil.move((os.path.join(anatomical_dir, 'ANATOMICAL_seg8.mat')),
                        (os.path.join(out_seg_dir, '___seg8.mat')))

            # ##########################################
            # threshold and biniarize spm tissue masks
            print '..... Thresholding and binarizing tissue probability maps'
            out_seg_dir  = str(os.path.join(subject_dir, 'segmentation_spm'))
            gm_mask  = str(os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_prob.nii'))
            wm_mask  = str(os.path.join(out_seg_dir, 'TISSUE_CLASS_2_WM_prob.nii'))
            csf_mask = str(os.path.join(out_seg_dir, 'TISSUE_CLASS_3_CSF_prob.nii'))

            thr_hbin_GM1                          = fsl.Threshold()
            thr_hbin_GM1.inputs.in_file           = gm_mask
            thr_hbin_GM1.inputs.thresh            = 0.5
            thr_hbin_GM1.inputs.args              = '-bin'
            thr_hbin_GM1.inputs.ignore_exception  = True
            thr_hbin_GM1.inputs.out_file          = str(os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_BIN.nii.gz'))
            thr_hbin_GM1.run()

            thr_hbin_WM1                          = fsl.Threshold()
            thr_hbin_WM1.inputs.in_file           = wm_mask
            thr_hbin_WM1.inputs.thresh            = 0.5
            thr_hbin_WM1.inputs.args              = '-bin'
            thr_hbin_WM1.inputs.ignore_exception  = True
            thr_hbin_WM1.inputs.out_file          = str(os.path.join(out_seg_dir, 'TISSUE_CLASS_2_WM_BIN.nii.gz'))
            thr_hbin_WM1.run()

            thr_hbin_CSF1                         = fsl.Threshold()
            thr_hbin_CSF1.inputs.in_file          = csf_mask
            thr_hbin_CSF1.inputs.thresh           = 0.5
            thr_hbin_CSF1.inputs.args             = '-bin'
            thr_hbin_CSF1.inputs.ignore_exception = True
            thr_hbin_CSF1.inputs.out_file         = str(os.path.join(out_seg_dir, 'TISSUE_CLASS_3_CSF_BIN.nii.gz'))
            thr_hbin_CSF1.run()

        # ##########################################


        out_seg_dir = os.path.join(subject_dir, 'segmentation_spm')
        if os.path.isfile(os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_OPTIMIZED.nii.gz')):
            print 'Optimized Tissue masks already created......... moving on'

        else:
            print '..... Segmenting subcortex and creating optimized tissue masks'
            # create brain mask from GM, WM, CSF
            gm_bin = os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_BIN.nii.gz')
            wm_bin = os.path.join(out_seg_dir, 'TISSUE_CLASS_2_WM_BIN.nii.gz')
            cm_bin = os.path.join(out_seg_dir, 'TISSUE_CLASS_3_CSF_BIN.nii.gz')
            brain_mask = os.path.join(anatomical_dir, 'ANATOMICAL_brain_mask.nii.gz')
            os.system('fslmaths %s -add %s -add %s -fillh -dilM %s'%(gm_bin,wm_bin, cm_bin,brain_mask))

            # # deskull anatomical
            anatomical_deskull = os.path.join(anatomical_dir, 'ANATOMICAL_DESKULL.nii.gz')
            anatomical_deskull_rpi = os.path.join(anatomical_dir, 'ANATOMICAL_DESKULL_RPI.nii.gz')
            os.system('fslmaths %s -mul %s %s' %(anatomical_file, brain_mask, anatomical_deskull))
            os.system('fslswapdim %s RL PA IS %s'%(anatomical_deskull, anatomical_deskull_rpi))

            # run FLIRT and FIRST
            mkdir_path(os.path.join(out_seg_dir, 'FIRST_subcortical'))
            out_first_dir  = os.path.join(out_seg_dir, 'FIRST_subcortical')
            first_seg = os.path.join(out_first_dir, 'FIRST_all_fast_firstseg.nii.gz')

            if not os.path.isfile(first_seg):
            #if not os.path.isfile(os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_OPTIMIZED.nii.gz')):
                ref = '/usr/share/fsl/5.0/data/standard/MNI152_T1_1mm_brain.nii.gz'
                omat = os.path.join(anatomical_dir, 'ANATOMICAL_DESKULL_RPI_MNI.mat')
                anat2mni = os.path.join(anatomical_dir, 'ANATOMICAL_DESKULL_RPI_MNI.nii.gz')
                print 'running flirt'
                os.system('flirt -in %s -ref %s -out %s -omat %s -cost mutualinfo -dof 12'%(anatomical_deskull_rpi, ref, anat2mni, omat))
                print 'running first'
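                # FSL-FIRST fits subcortical shape models to the deskulled anatomical, using the affine to MNI (-a) computed by FLIRT above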
                os.system('run_first_all -v -i %s -a %s -o %s/FIRST'%(anatomical_deskull_rpi, omat, out_first_dir))

                # flip back to anatomical orientation
                first_seg = os.path.join(out_first_dir, 'FIRST_all_fast_firstseg.nii.gz')
                first_seg_ail = os.path.join(out_first_dir, 'FIRST_all_fast_firstseg_AIL.nii.gz')
                os.system('fslswapdim %s AP IS LR %s' %(first_seg, first_seg_ail))

                # create subcortically corrected tissue masks and flip them back to correct orientation
                first_seg_ail_bin = os.path.join(out_first_dir, 'FIRST_all_fast_firstseg_AIL_BIN.nii.gz')
                gm_combined = os.path.join(out_seg_dir, 'TISSUE_CLASS_1_GM_OPTIMIZED.nii.gz')
                wm_combined = os.path.join(out_seg_dir, 'TISSUE_CLASS_2_WM_OPTIMIZED.nii.gz')
                cm_combined = os.path.join(out_seg_dir, 'TISSUE_CLASS_3_CSF_OPTIMIZED.nii.gz')

                os.system('fslmaths %s -bin %s' %(first_seg_ail, first_seg_ail_bin))
                os.system('fslmaths %s -add %s -bin %s' %(first_seg_ail_bin, gm_bin, gm_combined))
                os.system('fslmaths %s -sub %s -bin %s' %(wm_bin, first_seg_ail_bin, wm_combined))
                os.system('fslmaths %s -sub %s -bin %s' %(cm_bin,first_seg_ail_bin, cm_combined))

            print 'done'
Exemple #24
0
def return_fd_tsnr_dist(population, out_dir, pipeline_name):

    fd_means = []
    tsnr_files = []
    mask_files = []
    missing_subjects = []
    for subject in population:

        subject_dir = os.path.join(out_dir, pipeline_name, subject)
        mkdir_path(os.path.join(subject_dir, 'quality_control'))
        qc_dir = os.path.join(subject_dir, 'quality_control')

        fd1d = os.path.join(subject_dir, 'functional_motion_FDPower/FD.1D')
        if os.path.isfile(fd1d):
            fd_means.append(np.mean(np.genfromtxt(fd1d)))

        else:
            print subject, 'has no fd1d'
            missing_subjects.append(subject)

        os.chdir(qc_dir)
        pp_file = os.path.join(
            subject_dir,
            'functional_native_brain_preproc/REST_calc_resample_corrected_volreg_maths_brain.nii.gz'
        )

        tsnr_file = os.path.join(
            qc_dir,
            'REST_calc_resample_corrected_volreg_maths_brain_tsnr.nii.gz')
        mask_file = os.path.join(
            subject_dir,
            'functional_native_brain_preproc_mask/REST_calc_resample_corrected_volreg_maths_brain_mask.nii.gz'
        )

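        # reuse an existing tSNR map if present; otherwise compute one from the preprocessed series with TSNR()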
        if os.path.isfile(tsnr_file):
            tsnr_files.append(tsnr_file)
            mask_files.append(mask_file)
        else:
            if os.path.isfile(pp_file):
                tsnr = TSNR()
                tsnr.inputs.in_file = pp_file
                res = tsnr.run()
                tsnr_files.append(res.outputs.tsnr_file)
            else:
                print subject, 'has no functional_native_preproc'

    tsnr_distributions = volumes.get_median_distribution(
        tsnr_files, mask_files)
    population_fd_means = fd_means

    np.savetxt(
        os.path.join(out_dir, 'GluConnectivity',
                     'population_fd_distributions.txt'), population_fd_means)
    np.savetxt(
        os.path.join(out_dir, 'GluConnectivity',
                     'population_tsnr_distributions.txt'), tsnr_distributions)

    print 'FD mean=', population_fd_means
    print 'TSNR_distribution=', tsnr_distributions
    print ''
def preprocess_anatomical(population, workspace):

    print '========================================================================================'
    print ''
    print '                    Tourettome - 001.Anatomical Data PreProcessing                      '
    print ''
    print '========================================================================================'


    for subject in population:
        print '========================================================================================'
        print '-Preprocessing anatomical data for %s' %subject

        rawdir    = os.path.join(workspace, subject, 'RAW')
        anatdir   = mkdir_path(os.path.join(workspace, subject, 'ANATOMICAL'))
        spmdir    = mkdir_path(os.path.join(anatdir, 'seg_spm'))
        firstdir  = mkdir_path(os.path.join(anatdir, 'seg_first'))

        ####### RUN SPM SEGMENTATION

        if not os.path.isfile(os.path.join(spmdir, 'c1ANATOMICAL.nii')):

            ### Deoblique ###  Replace transformation matrix in header with cardinal matrix. This option DOES NOT deoblique the volume.
            os.system('3drefit -deoblique %s' %os.path.join(rawdir, 'ANATOMICAL.nii.gz'))

            ### SEGMENT
            if not os.path.isfile(os.path.join(spmdir, 'mANATOMICAL.nii')):

                print '..... Running SPM segmentation'

                os.chdir(spmdir)
                if not os.path.isfile(os.path.join(spmdir, 'ANATOMICAL.nii')):
                    shutil.copy(os.path.join(rawdir, 'ANATOMICAL.nii.gz'), os.path.join(spmdir, 'ANATOMICAL.nii.gz'))
                    os.system('fslchfiletype NIFTI %s' % os.path.join(spmdir, 'ANATOMICAL'))

                    seg = spm.NewSegment()
                    seg.inputs.channel_files = os.path.join(spmdir, 'ANATOMICAL.nii')
                    seg.inputs.channel_info = (0.0001, 60, (True, True))
                    seg.base_dir = spmdir
                    seg.run()


        ####### DESKULL data using segmentation

        if not os.path.isfile(os.path.join(anatdir, 'ANATOMICAL_BRAIN.nii.gz')):

            print '..... Deskulling'

            os.chdir(spmdir)
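            # the union of the thresholded GM/WM/CSF probability maps (filled and smoothed) defines the brain mask used to deskull the bias-corrected mANATOMICAL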
            os.system('fslmaths c1ANATOMICAL -thr 0.5 -bin c1ANATOMICAL_thr05')
            os.system('fslmaths c2ANATOMICAL -thr 0.5 -bin c2ANATOMICAL_thr05')
            os.system('fslmaths c3ANATOMICAL -thr 0.5 -bin c3ANATOMICAL_thr05')
            os.system('fslmaths c1ANATOMICAL_thr05 -add c2ANATOMICAL_thr05 -add c3ANATOMICAL_thr05 -bin -fillh -s 3 -thr 0.5 -bin ../ANATOMICAL_BRAIN_MASK')
            os.system('fslmaths mANATOMICAL -mas ../ANATOMICAL_BRAIN_MASK ../ANATOMICAL_BRAIN')


        ####### OPTIMIZE masks with FSL-FIRST subcortical segmentation

        if not os.path.isfile(os.path.join(anatdir, 'ANATOMICAL_GM.nii.gz')):

            print '..... Optimizing tissue masks'

            if not os.path.isfile(os.path.join(firstdir, 'FIRST_all_fast_firstseg.nii.gz')):
                os.chdir(firstdir)
                print '.........flirt anat2mni for priors'
                os.system('flirt -in ../ANATOMICAL_BRAIN.nii.gz -ref %s -omat anat2mni.mat -out anat2mni -cost mutualinfo -dof 12'%(mni_brain_1mm)) # no skulls. brain to brain.
                print '......... fsl-first'
                os.system('run_first_all -d -i ../ANATOMICAL_BRAIN -b -a anat2mni.mat -o FIRST')

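            # Merge the FIRST subcortical structures (minus brainstem) into the GM mask
            # and remove them from the WM/CSF masks derived from the SPM posteriors.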
            os.system('fslmaths FIRST_all_fast_firstseg -sub FIRST-BrStem_corr -bin FIRST')
            os.system('fslmaths %s/c1ANATOMICAL -thr 0.55 -bin -add FIRST -bin ../ANATOMICAL_GM'  %spmdir)
            os.system('fslmaths %s/c2ANATOMICAL -thr 0.9  -bin -sub FIRST -bin ../ANATOMICAL_WM'  %spmdir)
            os.system('fslmaths %s/c3ANATOMICAL -sub 0.9  -bin -sub FIRST -bin ../ANATOMICAL_CSF' %spmdir)
        def run_lcmodel_raw(voxel_name, ppmst):

            print ''
            print 'PROCESSING SPECTRA WITH LCMODEL FOR %s PPMST = %s'%(voxel_name, ppmst)
            #
            #mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix', voxel_name,  'ppm_%s'%ppmst, 'met'))
            #mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix', voxel_name,  'ppm_%s'%ppmst, 'h2o'))
            #lcmodel_dir = os.path.join(workspace_dir, subject, 'lcmodel_twix',voxel_name,  'ppm_%s'%ppmst)

            mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH', voxel_name,  'ppm_%s'%ppmst, 'met'))
            mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH', voxel_name,  'ppm_%s'%ppmst, 'h2o'))
            lcmodel_dir = os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH',voxel_name,  'ppm_%s'%ppmst)

            shutil.copy(os.path.join(twix_dir, '%s'%voxel_name, '%s'%voxel_name, '%s_lcm'%voxel_name),
                        os.path.join(lcmodel_dir, 'met', 'RAW'))

            shutil.copy(os.path.join(twix_dir, '%s'%voxel_name, '%s_w'%voxel_name, '%s_w_lcm'%voxel_name),
                        os.path.join(lcmodel_dir, 'h2o', 'RAW'))

            met = os.path.join(lcmodel_dir, 'met', 'RAW')
            h2o = os.path.join(lcmodel_dir, 'h2o', 'RAW')

            # read some data from the RDA header
            rda_info = []
            rda_header = open(os.path.join(workspace_dir, subject, 'lcmodel_rda', voxel_name, 'ppm_%s'%ppmst, 'rda_header.txt'), 'r')
            for line in rda_header:
               rda_info.append(line)

            # define twix parameters
            nunfil = 2078
            hzpppm = 123.242398
            echot  = 30.0
            deltat = 0.000417
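            # LCModel acquisition parameters for this TWIX series:
            #   nunfil - number of complex points per FID
            #   hzpppm - spectrometer frequency in MHz (Hz per ppm)
            #   echot  - echo time in ms
            #   deltat - dwell time in s (bandwidth = 1/deltat, here ~2.4 kHz)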


            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
                              Building the control file
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
            print '...building control file'
            file = open(os.path.join(lcmodel_dir, 'control'), "w")
            file.write(" $LCMODL\n")
            file.write(" title= 'TWIX - %s' \n" %rda_info[0])
            file.write(" srcraw= '%s' \n" %met)
            file.write(" srch2o= '%s' \n" %h2o)
            file.write(" savdir= '%s' \n" %lcmodel_dir)
            file.write(" ppmst= %s \n"%ppmst)
            file.write(" ppmend= 0.3\n")
            file.write(" nunfil= %s\n"%nunfil)
            file.write(" ltable= 7\n")
            file.write(" lps= 8\n")
            file.write(" lprint= 6\n")
            file.write(" lcsv= 11\n")
            file.write(" lcoraw= 10\n")
            file.write(" lcoord= 9\n")
            file.write(" hzpppm= %s\n"%hzpppm)
            file.write(" filtab= '%s/table'\n" %lcmodel_dir)
            file.write(" filraw= '%s/met/RAW'\n" %lcmodel_dir)
            file.write(" filps= '%s/ps'\n" %lcmodel_dir)
            file.write(" filpri= '%s/print'\n" %lcmodel_dir)
            file.write(" filh2o= '%s/h2o/RAW'\n" %lcmodel_dir)
            file.write(" filcsv= '%s/spreadsheet.csv'\n" %lcmodel_dir)
            file.write(" filcor= '%s/coraw'\n" %lcmodel_dir)
            file.write(" filcoo= '%s/coord'\n" %lcmodel_dir)
            file.write(" filbas= '/home/raid3/kanaan/.lcmodel/basis-sets/press_te30_3t_01a.basis'\n")
            file.write(" echot= %s \n" %echot)
            file.write(" dows= T \n")
            file.write(" NEACH= 999 \n") # export met fits
            #file.write(" DEGPPM =0 \n")
            file.write(" doecc= T\n")
            file.write(" deltat= %s\n"%deltat)
            file.write(" $END\n")
            file.close()

            if os.path.isfile(os.path.join(lcmodel_dir,  'spreadsheet.csv')):
                 print 'Spectrum already processed .................moving on'
            else:
                print '...running standardA4pdf execution-script '
                print ''
                lcm_command = ['/bin/sh','/home/raid3/kanaan/.lcmodel/execution-scripts/standardA4pdfv3','%s' %lcmodel_dir,'30','%s' %lcmodel_dir,'%s' %lcmodel_dir]
                print '... running execution script'
                print subprocess.list2cmdline(lcm_command)
                subprocess.call(lcm_command)

            reader = open(os.path.join(lcmodel_dir, 'table'), 'r')
            for line in reader:
                if 'FWHM' in line:
                    fwhm = float(line[9:14])
                    snrx  = line[29:31]
                if 'Data shift' in line:
                    shift = line[15:21]
                if 'Ph:' in line:
                    ph0 = line[6:10]
                    ph1 = line[19:24]
                    fwhm_hz = fwhm * 123.24

                    file = open(os.path.join(lcmodel_dir, 'snr.txt'), "w")
                    file.write('%s, %s, %s, %s, %s, %s' %(fwhm,fwhm_hz, snrx, shift, ph0, ph1))
                    file.close()
            print '###############################################################################'
    def create_group_dataframe(voxel_name, analysis_type, ppmst):

        df_group = []
        for subject in population:
            print subject
            header = open(
                os.path.join(
                    workspace_dir, subject, 'svs_rda', 'ACC', 'h2o',
                    '%s%s_ACC_WATER.rda' %
                    (subject, workspace_dir[-10:-9]))).read().splitlines()
            gender = [i[11:15] for i in header if 'PatientSex' in i][0]
            age = [i[13:15] for i in header if 'PatientAge' in i][0]

            analysis_file = os.path.join(workspace_dir, subject,
                                         'lcmodel_%s' % analysis_type,
                                         voxel_name, 'ppm_%s' % ppmst, 'table')

            if not os.path.isfile(analysis_file):
                print 'Subject %s has no %s data for %s' % (
                    subject, analysis_type, voxel_name)

            else:
                print os.path.join(workspace_dir, subject,
                                   'lcmodel_%s' % analysis_type, voxel_name,
                                   'ppm_%s' % ppmst, 'snr.txt')
                quality = np.genfromtxt(os.path.join(
                    workspace_dir, subject, 'lcmodel_%s' % analysis_type,
                    voxel_name, 'ppm_%s' % ppmst, 'snr.txt'),
                                        delimiter=',')

                # grab tissue proportion data
                prop_gm, prop_wm, prop_csf, prop_all = np.genfromtxt(
                    os.path.join(
                        workspace_dir, subject, 'svs_voxel_stats',
                        '%s_voxel_statistics_spm_opt.txt' % voxel_name),
                    delimiter=',')

                # get lcmodel metabolites from spreadsheet csv
                csv = pd.read_csv(
                    os.path.join(workspace_dir, subject,
                                 'lcmodel_%s' % analysis_type, voxel_name,
                                 'ppm_%s' % ppmst, 'spreadsheet.csv'))

                # create dataframe with subject demographics, frequency data and reliable metabolite concentrations
                columns = [
                    'Age',
                    'Gender',
                    'FWHM',
                    'SNR',
                    'Shift',
                    'Ph0',
                    'Ph1',
                    'GM',
                    'WM',
                    'CSF',
                    'AllTissue',
                    'Cre',
                    'Cre%',
                    'tCho',
                    'tCho%',
                    'NAA',
                    'NAA%',
                    'NAAG',
                    'NAAG%',
                    'tNAA',
                    'tNAA%',
                    'mIno',
                    'mIno%',
                    'Glu',
                    'Glu%',
                    'Gln',
                    'Gln%',
                    'Glx',
                    'Glx%',
                    'Glu_Cre',
                    'Gln_Cre',
                    'Glx_Cre',
                    'GABA',
                    'GABA%',
                    'Asp',
                    'Asp%',
                    'Tau',
                    'Tau%',
                    'Lac',
                    'Lac%',
                    'Ala',
                    'Ala%',
                    'Scy',
                    'Scy%',
                    'Glc',
                    'Glc%',
                    'Gua',
                    'Gua%',
                ]

                df_subject = pd.DataFrame(columns=columns,
                                          index=['%s' % subject])
                df_subject.loc['%s' % subject] = pd.Series({
                    'Age':
                    age,
                    'Gender':
                    gender,
                    'FWHM':
                    quality[1],
                    'SNR':
                    quality[2],
                    'Shift':
                    quality[3],
                    'Ph0':
                    quality[4],
                    'Ph1':
                    quality[5],
                    'GM':
                    prop_gm * 100.,
                    'WM':
                    prop_wm * 100.,
                    'CSF':
                    prop_csf * 100.,
                    'AllTissue':
                    prop_all * 100.,
                    'Cre':
                    float(csv[' Cre']),
                    'Cre%':
                    float(csv[' Cre %SD']),
                    'tCho':
                    float(csv[' GPC+PCh']),
                    'tCho%':
                    float(csv[' GPC+PCh %SD']),
                    'tNAA':
                    float(csv[' NAA+NAAG']),
                    'tNAA%':
                    float(csv[' NAA+NAAG %SD']),
                    'NAA':
                    float(csv[' NAA']),
                    'NAA%':
                    float(csv[' NAA %SD']),
                    'NAAG':
                    float(csv[' NAAG']),
                    'NAAG%':
                    float(csv[' NAAG %SD']),
                    'mIno':
                    float(csv[' mI']),
                    'mIno%':
                    float(csv[' mI %SD']),
                    'Glu':
                    float(csv[' Glu']),
                    'Glu%':
                    float(csv[' Glu %SD']),
                    'Gln':
                    float(csv[' Gln']),
                    'Gln%':
                    float(csv[' Gln %SD']),
                    'Glx':
                    float(csv[' Glu+Gln']),
                    'Glx%':
                    float(csv[' Glu+Gln %SD']),
                    'Glu_Cre':
                    float(csv[' Glu/Cre']),
                    'Gln_Cre':
                    float(csv[' Gln/Cre']),
                    'Glx_Cre':
                    float(csv[' Glu+Gln/Cre']),
                    'GABA':
                    float(csv[' GABA']),
                    'GABA%':
                    float(csv[' GABA %SD']),
                    'Asp':
                    float(csv[' Asp']),
                    'Asp%':
                    float(csv[' Asp %SD']),
                    'Ala':
                    float(csv[' Ala']),
                    'Ala%':
                    float(csv[' Ala %SD']),
                    'Lac':
                    float(csv[' Lac']),
                    'Lac%':
                    float(csv[' Lac %SD']),
                    'Tau':
                    float(csv[' Tau']),
                    'Tau%':
                    float(csv[' Tau %SD']),
                    #'Gua'         : float(csv[' Gua']),         'Gua%'        : float(csv[' Gua %SD']),
                    #'Glc'         : float(csv[' Glc']),         'Glc%'        : float(csv[' Glc %SD']),
                    'Scy':
                    float(csv[' Scyllo']),
                    'Scy%':
                    float(csv[' Scyllo %SD']),
                })
                # append subject data to list
                df_group.append(df_subject)

        group_dataframe = pd.concat(df_group,
                                    ignore_index=False).sort(columns='Age')

        # create results directory and save group dataframe
        mkdir_path(os.path.join(results_dir, voxel_name))
        #group_dataframe.to_csv(os.path.join(results_dir, voxel_name, 'lcmodel_%s_%s_ppmst_%s_%s_%s.csv'%(voxel_name, analysis_type, ppmst, workspace_dir[-8:],workspace_dir[-10:-9])))
        group_dataframe.to_csv(
            os.path.join(
                results_dir, voxel_name,
                'v2_lcmodel_%s_%s_ppmst_%s_%s_%s.csv' %
                (voxel_name, analysis_type, ppmst, workspace_dir[-8:],
                 workspace_dir[-10:-9])))

        print 'NMR-093%s_ %s %s_%s Results here: %s' % (
            workspace_dir[-10:-9], workspace_dir[-8:], voxel_name,
            analysis_type, results_dir)
def run_freesurfer_mask_connectivity(
    pop_name,
    population,
    freesurfer_dir,
    workspace_dir,
    mrs_datadir,
):

    df = pd.DataFrame(index=[population], columns=columnx)

    for subject in population:

        print '####################### Subject %s' % subject

        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'RSFC_CONNECTIVITY')
        mkdir_path(outdir)

        func_pproc = os.path.join(
            subject_dir,
            'functional_native_brain_preproc_FWHM_AROMA_residual_bp/bandpassed_demeaned_filtered.nii.gz'
        )  # 2.3 mm
        func_mean = os.path.join(
            subject_dir,
            'functional_native_brain_preproc_mean/REST_calc_resample_corrected_volreg_maths_tstat.nii'
        )
        func_aroma = os.path.join(
            subject_dir,
            'functional_native_brain_preproc_FWHM_AROMA/denoised_func_data_nonaggr.nii.gz'
        )
        func_gm = os.path.join(
            subject_dir,
            'functional_native_gm/TISSUE_CLASS_1_GM_OPTIMIZED_resample_flirt_thresh_maths.nii.gz'
        )
        anat_func = os.path.join(
            subject_dir,
            'anatomical_FUNC2mm_brain/MP2RAGE_DESKULL_RPI_resample_ero_flirt_flirt.nii.gz'
        )  # 2.3mm
        anat_func_xfm = os.path.join(
            subject_dir,
            'anatomical_FUNC2mm_xfm/REST_calc_resample_corrected_volreg_maths_tstat_flirt_inv.mat'
        )
        mni2natwarp = os.path.join(
            subject_dir,
            'MNI2mm_ANAT_xfm/MP2RAGE_DESKULL_RPI_resample_ero_fieldwarp_inverse.nii.gz'
        )

        #######################################    Grab ATAG masks   #######################################

        STN_LEFT = os.path.join(outdir, 'ATAG_STN_LEFT.nii.gz')
        SN_LEFT = os.path.join(outdir, 'ATAG_SN_LEFT.nii.gz')
        GPe_LEFT = os.path.join(outdir, 'ATAG_GPE_left.nii.gz')
        GPi_LEFT = os.path.join(outdir, 'ATAG_GPi_left.nii.gz')

        os.system('applywarp -i %s -r %s -w %s --postmat=%s -o %s' %
                  (mni_stn_left_1mm, anat_func, mni2natwarp, anat_func_xfm,
                   STN_LEFT))
        os.system(
            'applywarp -i %s -r %s -w %s --postmat=%s -o %s' %
            (mni_sn_left_1mm, anat_func, mni2natwarp, anat_func_xfm, SN_LEFT))

        os.system('fslmaths %s -bin %s' % (STN_LEFT, STN_LEFT))
        os.system('fslmaths %s -bin %s' % (SN_LEFT, SN_LEFT))

        #######################################    Grab Subcortical masks   #######################################
        print '1. grabbing FIRST Subcortical masks'

        STR = os.path.join(subject_dir, 'functional_subcortical',
                           'left_str.nii.gz')
        CAUx = os.path.join(subject_dir, 'functional_subcortical',
                            'left_caudate.nii.gz')
        PUT = os.path.join(subject_dir, 'functional_subcortical',
                           'left_putamen.nii.gz')
        PAL = os.path.join(subject_dir, 'functional_subcortical',
                           'left_pallidum.nii.gz')
        NAC = os.path.join(subject_dir, 'functional_subcortical',
                           'left_nacc.nii.gz')
        HIP = os.path.join(subject_dir, 'functional_subcortical',
                           'left_hipoocampus.nii.gz')
        AMG = os.path.join(subject_dir, 'functional_subcortical',
                           'left_amygdala.nii.gz')

        THA = os.path.join(subject_dir, 'functional_subcortical',
                           'thalamus.nii.gz')
        lTHA = os.path.join(subject_dir, 'functional_subcortical',
                            'left_thalamus.nii.gz')
        rTHA = os.path.join(subject_dir, 'functional_subcortical',
                            'right_thalamus.nii.gz')

        #######################################    Fill Caudate Holes   #######################################

        CAU = os.path.join(subject_dir, 'functional_subcortical',
                           'left_caudate_fill.nii.gz')

        if not os.path.isfile(CAU):
            os.system('fslmaths %s -fillh %s' % (CAUx, CAU))

        #######################################    Grab svs masks   #######################################
        print '2. grabbing SVS masks'

        svs_acc_src = os.path.join(
            mrs_datadir, pop_name, subject, 'svs_voxel_mask',
            '%s%s_ACC_RDA_MASK.nii' % (subject, mrs_datadir[-1]))
        svs_tha_src = os.path.join(
            mrs_datadir, pop_name, subject, 'svs_voxel_mask',
            '%s%s_THA_RDA_MASK.nii' % (subject, mrs_datadir[-1]))
        svs_str_src = os.path.join(
            mrs_datadir, pop_name, subject, 'svs_voxel_mask',
            '%s%s_STR_RDA_MASK.nii' % (subject, mrs_datadir[-1]))

        svs_acc = os.path.join(outdir, 'svs_acc.nii.gz')
        svs_tha = os.path.join(outdir, 'svs_tha.nii.gz')
        svs_str = os.path.join(outdir, 'svs_str.nii.gz')

        svs_acc_func = os.path.join(outdir, 'svs_acc_func.nii.gz')
        svs_tha_func = os.path.join(outdir, 'svs_tha_func.nii.gz')
        svs_str_func = os.path.join(outdir, 'svs_str_func.nii.gz')

        if not os.path.isfile(svs_acc_func):
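            # Reorient the RDA-derived voxel masks to match the anatomical (RL/PA/IS),
            # resample them into native functional space with the anat->func affine,
            # then re-binarise after interpolation.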
            os.system('fslswapdim %s RL PA IS %s' % (svs_acc_src, svs_acc))
            os.system('fslswapdim %s RL PA IS %s' % (svs_tha_src, svs_tha))
            os.system('fslswapdim %s RL PA IS %s' % (svs_str_src, svs_str))

            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                      (svs_acc, anat_func, anat_func_xfm, svs_acc_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                      (svs_tha, anat_func, anat_func_xfm, svs_tha_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                      (svs_str, anat_func, anat_func_xfm, svs_str_func))
            os.system('fslmaths %s -thr 0.5 -bin %s' %
                      (svs_acc_func, svs_acc_func))
            os.system('fslmaths %s -thr 0.5 -bin %s' %
                      (svs_tha_func, svs_tha_func))
            os.system('fslmaths %s -thr 0.5 -bin %s' %
                      (svs_str_func, svs_str_func))

        #######################################   Grab freesurfer masks  #######################################
        print '3. grabbing Freesurfer masks'

        os.environ['SUBJECTS_DIR'] = freesurfer_dir  # os.system('export ...') would not persist for the FreeSurfer calls below

        t1mgz = os.path.join(freesurfer_dir, subject, 'mri', 'T1.mgz')
        segmgz = os.path.join(freesurfer_dir, subject, 'mri',
                              'aparc.a2009s+aseg.mgz')
        t1nii = os.path.join(outdir, 'freesurfer_T1.nii.gz')
        segnii = os.path.join(outdir, 'freesurfer_seg.nii.gz')

        fs_la_acc = os.path.join(outdir, 'freesurfer_seg_la_MCC_11107.nii.gz'
                                 )  # 11107  ctx_lh_G_and_S_cingul-Mid-Ant
        fs_ra_acc = os.path.join(outdir, 'freesurfer_seg_ra_MCC_12107.nii.gz'
                                 )  # 12107  ctx_rh_G_and_S_cingul-Mid-Ant
        fs_acc = os.path.join(outdir, 'freesurfer_seg_aMCC_11107_12107.nii.gz')

        fs_la_insula = os.path.join(outdir,
                                    'freesurfer_seg_la_INS_11148.nii.gz'
                                    )  # 11148  ctx_lh_S_circular_insula_ant
        fs_ra_insula = os.path.join(outdir,
                                    'freesurfer_seg_ra_INS_12148.nii.gz'
                                    )  # 12148  ctx_rh_S_circular_insula_ant

        if not os.path.isfile(fs_acc):
            os.system('mri_convert %s %s' % (t1mgz, t1nii))
            os.system('mri_convert %s %s' % (segmgz, segnii))

            os.system('fslmaths %s -thr 11107 -uthr 11107 %s ' %
                      (segnii, fs_la_acc))
            os.system('fslmaths %s -thr 12107 -uthr 12107 %s ' %
                      (segnii, fs_ra_acc))
            os.system('fslmaths %s -add %s -dilM -bin %s' %
                      (fs_la_acc, fs_ra_acc, fs_acc))

            os.system('fslmaths %s -thr 11148 -uthr 11148 -dilM -bin %s' %
                      (segnii, fs_la_insula))
            os.system('fslmaths %s -thr 12148 -uthr 12148 -dilM -bin %s' %
                      (segnii, fs_ra_insula))

        labels_dir = os.path.join(freesurfer_dir, subject, 'label')
        fs_ba6_rh = os.path.join(outdir, 'freesurfer_seg_SMA_BA6_rh.nii.gz')
        fs_ba6_lh = os.path.join(outdir, 'freesurfer_seg_SMA_BA6_lh.nii.gz')
        fs_sma = os.path.join(outdir, 'freesurfer_seg_SMA_BA6.nii.gz')

        if not os.path.isfile(fs_sma):
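            # Rasterise the FreeSurfer BA6 (SMA) surface labels into T1 volume space
            # with mri_label2vol, then merge and dilate the two hemispheres.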
            os.system(
                'mri_label2vol --label %s/rh.BA6.thresh.label --subject %s --temp %s --regheader %s --o %s'
                % (labels_dir, subject, t1mgz, t1mgz, fs_ba6_rh))
            os.system(
                'mri_label2vol --label %s/lh.BA6.thresh.label --subject %s --temp %s --regheader %s --o %s'
                % (labels_dir, subject, t1mgz, t1mgz, fs_ba6_lh))
            os.system('fslmaths  %s -add %s -dilM -dilM %s' %
                      (fs_ba6_rh, fs_ba6_lh, fs_sma))

        #######################################   TRANSFORM Freesurfer masks to native func space   #######################################
        print '4. Transforming Freesurfer masks to native func space'
        t1nii_rpi = os.path.join(outdir, 'freesurfer_T1_RPI.nii.gz')
        fs_acc_rpi = os.path.join(
            outdir, 'freesurfer_seg_aMCC_11107_12107_RPI.nii.gz')
        fs_la_insula_rpi = os.path.join(
            outdir, 'freesurfer_seg_la_INS_11148_RPI.nii.gz')
        fs_ra_insula_rpi = os.path.join(
            outdir, 'freesurfer_seg_ra_INS_12148_RPI.nii.gz')
        fs_sma_rpi = os.path.join(outdir, 'freesurfer_seg_SMA_BA6_RPI.nii.gz')

        fst1omat = os.path.join(outdir, 'freesurfer2func.mat')
        fst1func = os.path.join(outdir, 'freesurfer_T1_func.nii.gz')
        fs_acc_func = os.path.join(
            outdir, 'freesurfer_seg_aMCC_11107_12107_func.nii.gz')
        fs_la_insula_func = os.path.join(
            outdir, 'freesurfer_seg_la_INS_11148_func.nii.gz')
        fs_ra_insula_func = os.path.join(
            outdir, 'freesurfer_seg_ra_INS_12148_func.nii.gz')
        fs_sma_func = os.path.join(outdir,
                                   'freesurfer_seg_SMA_BA6_func.nii.gz')

        if not os.path.isfile(t1nii_rpi):
            os.system('fslswapdim %s RL PA IS %s' % (t1nii, t1nii_rpi))
            os.system('fslswapdim %s RL PA IS %s' % (fs_acc, fs_acc_rpi))
            os.system('fslswapdim %s RL PA IS %s' %
                      (fs_la_insula, fs_la_insula_rpi))
            os.system('fslswapdim %s RL PA IS %s' %
                      (fs_ra_insula, fs_ra_insula_rpi))
            os.system('fslswapdim %s RL PA IS %s' % (fs_sma, fs_sma_rpi))
            os.system(
                'flirt -in %s -ref %s -omat %s -dof 6 -out %s -cost mutualinfo'
                % (t1nii_rpi, anat_func, fst1omat, fst1func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                      (fs_acc_rpi, anat_func, fst1omat, fs_acc_func))
            os.system(
                'flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                (fs_la_insula_rpi, anat_func, fst1omat, fs_la_insula_func))
            os.system(
                'flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                (fs_ra_insula_rpi, anat_func, fst1omat, fs_ra_insula_func))
            os.system('flirt -in %s -ref %s -init %s -applyxfm -out %s' %
                      (fs_sma_rpi, anat_func, fst1omat, fs_sma_func))

            os.system('fslmaths  %s -thr 0.5 -bin %s' %
                      (fs_acc_func, fs_acc_func))
            os.system('fslmaths  %s -thr 0.5 -bin %s' %
                      (fs_la_insula_func, fs_la_insula_func))
            os.system('fslmaths  %s -thr 0.5 -bin %s' %
                      (fs_ra_insula_func, fs_ra_insula_func))
            os.system('fslmaths  %s -thr 0.5 -bin %s' %
                      (fs_sma_func, fs_sma_func))

        if os.path.isfile(fs_sma_func):
            sma_load = nb.load(fs_sma_func).get_data()
            x, y, z = center_of_mass(sma_load)
            sma_point = os.path.join(outdir, 'sma_point.nii.gz')
            fs_sma_optimized = os.path.join(
                outdir, 'freesurfer_seg_SMA_BA6_func_opt.nii.gz')

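            # Build a compact SMA seed: one voxel at the mask's centre of mass,
            # then grow/smooth it into a ~10 mm sphere ROI in functional space.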
            os.system(
                'fslmaths %s -mul 0 -add 1 -roi %s 1 %s 1 %s 1 0 1 %s -odt float'
                % (func_mean, x, y, z, sma_point))
            os.system(
                'fslmaths %s -kernel sphere 10 -fmean -dilM -dilM -ero -ero %s -odt float'
                % (sma_point, fs_sma_optimized))

        #######################################   GET MOTION PARAMS   #######################################
        print '5. Grabbing motion parameters'

        motion = os.path.join(
            subject_dir,
            'functional_motion_statistics/motion_power_params.txt')
        if os.path.isfile(motion):
            power = pd.read_csv(motion)
            exclude = power.loc[subject][' FD_exclude']
            fd = power.loc[subject]['Subject']
        if os.path.isfile(func_aroma) and os.path.isfile(func_gm):
            dvars = np.mean(return_DVARS(func_aroma, func_gm))
            print dvars

        #######################################   GEN TIMESERIES OF ROIs   #######################################
        print '6. Extracting timeseries and calculating connectivity'

        if os.path.isfile(func_pproc):
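            # Each masker averages the preprocessed signal over its (binary) ROI and
            # standardizes it, giving one time series per region for the correlations below.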

            stn_timeseries = input_data.NiftiLabelsMasker(
                labels_img=STN_LEFT,
                standardize=True).fit_transform(func_pproc)
            sn_timeseries = input_data.NiftiLabelsMasker(
                labels_img=SN_LEFT, standardize=True).fit_transform(func_pproc)

            str_timeseries = input_data.NiftiLabelsMasker(
                labels_img=STR, standardize=True).fit_transform(func_pproc)
            tha_timeseries = input_data.NiftiLabelsMasker(
                labels_img=THA, standardize=True).fit_transform(func_pproc)
            thaL_timeseries = input_data.NiftiLabelsMasker(
                labels_img=lTHA, standardize=True).fit_transform(func_pproc)
            thaR_timeseries = input_data.NiftiLabelsMasker(
                labels_img=rTHA, standardize=True).fit_transform(func_pproc)

            cau_timeseries = input_data.NiftiLabelsMasker(
                labels_img=CAU, standardize=True).fit_transform(func_pproc)
            put_timeseries = input_data.NiftiLabelsMasker(
                labels_img=PUT, standardize=True).fit_transform(func_pproc)
            pal_timeseries = input_data.NiftiLabelsMasker(
                labels_img=PAL, standardize=True).fit_transform(func_pproc)
            nac_timeseries = input_data.NiftiLabelsMasker(
                labels_img=NAC, standardize=True).fit_transform(func_pproc)
            hip_timeseries = input_data.NiftiLabelsMasker(
                labels_img=HIP, standardize=True).fit_transform(func_pproc)
            amg_timeseries = input_data.NiftiLabelsMasker(
                labels_img=AMG, standardize=True).fit_transform(func_pproc)

            mACC_timeseries = input_data.NiftiLabelsMasker(
                labels_img=fs_acc_func,
                standardize=True).fit_transform(func_pproc)
            lINS_timeseries = input_data.NiftiLabelsMasker(
                labels_img=fs_la_insula_func,
                standardize=True).fit_transform(func_pproc)
            rINS_timeseries = input_data.NiftiLabelsMasker(
                labels_img=fs_ra_insula_func,
                standardize=True).fit_transform(func_pproc)
            SMA_timeseries = input_data.NiftiLabelsMasker(
                labels_img=fs_sma_optimized,
                standardize=True).fit_transform(func_pproc)

            mACCX_timeseries = input_data.NiftiLabelsMasker(
                labels_img=svs_acc_func,
                standardize=True).fit_transform(func_pproc)
            strX_timeseries = input_data.NiftiLabelsMasker(
                labels_img=svs_str_func,
                standardize=True).fit_transform(func_pproc)
            thaX_timeseries = input_data.NiftiLabelsMasker(
                labels_img=svs_tha_func,
                standardize=True).fit_transform(func_pproc)

            print '......calculating Subthalamic Nucleus connectivity'
            df.loc[subject]['stn_pal'] = float(
                pearsonr(stn_timeseries, pal_timeseries)[0])
            df.loc[subject]['stn_acc'] = float(
                pearsonr(stn_timeseries, mACC_timeseries)[0])
            df.loc[subject]['stn_tha'] = float(
                pearsonr(stn_timeseries, tha_timeseries)[0])
            df.loc[subject]['stn_thaX'] = float(
                pearsonr(stn_timeseries, thaX_timeseries)[0])
            df.loc[subject]['stn_thaL'] = float(
                pearsonr(stn_timeseries, thaL_timeseries)[0])
            df.loc[subject]['stn_thaR'] = float(
                pearsonr(stn_timeseries, thaR_timeseries)[0])
            df.loc[subject]['stn_hip'] = float(
                pearsonr(stn_timeseries, hip_timeseries)[0])
            df.loc[subject]['stn_amg'] = float(
                pearsonr(stn_timeseries, amg_timeseries)[0])
            df.loc[subject]['stn_accX'] = float(
                pearsonr(stn_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['stn_lins'] = float(
                pearsonr(stn_timeseries, lINS_timeseries)[0])
            df.loc[subject]['stn_rins'] = float(
                pearsonr(stn_timeseries, rINS_timeseries)[0])
            df.loc[subject]['stn_sma'] = float(
                pearsonr(stn_timeseries, SMA_timeseries)[0])
            df.loc[subject]['stn_strX'] = float(
                pearsonr(stn_timeseries, strX_timeseries)[0])
            df.loc[subject]['stn_str'] = float(
                pearsonr(stn_timeseries, str_timeseries)[0])
            df.loc[subject]['stn_cau'] = float(
                pearsonr(stn_timeseries, cau_timeseries)[0])
            df.loc[subject]['stn_put'] = float(
                pearsonr(stn_timeseries, put_timeseries)[0])
            df.loc[subject]['stn_nac'] = float(
                pearsonr(stn_timeseries, nac_timeseries)[0])

            print '......calculating Substantia Nigra connectivity'
            df.loc[subject]['sn_pal'] = float(
                pearsonr(sn_timeseries, pal_timeseries)[0])
            df.loc[subject]['sn_acc'] = float(
                pearsonr(sn_timeseries, mACC_timeseries)[0])
            df.loc[subject]['sn_tha'] = float(
                pearsonr(sn_timeseries, tha_timeseries)[0])
            df.loc[subject]['sn_thaX'] = float(
                pearsonr(sn_timeseries, thaX_timeseries)[0])
            df.loc[subject]['sn_thaL'] = float(
                pearsonr(sn_timeseries, thaL_timeseries)[0])
            df.loc[subject]['sn_thaR'] = float(
                pearsonr(sn_timeseries, thaR_timeseries)[0])
            df.loc[subject]['sn_hip'] = float(
                pearsonr(sn_timeseries, hip_timeseries)[0])
            df.loc[subject]['sn_amg'] = float(
                pearsonr(sn_timeseries, amg_timeseries)[0])
            df.loc[subject]['sn_accX'] = float(
                pearsonr(sn_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['sn_lins'] = float(
                pearsonr(sn_timeseries, lINS_timeseries)[0])
            df.loc[subject]['sn_rins'] = float(
                pearsonr(sn_timeseries, rINS_timeseries)[0])
            df.loc[subject]['sn_sma'] = float(
                pearsonr(sn_timeseries, SMA_timeseries)[0])
            df.loc[subject]['sn_strX'] = float(
                pearsonr(sn_timeseries, strX_timeseries)[0])
            df.loc[subject]['sn_str'] = float(
                pearsonr(sn_timeseries, str_timeseries)[0])
            df.loc[subject]['sn_cau'] = float(
                pearsonr(sn_timeseries, cau_timeseries)[0])
            df.loc[subject]['sn_put'] = float(
                pearsonr(sn_timeseries, put_timeseries)[0])
            df.loc[subject]['sn_nac'] = float(
                pearsonr(sn_timeseries, nac_timeseries)[0])

            print '......calculating STR_SVS connectivity'
            df.loc[subject]['strX_acc'] = float(
                pearsonr(strX_timeseries, mACC_timeseries)[0])
            df.loc[subject]['strX_tha'] = float(
                pearsonr(strX_timeseries, tha_timeseries)[0])
            df.loc[subject]['strX_thaX'] = float(
                pearsonr(strX_timeseries, thaX_timeseries)[0])
            df.loc[subject]['strX_thaL'] = float(
                pearsonr(strX_timeseries, thaL_timeseries)[0])
            df.loc[subject]['strX_thaR'] = float(
                pearsonr(strX_timeseries, thaR_timeseries)[0])
            df.loc[subject]['strX_hip'] = float(
                pearsonr(strX_timeseries, hip_timeseries)[0])
            df.loc[subject]['strX_amg'] = float(
                pearsonr(strX_timeseries, amg_timeseries)[0])
            df.loc[subject]['strX_accX'] = float(
                pearsonr(strX_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['strX_lins'] = float(
                pearsonr(strX_timeseries, lINS_timeseries)[0])
            df.loc[subject]['strX_rins'] = float(
                pearsonr(strX_timeseries, rINS_timeseries)[0])
            df.loc[subject]['strX_sma'] = float(
                pearsonr(strX_timeseries, SMA_timeseries)[0])

            print '......calculating STR connectivity'
            df.loc[subject]['str_acc'] = float(
                pearsonr(str_timeseries, mACC_timeseries)[0])
            df.loc[subject]['str_tha'] = float(
                pearsonr(str_timeseries, tha_timeseries)[0])
            df.loc[subject]['str_thaX'] = float(
                pearsonr(str_timeseries, thaX_timeseries)[0])
            df.loc[subject]['str_thaL'] = float(
                pearsonr(str_timeseries, thaL_timeseries)[0])
            df.loc[subject]['str_thaR'] = float(
                pearsonr(str_timeseries, thaR_timeseries)[0])
            df.loc[subject]['str_hip'] = float(
                pearsonr(str_timeseries, hip_timeseries)[0])
            df.loc[subject]['str_amg'] = float(
                pearsonr(str_timeseries, amg_timeseries)[0])
            df.loc[subject]['str_accX'] = float(
                pearsonr(str_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['str_lins'] = float(
                pearsonr(str_timeseries, lINS_timeseries)[0])
            df.loc[subject]['str_rins'] = float(
                pearsonr(str_timeseries, rINS_timeseries)[0])
            df.loc[subject]['str_sma'] = float(
                pearsonr(str_timeseries, SMA_timeseries)[0])

            print '......calculating CAUDATE connectivity'
            df.loc[subject]['cau_acc'] = float(
                pearsonr(cau_timeseries, mACC_timeseries)[0])
            df.loc[subject]['cau_tha'] = float(
                pearsonr(cau_timeseries, tha_timeseries)[0])
            df.loc[subject]['cau_thaX'] = float(
                pearsonr(cau_timeseries, thaX_timeseries)[0])
            df.loc[subject]['cau_thaL'] = float(
                pearsonr(cau_timeseries, thaL_timeseries)[0])
            df.loc[subject]['cau_thaR'] = float(
                pearsonr(cau_timeseries, thaR_timeseries)[0])
            df.loc[subject]['cau_pal'] = float(
                pearsonr(cau_timeseries, pal_timeseries)[0])
            df.loc[subject]['cau_hip'] = float(
                pearsonr(cau_timeseries, hip_timeseries)[0])
            df.loc[subject]['cau_amg'] = float(
                pearsonr(cau_timeseries, amg_timeseries)[0])
            df.loc[subject]['cau_accX'] = float(
                pearsonr(cau_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['cau_lins'] = float(
                pearsonr(cau_timeseries, lINS_timeseries)[0])
            df.loc[subject]['cau_rins'] = float(
                pearsonr(cau_timeseries, rINS_timeseries)[0])
            df.loc[subject]['cau_sma'] = float(
                pearsonr(cau_timeseries, SMA_timeseries)[0])

            print '......calculating PUTAMEN connectivity'
            df.loc[subject]['put_tha'] = float(
                pearsonr(put_timeseries, tha_timeseries)[0])
            df.loc[subject]['put_thaX'] = float(
                pearsonr(put_timeseries, thaX_timeseries)[0])
            df.loc[subject]['put_thaL'] = float(
                pearsonr(put_timeseries, thaL_timeseries)[0])
            df.loc[subject]['put_thaR'] = float(
                pearsonr(put_timeseries, thaR_timeseries)[0])
            df.loc[subject]['put_pal'] = float(
                pearsonr(put_timeseries, pal_timeseries)[0])
            df.loc[subject]['put_hip'] = float(
                pearsonr(put_timeseries, hip_timeseries)[0])
            df.loc[subject]['put_amg'] = float(
                pearsonr(put_timeseries, amg_timeseries)[0])
            df.loc[subject]['put_acc'] = float(
                pearsonr(put_timeseries, mACC_timeseries)[0])
            df.loc[subject]['put_accX'] = float(
                pearsonr(put_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['put_lins'] = float(
                pearsonr(put_timeseries, lINS_timeseries)[0])
            df.loc[subject]['put_rins'] = float(
                pearsonr(put_timeseries, rINS_timeseries)[0])
            df.loc[subject]['put_sma'] = float(
                pearsonr(put_timeseries, SMA_timeseries)[0])

            print '......calculating NUCLEUS ACCUMBENS connectivity'
            df.loc[subject]['nac_tha'] = float(
                pearsonr(nac_timeseries, tha_timeseries)[0])
            df.loc[subject]['nac_thaX'] = float(
                pearsonr(nac_timeseries, thaX_timeseries)[0])
            df.loc[subject]['nac_thaL'] = float(
                pearsonr(nac_timeseries, thaL_timeseries)[0])
            df.loc[subject]['nac_thaR'] = float(
                pearsonr(nac_timeseries, thaR_timeseries)[0])
            df.loc[subject]['nac_pal'] = float(
                pearsonr(nac_timeseries, pal_timeseries)[0])
            df.loc[subject]['nac_hip'] = float(
                pearsonr(nac_timeseries, hip_timeseries)[0])
            df.loc[subject]['nac_amg'] = float(
                pearsonr(nac_timeseries, amg_timeseries)[0])
            df.loc[subject]['nac_acc'] = float(
                pearsonr(nac_timeseries, mACC_timeseries)[0])
            df.loc[subject]['nac_accX'] = float(
                pearsonr(nac_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['nac_lins'] = float(
                pearsonr(nac_timeseries, lINS_timeseries)[0])
            df.loc[subject]['nac_rins'] = float(
                pearsonr(nac_timeseries, rINS_timeseries)[0])
            df.loc[subject]['nac_sma'] = float(
                pearsonr(nac_timeseries, SMA_timeseries)[0])

            print '......calculating PALLIDUM connectivity'
            df.loc[subject]['pal_tha'] = float(
                pearsonr(pal_timeseries, tha_timeseries)[0])
            df.loc[subject]['pal_thaX'] = float(
                pearsonr(pal_timeseries, thaX_timeseries)[0])
            df.loc[subject]['pal_thaL'] = float(
                pearsonr(pal_timeseries, thaL_timeseries)[0])
            df.loc[subject]['pal_thaR'] = float(
                pearsonr(pal_timeseries, thaR_timeseries)[0])
            df.loc[subject]['pal_hip'] = float(
                pearsonr(pal_timeseries, hip_timeseries)[0])
            df.loc[subject]['pal_amg'] = float(
                pearsonr(pal_timeseries, amg_timeseries)[0])
            df.loc[subject]['pal_acc'] = float(
                pearsonr(pal_timeseries, mACC_timeseries)[0])
            df.loc[subject]['pal_accX'] = float(
                pearsonr(pal_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['pal_lins'] = float(
                pearsonr(pal_timeseries, lINS_timeseries)[0])
            df.loc[subject]['pal_rins'] = float(
                pearsonr(pal_timeseries, rINS_timeseries)[0])
            df.loc[subject]['pal_sma'] = float(
                pearsonr(pal_timeseries, SMA_timeseries)[0])

            print '......calculating THA_SVS connectivity'
            df.loc[subject]['thaX_cau'] = float(
                pearsonr(thaX_timeseries, cau_timeseries)[0])
            df.loc[subject]['thaX_put'] = float(
                pearsonr(thaX_timeseries, put_timeseries)[0])
            df.loc[subject]['thaX_pal'] = float(
                pearsonr(thaX_timeseries, pal_timeseries)[0])
            df.loc[subject]['thaX_nac'] = float(
                pearsonr(thaX_timeseries, nac_timeseries)[0])
            df.loc[subject]['thaX_hip'] = float(
                pearsonr(thaX_timeseries, hip_timeseries)[0])
            df.loc[subject]['thaX_amg'] = float(
                pearsonr(thaX_timeseries, amg_timeseries)[0])
            df.loc[subject]['thaX_acc'] = float(
                pearsonr(thaX_timeseries, mACC_timeseries)[0])
            df.loc[subject]['thaX_accX'] = float(
                pearsonr(thaX_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['thaX_lins'] = float(
                pearsonr(thaX_timeseries, lINS_timeseries)[0])
            df.loc[subject]['thaX_rins'] = float(
                pearsonr(thaX_timeseries, rINS_timeseries)[0])
            df.loc[subject]['thaX_sma'] = float(
                pearsonr(thaX_timeseries, SMA_timeseries)[0])

            print '......calculating THALAMUS FULL connectivity'
            df.loc[subject]['tha_cau'] = float(
                pearsonr(tha_timeseries, cau_timeseries)[0])
            df.loc[subject]['tha_put'] = float(
                pearsonr(tha_timeseries, put_timeseries)[0])
            df.loc[subject]['tha_pal'] = float(
                pearsonr(tha_timeseries, pal_timeseries)[0])
            df.loc[subject]['tha_nac'] = float(
                pearsonr(tha_timeseries, nac_timeseries)[0])
            df.loc[subject]['tha_hip'] = float(
                pearsonr(tha_timeseries, hip_timeseries)[0])
            df.loc[subject]['tha_amg'] = float(
                pearsonr(tha_timeseries, amg_timeseries)[0])
            df.loc[subject]['tha_acc'] = float(
                pearsonr(tha_timeseries, mACC_timeseries)[0])
            df.loc[subject]['tha_accX'] = float(
                pearsonr(tha_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['tha_lins'] = float(
                pearsonr(tha_timeseries, lINS_timeseries)[0])
            df.loc[subject]['tha_rins'] = float(
                pearsonr(tha_timeseries, rINS_timeseries)[0])
            df.loc[subject]['tha_sma'] = float(
                pearsonr(tha_timeseries, SMA_timeseries)[0])

            print '......calculating THALAMUS RIGHT connectivity'
            df.loc[subject]['thaR_cau'] = float(
                pearsonr(thaR_timeseries, cau_timeseries)[0])
            df.loc[subject]['thaR_put'] = float(
                pearsonr(thaR_timeseries, put_timeseries)[0])
            df.loc[subject]['thaR_pal'] = float(
                pearsonr(thaR_timeseries, pal_timeseries)[0])
            df.loc[subject]['thaR_nac'] = float(
                pearsonr(thaR_timeseries, nac_timeseries)[0])
            df.loc[subject]['thaR_hip'] = float(
                pearsonr(thaR_timeseries, hip_timeseries)[0])
            df.loc[subject]['thaR_amg'] = float(
                pearsonr(thaR_timeseries, amg_timeseries)[0])
            df.loc[subject]['thaR_acc'] = float(
                pearsonr(thaR_timeseries, mACC_timeseries)[0])
            df.loc[subject]['thaR_accX'] = float(
                pearsonr(thaR_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['thaR_lins'] = float(
                pearsonr(thaR_timeseries, lINS_timeseries)[0])
            df.loc[subject]['thaR_rins'] = float(
                pearsonr(thaR_timeseries, rINS_timeseries)[0])
            df.loc[subject]['thaR_sma'] = float(
                pearsonr(thaR_timeseries, SMA_timeseries)[0])

            print '......calculating THALAMUS LEFT connectivity'
            df.loc[subject]['thaL_cau'] = float(
                pearsonr(thaL_timeseries, cau_timeseries)[0])
            df.loc[subject]['thaL_put'] = float(
                pearsonr(thaL_timeseries, put_timeseries)[0])
            df.loc[subject]['thaL_pal'] = float(
                pearsonr(thaL_timeseries, pal_timeseries)[0])
            df.loc[subject]['thaL_nac'] = float(
                pearsonr(thaL_timeseries, nac_timeseries)[0])
            df.loc[subject]['thaL_hip'] = float(
                pearsonr(thaL_timeseries, hip_timeseries)[0])
            df.loc[subject]['thaL_amg'] = float(
                pearsonr(thaL_timeseries, amg_timeseries)[0])
            df.loc[subject]['thaL_acc'] = float(
                pearsonr(thaL_timeseries, mACC_timeseries)[0])
            df.loc[subject]['thaL_accX'] = float(
                pearsonr(thaL_timeseries, mACCX_timeseries)[0])
            df.loc[subject]['thaL_lins'] = float(
                pearsonr(thaL_timeseries, lINS_timeseries)[0])
            df.loc[subject]['thaL_rins'] = float(
                pearsonr(thaL_timeseries, rINS_timeseries)[0])
            df.loc[subject]['thaL_sma'] = float(
                pearsonr(thaL_timeseries, SMA_timeseries)[0])

            print '......calculating ACC connectivity'
            df.loc[subject]['acc_lins'] = float(
                pearsonr(mACC_timeseries, lINS_timeseries)[0])
            df.loc[subject]['acc_rins'] = float(
                pearsonr(mACC_timeseries, rINS_timeseries)[0])
            df.loc[subject]['acc_sma'] = float(
                pearsonr(mACC_timeseries, SMA_timeseries)[0])
            df.loc[subject]['accX_lins'] = float(
                pearsonr(mACCX_timeseries, lINS_timeseries)[0])
            df.loc[subject]['accX_rins'] = float(
                pearsonr(mACCX_timeseries, rINS_timeseries)[0])
            df.loc[subject]['accX_sma'] = float(
                pearsonr(mACCX_timeseries, SMA_timeseries)[0])

            print '......calculating SMA connectivity'
            df.loc[subject]['sma_lins'] = float(
                pearsonr(SMA_timeseries, lINS_timeseries)[0])
            df.loc[subject]['sma_rins'] = float(
                pearsonr(SMA_timeseries, rINS_timeseries)[0])

            df.loc[subject]['fd'] = fd
            df.loc[subject]['exclude'] = exclude
            df.loc[subject]['dvars'] = dvars

    df.to_csv(
        os.path.join(workspace_dir, 'GluConnectivity',
                     'x4_RSFC_df_%s_%s.csv' % (pop_name, mrs_datadir[-1])))
    print 'done'
        def run_lcmodel_raw(voxel_name, ppmst):

            print ''
            print 'PROCESSING SPECTRA WITH LCMODEL FOR %s PPMST = %s' % (
                voxel_name, ppmst)
            #
            #mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix', voxel_name,  'ppm_%s'%ppmst, 'met'))
            #mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix', voxel_name,  'ppm_%s'%ppmst, 'h2o'))
            #lcmodel_dir = os.path.join(workspace_dir, subject, 'lcmodel_twix',voxel_name,  'ppm_%s'%ppmst)

            mkdir_path(
                os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH',
                             voxel_name, 'ppm_%s' % ppmst, 'met'))
            mkdir_path(
                os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH',
                             voxel_name, 'ppm_%s' % ppmst, 'h2o'))
            lcmodel_dir = os.path.join(workspace_dir, subject,
                                       'lcmodel_twix_NMEACH', voxel_name,
                                       'ppm_%s' % ppmst)

            shutil.copy(
                os.path.join(twix_dir, '%s' % voxel_name, '%s' % voxel_name,
                             '%s_lcm' % voxel_name),
                os.path.join(lcmodel_dir, 'met', 'RAW'))

            shutil.copy(
                os.path.join(twix_dir, '%s' % voxel_name, '%s_w' % voxel_name,
                             '%s_w_lcm' % voxel_name),
                os.path.join(lcmodel_dir, 'h2o', 'RAW'))

            met = os.path.join(lcmodel_dir, 'met', 'RAW')
            h2o = os.path.join(lcmodel_dir, 'h2o', 'RAW')

            # read some data from the RDA header
            rda_info = []
            rda_header = open(
                os.path.join(workspace_dir, subject, 'lcmodel_rda', voxel_name,
                             'ppm_%s' % ppmst, 'rda_header.txt'), 'r')
            for line in rda_header:
                rda_info.append(line)

            # define twix parameters
            nunfil = 2078
            hzpppm = 123.242398
            echot = 30.0
            deltat = 0.000417
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
                              Building the control file
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
            print '...building control file'
            file = open(os.path.join(lcmodel_dir, 'control'), "w")
            file.write(" $LCMODL\n")
            file.write(" title= 'TWIX - %s' \n" % rda_info[0])
            file.write(" srcraw= '%s' \n" % met)
            file.write(" srch2o= '%s' \n" % h2o)
            file.write(" savdir= '%s' \n" % lcmodel_dir)
            file.write(" ppmst= %s \n" % ppmst)
            file.write(" ppmend= 0.3\n")
            file.write(" nunfil= %s\n" % nunfil)
            file.write(" ltable= 7\n")
            file.write(" lps= 8\n")
            file.write(" lprint= 6\n")
            file.write(" lcsv= 11\n")
            file.write(" lcoraw= 10\n")
            file.write(" lcoord= 9\n")
            file.write(" hzpppm= %s\n" % hzpppm)
            file.write(" filtab= '%s/table'\n" % lcmodel_dir)
            file.write(" filraw= '%s/met/RAW'\n" % lcmodel_dir)
            file.write(" filps= '%s/ps'\n" % lcmodel_dir)
            file.write(" filpri= '%s/print'\n" % lcmodel_dir)
            file.write(" filh2o= '%s/h2o/RAW'\n" % lcmodel_dir)
            file.write(" filcsv= '%s/spreadsheet.csv'\n" % lcmodel_dir)
            file.write(" filcor= '%s/coraw'\n" % lcmodel_dir)
            file.write(" filcoo= '%s/coord'\n" % lcmodel_dir)
            file.write(
                " filbas= '/home/raid3/kanaan/.lcmodel/basis-sets/press_te30_3t_01a.basis'\n"
            )
            file.write(" echot= %s \n" % echot)
            file.write(" dows= T \n")
            file.write(" NEACH= 999 \n")  # export met fits
            #file.write(" DEGPPM =0 \n")
            file.write(" doecc= T\n")
            file.write(" deltat= %s\n" % deltat)
            file.write(" $END\n")
            file.close()

            if os.path.isfile(os.path.join(lcmodel_dir, 'spreadsheet.csv')):
                print 'Spectrum already processed .................moving on'
            else:
                print '...running standardA4pdf execution-script '
                print ''
                lcm_command = [
                    '/bin/sh',
                    '/home/raid3/kanaan/.lcmodel/execution-scripts/standardA4pdfv3',
                    '%s' % lcmodel_dir, '30',
                    '%s' % lcmodel_dir,
                    '%s' % lcmodel_dir
                ]
                print '... running execution script'
                print subprocess.list2cmdline(lcm_command)
                subprocess.call(lcm_command)

            reader = open(os.path.join(lcmodel_dir, 'table'), 'r')
            for line in reader:
                if 'FWHM' in line:
                    fwhm = float(line[9:14])
                    snrx = line[29:31]
                if 'Data shift' in line:
                    shift = line[15:21]
                if 'Ph:' in line:
                    ph0 = line[6:10]
                    ph1 = line[19:24]
                    fwhm_hz = fwhm * 123.24

                    file = open(os.path.join(lcmodel_dir, 'snr.txt'), "w")
                    file.write('%s, %s, %s, %s, %s, %s' %
                               (fwhm, fwhm_hz, snrx, shift, ph0, ph1))
                    file.close()
            print '###############################################################################'
Exemple #30
0
def dicom_convert(population, data_dir, workspace_dir):

    print '#############################################################################'
    print ''
    print '                 RUNNING PROJECT %s %s' % (data_dir[12:19],
                                                      workspace_dir[-8:])
    print ''
    print '#############################################################################'

    count = 0
    for subject in population:
        count += 1
        print '====================================================================='
        print '%s- DICOM CONVERSION for %s' % (count, subject)

        # define dicom directory for each subject
        dicom_dir = os.path.join(data_dir, subject, 'DICOM')

        # define destination directory for NIFTI outputs
        mkdir_path(os.path.join(workspace_dir, subject, 'anatomical_original'))
        out_nifti_dir = str(
            os.path.join(workspace_dir, subject, 'anatomical_original'))

        if not os.path.isfile(os.path.join(out_nifti_dir, 'ANATOMICAL.nii')):
            # create a list of all dicoms with absolute paths for each file
            dicom_list = []
            for dicom in os.listdir(dicom_dir):
                dicomstr = os.path.join(dicom_dir, dicom)
                dicom_list.append(dicomstr)

            # grab SeriesDescription and append T1 files to list
            T1_list = []
            print 'Reading dicom series descriptions'
            for dicom in dicom_list:
                try:
                    dcm_read = pydicom.read_file(dicom, force=True)
                    sequence = dcm_read.SeriesDescription
                except AttributeError:
                    continue

                if 'mp2rage_p3_602B_UNI_Images' in sequence:
                    T1_list.append(dicom)

            # convert T1 anatomical to NIFTI with SPM
            print 'Converting Dicom to Nifti for %s' % subject
            spm_dicom_convert = spmu.DicomImport()
            spm_dicom_convert.inputs.format = 'nii'
            spm_dicom_convert.inputs.in_files = T1_list
            spm_dicom_convert.inputs.output_dir = out_nifti_dir
            spm_dicom_convert.run()

            #rename output file
            for file in os.listdir(out_nifti_dir):
                if file.endswith('nii'):
                    os.rename(
                        str(os.path.join(out_nifti_dir, file)),
                        str(os.path.join(out_nifti_dir, 'ANATOMICAL.nii')))
        else:
            print 'subject already processed.......moving on'

        print '====================================================================='
        print ''
def quantiation_correction(population, workspace_dir, analysis_type):

    print "#############################################################################"
    print ""
    print "                 RUNNNING PROJECT NMR-093%s %s" % (workspace_dir[-10:-9], workspace_dir[-8:])
    print ""
    print "#############################################################################"

    # output dir
    mkdir_path(os.path.join(workspace_dir[:-8], "group_statistics"))
    results_dir = os.path.join(workspace_dir[:-8], "group_statistics")

    def save_reliable_concentrations(population, workspace_dir, voxel_name, analysis_type):

        csv_list = []
        for subject in population:
            # get metabolite data for each subject and append to a list
            csv = os.path.join(workspace_dir, subject, "lcmodel_%s" % analysis_type, voxel_name, "spreadsheet.csv")
            if os.path.isfile(csv):
                reader = pd.read_csv(csv)
                reader.insert(0, "Subject", subject)
                csv_list.append(reader)

        # create a dataframe with the reliable metabolite data for every subject
        df = pd.concat(csv_list, ignore_index=True)
        reliable = df.loc[
            :,
            [
                "Subject",
                " Cre",
                " Cre %SD",
                " GPC+PCh",
                " GPC+PCh %SD",
                " NAA+NAAG",
                " NAA+NAAG %SD",
                " mI",
                " mI %SD",
                " Glu",
                " Glu %SD",
                " Gln",
                " Gln %SD",
                " Glu+Gln",
                " Glu+Gln %SD",
            ],
        ]

        # sort subjects alphabetically and reset index
        reliable = reliable.sort_values(by="Subject")
        reliable = reliable.reset_index(drop=True)

        # save reliable dataframe
        reliable.to_csv(
            os.path.join(
                results_dir,
                "lcmodel_%s_%s_%s_%s.csv" % (analysis_type, voxel_name, workspace_dir[-8:], workspace_dir[-10:-9]),
            )
        )

        return reliable

    print "1. Creating new dataframe with reliable LC-Model concentrations for ACC,THA,STR"
    acc_reliable = save_reliable_concentrations(population, workspace_dir, "ACC", analysis_type)
    tha_reliable = save_reliable_concentrations(population, workspace_dir, "THA", analysis_type)
    str_reliable = save_reliable_concentrations(population, workspace_dir, "STR", analysis_type)

    def save_tissue_proportions(population, workspace_dir, voxel_name):
        list_spm = []

        for subject in population:
            # grab tissue proportion data for all subjects and dump into list
            spm = pd.read_csv(
                os.path.join(workspace_dir, subject, "svs_voxel_stats", "%s_voxel_statistics_spm.txt" % voxel_name),
                header=None,
            )
            spm.insert(0, "SUBJECT", subject)
            list_spm.append(spm)

        # create concatenated dataframe for all tissue data in list
        df_spm = pd.concat(list_spm, ignore_index=True)
        df_spm.columns = [
            "SUBJECT",
            "%s_GM" % voxel_name,
            "%s_WM" % voxel_name,
            "%s_CSF" % voxel_name,
            "%s_SUM" % voxel_name,
        ]

        df_spm.to_csv(
            os.path.join(
                results_dir, "proportions_%s_%s_%s.csv" % (voxel_name, workspace_dir[-8:], workspace_dir[-10:-9])
            )
        )
        return df_spm

    print "2. Creating new dataframe with SPM tissue proportions for ACC,THA,STR"
    acc_props = save_tissue_proportions(population, workspace_dir, "ACC")
    tha_props = save_tissue_proportions(population, workspace_dir, "THA")
    str_props = save_tissue_proportions(population, workspace_dir, "STR")

    def calc_asbolute(lcmodel, frac_gm, frac_wm, frac_csf):

        import math

        # lcmodel correction factor
        factor = 55.55 / (35.88 * 0.7)
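        # 55.55 mol/L is the molar concentration of pure water; the 35.88 * 0.7 term is
        # assumed to be the protocol-specific water-reference scaling used by this LCModel setup.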

        # relative water content in tissue.. determined experimentally.
        alpha_gm = 0.81  # 0.78
        alpha_wm = 0.71  # 0.65
        alpha_csf = 1.0  # 1.0

        # attenuation factor for water: R = (1 - exp(-TR/T1)) * exp(-TE/T2),
        # with TR = 3000 ms, TE = 30 ms and tissue-specific T1/T2 values (ms)
        R_H2O_GM = (1.0 - math.e ** (-3000.0 / 1820.0)) * math.e ** (-30.0 / 99.0)
        R_H2O_WM = (1.0 - math.e ** (-3000.0 / 1084.0)) * math.e ** (-30.0 / 69.0)
        R_H2O_CSF = (1.0 - math.e ** (-3000.0 / 4163.0)) * math.e ** (-30.0 / 503.0)

        #########  Correction Equations  #######
        # Tisell equation
        Cmet1 = lcmodel * (((frac_csf * 1.0 * (1.0 - frac_csf)) + (frac_gm * 0.81 + frac_wm * 0.71)) / (1.0 - frac_csf))

        # Gussew equation
        Cmet2 = (
            (lcmodel)
            * (
                (
                    (frac_gm * alpha_gm * R_H2O_GM + frac_wm * alpha_wm * R_H2O_WM + frac_csf * alpha_csf * R_H2O_CSF)
                    / (frac_gm * 1.0 + frac_wm * 1.0)
                )
            )
            * factor
        )
        # Gussew CSF equation
        Cmet3 = (lcmodel) * (1 / (1 - frac_csf))

        # only the Gussew-corrected value (Cmet2) is returned; Cmet1 and Cmet3 are kept for reference
        return Cmet2
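    # Illustrative example (hypothetical numbers, not from any subject): for frac_gm=0.6,
    # frac_wm=0.3, frac_csf=0.1 and an LCModel estimate of 8.0 (institutional units), the
    # Gussew-style correction returned above evaluates as
    #   8.0 * ((0.6*0.81*R_H2O_GM + 0.3*0.71*R_H2O_WM + 0.1*1.0*R_H2O_CSF) / 0.9) * factor
    # i.e. the metabolite estimate is rescaled by the relaxation-weighted water content of
    # the voxel relative to its non-CSF fraction.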

    def create_absolute_df(reliable, proportions, voxel_name):
        cre = calc_asbolute(
            reliable[" Cre"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )
        cho = calc_asbolute(
            reliable[" GPC+PCh"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )
        naa = calc_asbolute(
            reliable[" NAA+NAAG"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )
        ino = calc_asbolute(
            reliable[" mI"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )
        glu = calc_asbolute(
            reliable[" Glu"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )
        gln = calc_asbolute(
            reliable[" Gln"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )
        glx = calc_asbolute(
            reliable[" Glu+Gln"],
            proportions["%s_GM" % voxel_name],
            proportions["%s_WM" % voxel_name],
            proportions["%s_CSF" % voxel_name],
        )

        absolute = pd.DataFrame(
            {
                "Subjects": reliable["Subject"],
                "Cre": cre,
                "GPC+PCh": cho,
                "NAA+NAAG": naa,
                "mI": ino,
                "Glu": glu,
                "Gln": gln,
                "Glu+Gln": glx,
            }
        )

        column_order = ["Subjects", "Cre", "GPC+PCh", "NAA+NAAG", "mI", "Glu", "Gln", "Glu+Gln"]
        absolute = absolute.reindex(columns=column_order)

        absolute.to_csv(
            os.path.join(
                results_dir,
                "absolute_%s_%s_%s_%s.csv" % (analysis_type, voxel_name, workspace_dir[-8:], workspace_dir[-10:-9]),
            )
        )

    print "3. Creating new dataframe with Absolute Concentrations for ACC,THA,STR"
    create_absolute_df(acc_reliable, acc_props, "ACC")
    create_absolute_df(tha_reliable, tha_props, "THA")
    create_absolute_df(str_reliable, str_props, "STR")
Exemple #32
0
def calculate_voxel_statistics(population, workspace_dir):

    print '#############################################################################'
    print ''
    print '                 RUNNING PROJECT NMR-093%s %s' % (
        workspace_dir[-10:-9], workspace_dir[-8:])
    print ''
    print '#############################################################################'

    count = 0
    for subject in population:
        count += 1
        print '========================================================================================'
        print '%s- Calculating Voxel statistics for subject %s_%s' % (
            count, subject, workspace_dir[-10:-9])
        print '.'

        # input files
        spm_dir = os.path.join(workspace_dir, subject, 'segmentation_spm')

        acc_mask = os.path.join(
            workspace_dir, subject, 'svs_voxel_mask',
            '%s%s_ACC_RDA_MASK.nii' % (subject, workspace_dir[-10:-9]))
        tha_mask = os.path.join(
            workspace_dir, subject, 'svs_voxel_mask',
            '%s%s_THA_RDA_MASK.nii' % (subject, workspace_dir[-10:-9]))
        str_mask = os.path.join(
            workspace_dir, subject, 'svs_voxel_mask',
            '%s%s_STR_RDA_MASK.nii' % (subject, workspace_dir[-10:-9]))

        spm_gm = os.path.join(spm_dir, 'TISSUE_CLASS_1_GM_OPTIMIZED.nii.gz')
        spm_wm = os.path.join(spm_dir, 'TISSUE_CLASS_2_WM_OPTIMIZED.nii.gz')
        spm_cm = os.path.join(spm_dir, 'TISSUE_CLASS_3_CSF_OPTIMIZED.nii.gz')

        # output folders
        mkdir_path(os.path.join(workspace_dir, subject, 'svs_voxel_stats'))
        stats_dir = os.path.join(workspace_dir, subject, 'svs_voxel_stats')

        print 'Calculating tissue proportions'

        def calc_props(mask_file, seg_gm, seg_wm, seg_cm, voxel_name):

            if not os.path.isfile(mask_file):
                print 'IOError: [Errno 2] SVS %s mask Does not exist, create masks and come back' % voxel_name

            else:
                if os.path.isfile(
                        os.path.join(
                            stats_dir,
                            '%s_voxel_statistics_spm_opt.txt' % voxel_name)):
                    print 'Voxel statistics already calculated ... moving on'

                else:
                    #grab data
                    spm_gm_data = nb.load(seg_gm).get_data().squeeze()
                    spm_wm_data = nb.load(seg_wm).get_data().squeeze()
                    spm_cm_data = nb.load(seg_cm).get_data().squeeze()
                    vox_data = nb.load(mask_file).get_data().squeeze()

                    #multiply SVS ROI with segmented data for ACC
                    vox_spm_gm = vox_data * spm_gm_data
                    vox_spm_wm = vox_data * spm_wm_data
                    vox_spm_cm = vox_data * spm_cm_data

                    #extract stats from segmentation for acc
                    vox_total_svs = np.sum(vox_data)
                    vox_total_spm_gm = np.sum(vox_spm_gm)
                    vox_total_spm_wm = np.sum(vox_spm_wm)
                    vox_total_spm_cm = np.sum(vox_spm_cm)

                    percent_svs = float(vox_total_svs) / float(vox_total_svs)
                    percent_spm_gm = np.round(
                        float(vox_total_spm_gm) / float(vox_total_svs), 3)
                    percent_spm_wm = np.round(
                        float(vox_total_spm_wm) / float(vox_total_svs), 3)
                    percent_spm_cm = np.round(
                        float(vox_total_spm_cm) / float(vox_total_svs), 3)
                    sum_spm = np.round(
                        float(percent_spm_gm + percent_spm_wm +
                              percent_spm_cm), 3)

                    print '%s.....' % voxel_name
                    print '...%s SPM NewSegment Tissue Proportions = %s%% GM, %s%% WM, %s%% CSF = %s' % (
                        voxel_name, percent_spm_gm, percent_spm_wm,
                        percent_spm_cm, sum_spm)

                    spm_txt = os.path.join(
                        stats_dir,
                        '%s_voxel_statistics_spm_opt.txt' % voxel_name)
                    write_spm = open(spm_txt, 'w')
                    write_spm.write('%s, %s, %s, %s' %
                                    (percent_spm_gm, percent_spm_wm,
                                     percent_spm_cm, sum_spm))
                    write_spm.close()

        calc_props(acc_mask, spm_gm, spm_wm, spm_cm, 'ACC')
        calc_props(tha_mask, spm_gm, spm_wm, spm_cm, 'THA')
        calc_props(str_mask, spm_gm, spm_wm, spm_cm, 'STR')

        print '========================================================================================'
def scrub_data(population, workspace_dir):

    scrub_subjects = []

    for subject in population:
        print '###############################################################################'
        print 'Scrubbing Denoised Data for subject %s' %subject
        print ''

        #input
        subject_dir       = os.path.join(workspace_dir, subject)

        native_residual_compor   = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_NATIVE_detrend_compcor_friston_bp_fwhm.nii.gz')
        native_residual_wmcsf    = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_NATIVE_detrend_wmcsf_friston_bp_fwhm.nii.gz')
        native_residual_global   = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_NATIVE_detrend_global_wmcsf_friston_bp_fwhm.nii.gz')

        mni_residual_compor   = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_MNI2mm_detrend_compcor_friston_bp_fwhm.nii.gz')
        mni_residual_wmcsf    = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_MNI2mm_detrend_wmcsf_friston_bp_fwhm.nii.gz')
        mni_residual_global   = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_MNI2mm_detrend_global_wmcsf_friston_bp_fwhm.nii.gz')

        mni_aroma_compcor     = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_MNI2mm_FWHM_AROMA_detrend_compcor_friston_bp.nii.gz')
        mni_aroma_wmcsf       = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_MNI2mm_FWHM_AROMA_detrend_wmcsf_friston_bp.nii.gz')
        mni_aroma_global      = os.path.join(subject_dir, 'FUNC_DENOISE/RESIDUAL_MNI2mm_FWHM_AROMA_detrend_global_wmcsf_friston_bp.nii.gz')

        #output
        scrub_dir =  os.path.join(subject_dir,'FUNC_DENOISE_SCRUB')
        mkdir_path(scrub_dir)
        os.chdir(scrub_dir)

        # scrubbing function
        def scrub(denoised_img, denoised_name):
            print '..Scrubbing Denoised image'
            print '----->', denoised_name[9:]
            scrubbed   = os.path.join(scrub_dir , '%s_scrubbed.nii.gz'%denoised_name)
            if not os.path.isfile(scrubbed):
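                # '%s%s' appends an AFNI sub-brick selector (e.g. file.nii.gz[0,1,2,...])
                # so 3dcalc copies only the retained low-motion frames into the scrubbed file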
                os.system("3dcalc -a %s%s -expr 'a' -prefix %s" %(denoised_img, in_frames_string, scrubbed))

        # get good frames (FD < 0.2) and limit to the first 150
        in_frames = []
        fd1d        =   np.genfromtxt(os.path.join(subject_dir,'QUALITY_CONTROL/FD.1D'))
        for frame, fd in enumerate(fd1d):
            if fd < 0.2:
                in_frames.append(frame)
        in_frames_string = str(in_frames[0:150]).replace(" ", "")
        if len(in_frames) > 150:
            print '..Subject has more than 150 Good Frames (3.6 mins)'
        else:
            print '..Subject has fewer than 150 Good Frames; scrubbing with all available good frames'

        # run scrubbing
        scrub(native_residual_compor, 'RESIDUAL_NATIVE_detrend_compcor_friston_bp_fwhm')
        scrub(native_residual_wmcsf,  'RESIDUAL_NATIVE_detrend_wmcsf_friston_bp_fwhm')
        scrub(native_residual_global, 'RESIDUAL_NATIVE_detrend_global_wmcsf_friston_bp_fwhm')

        scrub(mni_residual_compor, 'RESIDUAL_MNI2mm_detrend_compcor_friston_bp_fwhm')
        scrub(mni_residual_wmcsf,  'RESIDUAL_MNI2mm_detrend_wmcsf_friston_bp_fwhm')
        scrub(mni_residual_global, 'RESIDUAL_MNI2mm_detrend_global_wmcsf_friston_bp_fwhm')

        scrub(mni_aroma_compcor, 'RESIDUAL_MNI2mm_FWHM_AROMA_detrend_compcor_friston_bp')
        scrub(mni_aroma_wmcsf, 'RESIDUAL_MNI2mm_FWHM_AROMA_detrend_wmcsf_friston_bp')
        scrub(mni_aroma_global, 'RESIDUAL_MNI2mm_FWHM_AROMA_detrend_global_wmcsf_friston_bp')
Exemple #34
0
        def create_voxel_plots(voxel_name, ppmst):
            print '...Working on %s %s ' % (analysis_type, ppmst)
            #create output QC directory
            mkdir_path(
                os.path.join(subject_dir, 'quality_control', analysis_type,
                             voxel_name, 'ppm_%s' % ppmst, 'tmp'))
            qc_dir = os.path.join(subject_dir, 'quality_control',
                                  analysis_type, voxel_name, 'ppm_%s' % ppmst)
            tmp_dir = os.path.join(qc_dir, 'tmp')

            #grab lcmodel plots
            svs_lcmodel = os.path.join(subject_dir,
                                       'lcmodel_%s' % analysis_type,
                                       voxel_name, 'ppm_%s' % ppmst, 'ps.pdf')

            # create localization pngs
            make_png = [
                'convert', '-density', '300', '-trim',
                '%s' % svs_lcmodel, '-quality', '300', '-sharpen', '0x1.0',
                '%s/%s_lcmodel.png' % (tmp_dir, voxel_name)
            ]
            subprocess.call(make_png)
            lcm_plot = os.path.join(tmp_dir, '%s_lcmodel-0.png' % voxel_name)

            #grab snr/fwhm data
            svs_snr = np.genfromtxt(os.path.join(subject_dir,
                                                 'lcmodel_%s' % analysis_type,
                                                 voxel_name, 'ppm_%s' % ppmst,
                                                 'snr.txt'),
                                    delimiter=',')

            #grab voxel mask
            svs = os.path.join(
                subject_dir, 'svs_voxel_mask', '%s%s_%s_RDA_MASK.nii' %
                (subject, workspace_dir[-10:-9], voxel_name))

            #get data into matrix
            anat_load = nb.load(anatomical)
            svs_load = nb.load(svs)
            anat_data = anat_load.get_data()
            svs_data = svs_load.get_data()

            # get svs cut coords
            coords = find_cut_coords(svs_load)

            # convert zeros to nans for visualization purposes
            svs_data[svs_data == 0] = np.nan

            # plot voxel on anat
            fig = plt.figure()
            fig.set_size_inches(6.5, 6.5)
            fig.subplots_adjust(wspace=0.005)
            #1
            ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=1, rowspan=1)
            ax1.imshow(anat_data[coords[0], :, :], matplotlib.cm.bone_r)
            ax1.imshow(svs_data[coords[0], :, :],
                       matplotlib.cm.rainbow_r,
                       alpha=0.7)
            ax1.set_xlim(23, 157)
            ax1.set_ylim(101, 230)
            ax1.axes.get_yaxis().set_visible(False)
            ax1.axes.get_xaxis().set_visible(False)
            #2
            ax2 = plt.subplot2grid((1, 3), (0, 1), colspan=1, rowspan=1)
            ax2.imshow(np.rot90(anat_data[:, :, coords[2]]),
                       matplotlib.cm.bone_r)
            ax2.imshow(np.rot90(svs_data[:, :, coords[2]]),
                       matplotlib.cm.rainbow_r,
                       alpha=0.7)
            ax2.set_xlim(230, 20)
            ax2.set_ylim(207, 4)
            ax2.axes.get_yaxis().set_visible(False)
            ax2.axes.get_xaxis().set_visible(False)
            #3
            ax3 = plt.subplot2grid((1, 3), (0, 2), colspan=1, rowspan=1)
            ax3.imshow(anat_data[:, coords[1], :],
                       matplotlib.cm.bone_r,
                       origin='lower')
            ax3.imshow(svs_data[:, coords[1], :],
                       matplotlib.cm.rainbow_r,
                       alpha=0.7,
                       origin='lower')
            ax3.set_xlim(38, 140)
            ax3.set_ylim(160, 60)
            ax3.axes.get_yaxis().set_visible(False)
            ax3.axes.get_xaxis().set_visible(False)
            fig.tight_layout()
            fig.savefig('%s/localization_%s.png' % (qc_dir, voxel_name),
                        dpi=200,
                        bbox_inches='tight')

            # create qc report
            report = canvas.Canvas(os.path.join(
                qc_dir, 'QC_REPORT_%s.pdf' % voxel_name),
                                   pagesize=(1280, 1556))
            report.setFont("Helvetica", 40)
            report.drawImage(
                os.path.join(qc_dir, 'localization_%s.png' % voxel_name), 1,
                inch * 13.5)
            report.drawImage(lcm_plot, 30, inch * 1, width=1200, height=800)
            report.drawString(
                230, inch * 20, ' %s%s, %s_%s , SNR=%s FWHM=%s ' %
                (subject, workspace_dir[-10:-9], voxel_name, analysis_type,
                 svs_snr[2], svs_snr[1]))
            report.showPage()

            if analysis_type == 'twix':
                fig_f6 = os.path.join(subject_dir, 'svs_twix', voxel_name,
                                      'f6_frequency_drift_correction.png')
                fig_f7 = os.path.join(subject_dir, 'svs_twix', voxel_name,
                                      'f7_estimated_phase_phase_drift.png')
                fig_f8 = os.path.join(subject_dir, 'svs_twix', voxel_name,
                                      'f8_estimated_frequency_drift.png')
                reader = open(
                    os.path.join(subject_dir, 'svs_twix', voxel_name,
                                 voxel_name, 'readme.txt'), 'r')
                for line in reader:
                    if 'bad' in line:
                        badavn = line[34:38]
                report.drawImage(fig_f6, 1, inch * 7.2)
                report.drawImage(fig_f7, 90, inch * 1, width=540, height=450)
                report.drawImage(fig_f8, 590, inch * 1, width=540, height=450)
                report.setFont("Helvetica", 40)
                report.drawString(350, inch * 20,
                                  'Number of Bad Averages =%s' % (badavn))
                report.save()
            else:
                report.save()
        def run_lcmodel_on_voxel(voxel_name, ppmst):

            #define input and output directories
            svs_dir   = os.path.join(workspace_dir, subject, 'svs_rda')
            rda_met  = os.path.join(svs_dir, voxel_name, 'met', '%s%s_%s_SUPPRESSED.rda' %(subject, workspace_dir[-10:-9], voxel_name))
            rda_h2o  = os.path.join(svs_dir, voxel_name, 'h2o', '%s%s_%s_WATER.rda' %(subject, workspace_dir[-10:-9], voxel_name))
            mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_rda', voxel_name, 'ppm_%s'%ppmst, 'met'))
            mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_rda', voxel_name, 'ppm_%s'%ppmst, 'h2o'))
            lcmodel_dir = os.path.join(workspace_dir, subject, 'lcmodel_rda', voxel_name, 'ppm_%s'%ppmst)


            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
                                RUN LCMODEL BIN2RAW
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

            if os.path.isfile(os.path.join(lcmodel_dir, 'met', 'RAW')) and  os.path.isfile(os.path.join(lcmodel_dir, 'h2o', 'RAW')):
                pass
                #print 'Bin2raw already run.........................moving on '
            else:
                print ' Generating RAW frequency files with BIN2RAW'
                met_bin2raw = ['/home/raid3/kanaan/.lcmodel/siemens/bin2raw', '%s'%rda_met, '%s/'%lcmodel_dir, 'met']
                h2o_bin2raw = ['/home/raid3/kanaan/.lcmodel/siemens/bin2raw', '%s'%rda_h2o, '%s/'%lcmodel_dir, 'h2o']

                subprocess.call(met_bin2raw)
                subprocess.call(h2o_bin2raw)

            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
                              Read Scan parameters from RDA file
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
            reader = open(rda_met, 'r')
            for line in reader:
                if 'SeriesDescription' in line:
                    Series = line[19:30]
                elif 'TR:' in line:
                    TR = line[4:8]
                elif 'TE:' in line:
                    TE = line[4:6]
                elif 'VectorSize' in line:
                    nfil = line[12:16]
                elif 'NumberOfAverages' in line:
                    NS = line[18:21]
                elif 'AcquisitionNumber' in line:#
                    ACQ = line[19:21]
                elif 'PatientSex' in line:
                    Sex = line[12:13]
                elif 'SeriesNumber' in line:
                    Seriesnum = line[14:16]
                elif 'PatientAge' in line:#
                    Age = line[12:15]
                elif 'PatientWeight' in line:
                    Weight = line[15:17]
                elif 'PixelSpacingRow' in line:
                    PSR = float(line[17:20])
                elif 'PixelSpacingCol' in line:
                    PSC = float(line[17:20])
                elif 'PixelSpacing3D:' in line:
                    PS3d = float(line[16:19])
                elif 'StudyDate' in line:
                    datex = line[0:19]

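            # voxel volume in millilitres: product of the three voxel dimensions (mm) divided by 1000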
            volume = np.round(((PSR * PSC * PS3d) / 1000),2)

            header = open(os.path.join(lcmodel_dir, 'rda_header.txt'), 'w')
            header.write('%s(%s %s %skg); %s; %s %sx%sx%s=%s; TR/TE/NS=%s/%s/%s'
                          %(subject, Sex, Age, Weight, datex, voxel_name, PSR,PSC,PS3d,volume, TR,TE, NS))
            header.close()
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
                              Building the control file
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
            if os.path.isfile(os.path.join(lcmodel_dir, 'control')):
                pass
                #print 'Control file already created................moving on'
            else:
                print 'Processing Spectra with LCMODEL'
                print '...building control file'

                file = open(os.path.join(lcmodel_dir, 'control'), "w")
                file.write(" $LCMODL\n")
                file.write(" title= 'RDA - %s(%s %s %skg), %s, %s %sx%sx%s, TR/TE/NS=%s/%s/%s' \n" %(subject, Sex, Age, Weight, datex, voxel_name, PSR,PSC,PS3d, TR,TE, NS ))
                file.write(" srcraw= '%s' \n" %rda_met)
                file.write(" srch2o= '%s' \n" %rda_h2o)
                file.write(" savdir= '%s' \n" %lcmodel_dir)
                file.write(" ppmst= %s \n"%ppmst)
                file.write(" ppmend= 0.3\n")
                file.write(" nunfil= %s\n"%nfil)
                file.write(" ltable= 7\n")
                file.write(" lps= 8\n")
                file.write(" lcsv= 11\n")
                file.write(" lcoraw= 10\n")
                file.write(" lcoord= 9\n")
                file.write(" hzpppm= 1.2328e+02\n")
                file.write(" filtab= '%s/table'\n" %lcmodel_dir)
                file.write(" filraw= '%s/met/RAW'\n" %lcmodel_dir)
                file.write(" filps= '%s/ps'\n" %lcmodel_dir)
                file.write(" filh2o= '%s/h2o/RAW'\n" %lcmodel_dir)
                file.write(" filcsv= '%s/spreadsheet.csv'\n" %lcmodel_dir)
                file.write(" filcor= '%s/coraw'\n" %lcmodel_dir)
                file.write(" filcoo= '%s/coord'\n" %lcmodel_dir)
                file.write(" filbas= '/home/raid3/kanaan/.lcmodel/basis-sets/press_te30_3t_01a.basis'\n")
                file.write(" echot= %s.00 \n" %TE)
                file.write(" dows= T \n")
                file.write(" doecc= T\n")
                file.write(" deltat= 8.330e-04\n")
                file.write(" $END\n")
                file.close()

            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
                          Execute quantitation.... run standardA4pdf
            '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

            if os.path.isfile(os.path.join(lcmodel_dir,  'spreadsheet.csv')):
                    print 'Spectrum already processed .................moving on'
            else:
                print '...running standardA4pdf execution-script '
                print ''
                lcmodel_command = ['/bin/sh','/home/raid3/kanaan/.lcmodel/execution-scripts/standardA4pdfv3',
                                   '%s' %lcmodel_dir,'19','%s' %lcmodel_dir, '%s' %lcmodel_dir]

                print subprocess.list2cmdline(lcmodel_command)
                print ''
                subprocess.call(lcmodel_command)

            reader = open(os.path.join(lcmodel_dir, 'table'), 'r')
            for line in reader:
                if 'FWHM' in line:
                    fwhm = float(line[9:14])
                    snrx  = line[29:31]
                    fwhm_hz = fwhm * 123.24
                if 'Data shift' in line:
                    shift = line[15:21]
                if 'Ph:' in line:
                    ph0 = line[6:10]
                    ph1 = line[19:24]

                    filex = open(os.path.join(lcmodel_dir, 'snr.txt'), "w")
                    filex.write('%s, %s, %s, %s, %s, %s' %(fwhm,fwhm_hz, snrx, shift, ph0, ph1))
                    filex.close()
Exemple #36
0
def calc_ecm(population, workspace_dir):

    # all_gm = []
    #
    # if not os.path.isfile(os.path.join(workspace_dir, 'GluConnectivity', 'COMBINED_GM_MASK.nii.gz')):
    #     print 'Creating Group GM mask'
    #     for subject in population:
    #
    #         # input and output folders
    #         subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
    #         out_mask        = os.path.join(workspace_dir, 'GluConnectivity', 'COMBINED_GM_MASK.nii.gz')
    #         MNI2mm_gm   = os.path.join(subject_dir , 'anatomical_MNI2mm_tissue_gm/TISSUE_CLASS_1_GM_OPTIMIZED_resample_warp_thresh.nii.gz')
    #
    #         if os.path.isfile(MNI2mm_gm):
    #             all_gm.append(MNI2mm_gm)
    #
    #     maths_input = []
    #     for i in all_gm:
    #         x = '-add %s'%i
    #         maths_input.append(x)
    #
    #     maths_string = ' '.join(maths_input)[5:]
    #     os.system('fslmaths %s -mul /usr/share/fsl/data/standard/MNI152_T1_2mm_brain_mask.nii.gz %s'%(maths_string, out_mask))
    #
    #     out_mask_4mm = os.path.join(workspace_dir, 'GluConnectivity', 'COMBINED_GM_MASK_4mm.nii.gz')
    #     mni_4mm = '/SCR/ROI/brain_4mm.nii.gz'
    #     os.system('flirt -in %s -ref %s -out %s -applyisoxfm 4' %(out_mask, mni_4mm, out_mask_4mm ))

    for subject in population:
        print 'Running Subject %s' % subject

        # input and output folders
        subject_dir = os.path.join(workspace_dir, 'GluConnectivity', subject)
        outdir = os.path.join(subject_dir, 'ECM_LIPSIA')
        mkdir_path(outdir)
        os.chdir(outdir)

        # TRANSFORM NATIVE IMAGE TO MNI2mm
        mkdir_path(
            os.path.join(
                subject_dir,
                'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp'))
        pproc = os.path.join(
            subject_dir,
            'functional_native_brain_preproc_FWHM_AROMA_residual_bp/bandpassed_demeaned_filtered.nii.gz'
        )
        pproc_2mm = os.path.join(
            subject_dir,
            'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/pproc.nii.gz'
        )
        anat2mni = os.path.join(
            subject_dir,
            'anatomical_MNI2mm_xfm/MP2RAGE_DESKULL_RPI_resample_ero_fieldwarp.nii.gz'
        )
        func2anat = os.path.join(
            subject_dir,
            'functional_ANAT2mm_xfm/REST_calc_resample_corrected_volreg_maths_tstat_flirt.mat'
        )
        if not os.path.isfile(pproc_2mm):
            if os.path.isfile(pproc):
                print '... Warping to MNI'
                os.system(' '.join([
                    'applywarp', '--in=' + pproc, '--ref=' +
                    '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz',
                    '--out=' + pproc_2mm, '--warp=' + anat2mni,
                    '--premat=' + func2anat
                ]))

        # Convert VISTA to NIFTI
        pproc_2mmv = os.path.join(
            subject_dir,
            'functional_MNI2mm_brain_preproc_FWHM_AROMA_residual_bp/pproc.v')
        if not os.path.isfile(pproc_2mmv):
            print '...Converting NIFTI to VISTA.. make sure you are running this on telemann'
            os.system('isisconv -in %s -out %s' % (pproc_2mm, pproc_2mmv))
def construct_features_dataframe(derivatives_dir, control_outliers, patients_outliers, rsfc_seeds):

    print '========================================================================================'
    print ''
    print '                    Tourettome - 009. Create Feature Dataframes                         '
    print ''
    print '========================================================================================'
    print ''

    #I/O
    features_dir = mkdir_path(os.path.join(derivatives_dir, 'feature_matrices'))
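    # NOTE: tourettome_phenotypic, tourettome_derivatives, terms, formula and cmap_gradient are
    # assumed to be module-level globals defined elsewhere in this pipeline; mkdir_path is
    # likewise assumed to return the directory it creates.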

    ########################################################################################################
    print '###########################################################'
    print ' Inspecting sample size'

    df_pheno = pd.read_csv(os.path.join(tourettome_phenotypic, 'tourettome_phenotypic.csv'),
                           index_col=0).drop(control_outliers+patients_outliers,axis =0)
    population = df_pheno.index

    # Extract groups
    patients = sorted([i for i in population if df_pheno.loc[i]['Group'] == 'patients'])
    controls = sorted([i for i in population if df_pheno.loc[i]['Group'] == 'controls'])
    tourettome_subjects = sorted(controls + patients)

    # create group phenotypic dataframes
    df_pheno_controls = df_pheno.drop([i for i in df_pheno.index if i not in controls], axis=0)
    df_pheno_patients = df_pheno.drop([i for i in df_pheno.index if i not in patients], axis=0)
    df_pheno_controls = df_pheno_controls.drop([i for i in df_pheno_controls.columns if i not in terms], axis=1)
    df_pheno_patients = df_pheno_patients.drop([i for i in df_pheno_patients.columns if i not in terms], axis=1)

    df_pheno_controls.to_csv(os.path.join(tourettome_phenotypic, 'tourettome_phenotypic_controls.csv'))
    df_pheno_patients.to_csv(os.path.join(tourettome_phenotypic, 'tourettome_phenotypic_patients.csv'))

    # Included subjects
    print 'n_controls=', len(controls)
    print 'n_patients=', len(patients)
    print 'n_total =', len(tourettome_subjects)
    print ''

    # Outliers
    print 'n_control_outliers=', len(control_outliers)
    print 'n_patients_outliers=', len(patients_outliers)
    # print 'n_total_outliers =', len([i for i in control_outliers+patient_outliers if i not in hamburg])
    print 'n_total_outliers =', len(control_outliers+patients_outliers)
    print ''

    #######################################################################################################
    print '###########################################################'
    print '... Extracting SCA, CT & ECM data for QCd tourettome population'

    ###################
    # SCA
    if not os.path.isfile(os.path.join(features_dir, 'sca_tourettome_raw.csv')):
        print '...... Checking SCA data'
        sca_tourettome_raw = []
        for seed_name in rsfc_seeds:
            print '......... %s'%seed_name
            sca_tourettome_raw.append(return_sca_data(seed_name, tourettome_subjects, derivatives_dir))

        # Save raw dataframes
        sca_tourettome_raw = pd.concat(sca_tourettome_raw)
        sca_tourettome_raw.to_csv(os.path.join(features_dir, 'sca_tourettome_raw.csv'))
        plt_features_heatmap(sca_tourettome_raw, os.path.join(features_dir, 'sca_tourettome_raw.png'), vmin=-1,vmax=1)

    else:
        sca_tourettome_raw = pd.read_csv(os.path.join(features_dir, 'sca_tourettome_raw.csv'), index_col=0)

    ###################
    # CT
    if not os.path.isfile(os.path.join(features_dir, 'ct_tourettome_raw.csv')):
        print '...... Checking CT data'

        # only take subjects that have ct and are not outliers
        tourettome_subjects_ct = np.unique([i[0:5] for i in os.listdir(os.path.join(tourettome_derivatives, 'struct_cortical_thickness'))])
        tourettome_subjects_ct = [i for i in tourettome_subjects_ct if i not in control_outliers + patients_outliers]

        #check data and save
        ct_tourettome_raw= return_ct_data(tourettome_subjects_ct, tourettome_derivatives)
        ct_tourettome_raw.to_csv(os.path.join(features_dir, 'ct_tourettome_raw.csv'))
        plt_features_heatmap(ct_tourettome_raw, os.path.join(features_dir, 'ct_tourettome_raw.png'), vmin=-0, vmax=4)

    else:
        ct_tourettome_raw = pd.read_csv(os.path.join(features_dir, 'ct_tourettome_raw.csv'),index_col=0)

    ###################
    # ECM
    if not os.path.isfile(os.path.join(features_dir, 'ecm_tourettome_raw.csv')):
        print '...... Checking ECM data'
        # check data and save
        ecm_tourettome_raw = return_ecm_data(tourettome_subjects, tourettome_derivatives)
        ecm_tourettome_raw.to_csv(os.path.join(features_dir, 'ecm_tourettome_raw.csv'))
        plt_features_heatmap(ecm_tourettome_raw, os.path.join(features_dir, 'ecm_tourettome_raw.png'), vmin=-1, vmax=1)
    else:
        ecm_tourettome_raw = pd.read_csv(os.path.join(features_dir, 'ecm_tourettome_raw.csv'), index_col=0)

    ############################################################################################################
    print '###########################################################'
    print '... Building Design Matrix'

    if not os.path.isfile(os.path.join(features_dir, 'design_matrix_tourettome.csv')):
        # Create design matrix
        design_matrix = pd.DataFrame(index=sca_tourettome_raw.columns)
        design_matrix['Age'] = df_pheno['Age']

        def make_dmat_category(old_col, new_col):
            for subject in design_matrix.index:
                if df_pheno.loc[subject][old_col] == new_col:
                    design_matrix.loc[subject, new_col] = 1
                else:
                    design_matrix.loc[subject, new_col] = 0
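        # one-hot encode Sex and Site below: each call adds a 0/1 indicator column named after the category level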

        make_dmat_category('Sex', 'male')
        make_dmat_category('Sex', 'female')
        make_dmat_category('Site', 'HANNOVER_A')
        make_dmat_category('Site', 'HANNOVER_B')
        #make_dmat_category('Site', 'HAMBURG')
        make_dmat_category('Site', 'Leipzig')
        make_dmat_category('Site', 'PARIS')
        design_matrix['FD'] = df_pheno['qc_func_fd']
        #design_matrix['CJV'] = df_pheno['qc_anat_cjv']
        # design_matrix['DVARS'] = df_pheno['qc_func_dvars']
        # design_matrix['TSNR'] = df_pheno['qc_func_tsnr']

        # Save design matrix data
        design_matrix.to_csv(os.path.join(features_dir, 'design_matrix_tourettome.csv'))

        # Plot design matrix
        f = plt.figure(figsize=(12, 8))
        for i in ['Age', 'FD']:
            design_matrix[i] = preprocessing.scale(design_matrix[i])
        sns.heatmap(design_matrix, yticklabels=False, cmap=cmap_gradient, vmin=-2.5, vmax=2.5)
        plt.xticks(size=20, rotation=90, weight='bold')
        f.savefig(os.path.join(features_dir, 'design_matrix_tourettome.png'), dpi = 300, bbox_inches='tight')

    design_matrix = pd.read_csv(os.path.join(features_dir, 'design_matrix_tourettome.csv'), index_col = 0)

    ########################################################################################################
    print '###########################################################'
    print '... Denoising SCA features'

    if not os.path.isfile(os.path.join(features_dir, 'sca_patients_resid_z.csv')):

        #####################
        # Regress
        if not os.path.isfile(os.path.join(features_dir, 'sca_tourettome_resid.csv')):
            print '...... Regressing nuisance variables for SCA dataframes'
            sca_tourettome_resid = regress_nuisance_covariates(sca_tourettome_raw, design_matrix, formula)

            # save residual data
            sca_tourettome_resid = pd.concat(sca_tourettome_resid, axis=1).T  # transpose here to get back to RAW shape
            sca_tourettome_resid.to_csv(os.path.join(features_dir, 'sca_tourettome_resid.csv'))

            # plot sca residuals
            plt_features_heatmap(sca_tourettome_resid, os.path.join(features_dir, 'sca_tourettome_resid.png'),
                                 vmin=-2,vmax=2)

        #####################
        # Break down sca_tourettome_resid to patient and control dataframes
        if not os.path.isfile(os.path.join(features_dir, 'sca_patients_resid.csv')):
            sca_tourettome_resid = pd.read_csv(os.path.join(features_dir, 'sca_tourettome_resid.csv'), index_col =0)
            sca_patients_resid = sca_tourettome_resid.drop(controls, axis=1)
            sca_controls_resid = sca_tourettome_resid.drop(patients, axis=1)

            # save separately
            sca_patients_resid.to_csv(os.path.join(features_dir, 'sca_patients_resid.csv'))
            sca_controls_resid.to_csv(os.path.join(features_dir, 'sca_controls_resid.csv'))

            # plot separate sca residuals
            plt_features_heatmap(sca_controls_resid, os.path.join(features_dir, 'sca_controls_resid.png'),
                                 vmin=-1, vmax=1, figsize=(17.5, 10))
            plt_features_heatmap(sca_patients_resid, os.path.join(features_dir, 'sca_patients_resid.png'),
                                 vmin=-1, vmax=1, figsize=(17.5, 10))

        else:
            sca_controls_resid = pd.read_csv(os.path.join(features_dir, 'sca_controls_resid.csv'), index_col=0)
            sca_patients_resid = pd.read_csv(os.path.join(features_dir, 'sca_patients_resid.csv'), index_col=0)

        #####################
        # Z-Score
        if not os.path.isfile(os.path.join(features_dir, 'sca_patients_resid_z.csv')):
            print ' ... Z-scoring SCA dataframes'
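            # z_score_features is assumed to z-score both groups feature-wise against the
            # control distribution (control mean/SD), returning (controls_z, patients_z)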
            sca_controls_resid_z, sca_patients_resid_z = z_score_features(sca_controls_resid, sca_patients_resid)

            # save data
            sca_controls_resid_z.to_csv(os.path.join(features_dir, 'sca_controls_resid_z.csv'))
            sca_patients_resid_z.to_csv(os.path.join(features_dir, 'sca_patients_resid_z.csv'))
            plt_features_heatmap(sca_controls_resid_z, os.path.join(features_dir, 'sca_controls_resid_z.png'),
                                 vmin=-4, vmax=4, figsize=(17.5, 10))
            plt_features_heatmap(sca_patients_resid_z, os.path.join(features_dir, 'sca_patients_resid_z.png'),
                                 vmin=-4, vmax=4, figsize=(17.5, 10))

    else:
        print 'SCA features already denoised'

    ########################################################################################################
    print '###########################################################'
    print '... Denoising CT features'

    if not os.path.isfile(os.path.join(features_dir, 'ct_patients_resid_z.csv')):

        #####################
        # Drop CT subjects from design_matrix
        print '...... Dropping subjects with no freesurfer segmentation'
        print '...... Regressing nuisance variables for CT dataframes'

        # first drop subjects from the design matrix that don't have CT (FreeSurfer segmentation failed); TODO: recover those subjects
        design_matrix = pd.read_csv(os.path.join(features_dir, 'design_matrix_tourettome.csv'), index_col=0)
        design_matrix = design_matrix.drop([i for i in design_matrix.index if i not in ct_tourettome_raw.columns],
                                           axis=0)

        #####################
        # Regress
        if not os.path.isfile(os.path.join(features_dir, 'ct_tourettome_resid.csv')):
            print '...... Regressing nuisance variables for CT dataframes'
            ct_tourettome_resid = regress_nuisance_covariates(ct_tourettome_raw, design_matrix, formula)

            # save residual data
            ct_tourettome_resid = pd.concat(ct_tourettome_resid, axis=1).T  # transpose here to get back to RAW shape
            ct_tourettome_resid.to_csv(os.path.join(features_dir, 'ct_tourettome_resid.csv'))

            # plot ct residuals
            plt_features_heatmap(ct_tourettome_resid, os.path.join(features_dir, 'ct_tourettome_resid.png'),
                                 vmin=-2, vmax=2)

        #####################
        # Break Down mats into patients and controls
        if not os.path.isfile(os.path.join(features_dir, 'ct_patients_resid.csv')):
            # Break down ct_tourettome_resid to patient and control dataframes
            ct_tourettome_resid = pd.read_csv(os.path.join(features_dir, 'ct_tourettome_resid.csv'), index_col=0)
            ct_patients_resid = ct_tourettome_resid.drop([i for i in controls if i in ct_tourettome_resid.columns], axis=1)
            ct_controls_resid = ct_tourettome_resid.drop([i for i in patients if i in ct_tourettome_resid.columns], axis=1)

            # save separately
            ct_patients_resid.to_csv(os.path.join(features_dir, 'ct_patients_resid.csv'))
            ct_controls_resid.to_csv(os.path.join(features_dir, 'ct_controls_resid.csv'))

            # plot separate ct residuals
            plt_features_heatmap(ct_controls_resid, os.path.join(features_dir, 'ct_controls_resid.png'),
                                 vmin=-1, vmax=1, figsize=(17.5, 10))
            plt_features_heatmap(ct_patients_resid, os.path.join(features_dir, 'ct_patients_resid.png'),
                                 vmin=-1, vmax=1, figsize=(17.5, 10))

        else:
            ct_controls_resid = pd.read_csv(os.path.join(features_dir, 'ct_controls_resid.csv'), index_col=0)
            ct_patients_resid = pd.read_csv(os.path.join(features_dir, 'ct_patients_resid.csv'), index_col=0)

        #####################
        # Z-Score
        if not os.path.isfile(os.path.join(features_dir, 'ct_patients_resid_z.csv')):
            print ' ... Z-scoring CT dataframes'
            ct_controls_resid_z, ct_patients_resid_z = z_score_features(ct_controls_resid, ct_patients_resid)

            # save data
            ct_controls_resid_z.to_csv(os.path.join(features_dir, 'ct_controls_resid_z.csv'))
            ct_patients_resid_z.to_csv(os.path.join(features_dir, 'ct_patients_resid_z.csv'))
            plt_features_heatmap(ct_controls_resid_z, os.path.join(features_dir, 'ct_controls_resid_z.png'),
                                 vmin=-4, vmax=4, figsize=(17.5, 10))
            plt_features_heatmap(ct_patients_resid_z, os.path.join(features_dir, 'ct_patients_resid_z.png'),
                                 vmin=-4, vmax=4, figsize=(17.5, 10))

    else:
        print 'CT features already denoised'


    ########################################################################################################
    print '###########################################################'
    print '... Denoising ECM features'

    if not os.path.isfile(os.path.join(features_dir, 'ecm_patients_resid_z.csv')):

        #####################
        # Regress
        if not os.path.isfile(os.path.join(features_dir, 'ecm_tourettome_resid.csv')):
            print '...... Regressing nuisance variables for ECM dataframes'
            # must read dmat again since we dropped missing subjects from ct
            design_matrix = pd.read_csv(os.path.join(features_dir, 'design_matrix_tourettome.csv'), index_col=0)
            ecm_tourettome_resid = regress_nuisance_covariates(ecm_tourettome_raw, design_matrix, formula)

            # save residual data
            ecm_tourettome_resid = pd.concat(ecm_tourettome_resid, axis=1).T  # transpose here to get back to RAW shape
            ecm_tourettome_resid.to_csv(os.path.join(features_dir, 'ecm_tourettome_resid.csv'))
            plt_features_heatmap(ecm_tourettome_resid, os.path.join(features_dir, 'ecm_tourettome_resid.png'),vmin=-2,vmax=2)

        #####################
        # Break Down mats into patients and controls
        if not os.path.isfile(os.path.join(features_dir, 'ecm_patients_resid.csv')):
            ecm_tourettome_resid = pd.read_csv(os.path.join(features_dir, 'ecm_tourettome_resid.csv'), index_col=0)
            ecm_patients_resid = ecm_tourettome_resid.drop(controls, axis=1)
            ecm_controls_resid = ecm_tourettome_resid.drop(patients, axis=1)

            # save separately
            ecm_patients_resid.to_csv(os.path.join(features_dir, 'ecm_patients_resid.csv'))
            ecm_controls_resid.to_csv(os.path.join(features_dir, 'ecm_controls_resid.csv'))

            # plot separate ecm residuals
            plt_features_heatmap(ecm_controls_resid, os.path.join(features_dir, 'ecm_controls_resid.png'),
                                 vmin=-1, vmax=1, figsize=(17.5, 10))
            plt_features_heatmap(ecm_patients_resid, os.path.join(features_dir, 'ecm_patients_resid.png'),
                                 vmin=-1, vmax=1, figsize=(17.5, 10))

        else:
            ecm_controls_resid = pd.read_csv(os.path.join(features_dir, 'ecm_controls_resid.csv'), index_col=0)
            ecm_patients_resid = pd.read_csv(os.path.join(features_dir, 'ecm_patients_resid.csv'), index_col=0)

        #####################
        # Z-Score
        if not os.path.isfile(os.path.join(features_dir, 'ecm_patients_resid_z.csv')):
            print ' ... Z-scoring ECM dataframes'
            ecm_controls_resid_z, ecm_patients_resid_z = z_score_features(ecm_controls_resid, ecm_patients_resid)
            ecm_controls_resid_z.to_csv(os.path.join(features_dir, 'ecm_controls_resid_z.csv'))
            ecm_patients_resid_z.to_csv(os.path.join(features_dir, 'ecm_patients_resid_z.csv'))
            # plot separate ecm residuals
            plt_features_heatmap(ecm_controls_resid_z, os.path.join(features_dir, 'ecm_controls_resid_z.png'),
                                 vmin=-4, vmax=4, figsize=(17.5, 10))
            plt_features_heatmap(ecm_patients_resid_z, os.path.join(features_dir, 'ecm_patients_resid_z.png'),
                                 vmin=-4, vmax=4, figsize=(17.5, 10))

    else:
        print 'ECM features already denoised'
        def create_voxel_plots(voxel_name, ppmst):
            print '...Working on %s %s ' %(analysis_type, ppmst)
            #create output QC directory
            mkdir_path(os.path.join(subject_dir, 'quality_control', analysis_type, voxel_name, 'ppm_%s' %ppmst, 'tmp'))
            qc_dir  = os.path.join(subject_dir, 'quality_control',  analysis_type,  voxel_name, 'ppm_%s' %ppmst)
            tmp_dir = os.path.join(qc_dir, 'tmp')

            #grab lcmodel plots
            svs_lcmodel = os.path.join(subject_dir, 'lcmodel_%s'%analysis_type, voxel_name, 'ppm_%s'%ppmst, 'ps.pdf')

            # create localization pngs
            make_png = ['convert', '-density', '300', '-trim', '%s'%svs_lcmodel,  '-quality', '300', '-sharpen', '0x1.0', '%s/%s_lcmodel.png'%(tmp_dir, voxel_name)]
            subprocess.call(make_png)
            lcm_plot = os.path.join(tmp_dir, '%s_lcmodel-0.png'%voxel_name)

            #grab snr/fwhm data
            svs_snr = np.genfromtxt(os.path.join(subject_dir, 'lcmodel_%s'%analysis_type, voxel_name, 'ppm_%s'%ppmst, 'snr.txt'), delimiter = ',')

            #grab voxel mask
            svs = os.path.join(subject_dir, 'svs_voxel_mask','%s%s_%s_RDA_MASK.nii'%(subject,workspace_dir[-10:-9], voxel_name))

            #get data into matrix
            anat_load = nb.load(anatomical)
            svs_load  = nb.load(svs)
            anat_data = anat_load.get_data()
            svs_data  = svs_load.get_data()

            # get svs cut coords
            coords = find_cut_coords(svs_load)

            # convert zeros to nans for visualization purposes
            svs_data[svs_data==0]=np.nan

            # plot voxel on anat
            fig =plt.figure()
            fig.set_size_inches(6.5, 6.5)
            fig.subplots_adjust(wspace=0.005)
            #1
            ax1 = plt.subplot2grid((1,3), (0,0),  colspan = 1, rowspan =1)
            ax1.imshow(anat_data[coords[0],:,:], matplotlib.cm.bone_r)
            ax1.imshow(svs_data[coords[0],:,:] , matplotlib.cm.rainbow_r, alpha = 0.7)
            ax1.set_xlim(23, 157)
            ax1.set_ylim(101, 230)
            ax1.axes.get_yaxis().set_visible(False)
            ax1.axes.get_xaxis().set_visible(False)
            #2
            ax2 = plt.subplot2grid((1,3), (0,1),  colspan = 1, rowspan =1)
            ax2.imshow(np.rot90(anat_data[:,:,coords[2]]), matplotlib.cm.bone_r )
            ax2.imshow(np.rot90(svs_data[:,:,coords[2]]) , matplotlib.cm.rainbow_r, alpha = 0.7 )
            ax2.set_xlim(230, 20)
            ax2.set_ylim(207, 4)
            ax2.axes.get_yaxis().set_visible(False)
            ax2.axes.get_xaxis().set_visible(False)
            #3
            ax3 = plt.subplot2grid((1,3), (0,2),  colspan = 1, rowspan =1)
            ax3.imshow(anat_data[:,coords[1],:], matplotlib.cm.bone_r, origin='lower')
            ax3.imshow(svs_data[:,coords[1],:] , matplotlib.cm.rainbow_r, alpha = 0.7, origin='lower')
            ax3.set_xlim(38, 140)
            ax3.set_ylim(160, 60)
            ax3.axes.get_yaxis().set_visible(False)
            ax3.axes.get_xaxis().set_visible(False)
            fig.tight_layout()
            fig.savefig('%s/localization_%s.png'%(qc_dir, voxel_name), dpi=200, bbox_inches='tight')

            # create qc report
            report = canvas.Canvas(os.path.join(qc_dir,'QC_REPORT_%s.pdf'%voxel_name), pagesize=(1280, 1556))
            report.setFont("Helvetica", 40)
            report.drawImage(os.path.join(qc_dir,'localization_%s.png'%voxel_name), 1, inch*13.5)
            report.drawImage(lcm_plot, 30, inch*1, width = 1200, height = 800)
            report.drawString(230, inch*20, ' %s%s, %s_%s , SNR=%s FWHM=%s ' %(subject,workspace_dir[-10:-9], voxel_name, analysis_type,  svs_snr[2],svs_snr[1]) )
            report.showPage()

            if analysis_type == 'twix':
                fig_f6 = os.path.join(subject_dir, 'svs_twix', voxel_name, 'f6_frequency_drift_correction.png')
                fig_f7 = os.path.join(subject_dir, 'svs_twix', voxel_name, 'f7_estimated_phase_phase_drift.png')
                fig_f8 = os.path.join(subject_dir, 'svs_twix', voxel_name, 'f8_estimated_frequency_drift.png')
                reader = open(os.path.join(subject_dir, 'svs_twix', voxel_name, voxel_name, 'readme.txt'), 'r')
                for line in reader:
                    if 'bad' in line:
                        badavn = line[34:38]
                report.drawImage(fig_f6, 1, inch*7.2)
                report.drawImage(fig_f7, 90, inch*1, width = 540, height = 450)
                report.drawImage(fig_f8, 590, inch*1, width = 540, height = 450)
                report.setFont("Helvetica", 40)
                report.drawString(350, inch*20, 'Number of Bad Averages =%s' %(badavn))
                report.save()
            else:
                report.save()
def calculate_voxel_statistics(population, workspace_dir):

    print '#############################################################################'
    print ''
    print '                 RUNNING PROJECT NMR-093%s %s' %(workspace_dir[-10:-9], workspace_dir[-8:])
    print ''
    print '#############################################################################'

    count = 0
    for subject in population:
        count +=1
        print '========================================================================================'
        print '%s- Calculating Voxel statistics for subject %s_%s' %(count,subject, workspace_dir[-10:-9])
        print '.'


        # input files
        spm_dir       = os.path.join(workspace_dir, subject, 'segmentation_spm')

        acc_mask      = os.path.join(workspace_dir, subject, 'svs_voxel_mask', '%s%s_ACC_RDA_MASK.nii'%(subject,workspace_dir[-10:-9]))
        tha_mask      = os.path.join(workspace_dir, subject, 'svs_voxel_mask', '%s%s_THA_RDA_MASK.nii'%(subject,workspace_dir[-10:-9]))
        str_mask      = os.path.join(workspace_dir, subject, 'svs_voxel_mask', '%s%s_STR_RDA_MASK.nii'%(subject,workspace_dir[-10:-9]))

        spm_gm        = os.path.join(spm_dir, 'TISSUE_CLASS_1_GM_OPTIMIZED.nii.gz')
        spm_wm        = os.path.join(spm_dir, 'TISSUE_CLASS_2_WM_OPTIMIZED.nii.gz')
        spm_cm        = os.path.join(spm_dir, 'TISSUE_CLASS_3_CSF_OPTIMIZED.nii.gz')

        # output folders
        mkdir_path(os.path.join(workspace_dir, subject, 'svs_voxel_stats'))
        stats_dir = os.path.join(workspace_dir, subject, 'svs_voxel_stats')

        print 'Calculating tissue proportions'
        def calc_props(mask_file, seg_gm, seg_wm, seg_cm, voxel_name):

            if not os.path.isfile(mask_file):
                print 'IOError: [Errno 2] SVS %s mask Does not exist, create masks and come back' %voxel_name

            else:
                if os.path.isfile(os.path.join(stats_dir, '%s_voxel_statistics_spm_opt.txt'%voxel_name)):
                    print 'Voxel statistics already calculated ... moving on'

                else:
                    #grab data
                    spm_gm_data = nb.load(seg_gm).get_data().squeeze()
                    spm_wm_data = nb.load(seg_wm).get_data().squeeze()
                    spm_cm_data = nb.load(seg_cm).get_data().squeeze()
                    vox_data    = nb.load(mask_file).get_data().squeeze()

                    #multiply SVS ROI with segmented data for ACC
                    vox_spm_gm = vox_data * spm_gm_data
                    vox_spm_wm = vox_data * spm_wm_data
                    vox_spm_cm = vox_data * spm_cm_data

                    #extract stats from segmentation for acc
                    vox_total_svs    = np.sum(vox_data)
                    vox_total_spm_gm = np.sum(vox_spm_gm)
                    vox_total_spm_wm = np.sum(vox_spm_wm)
                    vox_total_spm_cm = np.sum(vox_spm_cm)

                    percent_svs      = float(vox_total_svs)/ float(vox_total_svs)
                    percent_spm_gm   = np.round(float(vox_total_spm_gm) / float(vox_total_svs), 3)
                    percent_spm_wm   = np.round(float(vox_total_spm_wm) / float(vox_total_svs), 3)
                    percent_spm_cm   = np.round(float(vox_total_spm_cm) / float(vox_total_svs), 3)
                    sum_spm          =  np.round(float(percent_spm_gm + percent_spm_wm + percent_spm_cm), 3)

                    print '%s.....' %voxel_name
                    print '...%s SPM NewSegment Tissue Proportions = %s%% GM, %s%% WM, %s%% CSF = %s'  %(voxel_name, percent_spm_gm, percent_spm_wm, percent_spm_cm, sum_spm)

                    spm_txt  = os.path.join(stats_dir, '%s_voxel_statistics_spm_opt.txt'%voxel_name)
                    write_spm = open(spm_txt, 'w')
                    write_spm.write('%s, %s, %s, %s'%(percent_spm_gm, percent_spm_wm, percent_spm_cm, sum_spm))
                    write_spm.close()

        calc_props(acc_mask, spm_gm, spm_wm, spm_cm, 'ACC')
        calc_props(tha_mask, spm_gm, spm_wm, spm_cm, 'THA')
        calc_props(str_mask, spm_gm, spm_wm, spm_cm, 'STR')

        print '========================================================================================'