# Beispiel #1
def measure_ttest_py (mean1fname, mean2fname,
                      var1fname, var2fname,
                      std1fname, std2fname,
                      numsubjs1, numsubjs2,
                      experimentname, outdir, exclude_idx=-1):
   """Compute a voxel-wise Welch's t-statistic between two group volumes.

   Parameters
   ----------
   mean1fname, mean2fname : paths to the group mean volumes.
   var1fname, var2fname   : paths to the group variance volumes.
   std1fname, std2fname   : unused, kept for interface compatibility.
   numsubjs1, numsubjs2   : sample size of each group.
   experimentname         : prefix for the output file name.
   outdir                 : output directory (created if missing).
   exclude_idx            : unused, kept for interface compatibility.

   Returns
   -------
   Path of the saved t-statistic volume.
   """
   if not os.path.exists (outdir):
      os.mkdir (outdir)

   #Welch's t-test for unequal sample sizes and unequal variances:
   #t = (m1 - m2) / sqrt( v1/N1 + v2/N2 )   where v1, v2 are VARIANCES
   #from:
   #http://en.wikipedia.org/wiki/Student%27s_t-test#Unequal_sample_sizes.2C_unequal_variance

   aff   = nib.load(mean1fname).get_affine()
   mean1 = nib.load(mean1fname).get_data()
   mean2 = nib.load(mean2fname).get_data()
   var1  = nib.load( var1fname).get_data()
   var2  = nib.load( var2fname).get_data()

   #BUGFIX: the variance must not be squared again -- Welch's denominator is
   #sqrt(var1/N1 + var2/N2); the previous np.square(var) squared a variance,
   #which matches neither the formula above nor the loaded var files.
   ttest = (mean1 - mean2) / np.sqrt((var1 / numsubjs1) + (var2 / numsubjs2))
   #zero-out voxels where a zero denominator produced NaN/Inf
   ttest[np.isnan(ttest)] = 0
   ttest[np.isinf(ttest)] = 0

   ttstfname = outdir + os.path.sep + experimentname + '_ttest' + au.ext_str()

   au.save_nibabel(ttstfname, ttest, aff)

   return ttstfname
def volstats (invol, groupname, groupsize, outdir=''):
    """Compute voxel-wise mean, variance, std and sum of a 4D volume.

    Statistics are taken across the 4th axis (subjects) and each result is
    saved next to the input (or in ``outdir``) unless its output file
    already exists.

    Parameters
    ----------
    invol     : path to the 4D input volume.
    groupname, groupsize : unused, kept for interface compatibility.
    outdir    : output directory; defaults to the input's directory.

    Returns
    -------
    [outsumsf, outmeanf, outvarf, outstdf] : list of output file paths.
    """
    slicesdir = os.path.dirname(invol)

    if not outdir:
        outdir = slicesdir

    base      = os.path.basename(au.remove_ext(invol))
    outmeanf  = outdir + os.path.sep + base + '_' + au.mean_str()
    outvarf   = outdir + os.path.sep + base + '_' + au.var_str()
    outstdf   = outdir + os.path.sep + base + '_' + au.std_str()
    outsumsf  = outdir + os.path.sep + base + '_' + au.sums_str()

    #load the image once and reuse it (it was previously read from disk twice)
    img = nib.load(invol)
    vol = img.get_data()
    aff = img.get_affine()

    if not os.path.exists(outmeanf):
        au.save_nibabel(outmeanf, np.mean(vol, axis=3), aff)

    if not os.path.exists(outstdf):
        au.save_nibabel(outstdf, np.std(vol, axis=3), aff)

    if not os.path.exists(outvarf):
        au.save_nibabel(outvarf, np.var(vol, axis=3), aff)

    if not os.path.exists(outsumsf):
        au.save_nibabel(outsumsf, np.sum(vol, axis=3), aff)

    return [outsumsf,outmeanf,outvarf,outstdf]
def volstats(invol, groupname, groupsize, outdir=''):
    """Save voxel-wise mean, std, var and sum maps of a 4D volume.

    Each statistic is computed over axis 3 (subjects) and written only when
    its output file does not already exist.  ``groupname`` and ``groupsize``
    are unused and kept for interface compatibility.

    Returns the output paths as [sums, mean, var, std].
    """
    if not outdir:
        outdir = os.path.dirname(invol)

    stem = os.path.basename(au.remove_ext(invol))
    prefix = outdir + os.path.sep + stem + '_'
    outmeanf = prefix + au.mean_str()
    outvarf = prefix + au.var_str()
    outstdf = prefix + au.std_str()
    outsumsf = prefix + au.sums_str()

    vol = nib.load(invol).get_data()
    aff = nib.load(invol).get_affine()

    #same computation order as before: mean, std, var, sums
    for outfile, reducer in ((outmeanf, np.mean),
                             (outstdf, np.std),
                             (outvarf, np.var),
                             (outsumsf, np.sum)):
        if not os.path.exists(outfile):
            au.save_nibabel(outfile, reducer(vol, axis=3), aff)

    return [outsumsf, outmeanf, outvarf, outstdf]
def change_to_absolute_values (niifname, outfname=''):
    """Replace a NIfTI volume's voxel values by their absolute values.

    Parameters
    ----------
    niifname : input volume path (extension appended if missing).
    outfname : output path; if empty, the input file is overwritten.

    Raises
    ------
    Re-raises any exception hit while loading or saving, after logging it.
    """
    niifname = au.add_extension_if_needed(niifname, au.ext_str())

    if not outfname:
        outfname = niifname

    try:
        #load data once and reuse the image handle
        img = nib.load(niifname)
        vol = np.abs(img.get_data())

        #save nifti file
        au.save_nibabel (outfname, vol, img.get_affine())

    except Exception:
        #BUGFIX: use a %s placeholder -- passing the value as a bare second
        #argument makes the logging call itself fail to format the message.
        #Also narrow the bare `except:` so KeyboardInterrupt/SystemExit pass
        #through unlogged (they are still propagated by `raise` either way).
        au.log.error ("Change_to_absolute_values:: Unexpected error: %s", sys.exc_info()[0])
        raise
def measure_bhattacharyya_distance_py (mean1fname, mean2fname,
                                       var1fname, var2fname,
                                       std1fname, std2fname,
                                       numsubjs1, numsubjs2,
                                       experimentname, outdir, exclude_idx=-1):
    """Voxel-wise Bhattacharyya distance between two Gaussian group models.

    D = 1/4 * (m1-m2)^2/(v1+v2) + 1/2 * log( (v1+v2)/(2*s1*s2) )

    References:
    - Nielsen, Boltz, Schwander, "Bhattacharyya clustering with applications
      to mixture simplifications", ICPR 2010.
    - Kailath, T., "The Divergence and Bhattacharyya Distance Measures in
      Signal Selection", http://dx.doi.org/10.1109/TCOM.1967.1089532

    ``numsubjs1``, ``numsubjs2`` and ``exclude_idx`` are unused, kept for
    interface compatibility.  Returns the output file path.
    """
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    affine = nib.load(mean1fname).get_affine()
    mu_a = nib.load(mean1fname).get_data()
    mu_b = nib.load(mean2fname).get_data()
    var_a = nib.load(var1fname).get_data()
    var_b = nib.load(var2fname).get_data()
    sd_a = nib.load(std1fname).get_data()
    sd_b = nib.load(std2fname).get_data()

    var_sum = var_a + var_b
    dist = 0.25 * (np.square(mu_a - mu_b) / var_sum) \
         + 0.5 * np.log(var_sum / (2 * sd_a * sd_b))

    #voxels with zero variance yield NaN/Inf: zero them out
    dist[np.isnan(dist)] = 0
    dist[np.isinf(dist)] = 0

    outpath = outdir + os.path.sep + experimentname + '_' \
            + au.bhattacharyya_str() + au.ext_str()

    au.save_nibabel(outpath, dist, affine)

    return outpath
def change_to_absolute_values(niifname, outfname=''):
    """Write the absolute-value version of a NIfTI volume.

    If ``outfname`` is empty the input file is overwritten in place.
    Any exception raised while loading/saving is logged and re-raised.
    """
    niifname = au.add_extension_if_needed(niifname, au.ext_str())

    #default target: overwrite the input
    outfname = outfname or niifname

    try:
        voxels = nib.load(niifname).get_data()
        affine = nib.load(niifname).get_affine()

        au.save_nibabel(outfname, np.abs(voxels), affine)

    except:
        au.log.error("Change_to_absolute_values:: Unexpected error: ",
                     sys.exc_info()[0])
        raise
def measure_bhattacharyya_distance_py (mean1fname, mean2fname,
                                       var1fname, var2fname,
                                       std1fname, std2fname,
                                       numsubjs1, numsubjs2,
                                       experimentname, outdir, exclude_idx=-1):
   """Compute the voxel-wise Bhattacharyya distance of two Gaussian groups.

   Implements 1/4*(m1-m2)^2/(v1+v2) + 1/2*log((v1+v2)/(2*s1*s2)), after:
   (1) Nielsen, Boltz & Schwander, ICPR 2010, and
   (2) Kailath, http://dx.doi.org/10.1109/TCOM.1967.1089532.

   numsubjs1/numsubjs2/exclude_idx are unused (interface compatibility).
   Returns the path of the saved distance volume.
   """
   if not os.path.exists (outdir):
      os.mkdir (outdir)

   aff = nib.load(mean1fname).get_affine()
   m1  = nib.load(mean1fname).get_data()
   m2  = nib.load(mean2fname).get_data()
   v1  = nib.load (var1fname).get_data()
   v2  = nib.load (var2fname).get_data()
   s1  = nib.load (std1fname).get_data()
   s2  = nib.load (std2fname).get_data()

   mahalanobis_part = 0.25 * (np.square(m1 - m2) / (v1 + v2))
   log_part         = 0.5  * np.log((v1 + v2) / (2*s1*s2))
   b1 = mahalanobis_part + log_part

   #clean non-finite voxels caused by zero variances
   b1[np.isnan(b1)] = 0
   b1[np.isinf(b1)] = 0

   bhatta = outdir + os.path.sep + experimentname + '_' + au.bhattacharyya_str() + au.ext_str()

   au.save_nibabel(bhatta, b1, aff)

   return bhatta
def remove_subject_from_stats(meanfname,
                              varfname,
                              samplesize,
                              subjvolfname,
                              newmeanfname,
                              newvarfname,
                              newstdfname=''):
    """Leave-one-out update of precomputed group mean/variance volumes.

    Given group mean and variance maps computed over ``samplesize``
    subjects, removes the subject volume ``subjvolfname`` and writes the
    updated mean and variance (and optionally std) maps.

    Parameters
    ----------
    meanfname, varfname : paths to the group mean/variance volumes.
    samplesize          : number of subjects in the original group (N).
    subjvolfname        : path of the subject volume to remove.
    newmeanfname, newvarfname : output paths for the updated maps.
    newstdfname         : optional output path for sqrt(newvar).

    Note: requires samplesize > 2, otherwise the (n-2) divisor is zero.
    """
    meanfname = au.add_extension_if_needed(meanfname, au.ext_str())
    varfname = au.add_extension_if_needed(varfname, au.ext_str())
    subjvolfname = au.add_extension_if_needed(subjvolfname, au.ext_str())

    newmeanfname = au.add_extension_if_needed(newmeanfname, au.ext_str())
    newvarfname = au.add_extension_if_needed(newvarfname, au.ext_str())

    if newstdfname:
        newstdfname = au.add_extension_if_needed(newstdfname, au.ext_str())

    #load data
    #BUGFIX: force float arithmetic -- with an integer sample size,
    #n/(n-1) truncates to 1 under Python 2 integer division.
    n = float(samplesize)

    meanv = nib.load(meanfname).get_data()
    varv = nib.load(varfname).get_data()
    subjv = nib.load(subjvolfname).get_data()
    aff = nib.load(meanfname).get_affine()

    #calculate new mean: ((oldmean*N) - x)/(N-1)
    newmean = ((meanv * n) - subjv) / (n - 1)
    newmean = np.nan_to_num(newmean)

    #calculate new variance by first recovering the sum of squares:
    # oldvar = (n/(n-1)) * (sumsquare/n - oldmu^2)
    # s = ((oldvar * (n/(n-1)) ) + oldmu^2) * n
    # newvar = ((n-1)/(n-2)) * (((s - x^2)/(n-1)) - newmu^2)
    s = ((varv * (n / (n - 1))) + np.square(meanv)) * n
    newvar = ((n - 1) / (n - 2)) * (((s - np.square(subjv)) /
                                     (n - 1)) - np.square(newmean))
    newvar = np.nan_to_num(newvar)

    #save nifti files
    au.save_nibabel(newmeanfname, newmean, aff)
    au.save_nibabel(newvarfname, newvar, aff)

    #calculate new standard deviation: sqrt(newvar)
    if newstdfname:
        newstd = np.sqrt(newvar)
        newstd = np.nan_to_num(newstd)
        au.save_nibabel(newstdfname, newstd, aff)
def remove_subject_from_stats (meanfname, varfname, samplesize, subjvolfname, newmeanfname, newvarfname, newstdfname=''):
    """Remove one subject's volume from precomputed mean/variance maps.

    Writes the updated mean to ``newmeanfname``, the updated variance to
    ``newvarfname`` and, if ``newstdfname`` is given, its square root.
    """
    meanfname    = au.add_extension_if_needed(meanfname,    au.ext_str())
    varfname     = au.add_extension_if_needed(varfname,     au.ext_str())
    subjvolfname = au.add_extension_if_needed(subjvolfname, au.ext_str())
    newmeanfname = au.add_extension_if_needed(newmeanfname, au.ext_str())
    newvarfname  = au.add_extension_if_needed(newvarfname,  au.ext_str())
    if newstdfname:
        newstdfname = au.add_extension_if_needed(newstdfname, au.ext_str())

    n = samplesize

    group_mean = nib.load(meanfname).get_data()
    group_var  = nib.load( varfname).get_data()
    subj_vol   = nib.load(subjvolfname).get_data()
    affine     = nib.load(meanfname).get_affine()

    #leave-one-out mean: ((oldmean*N) - x)/(N-1)
    loo_mean = np.nan_to_num(((group_mean * n) - subj_vol)/(n-1))

    #recover the sum of squares, then the leave-one-out variance:
    # oldvar = (n/(n-1)) * (sumsquare/n - oldmu^2)
    # s = ((oldvar * (n/(n-1)) ) + oldmu^2) * n
    # newvar = ((n-1)/(n-2)) * (((s - x^2)/(n-1)) - newmu^2)
    sum_sq  = ((group_var * (n/(n-1)) ) + np.square(group_mean)) * n
    loo_var = np.nan_to_num(((n-1)/(n-2)) * (((sum_sq - np.square(subj_vol))/(n-1)) - np.square(loo_mean)))

    au.save_nibabel (newmeanfname, loo_mean, affine)
    au.save_nibabel (newvarfname , loo_var,  affine)

    #optional standard deviation: sqrt(newvar)
    if newstdfname:
        au.save_nibabel (newstdfname, np.nan_to_num(np.sqrt(loo_var)), affine)
# Beispiel #10
def measure_pearson(datafname,
                    labelsfile,
                    outfname,
                    maskfname='',
                    exclufname='',
                    exclude_idx=-1):
    """Voxel-wise Pearson correlation between a 4D volume and subject labels.

    Parameters
    ----------
    datafname  : path to a 4D NIfTI volume (4th axis = subjects).
    labelsfile : text file with one integer label per subject.
    outfname   : path of the output correlation volume.
    maskfname  : optional mask; correlations are computed only where non-zero.
    exclufname : optional text file with one 0/1 flag per subject; subjects
                 flagged 1 are excluded.
    exclude_idx: index of one extra subject to exclude (-1 = none).

    Returns
    -------
    outfname

    Raises
    ------
    IOError if subject counts in the volume and text files do not match.
    """
    #reading label file
    labels = np.loadtxt(labelsfile, dtype=int)

    if exclufname:
        exclus = np.loadtxt(exclufname, dtype=int)

    #reading input volume
    vol = nib.load(datafname)
    n = vol.get_shape()[3]

    if n != len(labels):
        err = 'Numbers do not match: ' + datafname + ' and ' + labelsfile
        raise IOError(err)
    elif exclufname:
        if n != len(exclus):
            #BUGFIX: previously referenced the undefined name `excludef`,
            #which raised a NameError instead of the intended IOError
            err = 'Numbers do not match: ' + datafname + ' and ' + exclufname
            raise IOError(err)

    exclude_log = ''
    if exclude_idx > -1:
        exclude_log = ' excluding subject ' + str(exclude_idx)

    au.log.debug('Pearson correlation of ' + os.path.basename(datafname) +
                 exclude_log)

    #reading volume
    data = vol.get_data()

    #excluding subjects: fold exclude_idx into the exclusion flags
    if exclufname and exclude_idx > -1:
        exclus[exclude_idx] = 1

    if exclufname:
        data = data[:, :, :, exclus == 0]
        labels = labels[exclus == 0]

    elif exclude_idx > -1:
        exclus = np.zeros(n, dtype=int)
        exclus[exclude_idx] = 1

        data = data[:, :, :, exclus == 0]
        labels = labels[exclus == 0]

    subsno = data.shape[3]

    #flatten the spatial axes so each voxel is one row of subject values
    shape = data.shape[0:3]
    siz = np.prod(shape)
    temp = data.reshape(siz, subsno)
    ind = range(len(temp))

    if maskfname:
        mask = nib.load(maskfname)
        mskdat = mask.get_data().reshape(siz)
        ind = np.where(mskdat != 0)[0]

    #creating output volume
    odat = np.zeros(shape, dtype=vol.get_data_dtype())

    for idx in ind:
        p = stats.pearsonr(labels, temp[idx, :])[0]

        #constant voxels give NaN correlations: store 0 instead
        if math.isnan(p): p = 0

        odat[np.unravel_index(idx, shape)] = p

    au.save_nibabel(outfname, odat, vol.get_affine())

    return outfname
# Beispiel #11
def do_darya_localizations ():
    """Build localization volumes and slice images from Darya's .mat files.

    For each .mat file in the localization folder, reconstructs a binary
    mask volume from the stored voxel indices, saves it as NIfTI, renders
    it over the template and saves a PNG (optionally cropped with the
    external `innercrop` tool).
    """
    import scipy.io as sio

    sys.path.append('/home/alexandre/Dropbox/Documents/work/visualize_volume')
    import visualize_volume as vis

    import aizkolari_utils as au
    au.setup_logger()

    locd = '/home/alexandre/Dropbox/ELM 2013/ELM-2013-Darya/localization'

    wd, xd, dd, labelsf, phenof, dataf, masks, dilmasks, templates, pipe = get_filepaths()

    maskf          = dilmasks[0]
    mask, hdr, aff = au.get_nii_data(maskf)
    indices        = np.array(np.where(mask > 0))

    tmpltf         = templates[0]
    tmplt, _, _    = au.get_nii_data(tmpltf)

    flist = os.listdir(locd)
    flist = au.find(flist, '.mat')
    flist.sort()

    for f in flist:
        data = sio.loadmat(os.path.join(locd, f))

        name = au.remove_ext(f)

        if f.find('cobre') >= 0:
            #cobre files store a pearson map over the mask voxels
            p = data['pearson'].squeeze()
            locs = p > 0
            lidx = indices[:, locs].squeeze()

            #remember the pearson maps for the non-cobre files below
            #NOTE(review): assumes a matching 'cobre' reho/alff file is
            #processed before the corresponding non-cobre pearson file --
            #otherwise preho/palff would be unbound here; confirm ordering.
            if f.find('reho') >= 0:
                preho = p.copy()
            elif f.find('alff') >= 0:
                palff = p.copy()

        else:
            #these files store 1-based voxel index lists
            locs = data[name].squeeze()
            locs -= 1
            if f.find('pearson') >= 0:

                if f.find('reho') >= 0:
                    lidx = indices[:, preho > 0]
                elif f.find('alff') >= 0:
                    lidx = indices[:, palff > 0]

                lidx = lidx[:, locs]
            else:
                lidx = indices[:, locs].squeeze()

        #BUGFIX: np.float was removed from NumPy (>=1.24); use the builtin
        locvol = np.zeros_like(mask, dtype=float)
        locvol[tuple(lidx)] = 1

        #save nifti volume
        au.save_nibabel (os.path.join(locd, name + '.nii.gz'), locvol, aff, hdr)

        #save image
        fig = vis.show_many_slices(tmplt, locvol, volaxis=1, vol2_colormap=plt.cm.autumn, figtitle=name)
        #BUGFIX: fig2name was previously used without ever being defined
        fig2name = os.path.join(locd, name + '.png')
        aizc.save_fig_to_png(fig, fig2name)
        if os.access (os.path.join(xd,'innercrop'), os.X_OK):
            au.exec_command (os.path.join(xd,'innercrop') + ' -o white ' + fig2name + ' ' + fig2name)
def measure_pearson (datafname, labelsfile, outfname, maskfname='', exclufname='', exclude_idx=-1):
   """Voxel-wise Pearson correlation between a 4D volume and subject labels.

   datafname  : 4D NIfTI volume (4th axis = subjects).
   labelsfile : text file with one integer label per subject.
   outfname   : output correlation volume path (returned).
   maskfname  : optional mask; only non-zero voxels are correlated.
   exclufname : optional 0/1 flags file; subjects flagged 1 are excluded.
   exclude_idx: index of one extra subject to exclude (-1 = none).

   Raises IOError if subject counts do not match the text files.
   """
   #reading label file
   labels = np.loadtxt(labelsfile, dtype=int)

   if exclufname:
      exclus = np.loadtxt(exclufname, dtype=int)

   #reading input volume
   vol = nib.load(datafname)
   n   = vol.get_shape()[3]

   if n != len(labels):
      err = 'Numbers do not match: ' + datafname + ' and ' + labelsfile
      raise IOError(err)
   elif exclufname:
      if n != len(exclus):
         #BUGFIX: previously referenced the undefined name `excludef`,
         #raising a NameError instead of the intended IOError
         err = 'Numbers do not match: ' + datafname + ' and ' + exclufname
         raise IOError(err)

   exclude_log = ''
   if exclude_idx > -1:
      exclude_log = ' excluding subject ' + str(exclude_idx)

   au.log.debug ('Pearson correlation of ' + os.path.basename(datafname) + exclude_log)

   #reading volume
   data   = vol.get_data()

   #excluding subjects: fold exclude_idx into the exclusion flags
   if exclufname and exclude_idx > -1:
      exclus[exclude_idx] = 1

   if exclufname:
      data   = data  [:,:,:,exclus == 0]
      labels = labels[exclus == 0]

   elif exclude_idx > -1:
      exclus = np.zeros(n, dtype=int)
      exclus[exclude_idx] = 1

      data   = data  [:,:,:,exclus == 0]
      labels = labels[exclus == 0]

   subsno = data.shape[3]

   #flatten spatial axes: one row of subject values per voxel
   shape = data.shape[0:3]
   siz   = np.prod(shape)
   temp  = data.reshape(siz, subsno)
   ind   = range(len(temp))

   if maskfname:
      mask   = nib.load(maskfname)
      mskdat = mask.get_data()
      mskdat = mskdat.reshape(siz)
      ind    = np.where(mskdat!=0)[0]

   #creating output volume file
   odat = np.zeros(shape, dtype=vol.get_data_dtype())

   for idx in ind:
      p = stats.pearsonr (labels, temp[idx,:])[0]

      #constant voxels give NaN correlations: store 0 instead
      if math.isnan (p): p = 0

      odat[np.unravel_index(idx, shape)] = p

   au.save_nibabel(outfname, odat, vol.get_affine())

   return outfname