# Check how many expected output files already exist for the masks in
# `masklst`; if every mask already has an output, bail out early.
# NOTE(review): this fragment starts mid-function — the enclosing `def`
# (which defines masklst, scale, otype, prefixes, outdir, subjsf, au, np,
# get_out_extension) is outside the visible code.
    nmasks = len(masklst)
    nouts = 0
    m = 0  # index into `prefixes`, advanced in lockstep with masklst
    for maskf in masklst:
        # Expected output file name; scaled runs carry an extra
        # scaled-marker segment before the extension.
        # NOTE(review): `maskf` itself is unused in this loop body.
        if not scale:
            ofname = au.features_str() + get_out_extension(otype)
        else:
            ofname = au.features_str() + '.' + au.scaled_str(
            ) + get_out_extension(otype)

        # Optional per-mask prefix: '<prefix>_<features><ext>'
        if prefixes[m]:
            ofname = prefixes[m] + '_' + ofname

        # Count matching files already present in the output directory.
        oc = len(au.find(os.listdir(outdir), ofname))
        nouts += oc
        m += 1

    # One output per mask is expected: with that many (or more) already
    # on disk there is nothing left to do here.
    if nouts >= nmasks:
        au.log.debug('Nothing to do in ' + outdir + '. All files processed.')
        return -1
    else:
        au.log.debug('Processing to output in: ' + outdir)

    # number of subjects (line count of the subjects file)
    subjsnum = au.file_len(subjsf)

    # reading subjects list
    subjlabels = np.zeros(subjsnum, dtype=int)
    subjslist = {}
# Example #2  (extraction artifact: scraper boundary marker between snippets)
# 0           (extraction artifact: scraped metadata, not code)
         for tidx in tenum:
            t = thrs[tidx]

            # Pseudocode outline (was embedded as bare, invalid statements):
            #   for each subject:
            #       calculate_mean_without_subject
            #       create_svmperf_file()
            #       perform_grid_search
            #       perform_train
            #       perform_test
            #       save_results

            try:
               #in this case we will only have one trainset file per threshold
               # Pick the (expected single, lexicographically first) svmperf
               # train/test feature file for threshold `t`; scaled runs use
               # the '.scaled' filename variant.
               if scaled:
                  trainfeatsf = np.sort(au.find (os.listdir('.'), str(t) + 'thrP_features.scaled.svmperf'))[0]
                  testfeatsf  = np.sort(au.find (os.listdir('.'), str(t) + 'thrP_excludedfeats.scaled.svmperf'))[0]
               else:
                  trainfeatsf = np.sort(au.find (os.listdir('.'), str(t) + 'thrP_features.svmperf'))[0]
                  testfeatsf  = np.sort(au.find (os.listdir('.'), str(t) + 'thrP_excludedfeats.svmperf'))[0]
            except:
               # NOTE(review): bare `except` also traps SystemExit and
               # KeyboardInterrupt. An IndexError here means no matching
               # file was found; narrowing to (IndexError, OSError) would
               # be safer. Left unchanged — fragment is incomplete.
               print ('Unexpected error: ' + str(sys.exc_info()))
               print ('Failed looking for file ' + str(t) + 'thrP*.svmperf ' + ' in ' + os.getcwd())
               exit(-1)

            # Experiment name: '<mask>.<dataset>.<threshold>thr.<metric>'.
            # NOTE(review): `m`, `d` and `rocarea_opt` come from the unseen
            # enclosing scope.
            expname = m + '.' + d + '.' + str(t) + 'thr'
            if not rocarea_opt:
               expname += '.errorrate'
            else:
               expname += '.rocarea'
# Example #3  (extraction artifact: scraper boundary marker between snippets)
# 0           (extraction artifact: scraped metadata, not code)
def do_darya_localizations ():
    """Create localization volumes/images from the .mat files in `locd`.

    For every '.mat' file found: load its localization data, map the
    selected voxels onto the coordinates of the (dilated) brain mask,
    then save a binary NIfTI volume and a PNG overlay on the template.

    NOTE(review): relies on helpers defined elsewhere in this file or its
    imports (get_filepaths, au, np, os, sys, plt, aizc).
    """
    import scipy.io as sio

    sys.path.append('/home/alexandre/Dropbox/Documents/work/visualize_volume')
    import visualize_volume as vis

    import aizkolari_utils as au
    au.setup_logger()

    locd = '/home/alexandre/Dropbox/ELM 2013/ELM-2013-Darya/localization'

    wd, xd, dd, labelsf, phenof, dataf, masks, dilmasks, templates, pipe = get_filepaths()

    # Voxel coordinates (one row per axis) of all in-mask voxels; the
    # localization indices below select columns of this array.
    maskf          = dilmasks[0]
    mask, hdr, aff = au.get_nii_data(maskf)
    indices        = np.array(np.where(mask > 0))

    tmpltf         = templates[0]
    tmplt, _, _    = au.get_nii_data(tmpltf)

    flist = os.listdir(locd)
    flist = au.find(flist, '.mat')
    flist.sort()

    for f in flist:
        data = sio.loadmat(os.path.join(locd, f))

        name = au.remove_ext(f)

        if f.find('cobre') >= 0:
            # COBRE files carry a per-voxel 'pearson' vector; positive
            # entries mark the localized voxels.
            p = data['pearson'].squeeze()
            locs = p > 0
            lidx = indices[:, locs].squeeze()

            # Keep the reho/alff Pearson vectors: the non-cobre 'pearson'
            # files below index relative to their positive subsets.
            if f.find('reho') >= 0:
                preho = p.copy()
            elif f.find('alff') >= 0:
                palff = p.copy()

        else:
            # Other files store indices under the file's own base name.
            locs = data[name].squeeze()
            locs -= 1  # Matlab 1-based -> Python 0-based
            if f.find('pearson') >= 0:
                # Indices are relative to the previously stored reho/alff
                # positive-Pearson subset.
                # NOTE(review): assumes the matching 'cobre' file was
                # processed earlier in the sorted flist — TODO confirm.
                if f.find('reho') >= 0:
                    lidx = indices[:, preho > 0]
                elif f.find('alff') >= 0:
                    lidx = indices[:, palff > 0]

                lidx = lidx[:, locs]
            else:
                lidx = indices[:, locs].squeeze()

        # Binary volume with 1s at the localized voxels.
        # Fix: `np.float` was an alias of the builtin and was removed in
        # NumPy 1.24 — using plain `float` is the exact equivalent.
        locvol = np.zeros_like(mask, dtype=float)
        locvol[tuple(lidx)] = 1

        #save nifti volume
        au.save_nibabel (os.path.join(locd, name + '.nii.gz'), locvol, aff, hdr)

        #save image
        fig = vis.show_many_slices(tmplt, locvol, volaxis=1, vol2_colormap=plt.cm.autumn, figtitle=name)
        aizc.save_fig_to_png(fig, os.path.join(locd, name + '.png'))
        # NOTE(review): from here on the code appears to be spliced from a
        # different function during extraction — `fig2name`, `cvdir`, `d`,
        # `i`, `ofname`, `tenum`, `thrs` and `scaled` are not defined in
        # the visible surrounding code.
        if os.access (os.path.join(xd,'innercrop'), os.X_OK):
            # Crop the saved PNG in place with the external 'innercrop'
            # tool, if it is executable.
            au.exec_command (os.path.join(xd,'innercrop') + ' -o white ' + fig2name + ' ' + fig2name)
            tstdir = cvdir + os.path.sep + d + '_' + i
            outdir = tstdir
            ofname = outdir + os.path.sep + ofname

            os.chdir(tstdir)
            au.log.debug('cd ' + tstdir)

            for tidx in tenum:
                t = thrs[tidx]

                try:
                    #in this case we will only have one trainset file per threshold
                    # Locate the single expected svmperf train/test feature
                    # files for threshold `t` in the current directory.
                    if scaled:
                        trainfeatsf = np.sort(
                            au.find(os.listdir('.'),
                                    str(t) +
                                    'thrP_features.scaled.svmperf'))[0]
                        testfeatsf = np.sort(
                            au.find(
                                os.listdir('.'),
                                str(t) +
                                'thrP_excludedfeats.scaled.svmperf'))[0]
                    else:
                        trainfeatsf = np.sort(
                            au.find(os.listdir('.'),
                                    str(t) + 'thrP_features.svmperf'))[0]
                        testfeatsf = np.sort(
                            au.find(os.listdir('.'),
                                    str(t) + 'thrP_excludedfeats.svmperf'))[0]
                except:
                    # NOTE(review): bare `except` logs and swallows every
                    # exception (including KeyboardInterrupt); an
                    # IndexError here means no matching file was found.
                    au.log.error('Unexpected error: ' + str(sys.exc_info()))
    au.setup_logger(verbose)

    # checking number of files processed
    # NOTE(review): this stanza duplicates the single-quoted fragment at
    # the top of this file; both count already-produced outputs per mask
    # and bail out early when every mask has been processed. The enclosing
    # `def` (defining masklst, scale, otype, prefixes, outdir, subjsf,
    # verbose, au, np) is outside the visible code.
    nmasks = len(masklst)
    nouts = 0
    m = 0  # index into `prefixes`, advanced in lockstep with masklst
    for maskf in masklst:
        # Expected output file name; scaled runs carry an extra
        # scaled-marker segment before the extension.
        if not scale:
            ofname = au.features_str() + get_out_extension(otype)
        else:
            ofname = au.features_str() + "." + au.scaled_str() + get_out_extension(otype)

        # Optional per-mask prefix: '<prefix>_<features><ext>'
        if prefixes[m]:
            ofname = prefixes[m] + "_" + ofname

        # Count matching files already present in the output directory.
        oc = len(au.find(os.listdir(outdir), ofname))
        nouts += oc
        m += 1

    # One output per mask is expected: with that many (or more) already
    # on disk there is nothing left to do here.
    if nouts >= nmasks:
        au.log.debug("Nothing to do in " + outdir + ". All files processed.")
        return -1
    else:
        au.log.debug("Processing to output in: " + outdir)

    # number of subjects (line count of the subjects file)
    subjsnum = au.file_len(subjsf)

    # reading subjects list
    subjlabels = np.zeros(subjsnum, dtype=int)
    subjslist = {}