Example #1
def loadTest(yObject,yMatch,features):
        
        """
        Method to load the test data into the object.  We might be interested
        in loading new test data into, so we have explicitly defined this is
        as a method.
        
        Parameters:
        - - - - -
            y : SubjectFeatures object for a test brain      
            yMatch : MatchingFeaturesTest object containing vertLib attribute 
                    detailing which labels each vertex in surface y maps to 
                    in the training data
        """
        features = list(features.split(','))

        loadFeatures = copy.copy(features)
        loadFeatures = list(set(features).difference({'label'}))

        # load test subject data, save as attributes
        tObject = ld.loadH5(yObject,*['full'])
        ID = tObject.attrs['ID']

        parsedData = ld.parseH5(tObject,loadFeatures)
        tObject.close()

        data = parsedData[ID]
        mtd = cu.mergeFeatures(data,features)

        threshed = ld.loadMat(yMatch)

        ltvm = cu.vertexMemberships(threshed,180)

        return [threshed,mtd,ltvm]
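A minimal usage sketch of the loader above; the file paths are placeholders and the interpretation of the returned values is inferred from the calls inside the function, not taken verbatim from the source.

# Hypothetical call to loadTest; paths and feature names are placeholders.
testFeatures = 'curv,myelin,label'            # comma-separated, as expected by split(',')
subjectFile = '/path/to/subject.full.h5'      # HDF5 test object read by ld.loadH5
matchFile = '/path/to/subject.match.mat'      # matching matrix read by ld.loadMat

threshed, mtd, ltvm = loadTest(subjectFile, matchFile, testFeatures)
# threshed : matching matrix loaded from matchFile
# mtd      : merged feature array for the test subject
# ltvm     : label-to-vertex memberships computed over 180 labels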
Example #2
def loadTest(yObject, yMatch, features):
    """
        Method to load the test data into the object.  We might be interested
        in loading new test data into, so we have explicitly defined this is
        as a method.
        
        Parameters:
        - - - - -
            y : SubjectFeatures object for a test brain   
            
            yMatch : MatchingFeaturesTest object containing vertLib attribute 
                    detailing which labels each vertex in surface y maps to 
                    in the training data
                    
            features : feature to included in the test data numpy array
                        these must be the same as the training data
        """

    nf = []
    for f in features:
        if f != 'label':
            nf.append(f)

    print('Train features: {}'.format(features))
    print('Test features: {}'.format(nf))

    # load test subject data, save as attributes
    tObject = ld.loadH5(yObject, *['full'])
    ID = tObject.attrs['ID']

    parsedData = ld.parseH5(tObject, nf)
    tObject.close()

    data = parsedData[ID]
    mtd = cu.mergeFeatures(data, nf)

    threshed = ld.loadMat(yMatch)

    ltvm = cu.vertexMemberships(threshed, 180)

    return [threshed, mtd, ltvm]
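The filtering loop above simply drops the 'label' feature; for reference, an equivalent one-line comprehension:

# Equivalent to the loop above: keep every requested feature except 'label'.
nf = [f for f in features if f != 'label']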
Example #3
                        testOutput = '{}{}.{}'.format(outDirIter, test_subj,
                                                      outputExt)
                        #print 'Test Output: {}'.format(testOutput)

                        if not os.path.isfile(testOutput):

                            testObject = '{}{}.{}.{}'.format(
                                testDir, test_subj, hExt, testExt)

                            testMids = '{}{}.{}.{}'.format(
                                midsDir, test_subj, hExt, midsExt)
                            #print 'Test Mids: {}'.format(testMids)

                            testMatch = '{}{}.{}.{}'.format(
                                matchDir, test_subj, hExt, matchExt)
                            tm = ld.loadMat(testMatch)
                            #print 'Test Match: {}'.format(testMatch)

                            mids = ld.loadMat(testMids) - 1

                            if fExt == '.p':
                                # If the model was a random forest, the current model is a LIST
                                # of models.  We feed this into malp.parallelPredictiong
                                # along with the test data

                                if classifier == 'GMM':

                                    [mm, mtd,
                                     ltvm] = loadTest(currentModel, testObject,
                                                      testMatch)
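The `fExt == '.p'` branch implies the trained model was serialized with pickle; a hedged sketch of how such a model file might be loaded (the path is a placeholder, and for a random forest the unpickled object is expected to be a list of models, per the comment above):

import pickle

modelFile = '/path/to/trained_model.p'    # placeholder path for the pickled model
with open(modelFile, 'rb') as mdl:
    currentModel = pickle.load(mdl)       # a single model, or a LIST for random forests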
Example #4
midDir = args.midlineDir
midExt = args.midlineExt

subjectList = args.subjectList

with open(subjectList,'r') as inSubj:
    subjects = inSubj.readlines()
subjects = [x.strip() for x in subjects]

hemiMap = {'Left': 'L',
           'Right': 'R'}

for subj in subjects:
    
    for h in hemiMap.keys():
        
        matchMatrix = ''.join([matchDir,subj,'.',hemiMap[h],'.',matchExt])
        midline = ''.join([midDir,subj,'.',hemiMap[h],'.',midExt])
        outFile = ''.join([predDir,subj,'.',hemiMap[h],'.',predExt])
        
        if os.path.exists(matchMatrix) and os.path.exists(midline):
            if not os.path.exists(outFile):
            
                matching = ld.loadMat(matchMatrix)
                midline = ld.loadMat(midline) - 1
                prediction = np.argmax(matching,axis=1)+1
                prediction[midline] = 0
                
                wg.writeGiftiImage(prediction,outFile,''.join(['Cortex',h]))
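The core of this script is the argmax-based prediction followed by zeroing the midline; a small self-contained NumPy illustration of those two steps (the matrix values are made up):

import numpy as np

# Toy matching matrix: 4 vertices x 3 labels (values are illustrative only).
matching = np.array([[0.1, 0.7, 0.2],
                     [0.5, 0.3, 0.2],
                     [0.2, 0.2, 0.6],
                     [0.9, 0.0, 0.1]])
midline = np.array([3])                        # midline vertex indices, already 0-based

prediction = np.argmax(matching, axis=1) + 1   # labels are 1-indexed
prediction[midline] = 0                        # midline vertices receive label 0
print(prediction)                              # [2 1 3 0]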
        
Example #5
                        #print 'Test Output: {}'.format(testOutput)
                        
                        # Check to make sure current subject hasn't been run yet
                        # If it has, skip
                        if not os.path.isfile(testOutput):
                            
                            testObject = '{}{}.{}.{}'.format(testDir,test_subj,hExt,testExt)
                            #print 'Test Object: {}'.format(testObject)
                            
                            testMids = '{}{}.{}.{}'.format(midsDir,test_subj,hExt,midsExt)
                            #print 'Test Mids: {}'.format(testMids)
                            
                            testMatch = '{}{}.{}.{}'.format(matchDir,test_subj,hExt,matchExt)
                            #print 'Test Match: {}'.format(testMatch)
    
                            mids = ld.loadMat(testMids)-1

                            if fExt == '.p':
                                # If the model was a random forest, the current model is a LIST
                                # of models.  We feed this into malp.parallelPredictiong
                                # along with the test data
                                if classifier == 'RandomForest':
        
                                    P = parallelPredictRF(currentModel,
                                                                  testObject,
                                                                  testMatch,
                                                                  testMids)
        
                                    P[mids] = 0
                
                                    myl.darrays[0].data = np.array(P).astype(np.float32)
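`myl` is not defined in this excerpt; it is presumably a functional GIFTI image loaded earlier, as in the later examples. A hedged sketch of that load/assign/save pattern with nibabel, using placeholder paths:

import nibabel as nb
import numpy as np

myl = nb.load('/path/to/template.func.gii')            # placeholder functional file
myl.darrays[0].data = np.array(P).astype(np.float32)   # P computed as above
nb.save(myl, '/path/to/prediction.func.gii')           # placeholder output path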
Example #6
        outDest = '{}{}.{}.{}'.format(fullDir, s, hstr, troExt)

        data = h5py.File(trainingObject, mode='r+')
        arrays = data[list(data.keys())[0]]

        if 'fs_central' in arrays.keys():
            del arrays['fs_central']
        if 'vertVar' in arrays.keys():
            del arrays['vertVar']

        if 'subcort' in arrays.keys():
            arrays['fs_subcort'] = arrays.pop('subcort')

        curv = ld.loadGii(curvObject)
        fsSubCort = ld.loadMat(fsSubCortObject)
        print(fsSubCort.shape)
        ptxCort = ld.loadMat(ptxCortObject)
        ptxSubCort = ld.loadMat(ptxSubCortObject)

        if 'fs_subcort' in arrays.keys():
            del arrays['fs_subcort']
            arrays['fs_subcort'] = fsSubCort

        if 'curv' in arrays.keys():
            del arrays['curv']
            arrays['curv'] = curv

        if 'pt_cort' in arrays.keys():
            del arrays['pt_cort']
            arrays['pt_cort'] = np.log(ptxCort)
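The repeated delete-then-assign pattern above is how an existing dataset is replaced inside an HDF5 group; a minimal standalone sketch of the same idea (file name and replacement array are made up):

import h5py
import numpy as np

newCurv = np.random.rand(32492).astype(np.float32)   # made-up replacement array

with h5py.File('training.h5', mode='r+') as data:     # placeholder file name
    arrays = data[list(data.keys())[0]]               # first (only) top-level group
    if 'curv' in arrays.keys():
        del arrays['curv']                            # unlink the old dataset
    arrays['curv'] = newCurv                          # write the replacement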
Example #7
# Loop over test subjects
for test_subj in testList:

    print('Test Subject: {}'.format(test_subj))

    # Load functional file for saving prediction later
    inFunc = '{}MyelinDensity/{}.{}.MyelinMap.32k_fs_LR.func.gii'.format(
        baseDir, test_subj, hm)
    assert os.path.isfile(inFunc)
    func = nb.load(inFunc)

    # Load midline indices
    mid = '{}Midlines/{}.{}.Midline_Indices.mat'.format(baseDir, test_subj, hm)
    assert os.path.isfile(mid)
    mid = ld.loadMat(mid) - 1

    # Construct output file name
    outExtension = ''.join(['.', hm, '.', oute])
    outPrediction = ''.join([outd, test_subj, outExtension])

    if not os.path.isfile(outPrediction):

        # Scale test data, load matching frequencies, compute label-to-vertex mappings
        [data, match, ltvm] = pcd.testing(P, test_subj, trDir=objd, trExt=obje)
        # Compute prediction
        [baseline, threshold, predicted] = nnu.predict(data,
                                                       match,
                                                       model,
                                                       power=pw)
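The excerpt ends before the prediction is written out; since `func` was loaded above "for saving prediction later", the save step presumably resembles the following sketch (an assumption, not verbatim from the source; numpy is assumed to be imported as np, as in the other examples):

        # Assumed continuation: store the predicted labels in the loaded
        # functional image and write it to the output path built above.
        func.darrays[0].data = np.array(predicted).astype(np.float32)
        nb.save(func, outPrediction)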