def main():
    path='/local/attale00/'
    
    allFiles = utils.getAllFiles(path+'Multi-PIE/extracted')
    
    allLabelFiles = utils.getAllFiles(path+'Multi-PIE/db_labels')
    np.random.shuffle(allLabelFiles)
    #get the labels from the database, for each person
    sliceat=250
    labelstest=utils.parseLabelFiles(path+'Multi-PIE/db_labels','sex',allLabelFiles[0:sliceat],cutoffSeq='',suffix='')
    
    labelstraining = utils.parseLabelFiles(path+'Multi-PIE/db_labels','sex',allLabelFiles[sliceat:],cutoffSeq='',suffix='')
    #now generate the label dict for each file
    
    labsTest={}
    labsTraining={}
    for f in allFiles:
        if f[0:3]+'.labels' in labelstest:
            labsTest[f]=labelstest[f[0:3]+'.labels']
        elif f[0:3]+'.labels' in labelstraining:
            labsTraining[f]=labelstraining[f[0:3]+'.labels']
            
    testSet = fg.dataContainer(labsTest)
    trainingSet = fg.dataContainer(labsTraining)
    
    roi=(0,64,0,64)
    ppc=(8,8)
    cpb=(8,8)
    
    fg.getHogFeature(testSet,roi,path=path+'Multi-PIE_grayScale64/',ending=None,extraMask = None,pixels_per_cell=ppc,cells_per_block=cpb)
    
    fg.getHogFeature(trainingSet,roi,path=path+'Multi-PIE_grayScale64/',ending=None,extraMask = None,pixels_per_cell=ppc, cells_per_block=cpb)
    
    testSet.targetNum=map(utils.mapSexLabel2Two,testSet.target)
    trainingSet.targetNum = map(utils.mapSexLabel2Two,trainingSet.target)
    
    rf1=classifierUtils.standardRF(max_features=np.sqrt(len(testSet.data[0])))
    rf2=classifierUtils.standardRF(max_features=np.sqrt(len(trainingSet.data[0])))
    
    rf1.fit(testSet.data,testSet.targetNum)
    
    s=rf1.score(trainingSet.data,trainingSet.targetNum)
    trainingSet.classifiedAs=rf1.predict(trainingSet.data)
    trainingSet.hasBeenClassified=True
    classifierUtils.evaluateClassification(trainingSet,{0:'male',1:'female'})
    
    
    
    print 'Score: {}'.format(s)
    
    print '----------other way around ----\n'
    
    rf2.fit(trainingSet.data,trainingSet.targetNum)
    
    s=rf2.score(testSet.data,testSet.targetNum)
    testSet.classifiedAs=rf2.predict(testSet.data)
    testSet.hasBeenClassified=True
    classifierUtils.evaluateClassification(testSet,{0:'male',1:'female'})
    print 'Score: {}'.format(s)
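# classifierUtils.standardRF(...) is called throughout this listing but its definition is not
# included. A minimal sketch, assuming it is a thin wrapper around scikit-learn's
# RandomForestClassifier; the name standardRF_sketch and the defaults below are assumptions,
# not the original helper. The min_split argument is assumed to map to min_samples_split.
from sklearn.ensemble import RandomForestClassifier

def standardRF_sketch(max_features=None, min_split=5, max_depth=40, n_estimators=100):
    if max_features is not None:
        # the call sites pass np.sqrt(n_features) as a float
        max_features = int(max_features)
    return RandomForestClassifier(n_estimators=n_estimators,
                                  max_depth=max_depth,
                                  min_samples_split=min_split,
                                  max_features=max_features,
                                  n_jobs=-1)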
def main(mode):
    path = "/local/attale00/extracted_pascal__4__Multi-PIE"
    path_ea = path + "/color128/"

    allLabelFiles = utils.getAllFiles("/local/attale00/a_labels")

    labeledImages = [i[0:16] + ".png" for i in allLabelFiles]

    # labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs = utils.parseLabelFiles(
        "/local/attale00/a_labels", "mouth", labeledImages, cutoffSeq=".png", suffix="_face0.labels"
    )

    testSet = fg.dataContainer(labs)
    roi = (50, 74, 96, 160)
    X = fg.getAllImagesFlat(path_ea, testSet.fileNames, (128, 256), roi=roi)

    # perform ICA
    if mode not in ["s", "v"]:
        ica = FastICA(n_components=100, whiten=True)
        ica.fit(X)
        meanI = np.mean(X, axis=0)
        X1 = X - meanI
        data = ica.transform(X1)
        filters = ica.components_

    elif mode in ["s", "v"]:
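        # reuse a previously learned ICA basis: W is presumably the unmixing matrix
        # (ica.components_) and m the training-set mean that was subtracted before fitting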
        W = np.load("/home/attale00/Desktop/classifiers/ica/filter1.npy")
        m = np.load("/home/attale00/Desktop/classifiers/ica/meanI1.npy")
        X1 = X - m
        data = np.dot(X1, W.T)

    for i in range(len(testSet.data)):
        testSet.data[i].extend(data[i, :])

    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    # fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 3, cells_per_block=(6,2),maskFromAlpha=False)
    # fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=10)
    testSet.targetNum = map(utils.mapMouthLabels2Two, testSet.target)

    rf = classifierUtils.standardRF(max_features=np.sqrt(len(testSet.data[0])), min_split=5, max_depth=40)
    if mode in ["s", "v"]:
        print "Classifying with loaded classifier"
        classifierUtils.classifyWithOld(
            path, testSet, mode, clfPath="/home/attale00/Desktop/classifiers/ica/rf128ICA_1"
        )
    elif mode in ["c"]:
        print "cross validation of data"
        print "Scores"
        # print classifierUtils.standardCrossvalidation(rf,testSet,n_jobs=5)
        # _cvDissect(testSet,rf)
        classifierUtils.dissectedCV(rf, testSet)
        print "----"

    elif mode in ["save"]:
        print "saving new classifier"
        _saveRF(testSet)
    else:
        print "not doing anything"
# Example #3
def errorPatch():
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/mouth_img_error_multiPie/'
    
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
        
    
#    
    fileNames = labeledImages;

   
    
    
    testSet = fg.dataContainer(labs)
    
    
 
    fg.getImagePatchStat(testSet,path=path_ea,patchSize=(4,12),overlap = 2)
  
 
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)

    clfPath = '/home/attale00/Desktop/classifiers/errorpatches/rferror'
    f=file(clfPath,'r')
    print 'classifier used: '+ f.name
    clf = pickle.load(f)
    testSet.classifiedAs=clf.predict(testSet.data)
    testSet.probabilities=clf.predict_proba(testSet.data)      
    return testSet
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/mouth_img_error/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    fg_mode = 0
    size=(4,12)
    overlap=2
    #size=(40,120)
    fg.getImagePatchStat(testSet,path=path_ea,patchSize=size,overlap = overlap,mode=fg_mode)
    print 'feature vector length: {}'.format(len(testSet.data[0]))

    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=13,max_depth=40)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('errorpatch_size_{}'.format(size[0]),'w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
    
    #fileNames = utils.getAllFiles(path_ea);
    
    
    
    
    #labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(50,74,96,160)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;

    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 8, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,'s')
    elif mode in ['c']:
        print 'cross validation of data'
        print 'Scores'
        #print classifierUtils.standardCrossvalidation(rf,testSet,n_jobs=5)
        #_cvDissect(testSet,rf)
        classifierUtils.dissectedCV(rf,testSet)        
        print '----'
       
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
def multiPiePose():

    # the list of labeled images is built the same way as in the other Multi-PIE examples
    allLabelFiles = utils.getAllFiles('/local/attale00/a_labels')
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]

    path_pose = '/local/attale00/poseLabels/multipie'
    
    poseLabel = utils.parseLabelFiles(path_pose,'rotY',labeledImages,cutoffSeq='.png',suffix='.frog')
    
    poses = [(k,np.double(v)*180/np.pi) for k,v in poseLabel.iteritems()]
    poses = sorted(poses,key=lambda s: s[1])
    return poses
def aflwPose():

    # fileNames is built from the cropped-mouth directory, as in the aflwPose variant further down
    path_ea = '/local/attale00/AFLW_cropped/mouth_img_error/'
    fileNames = utils.getAllFiles(path_ea)

    path_pose = "/local/attale00/poseLabels/aflw"

    poseLabelaflw = utils.parseLabelFiles(path_pose, "rotY", fileNames, cutoffSeq=".png", suffix=".frog")

    poses_aflw = [(k, np.double(i) * 180 / np.pi) for k, i in poseLabelaflw.iteritems()]

    poses_aflw = sorted(poses_aflw, key=lambda s: s[1])

    return poses_aflw
# Example #8
def patches():
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/multiPIE_cropped3/'
    

   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
   
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')

    fileNames = labeledImages;
    testSet = fg.dataContainer(labs)
    
    
    roi=(0,37,0,115)
    roi=None    
 


            
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
 
   
    
    W=np.load('/home/attale00/Desktop/classifiers/patches/filter2.npy')
    m=np.load('/home/attale00/Desktop/classifiers/patches/meanI2.npy')
    X1=X-m
    data=np.dot(X1,W.T)    
    
    for i in range(len(fileNames)):
            testSet.data[i].extend(data[i,:])

    
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, pixels_per_cell=(24,8),cells_per_block=(3,3),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)
   
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    clfPath = '/home/attale00/Desktop/classifiers/patches/rfICAHogColor'
    f=file(clfPath,'r')
    print 'classifier used: '+ f.name
    clf = pickle.load(f)
    testSet.classifiedAs=clf.predict(testSet.data)
    testSet.probabilities=clf.predict_proba(testSet.data)      
        

    
    return testSet
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/mouth_img_error/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    testSet = fg.dataContainer(labs)
    components = 150
    roi=None
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(120,40),roi=roi,resizeFactor = .5)
# 
# perform ICA
    if mode not in ['s','v']:
        ica = FastICA(n_components=components,whiten=True)
        ica.fit(X)
        meanI=np.mean(X,axis=0)
        X1=X-meanI
        data=ica.transform(X1)
        filters=ica.components_
        
    elif mode in ['s','v']:
        W=np.load('/home/attale00/Desktop/classifiers/patches/filterMP1.npy')
        m=np.load('/home/attale00/Desktop/classifiers/patches/meanIMP1.npy')
        X1=X-m
        data=np.dot(X1,W.T)    
    
    for i in range(len(fileNames)):
            testSet.data[i].extend(data[i,:])
            
    print 'feature vector length: {}'.format(len(testSet.data[0]))

    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=13,max_depth=40)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('errorpatch_ica','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
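# _saveRF(...) is a local helper that is not part of this listing. From its call sites
# (sometimes with a prebuilt forest, sometimes with ICA filters, a mean image or an identifier)
# and from the pickle/np.save pattern used elsewhere in these examples, a sketch could be the
# following; the output file names are placeholders, not the original ones:
import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def _saveRF_sketch(testSet, rf=None, filters=None, meanI=None, identifier=''):
    if rf is None:
        rf = RandomForestClassifier(n_estimators=100)  # some call sites pass no forest
    rf.fit(testSet.data, testSet.targetNum)
    pickle.dump(rf, open('rf_classifier_{}'.format(identifier), 'w'))
    if filters is not None:
        np.save('filter_{}.npy'.format(identifier), filters)
    if meanI is not None:
        np.save('meanI_{}.npy'.format(identifier), meanI)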
# Example #10
def main(nJobs = 1):

    path = '/local/attale00/GoodPose/extracted_alpha/grayScale64'
    
    fileNames = utils.getAllFiles(path);
    

    labs=utils.parseLabelFiles('/local/attale00/GoodPose'+'/mouth_labels','mouth',fileNames,cutoffSeq='_0.png',suffix='_face0.labels')
    print('-----computing Features-----')

    roi2 = (0,32,0,64)
    mouthSet = fg.dataContainer(labs)

    #load the mask for the mouth room pixels and dilate it
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)

    m=dil>0;

    #get the features
    fg.getHogFeature(mouthSet,roi2,path=path+'/',ending=None,extraMask = None)
    
    #map the string labels to numbers (required by sklearn)
    #change the mapping here for different classifiers
    mouthSet.targetNum=map(utils.mapMouthLabels2Two,mouthSet.target)
    n_estimators = 100
    min_split = 10
    max_depth = 20
    max_features = np.sqrt(len(mouthSet.data[0]))
    
    rf = classifierUtils.standardRF(max_features = max_features)
    rf2=classifierUtils.standardRF(max_features=max_features)
    
    score=classifierUtils.standardCrossvalidation(rf2,mouthSet)

    rf.fit(mouthSet.data,mouthSet.targetNum)
    
    pickle.dump(rf,open('/home/attale00/Desktop/classifiers/RandomForestMouthclassifier_12','w'))
    
    f=open('/home/attale00/Desktop/classifiers/RandomForestMouthclassifier_12.txt','w')
    f.write('Trained on aflw\n')
    f.write('Attribute: mouth' )
    f.write('Features: getHogFeature(mouthSet,roi2,path=path,ending=None,extraMask = m) on 64*64 grayScale 3 direction bins \n')
    f.write('ROI:(0,32,0,64)\n')
    f.write('labels: closed, narrow: 0, open, wideOpen: 1\n')
    f.write('CV Score: {}\n'.format(score))
    f.close()
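# utils.mapMouthLabels2Two is applied to every label list in these examples but is not shown.
# The classifier description written out just above states the mapping
# ("closed, narrow: 0, open, wideOpen: 1"), so a sketch consistent with it would be:
def mapMouthLabels2Two_sketch(label):
    return 0 if label in ('closed', 'narrow') else 1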
def main(mode):
    path = '/local/attale00/AFLW_ALL'
    path_ea = path+'/color256/'
    
    fileNames = utils.getAllFiles(path_ea);
    
  
    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')

    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(88,165,150,362)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 4, cells_per_block=(26,9),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)
    print len(testSet.data)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        #classifierUtils.standardCrossvalidation(rf,testSet,n_jobs=5)
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
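# classifierUtils.standardCrossvalidation(...) appears in several of these examples but is not
# defined here; presumably it wraps scikit-learn's cross-validation. A sketch under that
# assumption, using the old sklearn.cross_validation module these Python 2 scripts appear to
# target:
import numpy as np
from sklearn import cross_validation

def standardCrossvalidation_sketch(clf, dataSet, n_jobs=1):
    scores = cross_validation.cross_val_score(clf, np.array(dataSet.data),
                                              y=np.array(dataSet.targetNum),
                                              cv=5, n_jobs=n_jobs)
    return scores.mean()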
# Example #12
def multiPiePose():

    

   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
        
   
    path_pose = '/local/attale00/poseLabels/multipie'
    
    poseLabel = utils.parseLabelFiles(path_pose,'rotY',labeledImages,cutoffSeq='.png',suffix='.frog')
    
    poses = np.array([np.double(i)*180/np.pi for i in poseLabel.values()])
    return poses
# Example #13
def aflwPose():

    path_ea = '/local/attale00/AFLW_cropped/mouth_img_error/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    
    path_pose = '/local/attale00/poseLabels/aflw'
    
    poseLabelaflw = utils.parseLabelFiles(path_pose,'rotY',fileNames,cutoffSeq='.png',suffix='.frog')
    
    poses_aflw = [np.double(i)*180/np.pi for i in poseLabelaflw.values()]
    
    

    return np.array(poses_aflw)
def main(mode):
    path='/local/attale00/'
    
    allFiles = utils.getAllFiles(path+'Multi-PIE/extracted')
    
    allLabelFiles = utils.getAllFiles(path+'Multi-PIE/labels')
    #allLabelFiles =  utils.getAllFiles(path+'a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    #labs=utils.parseLabelFiles(path+'a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
        
    
    testSet = fg.dataContainer(labs)
    
    
    roi = (0,32,0,64)
    #roi = (128,256,0,256)    
    
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path+'Multi-PIE_grayScale64/',ending=None,extraMask = None)
    fg.getColorHistogram(testSet,(50,190,110,402),path = path+'/Multi-PIE/extracted/',ending=None,colorspace='lab',range=(1.,255.0),bins = 20)
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    #testSet.targetNum = map(utils.mapGlassesLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color256/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(88,165,150,362)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 4, cells_per_block=(26,9),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,'v')
    elif mode in ['c']:
        print 'cross validation of data'
        _cross_validate(testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
def main(mode):
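    # NOTE: path, path_ea and path_label are not defined in this function; in the original
    # script they are presumably module-level globals set elsewhere.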

    
    poses = multiPiePose()
    #poses = multiPiePoseOriginal()
    poseDict=splitByPose(poses,binmax=75, stepsize = 30)

    
    for k,v in poseDict.iteritems():
        fn = [i[0] for i in v]
    
        labs=utils.parseLabelFiles(path_label,'mouth',fn,cutoffSeq='.png',suffix='_face0.labels')
        labs=dict((lk,lv) for (lk,lv) in labs.iteritems() if not lv.startswith('narr'))
    
    
        testSet = fg.dataContainer(labs)
    
    
 
        fg.getHogFeature(testSet,None,path=path_ea,ending='.png',extraMask = None,orientations = 9, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

   
            
    
        testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
        rf=classifierUtils.standardRF(max_features = 30,min_split=12,max_depth=70)
 
        if mode in ['s','v']:
            print 'Classifying with loaded classifier'
            print '------------------- pose {}-----------------'.format(k)
            plt.figure()
            #obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/poseSplit/pose{}'.format(k))
            obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/mirror/rfHogMirror')
            obj.plot(title='Mirrored, Pose: {}, ntot: {}, nOpen: {}'.format(k,len(testSet.data),testSet.targetNum.count(1)))
            pickle.dump(obj,open('multiPie_mirror_aggregate{}'.format(k),'w'))
        elif mode in ['c']:
            print 'cross validation of data'
            rValues = classifierUtils.dissectedCV(rf,testSet)
            plt.title('Pose: {}, n: {}'.format(k,len(v)))
            #pickle.dump(rValues,open('patches_pose_hog_{}'.format(k),'w'))
        elif mode in ['save']:
            print 'saving new classifier'
            _saveRF(testSet,rf,identifier = k)
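# splitByPose(...) is not included in this listing. From its use above (the dict values are
# lists of (fileName, yaw-in-degrees) tuples and the keys identify a pose bin), a plausible
# sketch, assuming it buckets the absolute yaw angle in steps of `stepsize` degrees up to
# `binmax` (all names and the binning rule are assumptions):
def splitByPose_sketch(poses, binmax=75, stepsize=30):
    poseDict = {}
    for name, angle in poses:
        binKey = min(int(abs(angle) // stepsize) * stepsize, binmax)
        poseDict.setdefault(binKey, []).append((name, angle))
    return poseDict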
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/cropped3/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    roi=None
    testSet = fg.dataContainer(labs)
    testSetMirror = fg.dataContainer(labs)
    for f in range(len(testSetMirror.fileNames)):
        testSetMirror.fileNames[f]+='M'
    
  
    orientations = 9
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

  
    fg.getHogFeature(testSetMirror,roi,path='/local/attale00/AFLW_cropped/mirrored/', ending='.png',orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8))    
    
    testSet.addContainer(testSetMirror)
  
 
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 30,min_split=12,max_depth=70)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('patches_cv_hog_{}'.format(orientations),'w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
# Example #18
def texture():
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
        
    testSet = fg.dataContainer(labs)    
    roi=(50,74,96,160)
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)


    W=np.load('/home/attale00/Desktop/classifiers/ica/filter1.npy')
    m=np.load('/home/attale00/Desktop/classifiers/ica/meanI1.npy')
    X1=X-m
    data=np.dot(X1,W.T)    
    
    for i in range(len(testSet.data)):
        testSet.data[i].extend(data[i,:])
    
 
  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 3, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=10)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
       
    clfPath = '/home/attale00/Desktop/classifiers/ica/rf128ICAHOGCOLOR'
    f=file(clfPath,'r')
    print 'classifier used: '+ f.name
    clf = pickle.load(f)
    testSet.classifiedAs=clf.predict(testSet.data)
    testSet.probabilities=clf.predict_proba(testSet.data)      
        

    
    return testSet
def main(mode):
    path = '/local/attale00/GoodPose'
    path_ea = path+'/pascal128/'
    
    fileNames = utils.getAllFiles(path+'/targets');
    
    
    
    
    labs=utils.parseLabelFiles(path+'/mouth_labels','mouth',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    #roi=(88,165,150,362)
    roi=(44,84,88,168)    
    
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, cells_per_block=(8,3))
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        _cross_validate(testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
# Example #20
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
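    # NOTE: path_ea (used in the HOG call below) is not defined in this function; it is
    # presumably a module-level global in the original script.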
    
    poses = aflwPose()
    poseDict=splitByPose(poses,binmax=100, stepsize = 40)

    
    for k,v in poseDict.iteritems():
        fn = [i[0] for i in v]
    
        labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fn,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
        testSet = fg.dataContainer(labs)
    
    
 
        fg.getHogFeature(testSet,None,path=path_ea,ending='.png',extraMask = None,orientations = 9, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

   
            
    
        testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
        rf=classifierUtils.standardRF(max_features = 40,min_split=12,max_depth=70)
 
        if mode in ['s','v']:
            print 'Classifying with loaded classifier'
            _classifyWithOld(path,testSet,mode)
        elif mode in ['c']:
            print 'cross validation of data'
            rValues = classifierUtils.dissectedCV(rf,testSet)
            plt.title('Pose: {}, n: {}'.format(k,len(v)))
            #pickle.dump(rValues,open('patches_pose_hog_{}'.format(k),'w'))
        elif mode in ['save']:
            print 'saving new classifier'
            _saveRF(testSet,rf,identifier = k)
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/cropped3/'
#    
    fileNames = utils.getAllFiles(path_ea);
#    minr = 10000;
#    for f in fileNames:
#        im = cv2.imread(path_ea+f,-1)
#        if im.shape[0]!=40 or im.shape[1]!=120:
#            print f
#            print im.shape
#        minr = minr if im.shape[0]>= minr else im.shape[0]
#    
#    print minr
#    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(0,37,0,115)
    roi=None
    filters = None
    meanI = None    
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;


    components = 150
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(120,40),roi=roi,resizeFactor = .5)
# 
# perform ICA
    if mode not in ['s','v']:
        ica = FastICA(n_components=components,whiten=True)
        ica.fit(X)
        meanI=np.mean(X,axis=0)
        X1=X-meanI
        data=ica.transform(X1)
        filters=ica.components_
        
    elif mode in ['s','v']:
        W=np.load('/home/attale00/Desktop/classifiers/patches/filterMP1.npy')
        m=np.load('/home/attale00/Desktop/classifiers/patches/meanIMP1.npy')
        X1=X-m
        data=np.dot(X1,W.T)    
    
    for i in range(len(fileNames)):
            testSet.data[i].extend(data[i,:])
    #orientations = 2

    #strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)
    #fg.getColorHistogram(testSet,(0,40,40,80),path=path_ea,ending='.png',colorspace='lab',bins=bins)
    #fg.getImagePatchStat(testSet,path=path_ea,patchSize=(4,12))
    #fg.getImagePatchStat(testSet,path='/local/attale00/AFLW_cropped/mouth_img_error/',patchSize=(4,12))
  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 23,min_split=15,max_depth=70,n_estimators=150)
    #rf=classifierUtils.standardRF(max_features = 5,min_split=12,max_depth=45)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('patches_cv_ica_{}'.format(components),'w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
def main():

    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")

    ###############################################################################
    # data
    path = "/local/attale00/AFLW_ALL/"
    path_ea = "/local/attale00/AFLW_cropped/cropped3/"

    fileNames = utils.getAllFiles(path_ea)

    labs = utils.parseLabelFiles(
        path + "/labels/labels", "mouth_opening", fileNames, cutoffSeq=".png", suffix="_face0.labels"
    )

    testSet = fg.dataContainer(labs)

    X = fg.getAllImagesFlat(path_ea, testSet.fileNames, (40, 120), roi=None)

    #
    # perform ICA
    ica = FastICA(n_components=250, whiten=True)
    ica.fit(X)
    meanI = np.mean(X, axis=0)
    X1 = X - meanI
    data = ica.transform(X1)

    for i in range(len(fileNames)):
        testSet.data[i].extend(data[i, :])

    DATA = np.zeros((len(testSet.data), len(testSet.data[0])))

    for i in range(len(testSet.data)):
        DATA[i, :] = np.array(testSet.data[i])

    testSet.targetNum = map(utils.mapMouthLabels2Two, testSet.target)

    classLabels = np.array(testSet.targetNum)

    scores = []

    parameters = {
        "n_estimators": range(90, 260, 20),
        "max_depth": range(50, 110, 20),
        "min_samples_split": range(5, 35, 5),
        "max_features": range(25, 75, 10),
        "min_samples_leaf": range(1, 10, 5),
    }

    #    parameters = {'n_estimators': range(90, 100,20),
    #                  'max_depth': range(50, 60,20),
    #                'min_samples_split':range(5,15,5),
    #                'max_features':range(25,35,10),
    #                'min_samples_leaf':range(1,10,5)}

    print "Performing grid search..."

    print "parameters:"
    pprint(parameters)

    for i in range(80, 260, 15):
        mf = i if i < 75 else 75
        parameters["max_features"] = range(10, mf, 10)
        s = GridSearchUtils.doGridSearch(data[:, 0:i], classLabels, parameters)
        temp = (s[0], s[1], i)
        scores.append(temp)

    scores_s = sorted(scores, key=lambda x: x[0])

    print scores_s

    pickle.dump(scores_s, open("/local/attale00/gridsearches/ICA_3", "w"))
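# GridSearchUtils.doGridSearch(...) is not part of this listing. Its return value is unpacked
# as (s[0], s[1]) above, so it presumably returns the best cross-validation score together
# with the corresponding parameter set. A sketch under that assumption, using the old
# sklearn.grid_search module these examples appear to target:
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV

def doGridSearch_sketch(data, labels, parameters, n_jobs=4):
    gs = GridSearchCV(RandomForestClassifier(), parameters, cv=3, n_jobs=n_jobs)
    gs.fit(data, labels)
    return gs.best_score_, gs.best_params_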
# Example #23
from sklearn.decomposition import FastICA
import numpy as np 
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')


###############################################################################
#data
path = '/local/attale00/AFLW_ALL/'
path_ea = '/local/attale00/AFLW_cropped/cropped2/'
#    
fileNames = utils.getAllFiles(path_ea);


labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')



testSet = fg.dataContainer(labs)


roi=(0,37,0,115)

 
X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(37,115),roi=roi)
 
#        
# perform ICA
ica = FastICA(n_components=100,whiten=True)
ica.fit(X)
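# The fitted model would typically be used as in the other examples in this listing:
# meanI = np.mean(X, axis=0); data = ica.transform(X - meanI); filters = ica.components_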
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/multiPIE_cropped3/'
    

   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=dict((k,v) for (k,v) in labs.iteritems() if not v.startswith('narr'))
 
#    
    
   
    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(0,37,0,115)
    roi=None    



            
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
 
        
    # perform ICA
#    if mode not in ['s','v']:
#        ica = FastICA(n_components=100,whiten=True)
#        ica.fit(X)
#        meanI=np.mean(X,axis=0)
#        X1=X-meanI
#        data=ica.transform(X1)
#        filters=ica.components_
#        
#    elif mode in ['s','v']:
#        W=np.load('/home/attale00/Desktop/classifiers/thesis/filtercombined.npy')
#        m=np.load('/home/attale00/Desktop/classifiers/thesis/meancombined.npy')
#        X1=X-m
#        data=np.dot(X1,W.T)    
#    
#    for i in range(len(testSet.fileNames)):
#            testSet.data[i].extend(data[i,:])
#
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 9, pixels_per_cell=(24,8),cells_per_block=(3,3),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=40)
    #fg.getImagePatchStat(testSet,path='/local/attale00/AFLW_cropped/mouth_img_error_multiPie/',patchSize =(4,12))
  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 27,min_split=13,max_depth=40)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/color_only')       
        pickle.dump(obj,open('color_only','w'))
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('patches_mp_','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
def main(nJobs = 1):

    path = '/local/attale00/GoodPose'
    path_ea = path+'/extracted_alpha'
    path_adata = path_ea + '/a_data'
    
    filenames = utils.getAllFiles(path+'/targets');
    
    attribute = 'mouth'
    
    attribute_values = utils.parseLabelINIFile(path+'/mouth_labels/labels.ini',attribute);
    
    print('------------Attribute: \t'+attribute+' ---------------')
    for i in attribute_values:
        print('Value: \t'+i)
        
    print('----------------------------')
    print('----------parsing label files------')
    labs=utils.parseLabelFiles(path+'/mouth_labels','mouth',filenames,cutoffSeq='.png',suffix='_face0.labels')
    print('-----computing Features-----')
    #make 10 bin hist for each mouth
    mouthSet = fg.getHistogram(10,(0,270,60,452),hrange=(155.,255.0),labelFileDict = labs,path = path_ea+'/grayScale/',ending='_0.png')
    
    mouthSet.targetNum = map(utils.mapMouthLabels2Two,mouthSet.target)
    fpr=[]
    tpr=[]
    fnr = []    

    for i in xrange(1,101,5):
        trainingSet,testSet = mouthSet.splitInTestAndTraining(frac=.6)
        (a,b,c)=_classify(trainingSet, testSet,n_estimators=i)
        tpr.append(a)
        fpr.append(b)
        fnr.append(c)
    
    plt.figure()
    plt.plot(range(1,101,5),tpr)
    plt.xlabel('number of trees')
    plt.ylabel('score')
    plt.title('.6 training set, max_feature 3, min_split 1' )
   
    fpr=[]
    tpr=[]
    fnr = []
    for i in xrange(1,20):
        trainingSet,testSet = mouthSet.splitInTestAndTraining(frac=i*.05)
        (a,b,c)=_classify(trainingSet, testSet,n_estimators=70)
        tpr.append(a)
        fpr.append(b)
        fnr.append(c)
    
    plt.figure()
    plt.plot(np.arange(0.05,1,0.05),tpr)
    plt.xlabel('fraction used as training set')
    plt.ylabel('score')
    plt.title('ntrees = 70, max_feature 3, min_split 1' )
    
    fpr=[]
    tpr=[]
    fnr = []
    for i in xrange(1,11):
        trainingSet,testSet = mouthSet.splitInTestAndTraining(frac=.7)
        (a,b,c)=_classify(trainingSet, testSet,n_estimators=70,max_features=i)
        tpr.append(a)
        fpr.append(b)
        fnr.append(c)
    
    plt.figure()
    plt.plot(range(1,11),tpr)
    plt.xlabel('number of features used')
    plt.ylabel('score')
    plt.title('ntrees = 70,frac=.7 min_split 1' )
    
    
    plt.show()
    
    #classifier
    #linSVM = svm.SVC(kernel = 'linear',C=1)
    
    #this takes forever: check if that can be true
    #scoresLinSVM = cross_validation.cross_val_score(linSVM,data,y=targetNum,n_jobs=-1,verbose = 1)
    
    #implement random forest classifier with verbosity level
    
    #scoresRF = cross_validation.cross_val_score(rf,mouthSet.data,y=mouthSet.targetNum,n_jobs=nJobs,verbose = 1)
    #print(scoresRF)    
    return
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/cropped3/'
#    
    fileNames = utils.getAllFiles(path_ea);
#    minr = 10000;
#    for f in fileNames:
#        im = cv2.imread(path_ea+f,-1)
#        if im.shape[0]!=40 or im.shape[1]!=120:
#            print f
#            print im.shape
#        minr = minr if im.shape[0]>= minr else im.shape[0]
#    
#    print minr
#    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    roi=(0,37,0,115)
    roi=None
    filters = None
    meanI = None
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;


            
 
    #X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
   
# perform ICA 
    names_open = []
    names_closed = []
    for i,f in enumerate(testSet.fileNames):
        if testSet.targetNum[i] == 0:
            names_closed.append(f)
        elif testSet.targetNum[i] == 1:
            names_open.append(f)
    Xopen = fg.getAllImagesFlat(path_ea,names_open,(40,120))
    XClosed = fg.getAllImagesFlat(path_ea,names_closed,(40,120))
    
    if mode not in ['s','v']:
        icaopen = FastICA(n_components=100,whiten=True)
        icaopen.fit(Xopen)
        meanIopen=np.mean(Xopen,axis=0)
        X1open=Xopen-meanIopen
        dataopen=icaopen.transform(X1open)
        filtersopen=icaopen.components_
        plottingUtils.showICAComponents(filtersopen,(40,120),4,4)
        icaclosed = FastICA(n_components=100,whiten=True)
        icaclosed.fit(XClosed)
        meanIclosed=np.mean(XClosed,axis=0)
        X1closed=XClosed-meanIclosed
        dataclosed=icaclosed.transform(X1closed)
        filtersclosed=icaclosed.components_        
        plottingUtils.showICAComponents(filtersclosed,(40,120),4,4)
        
        plt.show()
        
    elif mode in ['s','v']:
        # X is only needed in this branch; the call further up is commented out, so load it here
        X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
        W=np.load('/home/attale00/Desktop/classifiers/patches/filterMP1.npy')
        m=np.load('/home/attale00/Desktop/classifiers/patches/meanIMP1.npy')
        X1=X-m
        data=np.dot(X1,W.T)

        # only this branch produces projected features, so only here are they appended
        for i in range(len(fileNames)):
            testSet.data[i].extend(data[i,:])


    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)
    #fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)

  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    
    rf=classifierUtils.standardRF(max_features = 27,min_split=13,max_depth=40)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
def main(mode):
    labelFiles='/local/attale00/aflw_original_labels'
    path_ea = '/local/attale00/AFLW_cropped/eyes/'
    

    fileNames = utils.getAllFiles(path_ea)
    goods = []
    for f in fileNames:
        im = cv2.imread(path_ea+f)
        if im.shape[0] == 40 and im.shape[1] ==120:
            goods.append(f)
    
    fileNames = goods
    labs=utils.parseLabelFiles(labelFiles,'glasses',fileNames,cutoffSeq='.png',suffix='.labels')
    
    
   
    
    
    testSet = fg.dataContainer(labs)
    
    


            
# 
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
# 
#        
#    # perform ICA
#    if mode not in ['s','v']:
#        ica = FastICA(n_components=100,whiten=True)
#        ica.fit(X)
#        meanI=np.mean(X,axis=0)
#        X1=X-meanI
#        data=ica.transform(X1)
#        filters=ica.components_
#        
#    elif mode in ['s','v']:
#        W=np.load('/home/attale00/Desktop/classifiers/patches/filter2.npy')
#        m=np.load('/home/attale00/Desktop/classifiers/patches/meanI2.npy')
#        X1=X-m
#        data=np.dot(X1,W.T)    
#    
#    for i in range(len(testSet.fileNames)):
#            testSet.data[i].extend(data[i,:])
#
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    fg.getHogFeature(testSet,None,path=path_ea,ending='.png',extraMask = None,orientations = 5, pixels_per_cell=(24,8),cells_per_block=(3,3),maskFromAlpha=False)

    #testSet.targetNum=map(lambda x: 1 if x=='light' else 0,testSet.target)
   
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/color_only')       
        #pickle.dump(obj,open('color_only','w'))
    elif mode in ['c']:
        print 'cross validation of data'
        
        
        testSet.targetNum=map(lambda x: 1 if x=='1' else 0,testSet.target)
        rf=classifierUtils.standardRF(max_features = 27,min_split=5,max_depth=60,n_estimators = 500)
        rValues = classifierUtils.dissectedCV(rf,testSet)
        plt.title('No glasses against rest')
        plt.show()
        
        #pickle.dump(rValues,open('patches_mp_','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        testSet.targetNum=map(lambda x: 1 if x=='1' else 0,testSet.target)
        rf=classifierUtils.standardRF(max_features = 27,min_split=5,max_depth=60,n_estimators = 500)
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
# Example #28
def main(mode):
    path = '/local/attale00/AFLW_ALL'
    path_ea = path+'/color128/'
    
    fileNames = utils.getAllFiles(path_ea);
    
    
    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    testSetMirror = fg.dataContainer(labs)
    for f in range(len(testSetMirror.fileNames)):
        testSetMirror.fileNames[f]+='M'
    
    
    roi=(50,74,96,160)
 
 

    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)
    Y=fg.getAllImagesFlat(path+'/mirror128/',testSet.fileNames,(128,256),roi=roi)
    Z=np.concatenate((X,Y),axis=0)
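    # fit a single ICA basis on the original and mirrored crops together, then centre each
    # set with the shared mean and project it onto the common unmixing matrix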
    # perform ICA
    ica = FastICA(n_components=100,whiten=True)
    ica.fit(Z)
    meanI=np.mean(Z,axis=0)
    X1=X-meanI
    Y1=Y-meanI    
    data=ica.transform(X1)
    datam=ica.transform(Y1)
    filters=ica.components_
    for i in range(len(fileNames)):
        testSet.data[i].extend(data[i,:])
        testSetMirror.data[i].extend(datam[i,:])


    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 3, cells_per_block=(6,2),maskFromAlpha=False)
    #fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=10)

  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
    testSet.addContainer(testSetMirror)
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
def main(nJobs = 1):

    path = '/local/attale00'
    
    fileNames = utils.getAllFiles(path+'/targets');
    
    attribute = 'mouth'
    
    attribute_values = utils.parseLabelINIFile(path+'/mouth_labels/labels.ini',attribute);
    
    print('------------Attribute: \t'+attribute+' ---------------')
    for i in attribute_values:
        print('Value: \t'+i)
        
    print('----------------------------')
    print('----------parsing label files------')
    labs=utils.parseLabelFiles(path+'/mouth_labels','mouth',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    print('-----computing Features-----')
    #make 10 bin hist for each mouth
    #roi = (40,200,100,200)
    roi = (50,190,110,402) 
    roi2=(128,256,0,256)
    mouthSet = fg.dataContainer(labs)
    #fg.getHistogram(20,roi,hrange=(0,255),dataC = mouthSet,path = path+'/extracted/gradients/Direction/',ending='_0.png')
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    
    m=dil>0;
#    em=m[roi[0]:roi[1],roi[2]:roi[3]]
#    m= m !=True
  
    fg.getHogFeature(mouthSet,roi2,path=path+'/targets/grayScaleSmall/',ending=None,extraMask = None)
    #fg.getPixelValues(mouthSet,roi,path=path_ea+'/',ending='_0.png',mask =m, scaleFactor = 10)    
    #fg.getColorHistogram(mouthSet,roi,path=path_ea+'/',ending='_0.png',colorspace=None,range=(1.0,255.0),bins = 20)   
    mouthSet.targetNum=map(utils.mapMouthLabels2Two,mouthSet.target)
    n_estimators = range(10,180,20);
    max_features = range(2,22,2)
    max_depth = range(5,40,5)
    max_depth.append(100)
    
    min_split = range(1,20,2)
    
    score=[]
    var = []
    for n in n_estimators:    
        scoresRF = _crossValidate(mouthSet, max_depth = 20,n_estimators =n ,nJobs = nJobs,max_features = np.sqrt(len(mouthSet.data[0])),min_split = 5)
   
        score.append(scoresRF.mean())
        var.append(scoresRF.std())
        
    print scoresRF
    plt.errorbar(n_estimators,score,yerr=var)
    plt.xlabel('number of trees')
#    plt.ylabel('cross val score')
    
#    mouthSet2 = fg.dataContainer(labs)
#    roi=(256,512,0,512)
#    fg.getColorHistogram(mouthSet2,roi,path=path+'/targets/',ending=None,colorspace='lab',range=(1.,255.0),bins = 20)
#    mouthSet2.targetNum=map(utils.mapMouthLabels2Two,mouthSet2.target)
#    score=[]
#    var = []
#    for n in n_estimators:    
#        scoresRF = _crossValidate(mouthSet2, max_depth = 20,n_estimators =n ,nJobs = nJobs,max_features = np.sqrt(len(mouthSet2.data[0])),min_split = 5)
#   
#        score.append(scoresRF.mean())
#        var.append(scoresRF.std())
#        
#    print scoresRF
#    plt.errorbar(n_estimators,score,yerr=var)
#    plt.xlabel('number of trees')
#    plt.ylabel('cross val score')
    
     

#    fg.getColorHistogram(mouthSet,roi,path=path_ea+'/',ending='_0.png',colorspace='lab',range=(100.0,255.0),bins = 20)
#
#    score=[]
#    var = []
#    for n in n_estimators:    
#        scoresRF = _crossValidate(mouthSet, max_depth = 20,n_estimators =n ,nJobs = nJobs,max_features = np.sqrt(len(mouthSet.data[0])),min_split = 5)
#   
#        score.append(scoresRF.mean())
#        var.append(scoresRF.std())
#        
#    print scoresRF
#    plt.errorbar(n_estimators,score,yerr=var)
#    plt.xlabel('number of trees')
#    plt.ylabel('cross val score')
#    plt.legend(['HOG','LAB','HOG+LAB'])
#    plt.title('20bins')
    
    plt.show()        
    
    #classifier
    #linSVM = svm.SVC(kernel = 'linear',C=1)
    
    #this takes forever: check if that can be true
    #scoresLinSVM = cross_validation.cross_val_score(linSVM,data,y=targetNum,n_jobs=-1,verbose = 1)
    
    #implement random forest classifier with verbosity level
#    roi_narrow=(60,160,130,382)
#    extraMask = np.load('/home/attale00/Desktop/emptyMouthMask.npy')
#    
#    fg.getMeanAndVariance(roi_narrow,mouthSet,path_ea+'/',extraMask = extraMask,ending='_0.png')
#    scoresRF = _crossValidate(mouthSet,max_features = 13)
#    print 'Orientation and mean and cov' +str(scoresRF)    
    return
def main(mode):
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
        
    testSet = fg.dataContainer(labs)    
    roi=(50,74,96,160)
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)
#
#    
#    # perform ICA
#    if mode not in ['s','v']:
#        ica = FastICA(n_components=50,whiten=True)
#        ica.fit(X)
#        meanI=np.mean(X,axis=0)
#        X1=X-meanI
#        data=ica.transform(X1)
#        filters=ica.components_
#        
#    elif mode in ['s','v']:
#        W=np.load('/home/attale00/Desktop/classifiers/thesis/filter3.npy')
#        m=np.load('/home/attale00/Desktop/classifiers/thesis/meanI3.npy')
#        X1=X-m
#        data=np.dot(X1,W.T)    
#    
#    for i in range(len(testSet.data)):
#        testSet.data[i].extend(data[i,:])
###    
#    
#   
#
#
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 4, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getPoseLabel(testSet,pathToPoseFiles='/local/attale00/poseLabels/multipie/')    
    #fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=40)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/texture/hog_pose')
        pickle.dump(obj,open('hog_pose','w'))
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('texture_mp_','w'))
       
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'