def main():
    path='/local/attale00/'
    
    allFiles = utils.getAllFiles(path+'Multi-PIE/extracted')
    
    allLabelFiles = utils.getAllFiles(path+'Multi-PIE/db_labels')
    np.random.shuffle(allLabelFiles)
    #get the labels from the database, for each person
    sliceat=250
    labelstest=utils.parseLabelFiles(path+'Multi-PIE/db_labels','sex',allLabelFiles[0:sliceat],cutoffSeq='',suffix='')
    
    labelstraining = utils.parseLabelFiles(path+'Multi-PIE/db_labels','sex',allLabelFiles[sliceat:],cutoffSeq='',suffix='')
    #now generate the label dict for each file
    
    labsTest={}
    labsTraining={}
    for f in allFiles:
        key = f[0:3]+'.labels'
        if key in labelstest:
            labsTest[f] = labelstest[key]
        elif key in labelstraining:
            labsTraining[f] = labelstraining[key]
            
    testSet = fg.dataContainer(labsTest)
    trainingSet = fg.dataContainer(labsTraining)
    
    roi=(0,64,0,64)
    ppc=(8,8)
    cpb=(8,8)
    
    fg.getHogFeature(testSet,roi,path=path+'Multi-PIE_grayScale64/',ending=None,extraMask = None,pixels_per_cell=ppc,cells_per_block=cpb)
    
    fg.getHogFeature(trainingSet,roi,path=path+'Multi-PIE_grayScale64/',ending=None,extraMask = None,pixels_per_cell=ppc, cells_per_block=cpb)
    
    testSet.targetNum=map(utils.mapSexLabel2Two,testSet.target)
    trainingSet.targetNum = map(utils.mapSexLabel2Two,trainingSet.target)
    
    rf1=classifierUtils.standardRF(max_features=np.sqrt(len(testSet.data[0])))
    rf2=classifierUtils.standardRF(max_features=np.sqrt(len(trainingSet.data[0])))
    
    rf1.fit(testSet.data,testSet.targetNum)
    
    s=rf1.score(trainingSet.data,trainingSet.targetNum)
    trainingSet.classifiedAs=rf1.predict(trainingSet.data)
    trainingSet.hasBeenClassified=True
    classifierUtils.evaluateClassification(trainingSet,{0:'male',1:'female'})
    
    
    
    print 'Score: {}'.format(s)
    
    print '---------- other way around ----------\n'
    
    rf2.fit(trainingSet.data,trainingSet.targetNum)
    
    s=rf2.score(testSet.data,testSet.targetNum)
    testSet.classifiedAs=rf2.predict(testSet.data)
    testSet.hasBeenClassified=True
    classifierUtils.evaluateClassification(testSet,{0:'male',1:'female'})
    print 'Score: {}'.format(s)
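
#
# A minimal sketch of what classifierUtils.standardRF presumably wraps (assumption: the
# helper itself is not shown in this file): a scikit-learn RandomForestClassifier whose
# min_split argument maps onto min_samples_split. Names and defaults below are
# illustrative only, not the project's actual implementation.
from sklearn.ensemble import RandomForestClassifier

def _standardRF_sketch(n_estimators=100, max_features='sqrt', min_split=10, max_depth=20):
    #max_features may also be an int, e.g. sqrt of the feature count as computed above
    return RandomForestClassifier(n_estimators=n_estimators,
                                  max_features=max_features,
                                  min_samples_split=min_split,
                                  max_depth=max_depth)
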
def main(mode):
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
    
    #fileNames = utils.getAllFiles(path_ea);
    #labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(50,74,96,160)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;

    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 8, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,'s')
    elif mode in ['c']:
        print 'cross validation of data'
        print 'Scores'
        #print classifierUtils.standardCrossvalidation(rf,testSet,n_jobs=5)
        #_cvDissect(testSet,rf)
        classifierUtils.dissectedCV(rf,testSet)        
        print '----'
       
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
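
#
# Sketch (assumption): fg.getColorHistogram is project code not shown here. For the
# 'lab' colorspace it plausibly builds per-channel histograms of the ROI and appends
# them to each sample's feature vector; an illustrative stand-alone version:
import cv2
import numpy as np

def _lab_histogram_sketch(imagePath, roi, bins=20):
    #roi is (row_start, row_end, col_start, col_end), as used above
    img = cv2.imread(imagePath)
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    patch = lab[roi[0]:roi[1], roi[2]:roi[3]]
    feats = []
    for c in range(3):
        hist, _ = np.histogram(patch[:, :, c], bins=bins, range=(0, 255))
        feats.extend(hist.tolist())
    return feats
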
def patches():
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/multiPIE_cropped3/'
    

   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
   
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')

    fileNames = labeledImages;
    testSet = fg.dataContainer(labs)
    
    
    #roi=(0,37,0,115)
    roi=None
 


            
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
 
   
    
    W=np.load('/home/attale00/Desktop/classifiers/patches/filter2.npy')
    m=np.load('/home/attale00/Desktop/classifiers/patches/meanI2.npy')
    X1=X-m
    data=np.dot(X1,W.T)    
    
    for i in range(len(fileNames)):
            testSet.data[i].extend(data[i,:])

    
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, pixels_per_cell=(24,8),cells_per_block=(3,3),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)
   
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    clfPath = '/home/attale00/Desktop/classifiers/patches/rfICAHogColor'
    f = open(clfPath,'r')
    print 'classifier used: '+ f.name
    clf = pickle.load(f)
    testSet.classifiedAs=clf.predict(testSet.data)
    testSet.probabilities=clf.predict_proba(testSet.data)      
        

    
    return testSet
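
#
# Note (assumption): filter2.npy / meanI2.npy above are presumably the components_ and
# the training-data mean of a FastICA model fitted elsewhere (see the ICA training code
# further down in this file). With whiten=True, FastICA.transform reduces to exactly
# the projection used above:
def _ica_project_sketch(X, meanI, components):
    import numpy as np
    #equivalent to ica.transform(X) for a whitened fit with these components
    return np.dot(X - meanI, components.T)
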
def main(nJobs = 1):

    path = '/local/attale00/GoodPose/extracted_alpha/grayScale64'
    
    fileNames = utils.getAllFiles(path);
    

    labs=utils.parseLabelFiles('/local/attale00/GoodPose'+'/mouth_labels','mouth',fileNames,cutoffSeq='_0.png',suffix='_face0.labels')
    print('-----computing Features-----')

    roi2 = (0,32,0,64)
    mouthSet = fg.dataContainer(labs)

    #load the mask for the mouth region pixels and dilate it
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)

    m=dil>0;

    #get the features
    fg.getHogFeature(mouthSet,roi2,path=path+'/',ending=None,extraMask = None)
    
    #map the string labels to numbers (required by sklearn)
    #change the mapping here for different classifiers
    mouthSet.targetNum=map(utils.mapMouthLabels2Two,mouthSet.target)
    n_estimators = 100
    min_split = 10
    max_depth = 20
    max_features = np.sqrt(len(mouthSet.data[0]))
    
    rf = classifierUtils.standardRF(max_features = max_features)
    rf2=classifierUtils.standardRF(max_features=max_features)
    
    score=classifierUtils.standardCrossvalidation(rf2,mouthSet)

    rf.fit(mouthSet.data,mouthSet.targetNum)
    
    pickle.dump(rf,open('/home/attale00/Desktop/classifiers/RandomForestMouthclassifier_12','w'))
    
    f=open('/home/attale00/Desktop/classifiers/RandomForestMouthclassifier_12.txt','w')
    f.write('Trained on aflw\n')
    f.write('Attribute: mouth\n')
    f.write('Features: getHogFeature(mouthSet,roi2,path=path,ending=None,extraMask = m) on 64*64 grayScale 3 direction bins \n')
    f.write('ROI:(0,32,0,64)\n')
    f.write('labels: closed, narrow: 0, open, wideOpen: 1\n')
    f.write('CV Score: {}\n'.format(score))
    f.close()
def main(mode):
    path = '/local/attale00/AFLW_ALL'
    path_ea = path+'/color256/'
    
    fileNames = utils.getAllFiles(path_ea);
    
  
    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')

    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(88,165,150,362)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 4, cells_per_block=(26,9),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)
    print len(testSet.data)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        #classifierUtils.standardCrossvalidation(rf,testSet,n_jobs=5)
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
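
#
# Sketch (assumption): fg.getHogFeature is project code; judging from its parameters
# (orientations, pixels_per_cell, cells_per_block) it most likely forwards them to
# skimage.feature.hog applied to the gray-scale crop of each image. Illustrative only,
# using skimage's default cell/block sizes:
import cv2
from skimage.feature import hog

def _hog_of_roi_sketch(imagePath, roi=None, orientations=9,
                       pixels_per_cell=(8, 8), cells_per_block=(3, 3)):
    gray = cv2.imread(imagePath, 0)  # 0 = load as gray scale
    if roi is not None:
        gray = gray[roi[0]:roi[1], roi[2]:roi[3]]
    return hog(gray, orientations=orientations,
               pixels_per_cell=pixels_per_cell,
               cells_per_block=cells_per_block)
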
def main(mode):
    path='/local/attale00/'
    
    allFiles = utils.getAllFiles(path+'Multi-PIE/extracted')
    
    allLabelFiles = utils.getAllFiles(path+'Multi-PIE/labels')
    #allLabelFiles =  utils.getAllFiles(path+'a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    #labs=utils.parseLabelFiles(path+'a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
        
    
    testSet = fg.dataContainer(labs)
    
    
    roi = (0,32,0,64)
    #roi = (128,256,0,256)    
    
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path+'Multi-PIE_grayScale64/',ending=None,extraMask = None)
    fg.getColorHistogram(testSet,(50,190,110,402),path = path+'/Multi-PIE/extracted/',ending=None,colorspace='lab',range=(1.,255.0),bins = 20)
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    #testSet.targetNum = map(utils.mapGlassesLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color256/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    roi=(88,165,150,362)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 4, cells_per_block=(26,9),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,'v')
    elif mode in ['c']:
        print 'cross validation of data'
        _cross_validate(testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
def main(mode):

    
    poses = multiPiePose()
    #poses = multiPiePoseOriginal()
    poseDict=splitByPose(poses,binmax=75, stepsize = 30)

    
    for k,v in poseDict.iteritems():
        fn = [i[0] for i in v]
    
        labs=utils.parseLabelFiles(path_label,'mouth',fn,cutoffSeq='.png',suffix='_face0.labels')
        labs=dict((lk,lv) for (lk,lv) in labs.iteritems() if not lv.startswith('narr'))
    
    
        testSet = fg.dataContainer(labs)
    
    
 
        fg.getHogFeature(testSet,None,path=path_ea,ending='.png',extraMask = None,orientations = 9, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

   
            
    
        testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
        rf=classifierUtils.standardRF(max_features = 30,min_split=12,max_depth=70)
 
        if mode in ['s','v']:
            print 'Classifying with loaded classifier'
            print '------------------- pose {}-----------------'.format(k)
            plt.figure()
            #obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/poseSplit/pose{}'.format(k))
            obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/mirror/rfHogMirror')
            obj.plot(title='Mirrored,Pose: {}, ntot: {}, nOpen{}'.format(k,len(testSet.data),testSet.targetNum.count(1)))
            pickle.dump(obj,open('multiPie_mirror_aggregate{}'.format(k),'w'))
        elif mode in ['c']:
            print 'cross validation of data'
            rValues = classifierUtils.dissectedCV(rf,testSet)
            plt.title('Pose: {}, n: {}'.format(k,len(v)))
            #pickle.dump(rValues,open('patches_pose_hog_{}'.format(k),'w'))
        elif mode in ['save']:
            print 'saving new classifier'
            _saveRF(testSet,rf,identifier = k)
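
#
# Sketch (assumption): splitByPose and the pose readers (multiPiePose/aflwPose) are not
# shown in this file. From the usage above, poses is an iterable of (fileName, yawAngle)
# pairs and the result maps each bin value to the samples closest to it. A minimal
# illustrative version:
import numpy as np

def _splitByPose_sketch(poses, binmax=75, stepsize=30, bins=None):
    if bins is None:
        bins = range(-binmax, binmax + stepsize, stepsize)
    poseDict = dict((b, []) for b in bins)
    edges = np.array(bins, dtype=float)
    for fileName, angle in poses:
        idx = int(np.argmin(np.abs(edges - angle)))  #nearest bin value
        poseDict[bins[idx]].append((fileName, angle))
    return poseDict
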
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/cropped3/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    roi=None
    testSet = fg.dataContainer(labs)
    testSetMirror = fg.dataContainer(labs)
    for f in range(len(testSetMirror.fileNames)):
        testSetMirror.fileNames[f]+='M'
    
  
    orientations = 9
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

  
    fg.getHogFeature(testSetMirror,roi,path='/local/attale00/AFLW_cropped/mirrored/', ending='.png',orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8))    
    
    testSet.addContainer(testSetMirror)
  
 
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 30,min_split=12,max_depth=70)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('patches_cv_hog_{}'.format(orientations),'w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
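
#
# Note (assumption): the mirrored HOG features above are read from a separate
# 'mirrored' directory; that directory is presumably generated offline by horizontally
# flipping each cropped patch (the 'M' appended to fileNames only keeps the mirrored
# entries distinct inside the container). A possible generation script:
import os
import cv2

def _make_mirrored_sketch(srcDir, dstDir):
    for f in os.listdir(srcDir):
        img = cv2.imread(os.path.join(srcDir, f), -1)  # -1: keep the alpha channel
        if img is not None:
            cv2.imwrite(os.path.join(dstDir, f), cv2.flip(img, 1))  # 1 = horizontal flip
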
def texture():
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
        
    testSet = fg.dataContainer(labs)    
    roi=(50,74,96,160)
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)


    W=np.load('/home/attale00/Desktop/classifiers/ica/filter1.npy')
    m=np.load('/home/attale00/Desktop/classifiers/ica/meanI1.npy')
    X1=X-m
    data=np.dot(X1,W.T)    
    
    for i in range(len(testSet.data)):
        testSet.data[i].extend(data[i,:])
    
 
  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 3, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=10)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
       
    clfPath = '/home/attale00/Desktop/classifiers/ica/rf128ICAHOGCOLOR'
    f = open(clfPath,'r')
    print 'classifier used: '+ f.name
    clf = pickle.load(f)
    testSet.classifiedAs=clf.predict(testSet.data)
    testSet.probabilities=clf.predict_proba(testSet.data)      
        

    
    return testSet
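
#
# Usage sketch (assumption): the container returned by texture() (and by patches()
# above) carries both the numeric ground truth and the predictions, so a caller could
# score it directly, e.g.:
#
#     ts = texture()
#     correct = sum(1 for p, t in zip(ts.classifiedAs, ts.targetNum) if p == t)
#     print 'accuracy: {:.3f}'.format(float(correct) / len(ts.targetNum))
#
# classifierUtils.evaluateClassification, used in the first script above, would give
# the per-class breakdown instead.
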
def main(mode):
    path = '/local/attale00/GoodPose'
    path_ea = path+'/pascal128/'
    
    fileNames = utils.getAllFiles(path+'/targets');
    
    
    
    
    labs=utils.parseLabelFiles(path+'/mouth_labels','mouth',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    #roi=(88,165,150,362)
    roi=(44,84,88,168)    
    
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    m=dil>0;

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, cells_per_block=(8,3))
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        _cross_validate(testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/mouth_img_error/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    fg_mode = 0
    size=(4,12)
    overlap=2
    #size=(40,120)
    orientations = 9
    roi=None
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

    print 'feature vector length: {}'.format(len(testSet.data[0]))

    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=13,max_depth=40)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('errorpatch_hog','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    
    poses = aflwPose()
    poseDict=splitByPose(poses,binmax=100, stepsize = 40)

    
    for k,v in poseDict.iteritems():
        fn = [i[0] for i in v]
    
        labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fn,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
        testSet = fg.dataContainer(labs)
    
    
 
        fg.getHogFeature(testSet,None,path=path_ea,ending='.png',extraMask = None,orientations = 9, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

   
            
    
        testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
        rf=classifierUtils.standardRF(max_features = 40,min_split=12,max_depth=70)
 
        if mode in ['s','v']:
            print 'Classifying with loaded classifier'
            _classifyWithOld(path,testSet,mode)
        elif mode in ['c']:
            print 'cross validation of data'
            rValues = classifierUtils.dissectedCV(rf,testSet)
            plt.title('Pose: {}, n: {}'.format(k,len(v)))
            #pickle.dump(rValues,open('patches_pose_hog_{}'.format(k),'w'))
        elif mode in ['save']:
            print 'saving new classifier'
            _saveRF(testSet,rf,identifier = k)
def main(mode):
    
    path_mp = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_eamp = path_mp+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labsmp=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')    
    
    path = '/local/attale00/AFLW_ALL'
    path_ea = path+'/color128/'
    
    fileNames = utils.getAllFiles(path_ea);
    
    
    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')

    
    
    testSet = fg.dataContainer(labs)
    testSetmp = fg.dataContainer(labsmp)
    
    roi=(50,74,96,160)
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;


   
    
            
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)
    Y=fg.getAllImagesFlat(path_eamp,testSetmp.fileNames,(128,256),roi=roi)
    Z=np.concatenate((X,Y),axis=0)
#        
    # perform ICA
    ica = FastICA(n_components=100,whiten=True)
    ica.fit(Z)
    meanI=np.mean(Z,axis=0)
    X1=X-meanI
    Y1=Y-meanI
    data=ica.transform(X1)
    datamp = ica.transform(Y1)
    filters=ica.components_
    for i in range(len(testSet.fileNames)):
        testSet.data[i].extend(data[i,:])
        
    for i in range(len(testSetmp.fileNames)):
        testSetmp.data[i].extend(datamp[i,:])
    
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 3, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=10)
    
    fg.getHogFeature(testSetmp,roi,path=path_eamp,ending='.png',extraMask = None,orientations = 3, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSetmp,roi,path=path_eamp,ending='.png',colorspace='lab',bins=10)
  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
    testSet.addContainer(testSetmp)
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
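
#
# Sketch (assumption): _saveRF is not shown in this file. When called with filters and
# meanI it presumably pickles the fitted forest and stores the ICA basis with numpy,
# matching the filter*.npy / meanI*.npy files that are np.load'ed elsewhere in these
# scripts. The path suffixes below are illustrative only:
import pickle
import numpy as np

def _saveRF_sketch(testSet, rf, clfPath, filters=None, meanI=None):
    rf.fit(testSet.data, testSet.targetNum)
    pickle.dump(rf, open(clfPath, 'w'))
    if filters is not None:
        np.save(clfPath + '_filter.npy', filters)
        np.save(clfPath + '_meanI.npy', meanI)
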
def main(nJobs = 1):

    path = '/local/attale00/GoodPose'
    path_ea = path+'/extracted_alpha'
    path_adata = path_ea + '/a_data'
    
    fileNames = utils.getAllFiles(path+'/targets');
    
    attribute = 'mouth'
    
    attribute_values = utils.parseLabelINIFile(path+'/mouth_labels/labels.ini',attribute);
    
    print('------------Attribute: \t'+attribute+' ---------------')
    for i in attribute_values:
        print('Value: \t'+i)
        
    print('----------------------------')
    print('----------parsing label files------')
    labs=utils.parseLabelFiles(path+'/mouth_labels','mouth',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    #labs=utils.parseLabelFiles(path+'/mouth_labels/labels','glasses',fileNames,cutoffSeq='.png',suffix='_face0.labels')

    print('-----computing Features-----')
    #make 10 bin hist for each mouth
    #roi = (40,200,100,200)
    roi = (50,190,110,402) 
    roi2=(0,128,0,256)
    roi=(0,64,0,128)
    #roi2=(128,256,0,256)
    mouthSet = fg.dataContainer(labs)
    #fg.getHistogram(20,roi,hrange=(0,255),dataC = mouthSet,path = path+'/extracted/gradients/Direction/',ending='_0.png')
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    
    m=dil>0;
#    em=m[roi[0]:roi[1],roi[2]:roi[3]]
#    m= m !=True
  
    fg.getHogFeature(mouthSet,roi2,path=path_ea+'/grayScale128/',ending='_0.png',extraMask = None)
    #fg.getPixelValues(mouthSet,roi,path=path_ea+'/',ending='_0.png',mask =m, scaleFactor = 10)    
    #fg.getColorHistogram(mouthSet,roi,path=path_ea+'/',ending='_0.png',colorspace=None,range=(1.0,255.0),bins = 20)   
    mouthSet.targetNum=map(utils.mapMouthLabels2Two,mouthSet.target)
    #mouthSet.targetNum=map(utils.mapGlassesLabels2Two,mouthSet.target)
    
    
    score=[]
    frac=np.arange(0.2,1.0,.05)
    for i in frac:
        trainingSet,testSet=mouthSet.splitInTestAndTraining(frac=i)
        rf=classifierUtils.standardRF(max_features = np.sqrt(len(mouthSet.data[0])))
        rf.fit(trainingSet.data,trainingSet.targetNum)
        score.append(rf.score(testSet.data,testSet.targetNum))
        testSet.hasBeenClassified=True
        testSet.classifiedAs=rf.predict(testSet.data)
        print '---------------- {} -----------'.format(i)
        classifierUtils.evaluateClassification(testSet,{0:'closed',1:'open'})
    plt.plot(frac,score,'-*')
    plt.show()        
  
    return
def main(mode):
    labelFiles='/local/attale00/aflw_original_labels'
    path_ea = '/local/attale00/AFLW_cropped/eyes/'
    

    fileNames = utils.getAllFiles(path_ea)
    goods = []
    for f in fileNames:
        im = cv2.imread(path_ea+f)
        #skip unreadable files and patches that are not 40x120
        if im is not None and im.shape[0] == 40 and im.shape[1] == 120:
            goods.append(f)
    
    fileNames = goods
    labs=utils.parseLabelFiles(labelFiles,'glasses',fileNames,cutoffSeq='.png',suffix='.labels')
    
    
   
    
    
    testSet = fg.dataContainer(labs)
    
    


            
# 
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
# 
#        
#    # perform ICA
#    if mode not in ['s','v']:
#        ica = FastICA(n_components=100,whiten=True)
#        ica.fit(X)
#        meanI=np.mean(X,axis=0)
#        X1=X-meanI
#        data=ica.transform(X1)
#        filters=ica.components_
#        
#    elif mode in ['s','v']:
#        W=np.load('/home/attale00/Desktop/classifiers/patches/filter2.npy')
#        m=np.load('/home/attale00/Desktop/classifiers/patches/meanI2.npy')
#        X1=X-m
#        data=np.dot(X1,W.T)    
#    
#    for i in range(len(testSet.fileNames)):
#            testSet.data[i].extend(data[i,:])
#
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    fg.getHogFeature(testSet,None,path=path_ea,ending='.png',extraMask = None,orientations = 5, pixels_per_cell=(24,8),cells_per_block=(3,3),maskFromAlpha=False)

    #testSet.targetNum=map(lambda x: 1 if x=='light' else 0,testSet.target)
   
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/color_only')       
        #pickle.dump(obj,open('color_only','w'))
    elif mode in ['c']:
        print 'cross validation of data'
        
        
        testSet.targetNum=map(lambda x: 1 if x=='1' else 0,testSet.target)
        rf=classifierUtils.standardRF(max_features = 27,min_split=5,max_depth=60,n_estimators = 500)
        rValues = classifierUtils.dissectedCV(rf,testSet)
        plt.title('No glasses against rest')
        plt.show()
        
        #pickle.dump(rValues,open('patches_mp_','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        testSet.targetNum=map(lambda x: 1 if x=='1' else 0,testSet.target)
        rf=classifierUtils.standardRF(max_features = 27,min_split=5,max_depth=60,n_estimators = 500)
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/AFLW_ALL'
    path_ea = path+'/color128/'
    
    fileNames = utils.getAllFiles(path_ea);
    
    
    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    
   
    #roi=None
    roi=(50,74,96,160)
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)
    #Y=fg.getAllImagesFlat(path_mp,mpFiles,(128,256),roi=roi)
    #Z=np.concatenate((X,Y),axis=0)
    Z=X
        
     #perform ICA
    ica = FastICA(n_components=50,whiten=True)
    ica.fit(Z)
    meanI=np.mean(X,axis=0)
    
    X1=X-meanI
    data=ica.transform(X1)
    filters=ica.components_
    for i in range(len(fileNames)):
        testSet.data[i].extend(data[i,:])

    orientations = 4
    bins=40


    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = orientations, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=bins)

  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=1,max_depth=70)
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('texture_combined','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
        
        
    return
def main(mode):
    path = '/local/attale00/extracted_pascal__4__Multi-PIE'
    path_ea = path+'/color128/'
   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
        
    testSet = fg.dataContainer(labs)    
    roi=(50,74,96,160)
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(128,256),roi=roi)
#
#    
#    # perform ICA
#    if mode not in ['s','v']:
#        ica = FastICA(n_components=50,whiten=True)
#        ica.fit(X)
#        meanI=np.mean(X,axis=0)
#        X1=X-meanI
#        data=ica.transform(X1)
#        filters=ica.components_
#        
#    elif mode in ['s','v']:
#        W=np.load('/home/attale00/Desktop/classifiers/thesis/filter3.npy')
#        m=np.load('/home/attale00/Desktop/classifiers/thesis/meanI3.npy')
#        X1=X-m
#        data=np.dot(X1,W.T)    
#    
#    for i in range(len(testSet.data)):
#        testSet.data[i].extend(data[i,:])
###    
#    
#   
#
#
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))

  
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 4, cells_per_block=(6,2),maskFromAlpha=False)
    fg.getPoseLabel(testSet,pathToPoseFiles='/local/attale00/poseLabels/multipie/')    
    #fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=40)    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    
    rf=classifierUtils.standardRF(max_features = np.sqrt(len(testSet.data[0])),min_split=5,max_depth=40)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        obj=classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/thesis/texture/hog_pose')
        pickle.dump(obj,open('hog_pose','w'))
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('texture_mp_','w'))
       
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/cropped3/'
#    
    fileNames = utils.getAllFiles(path_ea);
#    minr = 10000;
#    for f in fileNames:
#        im = cv2.imread(path_ea+f,-1)
#        if im.shape[0]!=40 or im.shape[1]!=120:
#            print f
#            print im.shape
#        minr = minr if im.shape[0]>= minr else im.shape[0]
#    
#    print minr
#    
    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    testSetMirror = fg.dataContainer(labs)
    for f in range(len(testSetMirror.fileNames)):
        testSetMirror.fileNames[f]+='M'
    
    roi=None    
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;


            
# 
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
#    Y=fg.getAllImagesFlat('/local/attale00/AFLW_cropped/mirrored/',testSet.fileNames,(40,120),roi=roi)
#    Z=np.concatenate((X,Y),axis=0)
#    
#    # perform ICA
#    ica = FastICA(n_components=100,whiten=True)
#    ica.fit(Z)
#    meanI=np.mean(Z,axis=0)
#    X1=X-meanI
#    Y1=Y-meanI    
#    data=ica.transform(X1)
#    datam=ica.transform(Y1)
#    filters=ica.components_
#    for i in range(len(fileNames)):
#        testSet.data[i].extend(data[i,:])
#        testSetMirror.data[i].extend(datam[i,:])
##        
#
#
#
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    orientations = 9
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)

  
    fg.getHogFeature(testSetMirror,roi,path='/local/attale00/AFLW_cropped/mirrored/', ending='.png',orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8))    
    
    testSet.addContainer(testSetMirror)
  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 17,min_split=7,max_depth=40,n_estimators=200)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        #the ICA block that would define filters/meanI is commented out above
        _saveRF(testSet,rf)
    else:
        print 'not doing anything'
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/cropped3/'
#    
    fileNames = utils.getAllFiles(path_ea);

    
    labs=utils.parseLabelFiles(path+'/labels/labels','mouth_opening',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    
    
    
    testSet = fg.dataContainer(labs)
    
    
    #roi=(0,37,0,115)
    roi=None
    filters = None
    meanI = None    


    components = 35 #100 #150
    bins=40
    orientations = 9
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
#    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(120,40),roi=roi,resizeFactor = .5)
# 
# perform ICA
    if mode not in ['s','v']:
        ica = FastICA(n_components=components,whiten=True)
        ica.fit(X)
        meanI=np.mean(X,axis=0)
        X1=X-meanI
        data=ica.transform(X1)
        filters=ica.components_
        
    elif mode in ['s','v']:
        W=np.load('/home/attale00/Desktop/classifiers/patches/filterMP1.npy')
        m=np.load('/home/attale00/Desktop/classifiers/patches/meanIMP1.npy')
        X1=X-m
        data=np.dot(X1,W.T)    
    
    for i in range(len(fileNames)):
            testSet.data[i].extend(data[i,:])
    

    #strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = orientations, cells_per_block=(3,3),pixels_per_cell=(24,8),maskFromAlpha=False)
    fg.getColorHistogram(testSet,(0,40,40,80),path=path_ea,ending='.png',colorspace='lab',bins=bins)
    #fg.getImagePatchStat(testSet,path=path_ea,patchSize=(4,12))
    #fg.getImagePatchStat(testSet,path='/local/attale00/AFLW_cropped/mouth_img_error/',patchSize=(4,12))
  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 40,min_split=15,max_depth=70)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        _classifyWithOld(path,testSet,mode)
    elif mode in ['c']:
        print 'cross validation of data'
        rValues = classifierUtils.dissectedCV(rf,testSet)
        pickle.dump(rValues,open('patches_cv_combined','w'))
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
def main(nJobs = 1):

    path = '/local/attale00'
    
    fileNames = utils.getAllFiles(path+'/targets');
    
    attribute = 'mouth'
    
    attribute_values = utils.parseLabelINIFile(path+'/mouth_labels/labels.ini',attribute);
    
    print('------------Attribute: \t'+attribute+' ---------------')
    for i in attribute_values:
        print('Value: \t'+i)
        
    print('----------------------------')
    print('----------parsing label files------')
    labs=utils.parseLabelFiles(path+'/mouth_labels','mouth',fileNames,cutoffSeq='.png',suffix='_face0.labels')
    print('-----computing Features-----')
    #make 10 bin hist for each mouth
    #roi = (40,200,100,200)
    roi = (50,190,110,402) 
    roi2=(128,256,0,256)
    mouthSet = fg.dataContainer(labs)
    #fg.getHistogram(20,roi,hrange=(0,255),dataC = mouthSet,path = path+'/extracted/gradients/Direction/',ending='_0.png')
    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
    m=cv2.resize(np.uint8(eM),(256,256));
    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    dil = cv2.dilate(m,strel)
    
    
    m=dil>0;
#    em=m[roi[0]:roi[1],roi[2]:roi[3]]
#    m= m !=True
  
    fg.getHogFeature(mouthSet,roi2,path=path+'/targets/grayScaleSmall/',ending=None,extraMask = None)
    #fg.getPixelValues(mouthSet,roi,path=path_ea+'/',ending='_0.png',mask =m, scaleFactor = 10)    
    #fg.getColorHistogram(mouthSet,roi,path=path_ea+'/',ending='_0.png',colorspace=None,range=(1.0,255.0),bins = 20)   
    mouthSet.targetNum=map(utils.mapMouthLabels2Two,mouthSet.target)
    n_estimators = range(10,180,20);
    max_features = range(2,22,2)
    max_depth = range(5,40,5)
    max_depth.append(100)
    
    min_split = range(1,20,2)
    
    score=[]
    var = []
    for n in n_estimators:    
        scoresRF = _crossValidate(mouthSet, max_depth = 20,n_estimators =n ,nJobs = nJobs,max_features = np.sqrt(len(mouthSet.data[0])),min_split = 5)
   
        score.append(scoresRF.mean())
        var.append(scoresRF.std())
        
    print scoresRF
    plt.errorbar(n_estimators,score,yerr=var)
    plt.xlabel('number of trees')
    plt.ylabel('cross val score')
    
#    mouthSet2 = fg.dataContainer(labs)
#    roi=(256,512,0,512)
#    fg.getColorHistogram(mouthSet2,roi,path=path+'/targets/',ending=None,colorspace='lab',range=(1.,255.0),bins = 20)
#    mouthSet2.targetNum=map(utils.mapMouthLabels2Two,mouthSet2.target)
#    score=[]
#    var = []
#    for n in n_estimators:    
#        scoresRF = _crossValidate(mouthSet2, max_depth = 20,n_estimators =n ,nJobs = nJobs,max_features = np.sqrt(len(mouthSet2.data[0])),min_split = 5)
#   
#        score.append(scoresRF.mean())
#        var.append(scoresRF.std())
#        
#    print scoresRF
#    plt.errorbar(n_estimators,score,yerr=var)
#    plt.xlabel('number of trees')
#    plt.ylabel('cross val score')
    
     

#    fg.getColorHistogram(mouthSet,roi,path=path_ea+'/',ending='_0.png',colorspace='lab',range=(100.0,255.0),bins = 20)
#
#    score=[]
#    var = []
#    for n in n_estimators:    
#        scoresRF = _crossValidate(mouthSet, max_depth = 20,n_estimators =n ,nJobs = nJobs,max_features = np.sqrt(len(mouthSet.data[0])),min_split = 5)
#   
#        score.append(scoresRF.mean())
#        var.append(scoresRF.std())
#        
#    print scoresRF
#    plt.errorbar(n_estimators,score,yerr=var)
#    plt.xlabel('number of trees')
#    plt.ylabel('cross val score')
#    plt.legend(['HOG','LAB','HOG+LAB'])
#    plt.title('20bins')
    
    plt.show()        
    
    #classifier
    #linSVM = svm.SVC(kernel = 'linear',C=1)
    
    #this takes forever: check if that can be true
    #scoresLinSVM = cross_validation.cross_val_score(linSVM,data,y=targetNum,n_jobs=-1,verbose = 1)
    
    #implement random forest classifier with verbosity level
#    roi_narrow=(60,160,130,382)
#    extraMask = np.load('/home/attale00/Desktop/emptyMouthMask.npy')
#    
#    fg.getMeanAndVariance(roi_narrow,mouthSet,path_ea+'/',extraMask = extraMask,ending='_0.png')
#    scoresRF = _crossValidate(mouthSet,max_features = 13)
#    print 'Orientation and mean and cov' +str(scoresRF)    
    return
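
#
# Sketch (assumption): _crossValidate is project code; given the sklearn vintage used
# elsewhere in this file (sklearn.cross_validation, see the commented-out SVM line
# above) it most likely builds a random forest and returns the per-fold scores from
# cross_val_score. Illustrative version (newer sklearn would import cross_val_score
# from sklearn.model_selection instead):
from sklearn.ensemble import RandomForestClassifier
from sklearn import cross_validation

def _crossValidate_sketch(dataSet, max_depth=20, n_estimators=100, nJobs=1,
                          max_features='sqrt', min_split=5):
    rf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth,
                                max_features=max_features,
                                min_samples_split=min_split)
    return cross_validation.cross_val_score(rf, dataSet.data, y=dataSet.targetNum,
                                            cv=5, n_jobs=nJobs)
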
def main(mode):
    path = '/local/attale00/AFLW_ALL/'
    path_ea = '/local/attale00/AFLW_cropped/multiPIE_cropped3/'
    

   
    allLabelFiles =  utils.getAllFiles('/local/attale00/a_labels')
    
    labeledImages = [i[0:16]+'.png' for i in allLabelFiles]
    
    #labs=utils.parseLabelFiles(path+'/Multi-PIE/labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    labs=utils.parseLabelFiles('/local/attale00/a_labels','mouth',labeledImages,cutoffSeq='.png',suffix='_face0.labels')
    
    #labs=dict((k,v) for (k,v) in labs.iteritems() if not v.startswith('narr'))
    
#    
    
#    minr = 10000;
#    for f in fileNames:
#        im = cv2.imread(path_ea+f,-1)
#        if im.shape[0]!=40 or im.shape[1]!=120:
#            print f
#            print im.shape
#        minr = minr if im.shape[0]>= minr else im.shape[0]
#    
#    print minr
#    
    
   
    
    
    testSet = fg.dataContainer(labs)
    
    
    #roi=(0,37,0,115)
    roi=None
    #roi=(44,84,88,168)    
    
    
#    eM=np.load('/home/attale00/Desktop/mouthMask.npy')
#    m=cv2.resize(np.uint8(eM),(256,256));
#    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#    dil = cv2.dilate(m,strel)
#    
#    m=dil>0;


            
 
    X=fg.getAllImagesFlat(path_ea,testSet.fileNames,(40,120),roi=roi)
 
        
    # perform ICA
    if mode not in ['s','v']:
        ica = FastICA(n_components=100,whiten=True)
        ica.fit(X)
        meanI=np.mean(X,axis=0)
        X1=X-meanI
        data=ica.transform(X1)
        filters=ica.components_
        
    elif mode in ['s','v']:
        W=np.load('/home/attale00/Desktop/classifiers/patches/filter2.npy')
        m=np.load('/home/attale00/Desktop/classifiers/patches/meanI2.npy')
        X1=X-m
        data=np.dot(X1,W.T)    
    
    for i in range(len(testSet.fileNames)):
            testSet.data[i].extend(data[i,:])

    strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    fg.getHogFeature(testSet,roi,path=path_ea,ending='.png',extraMask = None,orientations = 5, pixels_per_cell=(24,8),cells_per_block=(3,3),maskFromAlpha=False)
    fg.getColorHistogram(testSet,roi,path=path_ea,ending='.png',colorspace='lab',bins=20)
    #fg.getImagePatchStat(testSet,path='/local/attale00/AFLW_cropped/mouth_img_error_multiPie/',patchSize =(4,12))
  
    #pca
#    n_samples, n_features = X.shape
#
#    mean_ = np.mean(X, axis=0)
#    X -= mean_
#    U, S, V = linalg.svd(X)
#    explained_variance_ = (S ** 2) / n_samples
#    explained_variance_ratio_ = (explained_variance_ /explained_variance_.sum())
#    K=V / S[:, np.newaxis] * np.sqrt(n_samples)
#    filters=K[:100]
#    data=np.dot(X,filters.T)    
    
   
            
    
    testSet.targetNum=map(utils.mapMouthLabels2Two,testSet.target)
    rf=classifierUtils.standardRF(max_features = 27,min_split=13,max_depth=40)
    #rf = svm.NuSVC()
    #rf = linear_model.SGDClassifier(loss='perceptron', eta0=1, learning_rate='constant', penalty=None)    
    if mode in ['s','v']:
        print 'Classifying with loaded classifier'
        
        classifierUtils.classifyWithOld(path,testSet,mode,clfPath = '/home/attale00/Desktop/classifiers/patches/rfICAHogColor')
    elif mode in ['c']:
        print 'cross validation of data'
        classifierUtils.dissectedCV(rf,testSet)
    elif mode in ['save']:
        print 'saving new classifier'
        _saveRF(testSet,rf,filters=filters,meanI=meanI)
    else:
        print 'not doing anything'
def main(mode):
    path = "/local/attale00/AFLW_ALL/"
    path_mirrored = "/local/attale00/AFLW_cropped/mirrored/"

    poses = aflwPose()
    binmax = 100
    stepsize = 40
    # include +binmax so the bin edges are symmetric about 0; the mirrored counterpart
    # of bins[i] is then bins[nBins-1-i], which the mirrored part below relies on
    bins = range(-binmax, binmax + stepsize, stepsize)
    poseDict = splitByPose(poses, bins=bins)

    testSets = {}
    # original Part
    for k, v in poseDict.iteritems():
        fn = [i[0] for i in v]

        labs = utils.parseLabelFiles(
            path + "/labels/labels", "mouth_opening", fn, cutoffSeq=".png", suffix="_face0.labels"
        )

        testSets[k] = fg.dataContainer(labs)
        fg.getHogFeature(
            testSets[k],
            None,
            path=path_ea,
            ending=".png",
            extraMask=None,
            orientations=9,
            cells_per_block=(3, 3),
            pixels_per_cell=(24, 8),
            maskFromAlpha=False,
        )

    # mirrored part
    testSetsM = {}
    nBins = len(bins)
    for k, v in poseDict.iteritems():
        binNumber = bins.index(k)
        oppositeBin = bins[nBins - 1 - binNumber]

        fn = [i[0] + "M" for i in poseDict[oppositeBin]]
        labs = utils.parseLabelFiles(
            path + "/labels/labels", "mouth_opening", fn, cutoffSeq=".pngM", suffix="_face0.labels"
        )
        testSetsM[k] = fg.dataContainer(labs)
        fg.getHogFeature(
            testSetsM[k],
            None,
            path=path_mirrored,
            ending=".png",
            orientations=9,
            cells_per_block=(3, 3),
            pixels_per_cell=(24, 8),
        )

    for k, v in poseDict.iteritems():
        testSet = testSets[k]
        testSet.addContainer(testSetsM[k])

        testSet.targetNum = map(utils.mapMouthLabels2Two, testSet.target)
        rf = classifierUtils.standardRF(max_features=40, min_split=12, max_depth=70)

        if mode in ["s", "v"]:
            print "Classifying with loaded classifier"
            _classifyWithOld(path, testSet, mode)
        elif mode in ["c"]:
            print "cross validation of data"
            rValues = classifierUtils.dissectedCV(rf, testSet)
            plt.title("Pose: {}, n: {}".format(k, len(testSet.data)))
            # pickle.dump(rValues,open('patches_pose_hog_{}'.format(k),'w'))
        elif mode in ["save"]:
            print "saving new classifier"
            _saveRF(testSet, rf, identifier=k)