def testCam(faceXMLpath):
    #faceXMLpath = "/home/bolei/code/opencv-2.4.7/data/haarcascades/haarcascade_frontalface_alt.xml"
    myDetector = faceDetector(faceXMLpath)
    capture = myDetector.openCam()
    if capture:
        while 1:
            # retrieveCam() is a method of the detector; calling it bare would raise a NameError
            face_set, img_rectangle = myDetector.retrieveCam()
            if face_set != 0:
                cv.ShowImage("result", img_rectangle)
            # a WaitKey call is needed for the HighGUI window to refresh and to allow exit
            if cv.WaitKey(10) >= 0:
                break
def testImg(faceXMLpath):
    #faceXMLpath = "/home/bolei/code/opencv-2.4.7/data/haarcascades/haarcascade_frontalface_alt.xml"
    myDetector = faceDetector(faceXMLpath)
    for i in range(1, 22):
        input_name = '/home/bolei/Pictures/face/' + str(i) + '.jpg'
        face_set, img_rectangle = myDetector.detectImg(input_name)
        if face_set != 0:
            print face_set
            cv.ShowImage("result", img_rectangle)
            cv.WaitKey()
def pipelineCam():
    tagEmotion = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
                  4: 'sad', 5: 'surprise', 6: 'neutral'}
    print 'load facedetector...'
    faceXMLpath = "/data/vision/fisher/data1/face_celebrity/haarcascades/haarcascade_frontalface_alt.xml"
    myDetector = faceDetector(faceXMLpath)

    ## reconstruct model
    print "loading trained deep learning model..."
    model = loadModel('train3040.pkl.cpu')
    X = model.get_input_space().make_batch_theano()
    Y = model.fprop(X)
    from theano import tensor as T
    y = T.argmax(Y, axis=1)
    from theano import function
    f = function([X], y)

    capture = myDetector.openCam()
    if capture:
        while 1:
            face_set, img_rectangle = myDetector.retrieveCam()
            print img_rectangle
            if face_set is not None:
                # pad the detected face vectors to a fixed batch of 500 rows,
                # since the trained model expects batches of that size
                xx = np.array(face_set)
                x_500 = np.concatenate((xx, np.zeros((499, xx.shape[1]), dtype=xx.dtype)), axis=0)
                x_input = DataPylearn2([x_500, np.ones(500)], (48, 48, 1))
                x_arg = x_input.X
                if X.ndim > 2:
                    x_arg = x_input.get_topological_view(x_arg)
                y_pred = f(x_arg.astype(X.dtype))
                # only the first row of the batch corresponds to a real detected face
                print tagEmotion[y_pred[0]]
                cv.ShowImage("result", img_rectangle)
                print img_rectangle.shape
                cv.WaitKey()
            if cv.WaitKey(10) >= 0:
                break
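# A hedged refactoring sketch (not part of the original code): pipelineCam() above and
# pipelineImg() below repeat the same classification steps, so the shared logic could be
# factored into one helper. The name classifyFaces and its signature are hypothetical;
# the body assumes np (numpy), DataPylearn2 from DBL_util, and a compiled Theano
# function f with its symbolic batch X, all built exactly as in the pipelines.
def classifyFaces(f, X, face_set, tagEmotion):
    # pad the detected face vectors to a fixed batch of 500 rows, as the model expects
    xx = np.array(face_set)
    x_500 = np.concatenate((xx, np.zeros((499, xx.shape[1]), dtype=xx.dtype)), axis=0)
    x_input = DataPylearn2([x_500, np.zeros(500)], (48, 48, 1))
    x_arg = x_input.X
    if X.ndim > 2:
        # convolutional input spaces expect the (48, 48, 1) topological layout
        x_arg = x_input.get_topological_view(x_arg)
    y_pred = f(x_arg.astype(X.dtype))
    # only the first row corresponds to a real detected face; the rest is zero padding
    return tagEmotion[y_pred[0]]
# Usage inside the loops would then reduce to: print classifyFaces(f, X, face_set, tagEmotion)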
def pipelineImg():
    tagEmotion = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
                  4: 'sad', 5: 'surprise', 6: 'neutral'}
    print 'load facedetector...'
    faceXMLpath = "/data/vision/fisher/data1/face_celebrity/haarcascades/haarcascade_frontalface_alt.xml"
    myDetector = faceDetector(faceXMLpath)

    #img_folder = "/scratch/face/thumbnails_features_deduped_publish/thumbnails_features_deduped_publish/bill clinton/"
    #img_folder = "/data/vision/billf/manifold-learning/DL/Data/pubfig/images/"
    img_folder = "/data/vision/fisher/data1/face_celebrity/thumbnails_features_deduped_publish/thumbnails_features_deduped_publish/bill gates/"
    imgs = os.listdir(img_folder)
    print len(imgs)

    ## reconstruct model
    print "loading trained deep learning model..."
    """ old way to load model
    FF = 'train.pkl'
    a = cPickle.load(open(FF))
    X = a.model.get_input_space().make_batch_theano()
    Y = a.model.fprop(X)
    from theano import tensor as T
    y = T.argmax(Y, axis=1)
    from theano import function
    f = function([X], y)
    for i in range(len(imgs)):
        input_name = img_folder + imgs[i]
        print imgs[i]
        if input_name.endswith('jpg'):
            face_set, img_rectangle = myDetector.detectImg(input_name)
            if face_set[0] != 0:
                print face_set
                xx = np.array(face_set)
                x_500 = np.concatenate((xx, np.zeros((499, xx.shape[1]), dtype=xx.dtype)), axis=0)
                from pylearn2.space import Conv2DSpace
                ishape = Conv2DSpace(shape=[48, 48], num_channels=1)
                from DBL_util import DataPylearn2
                x_input = DataPylearn2([x_500, np.zeros(500)], (48, 48, 1))
                x_arg = x_input.X
                if X.ndim > 2:
                    x_arg = x_input.get_topological_view(x_arg)
                y_pred = f(x_arg.astype(X.dtype))
                print tagEmotion[y_pred[0]]
                cv.ShowImage("result", img_rectangle)
                cv.WaitKey()
    """

    #model = loadModel('train3040.pkl.cpu')
    model = loadModel('train322010.pkl.cpu')
    X = model.get_input_space().make_batch_theano()
    Y = model.fprop(X)
    from theano import tensor as T
    y = T.argmax(Y, axis=1)
    from theano import function
    f = function([X], y)

    for i in range(len(imgs)):
        input_name = img_folder + imgs[i]
        if input_name.endswith('jpg'):
            face_set, img_rectangle = myDetector.detectImg(input_name)
            #face_set, img_rectangle = readWholeImg(input_name)
            if face_set is not None:
                xx = np.array(face_set)
                x_500 = np.concatenate((xx, np.zeros((499, xx.shape[1]), dtype=xx.dtype)), axis=0)
                x_input = DataPylearn2([x_500, np.zeros(500)], (48, 48, 1))
                x_arg = x_input.X
                if X.ndim > 2:
                    x_arg = x_input.get_topological_view(x_arg)
                y_pred = f(x_arg.astype(X.dtype))
                #print y_pred.shape
                print y_pred[0]
                print imgs[i], tagEmotion[y_pred[0]]
                cv.ShowImage("result", img_rectangle)
                cv.WaitKey(0)
if __name__ == '__main__':
    #testImg() # test input image
    #testCam() # test camera image
    #faceXMLpath = "/home/bolei/code/opencv-2.4.7/data/haarcascades/haarcascade_frontalface_alt.xml"
    faceXMLpath = "/afs/csail.mit.edu/u/b/bzhou/code/OpenCV-2.4.2/data/haarcascades/haarcascade_frontalface_alt.xml"
    myDetector = faceDetector(faceXMLpath)
    capture = myDetector.openCam()
    while 1:
        face_set, img_rectangle = myDetector.retrieveCam()
        cv.ShowImage("result", img_rectangle)
        print face_set
        if cv.WaitKey(10) >= 0:
            break
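# Hypothetical alternative entry point (a sketch, not in the original code): choosing a
# mode from the command line instead of editing __main__ by hand. The mode names below
# are assumptions; the pipeline functions themselves are defined above. Kept commented
# out so it does not run in addition to the camera loop in __main__.
#
#   import sys
#   if __name__ == '__main__':
#       mode = sys.argv[1] if len(sys.argv) > 1 else 'cam'
#       if mode == 'img':
#           pipelineImg()
#       else:
#           pipelineCam()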