Example #1
def detectFacesOnScreen():
	"""Detects facial emotions of faces on the computer screen"""
	if cnn.loadModel():
		with mss.mss() as sct:
			# Capture the top-left 800x900 region of the primary screen
			monitor = {'top': 0, 'left': 0, 'width': 800, 'height': 900}
			while True:
				frame = np.array(sct.grab(monitor))
				# Returns face bounding boxes and a grayscale copy of the frame
				coordinates, gray = d.detectFace(frame, True)
				for (x, y, w, h) in coordinates:
					# Crop each face and resize it to the CNN's input size
					face = cv2.resize(gray[y:y+h, x:x+w], d.FACE_DIMENSIONS)
					# Draw the predicted emotion label onto the frame
					outputProcessing(frame, x, y, w, h, cnn.predictImageLables(face)[0])
				cv2.imshow('frame', frame)
				# Press 'q' to stop capturing
				if cv2.waitKey(1) & 0xFF == ord('q'):
					cv2.destroyAllWindows()
					break
	else:
		print("Invalid Model path " + cnn.MODEL_PATH)
Example #2
def detectFacesInWebcam():
	"""Detects facial emotions in webcam feed"""
	if cnn.loadModel():
		cap = cv2.VideoCapture(0)
		while cap.isOpened():
			ret, frame = cap.read()
			if not ret:
				# Stop if no frame could be read from the webcam
				break
			# Returns face bounding boxes and a grayscale copy of the frame
			coordinates, gray = d.detectFace(frame, True)
			for (x, y, w, h) in coordinates:
				face = cv2.resize(gray[y:y+h, x:x+w], d.FACE_DIMENSIONS)
				outputProcessing(frame, x, y, w, h, cnn.predictImageLables(face)[0])
			cv2.imshow('frame', frame)
			# Press 'q' to quit
			if cv2.waitKey(25) & 0xFF == ord('q'):
				break
		cap.release()
		cv2.destroyAllWindows()
	else:
		print("Invalid Model path " + cnn.MODEL_PATH)
Example #3
def detectFacesInVideo(videoPath):
	"""Plays the video in the path with facial emotion labels"""
	if cnn.loadModel():
		if os.path.exists(videoPath):
			cap = cv2.VideoCapture(videoPath)
			while True:
				ret, frame = cap.read()
				if ret:
					# Returns face bounding boxes and a grayscale copy of the frame
					coordinates, gray = d.detectFace(frame, True)
					for (x, y, w, h) in coordinates:
						face = cv2.resize(gray[y:y+h, x:x+w], d.FACE_DIMENSIONS)
						outputProcessing(frame, x, y, w, h, cnn.predictImageLables(face)[0])
					cv2.imshow('frame', frame)
					# Press 'q' to quit early
					if cv2.waitKey(25) & 0xFF == ord('q'):
						break
				else:
					# End of the video stream
					break
			cap.release()
			cv2.destroyAllWindows()
		else:
			print("Invalid video path " + videoPath)
	else:
		print("Invalid Model path " + cnn.MODEL_PATH)