Example #1
import cv2
from pygame import mixer
from label_detect import classify_face


def work():
    print('in work')
    mixer.init()
    sound = mixer.Sound('alarm.wav')
    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    score = 0
    thicc = 2
    while True:
        print('in loop')
        ret, frame = cap.read()
        if frame is not None:
            height, width = frame.shape[:2]
            # Classify the frame and sound the alarm unless a mask is detected
            label = classify_face(frame)
            if label == 'with_mask':
                print("No Beep")
            else:
                sound.play()
                print("Beep")
            cv2.putText(frame, str(label), (100, height - 20), font, 1,
                        (255, 255, 255), 1, cv2.LINE_AA)
            cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    return 'Closed'
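
A minimal usage sketch for the function above (an assumption, not part of the original project): it simply calls work() from a script entry point, given that 'alarm.wav' sits next to the script and a webcam is available at index 0.

if __name__ == '__main__':
    # Blocks until 'q' is pressed in the preview window, then prints 'Closed'
    status = work()
    print(status)
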
Example #2
import cv2
import numpy as np
from pygame import mixer
import time
from label_detect import classify_face


mixer.init()
sound = mixer.Sound('alarm.wav')


#face = cv2.CascadeClassifier('/media/preeth/Data/prajna_files/Drowsiness_detection/haar_cascade_files/haarcascade_frontalface_alt.xml')
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
score = 0
thicc = 2
#faces = face.detectMultiScale(gray,minNeighbors=5,scaleFactor=1.1,minSize=(25,25))
while True:
    ret, frame = cap.read()
    if not ret:
        # Stop if the camera did not return a frame
        break
    height, width = frame.shape[:2]
    label = classify_face(frame)
    if label == 'with_mask':
        print("No Beep")
    else:
        sound.play()
        print("Beep")
    cv2.putText(frame, str(label), (100, height - 20), font, 1,
                (255, 255, 255), 1, cv2.LINE_AA)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
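
Both examples import classify_face from label_detect and assume it takes a BGR frame and returns a label string such as 'with_mask'. A hypothetical stand-in for label_detect.py, useful only for exercising the alarm logic without the trained model, could be as small as:

# label_detect.py -- hypothetical stub, not the project's real classifier
def classify_face(frame):
    # The real module presumably runs a mask-detection model on the frame;
    # this stub always reports a mask, so the alarm stays silent.
    return 'with_mask'
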
def detect_face(frameCount):
	# grab global references to the video stream, output frame, and
	# lock variables
	global vs, outputFrame, lock

	# initialize the motion detector and the total number of frames
	# read thus far
	md = Recog(accumWeight=0.1)
	total = 0

	# loop over frames from the video stream
	frame_counter = 0 
	face_frame_counter = 0
	previous_state = 0
	previous_id = 'unknown'
	counter_id = 0
	flag = False
	mainFlag = False
	flagTemp = 0
	finalFlag = True
	while True:
		ret, frame = vs.read()
		if not ret:
			# Skip iterations where the stream did not return a frame
			continue

		height, width = frame.shape[:2]
		label = classify_face(frame)
		if label == 'with_mask':
			print("Mask detected")
		else:
			print("No mask detected")
		cv2.putText(frame, str(label), (100, height - 20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

		try:
			# th is only defined after the first recognition thread has been
			# started, so guard against a NameError on early frames
			x = th.is_alive()
			if not x and flagTemp == 0:
				flagTemp = 1
				frame_counter = 0
				face_frame_counter = 0
				previous_state = 0
				previous_id = 'unknown'
				counter_id = 0
				flag = False
				mainFlag = False
		except NameError:
			pass
		if finalFlag:

			# read the next frame from the video stream, resize it,
			# convert the frame to grayscale, and blur it
			
			frame_counter += 1

			msg = ""
			#frame = imutils.resize(frame, width=400)
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			gray = cv2.GaussianBlur(gray, (7, 7), 0)

			# grab the current timestamp and draw it on the frame
			timestamp = datetime.datetime.now()
			cv2.putText(frame, timestamp.strftime(
				"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
				cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

			# if the total number of frames has reached a sufficient
			# number to construct a reasonable background model, then
			# continue to process the frame
			if total > frameCount:
				# detect motion in the image
				faces = faceCascade.detectMultiScale(
					gray,
					scaleFactor=1.2,
					minNeighbors=5,
					minSize=(int(minW), int(minH)),
				)
				
				
				#print(faces)
				if len(faces) > 0:
					# print("face detected")
					if previous_state == 1:
						face_frame_counter += 1

					previous_state = 1
				else:
					previous_state = 0
					face_frame_counter = 0
				

				if face_frame_counter > 70:
					# start the recognition sequence
					print('face recognized')
					for (x, y, w, h) in faces:
						cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
						id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
						if counter_id > 10:
							print(previous_id)
							tempstr = 'Hello ' + names[id]
							finalFlag = False
							mainFlag = True
							flagTemp = 0
							a = 0
							th = threading.Thread(target=flow, args=(names[id],))
							th.start()
							flag = True

						# a lower confidence value indicates a closer match
						if confidence < 100:
							id = names[id]
							if id == previous_id:
								counter_id += 1
							previous_id = id
						else:
							id = 'unknown'
							previous_id = id
							counter_id = 0
					
					#face_frame_counter = 0
				
			total += 1

		# acquire the lock, set the output frame, and release the lock
		with lock:
			outputFrame = frame.copy()
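
detect_face declares global vs, outputFrame and lock but never creates them, and it loops forever, so it is presumably launched on a background thread after those globals are set up elsewhere in the project. A minimal sketch of that wiring, where every name and value is an assumption except the three global names the function itself declares:

import threading
import cv2

vs = cv2.VideoCapture(0)   # video stream consumed by vs.read() in the loop
outputFrame = None         # latest annotated frame, written under the lock
lock = threading.Lock()    # guards concurrent access to outputFrame

# Run the detector in the background; the frameCount argument (32 here is an
# arbitrary choice) is the warm-up threshold checked via `total > frameCount`.
t = threading.Thread(target=detect_face, args=(32,), daemon=True)
t.start()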