import cv2
import detect_face  # MTCNN face detector from the facenet repo
import sent         # project-local module providing Sentiment_Analysis / Transfer_learning


def process_video(frames, pnet, rnet, onet, sentiment_model):
    """Detect faces in each frame, classify the emotion on every 5th frame,
    and annotate frames with bounding boxes and labels."""
    crops = []
    crop_idcs = []
    new_frames = []
    score = 100
    emotion = 'NA'
    for i, frame in enumerate(frames):
        # minsize, threshold and factor are module-level MTCNN settings
        bbox, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet,
                                          threshold, factor)
        frame = frame.copy()
        try:
            for box in bbox:
                w = box[2] - box[0]
                h = box[3] - box[1]
                # Crop the detected face region
                crop_frame = frame[int(box[1]):int(box[1] + h),
                                   int(box[0]):int(box[0] + w)]
                # Run the (comparatively slow) classifier only on every 5th
                # frame; intermediate frames reuse the last prediction
                if i % 5 == 0:
                    emotion, score = sent.Sentiment_Analysis(crop_frame,
                                                             sentiment_model)
                # Label the face with the current emotion and confidence
                frame = cv2.putText(frame, '%s %.3f%%' % (emotion, score),
                                    (int(box[0]), int(box[1] - 5)),
                                    font, fontScale, fontColor, lineType)
                crops.append(crop_frame)
                crop_idcs.append(i)
                # Draw the bounding box around the face
                frame = cv2.rectangle(frame,
                                      (int(box[0]), int(box[1])),
                                      (int(box[0] + w), int(box[1] + h)),
                                      (0, 0, 255), 2)
        except Exception as e:
            print(e)
        new_frames.append(frame)
    return crops, new_frames, crop_idcs
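# A minimal usage sketch for process_video. It assumes the MTCNN nets come
# from the facenet repo's detect_face.create_mtcnn (TensorFlow 1.x API) and
# that 'input.mp4' is a placeholder path. The minsize/threshold/factor and
# font settings below are the module-level values process_video reads; the
# numbers shown are the commonly used MTCNN defaults.
import tensorflow as tf

minsize = 20                  # smallest face to detect, in pixels
threshold = [0.6, 0.7, 0.7]   # per-stage MTCNN score thresholds
factor = 0.709                # image-pyramid scale factor

font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
fontColor = (255, 255, 255)
lineType = 2

sess = tf.Session()
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
sentiment_model = sent.Transfer_learning()

# Read all frames from a (placeholder) input video into a list
cap = cv2.VideoCapture('input.mp4')
frames = []
while True:
    ok, f = cap.read()
    if not ok:
        break
    frames.append(f)
cap.release()

crops, annotated_frames, crop_idcs = process_video(frames, pnet, rnet, onet,
                                                   sentiment_model)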
# Webcam variant: Haar-cascade face detection with live emotion labelling.
# video_capture and faceCascade are assumed to be created beforehand
# (a minimal setup sketch follows this loop).

# Load the sentiment model once, outside the frame loop
model = sent.Transfer_learning()

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    # Draw a rectangle around each face and classify its emotion
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        frame_crop = frame[y:y + h, x:x + w]

        # Perform deep-learning emotion classification on the face crop
        Emotion, percentage = sent.Sentiment_Analysis(frame_crop, model)
        cv2.imshow('Crop', frame_crop)
        print(Emotion)

    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
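# A minimal sketch of the setup the loop above assumes (it belongs before
# the loop in a real script): the default webcam and OpenCV's bundled
# frontal-face Haar cascade. Adjust the cascade path if yours lives elsewhere.
video_capture = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')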