def preprocess_image(src):
    """Prepare a camera frame for the face-matching network.

    Detects faces, crops to the first detection (if any), resizes the
    result to the network's input dimensions, converts BGR to RGB, and
    whitens it.

    Returns:
        Tuple of (preprocessed image, list of detected face rectangles).
    """
    detections = FaceDetector.detect_faces(src)
    # Only a single face is expected: crop to the first detected region.
    # When nothing is detected, fall through and process the raw frame.
    if detections:
        left, top, right, bottom = detections[0]
        src = src[top:bottom, left:right]
    # Resize to the network input size, then convert OpenCV's BGR channel
    # order to RGB and whiten before handing the image to the network.
    resized = cv2.resize(
        src,
        (VideoFaceMatcher.NETWORK_WIDTH, VideoFaceMatcher.NETWORK_HEIGHT))
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    whitened = VideoFaceMatcher.whiten_image(rgb)
    return whitened, detections
def face_detection_image():
    """Flask endpoint: run face detection on the posted image bytes and
    stream the annotated result back to the client as a PNG.

    Returns:
        A Flask response containing the detection result encoded as PNG.
    """
    # Initialize the FaceDetector with its default values.
    detector = FaceDetector()
    # Run the face detection on the raw request body.
    detection_result = detector.detect_faces(request.data)
    # Wrap the numpy array as a PIL image and swap OpenCV's BGR channel
    # order to RGB so the colors render correctly.
    img = Image.fromarray(detection_result.astype('uint8'))
    b, g, r = img.split()
    img = Image.merge("RGB", (r, g, b))
    # Serialize the image into an in-memory buffer for the response.
    file_object = io.BytesIO()
    img.save(file_object, 'PNG')
    file_object.seek(0)
    # BUG FIX: the payload is PNG, so advertise 'image/png' rather than
    # the invalid 'image/jpg' mimetype the original sent.
    # NOTE: the original `except Exception as error: raise error` wrapper
    # was a no-op that only mangled the traceback origin; exceptions now
    # propagate naturally to Flask's error handling.
    return send_file(file_object,
                     attachment_filename='face_detection.png',
                     mimetype='image/png')
import cv2
# import tensorflow as tf
from FaceDetector import FaceDetector
import matplotlib.pyplot as plt

# Play a video file and draw a rectangle around every detected face.
cap = cv2.VideoCapture('happiness.mov')
detector = FaceDetector('haarcascade_frontalface.xml')

try:
    while True:
        ret, frame = cap.read()
        if frame is None:
            # BUG FIX: the original looped forever printing 'None' once the
            # video ended; report it once and stop instead.
            print('None')
            break
        faces = detector.detect_faces(frame, show_image=True)
        # (faces[:][:] in the original was just a redundant double copy.)
        print(faces)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow('frame', frame)
        # 41 ms delay ~= 24 fps playback; allow quitting early with 'q'.
        if cv2.waitKey(41) & 0xFF == ord('q'):
            break
finally:
    # BUG FIX: the original never released the capture or closed the
    # display windows.
    cap.release()
    cv2.destroyAllWindows()