import datetime
import json
import math
import time

import cv2
import face_recognition
import numpy as np
import tensorflow as tf
from flask import jsonify

import predict    # project module: face detector and age/gender models
import streaming  # project module: nearest_standing() and related helpers

# Module-level state used by the streaming functions below. The names come
# from the original code; the initial values here are assumptions.
cap = cv2.VideoCapture(0)  # shared camera handle (device index is assumed)
identification = 0  # running person ID used by capture_frame()
previous_id = np.zeros(shape=(1, 128))  # last face embedding seen by capture_frame()


def main(image_file):
    """
    Get an uploaded file, crop the faces, and collect the results
    :param image_file: input file
    :return: list of detected faces with their predictions, plus the original
             image in base64 format for display
    """
    img_input = np.frombuffer(image_file.read(), np.uint8)  # read the file into a byte buffer
    img = cv2.imdecode(img_input, cv2.IMREAD_COLOR)  # decode the buffer to a numpy array
    original_img_base64 = predict.nparray_to_base64(img)  # original image in base64 format
    results = []
    detected_faces = predict.face_detector(img)  # detect faces in the image
    if detected_faces:
        for i, face in enumerate(detected_faces):
            cropped_face = predict.nparray_to_base64(face)  # for the <img> tag in HTML
            prob_age = predict.predict_age(face)  # call the model to predict age
            age_data = [math.floor(100 * x) for x in prob_age[0]]  # probabilities to percentages
            grouped_age = np.argmax(prob_age[0])  # index of the most probable age group
            if grouped_age == 0:  # map the group index to a decade range
                displayed_age = "0 ~ 9 years old"
            else:
                displayed_age = str(grouped_age) + "0 ~ " + str(grouped_age) + "9 years old"
            prob_gender = predict.predict_gender(face)  # call the model to predict gender
            if prob_gender[0][0] > 0.5:
                displayed_gender = "Male"
            else:
                displayed_gender = "Female"
            male_prob = math.floor(100 * prob_gender[0][0])  # probability to percentage
            female_prob = 100 - male_prob
            results.append({
                'index_gender': 'canvas_gender_' + str(i),
                'index_age': 'canvas_age_' + str(i),
                'cropped_image': cropped_face,
                'target_gender': displayed_gender,
                'target_age': displayed_age,
                'prob_male': male_prob,
                'prob_female': female_prob,
                'prob_age': age_data,
            })
    else:
        print("Error: cannot detect any human faces in the picture")
    return original_img_base64, results
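# A minimal sketch of wiring main() to an upload endpoint. The Flask app,
# route, template, and form field names below are hypothetical illustrations,
# not part of the original module; the app object is reused by the route
# sketches further down.
from flask import Flask, render_template, request

app = Flask(__name__)  # hypothetical app object for the sketches in this file


@app.route('/upload', methods=['POST'])
def upload():
    image_file = request.files['image']  # file field name is an assumption
    original_img, faces = main(image_file)
    return render_template('result.html', original_img=original_img, results=faces)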
def main_stream():
    """
    Stream annotated video frames to an <img> tag
    :return: flow of video frames as a multipart JPEG stream
    """
    # ID verification
    previous_id = np.zeros(shape=(1, 128))
    detected_id_threshold = -0.9  # Keras cosine similarity is negated: -1 means identical
    id_count = 0
    frame_rate = 25  # throttle the frame rate from the camera
    prev = 0
    alpha = 1.5  # margin around the detected face
    while True:
        time_elapsed = time.time() - prev
        ret, image = cap.read()  # get a video frame
        if not ret:
            print("Error: failed to capture image")
            break
        if time_elapsed > 1. / frame_rate:
            prev = time.time()
            _, detections = predict.face_detector(image)  # detect faces in the frame
            detected, crop_face, top_left, bottom_right = streaming.nearest_standing(
                image, detections, alpha)
            content = ""
            if detected:
                vector_face = face_recognition.face_encodings(crop_face)
                if vector_face:
                    age_prob = predict.predict_age_id(crop_face)
                    gender_prob = predict.predict_gender(crop_face)
                    text_gender = "M" if gender_prob[0][0] > 0.5 else "F"
                    text_age = str(np.argmax(age_prob[0]))
                    detected_id = tf.keras.losses.cosine_similarity(
                        previous_id, vector_face[0]).numpy()
                    if detected_id > detected_id_threshold:  # similarity too low: new person
                        id_count += 1
                    previous_id = vector_face[0]
                    content = "G: " + text_gender + ", R: " + text_age + ", ID: " + str(id_count)
                image = draw_label(image, top_left, bottom_right, content)  # helper defined elsewhere
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   cv2.imencode('.jpg', image)[1].tobytes() + b'\r\n')
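# A minimal sketch of serving main_stream() as an MJPEG stream, reusing the
# hypothetical `app` above; the route name is an assumption. The boundary in
# the mimetype must match the b'--frame' marker yielded by the generator.
from flask import Response


@app.route('/video_feed')
def video_feed():
    return Response(main_stream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')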
def generate_api_stream():
    """
    Stream results as JSON
    :return: flow of per-frame predictions as JSON strings
    """
    # ID verification
    previous_id = np.zeros(shape=(1, 128))
    detected_id_threshold = -0.9
    id_count = 0
    frame_rate = 25  # throttle the frame rate from the camera
    prev = 0
    alpha = 1.5
    while True:
        time_elapsed = time.time() - prev
        ret, image = cap.read()  # get a video frame
        if not ret:
            print("Error: failed to capture image")
            break
        if time_elapsed > 1. / frame_rate:
            prev = time.time()
            _, detections = predict.face_detector(image)  # detect faces in the frame
            detected, crop_face, top_left, bottom_right = streaming.nearest_standing(
                image, detections, alpha)
            content = {}
            if detected:
                vector_face = face_recognition.face_encodings(crop_face)
                if vector_face:
                    age_prob = predict.predict_age_id(crop_face)
                    gender_prob = predict.predict_gender(crop_face)
                    text_gender = "M" if gender_prob[0][0] > 0.5 else "F"
                    text_age = str(np.argmax(age_prob[0]))
                    detected_id = tf.keras.losses.cosine_similarity(
                        previous_id, vector_face[0]).numpy()
                    if detected_id > detected_id_threshold:  # similarity too low: new person
                        id_count += 1
                    previous_id = vector_face[0]
                    content = {
                        'gender': text_gender,
                        'age': text_age,
                        'id': id_count,  # the local counter, not the global `identification`
                        'time': datetime.datetime.now().strftime("%Y/%m/%d, %H:%M:%S"),
                        'vector': vector_face[0].tolist(),
                    }
            yield json.dumps(content)
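# tf.keras.losses.cosine_similarity returns the *negated* cosine similarity:
# -1 means identical direction and 0 means orthogonal, which is why the
# threshold above is negative. `detected_id > -0.9` fires when the new
# embedding matches the previous one with similarity below 0.9, i.e. when it
# looks like a different person. A small illustration (this function is ours,
# not part of the original code):
def _cosine_id_demo():
    a = np.array([[1.0, 0.0]])
    same = tf.keras.losses.cosine_similarity(a, np.array([1.0, 0.0])).numpy()
    other = tf.keras.losses.cosine_similarity(a, np.array([0.0, 1.0])).numpy()
    print(same)   # ~ -1.0: same direction, stays below -0.9, same ID kept
    print(other)  # ~  0.0: different direction, above -0.9, a new ID is assigned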
def capture_frame():
    """
    Capture one frame and predict
    :return: JSON response
    """
    global identification, previous_id
    # ID verification
    detected_id_threshold = -0.9
    alpha = 1.5
    ret, image = cap.read()  # get one video frame
    content = {}  # stays empty when capture or detection fails
    if not ret:
        print("Error: failed to capture image")
        return jsonify(content)
    _, detections = predict.face_detector(image)  # detect faces in the frame
    detected, crop_face, top_left, bottom_right = streaming.nearest_standing(
        image, detections, alpha)
    if detected:
        vector_face = face_recognition.face_encodings(crop_face)
        if vector_face:
            age_prob = predict.predict_age_id(crop_face)
            gender_prob = predict.predict_gender(crop_face)
            text_gender = "M" if gender_prob[0][0] > 0.5 else "F"
            text_age = str(np.argmax(age_prob[0]))
            detected_id = tf.keras.losses.cosine_similarity(
                previous_id, vector_face[0]).numpy()
            if detected_id > detected_id_threshold:  # similarity too low: new person
                identification += 1
            previous_id = vector_face[0]
            content = {
                'gender': text_gender,
                'age': text_age,
                'id': identification,
                'time': datetime.datetime.now().strftime("%Y/%m/%d, %H:%M:%S"),
                'vector': vector_face[0].tolist(),
            }
    return jsonify(content)
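# A minimal sketch of exposing capture_frame() as an endpoint, reusing the
# hypothetical `app` above. jsonify() needs an application context, which the
# route provides; the route name is an assumption.
@app.route('/capture')
def capture():
    return capture_frame()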
def generate_api_stream_db():
    """
    Stream results as JSON and write each record to a SQLite database
    :return: flow of per-frame predictions as JSON strings
    """
    # set up the database
    conn, path_to_db = setup_database()  # create a new SQLite database
    record_count = 0  # number of records written to the current database
    record_max = 100000  # maximum number of records per table
    # ID verification
    previous_id = np.zeros(shape=(1, 128))
    detected_id_threshold = -0.9
    id_count = 0
    frame_rate = 25  # throttle the frame rate from the camera
    prev = 0  # time counter
    alpha = 1.5  # margin around the detected faces
    while True:
        time_elapsed = time.time() - prev
        ret, image = cap.read()  # get a video frame
        if not ret:
            print("Error: failed to capture image")
            break
        if time_elapsed > 1. / frame_rate:
            prev = time.time()
            record_count += 1  # one record per processed frame
            if record_count == record_max:  # limit reached: roll over
                conn, path_to_db = setup_database()  # set up a fresh database
                record_count = 0  # restart the counter for the new database
            _, detections = predict.face_detector(image)  # detect faces in the frame
            detected, crop_face, top_left, bottom_right = streaming.nearest_standing(
                image, detections, alpha)
            content = {}
            if detected:
                vector_face = face_recognition.face_encodings(crop_face)
                if vector_face:
                    age_prob = predict.predict_age_id(crop_face)
                    gender_prob = predict.predict_gender(crop_face)
                    text_gender = "M" if gender_prob[0][0] > 0.5 else "F"
                    text_age = str(np.argmax(age_prob[0]))
                    detected_id = tf.keras.losses.cosine_similarity(
                        previous_id, vector_face[0]).numpy()
                    if detected_id > detected_id_threshold:  # similarity too low: new person
                        id_count += 1
                    previous_id = vector_face[0]
                    # collect the predictions into a dictionary
                    content = {
                        'gender': text_gender,
                        'age': text_age,
                        'id': id_count,
                        'vector': json.dumps(vector_face[0].tolist()),
                    }
                    # write the record to the database
                    create_record(conn, (content['gender'], content['age'],
                                         content['id'], content['vector']))
            yield json.dumps(content)  # stream the dictionary to the front end
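# setup_database() and create_record() are external helpers not shown in this
# module. A plausible minimal sketch, assuming a single `records` table whose
# schema matches the tuple passed to create_record() above (the table and
# column names are our assumptions, hence the _sketch suffixes):
import sqlite3


def setup_database_sketch():
    path_to_db = 'records_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '.db'
    conn = sqlite3.connect(path_to_db)
    conn.execute('CREATE TABLE IF NOT EXISTS records '
                 '(gender TEXT, age TEXT, id INTEGER, vector TEXT)')
    return conn, path_to_db


def create_record_sketch(conn, record):
    # record = (gender, age, id, vector-as-JSON-string)
    conn.execute('INSERT INTO records (gender, age, id, vector) VALUES (?, ?, ?, ?)', record)
    conn.commit()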