def predict_gender(name):
    """Count male/female faces in the image file *name* (under `source`).

    Returns a dict ``{'male': int, 'female': int, 'domain': str}`` where
    'domain' is the part of *name* before the first '-' (the whole name
    when no '-' is present).  On unreadable images the file is deleted,
    the name is queued in ``to_remove``, and zero counts are returned.
    """
    image_path = source + name

    # Domain = everything before the first '-' in the file name.
    m = re.search('.*(?=-)', name)
    found = m.group(0) if m else name
    result = {'male': 0, 'female': 0, 'domain': found}

    # Load the RGB image; on failure, delete the file and flag the name.
    try:
        rgb_image = load_image(image_path, grayscale=False)
    except Exception:  # FIX: was a bare except — don't swallow SystemExit/KeyboardInterrupt
        print('3. Doesn"t open')
        if os.path.isfile(image_path):
            os.remove(image_path)
        to_remove.append(name)
        return result

    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    # Face and gender detection.
    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print('no faces')
        to_remove.append(name)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
        except Exception:  # FIX: was bare except — skip crops cv2 cannot resize (e.g. empty slice)
            continue
        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        if gender_text == gender_labels[0]:
            result['female'] += 1
        else:
            result['male'] += 1
    return result
def predict():
    """POST handler: read the uploaded image from the request, classify it,
    and return the predicted food label plus probability as JSON.
    Any non-POST method gets the plain string "NIL".
    """
    # Guard clause: only POST carries a file to classify.
    if request.method != "POST":
        return "NIL"

    # Pull the uploaded file out of the multipart form data.
    upload = request.files["file"]
    app.logger.info(upload)
    payload = io.BytesIO(upload.read())

    app.logger.info(f"Loading image...")
    array = inference.load_image(payload)

    app.logger.info(f"Making prediction...")
    _, pred, prob = inference.make_prediction(model, array)
    pred = pred.replace("_", " ").title()

    app.logger.info(f"Predicted {pred} with {round(prob,2)} probability")
    return jsonify(food=pred, probability=str(round(prob, 3)))
def get_prediction():
    """Save the uploaded image, run ``inference`` on it, and return JSON
    carrying the result message and the preprocessed image as base64 PNG.

    NOTE(review): when the upload has an empty filename this falls through
    and implicitly returns None — presumably unreachable from the client;
    confirm against the caller.
    """
    image = request.files['file']
    if image.filename != '':
        # Timestamp suffix avoids clobbering concurrent uploads of the same name.
        fn = os.path.join(
            app.config['UPLOAD_FOLDER'],
            image.filename + str(datetime.now().time()),
        )
        image.save(fn)
        image = load_image(fn)
        # FIX: delete the temp file once it has been loaded — it leaked
        # before (the sibling get_prediction handler already removes it).
        os.remove(fn)
        res, preprocessed_image = inference(image)
        # Scale [0, 1] floats back to 0-255 and encode as a PNG in memory.
        preprocessed_image = Image.fromarray(
            np.uint8(preprocessed_image * 255)
        ).convert('RGB')
        buffer = BytesIO()
        preprocessed_image.save(buffer, format="PNG")
        myimage = buffer.getvalue()
        # str(bytes) renders as "b'...'"; [2:-1] strips the b'' wrapper.
        return jsonify(message=res, image=str(base64.b64encode(myimage))[2:-1])
def get_prediction():
    """Save the uploaded image, run the model on it, and return JSON with
    the result message and the annotated image as a base64-encoded PNG.

    NOTE(review): when the upload has an empty filename this falls through
    and implicitly returns None — presumably unreachable from the client;
    confirm against the caller.
    """
    image = request.files['file']
    if image.filename != '':
        # Timestamp suffix avoids clobbering concurrent uploads of the same name.
        fn = os.path.join(
            app.config['UPLOAD_FOLDER'],
            image.filename + str(datetime.now().time())
        )
        image.save(fn)
        image = load_image(fn)
        os.remove(fn)  # temp file no longer needed once loaded
        res_image, res = run_on_image(image)
        res_image = Image.fromarray(np.uint8(res_image)).convert('RGB')
        # FIX: removed dead code — an aspect-ratio computation whose only
        # consumer was a commented-out resize call.
        buffer = BytesIO()
        res_image.save(buffer, format="PNG")
        return_image = buffer.getvalue()
        # str(bytes) renders as "b'...'"; [2:-1] strips the b'' wrapper.
        return jsonify(message=res, image=str(base64.b64encode(return_image))[2:-1])
elif is_EPGA: EPGA_multi_model = Net(MODEL, 1, 12, 7, 5, 8, 2, False, False, None) EPGA_multi_model.load_weights(EPGA_path) else: EPA_multi_model = Net(MODEL, 1, 9, is_dropout, 7, 5, 8, 2) EPA_multi_model.load_weights(EPA_path) EPGA_multi_model.summary() emotion_window = [] gender_window = [] pose_window = [] age_window = [] # loading images rgb_image = load_image(image_path, grayscale=False) gray_image = load_image(image_path, grayscale=True) gray_image = np.squeeze(gray_image) gray_image = gray_image.astype('uint8') start = time.clock() face_cascade = cv2.CascadeClassifier( '/home/user/anaconda3/pkgs/libopencv-3.4.2-hb342d67_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml' ) faces = face_cascade.detectMultiScale(gray_image, 1.3, 5) print(np.shape(faces)) # faces = detect_faces(face_detection, gray_image) for face_coordinates in faces: x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets) rgb_face = rgb_image[y1:y2, x1:x2] gray_face = gray_image[y1:y2, x1:x2]