def main(photo):
    """Segment the face in image file *photo*, score skin redness, and save a labeled copy.

    Writes ``<photo>_labeled.<ext>`` next to the input and prints the
    redness score (red pixels per 1000 face pixels).

    Args:
        photo: Path to an image file readable by ``cv2.imread``; the last
            four characters are assumed to be the extension (e.g. ``.jpg``).
    """
    # Instantiate the face-segmentation network and load pretrained weights.
    model = FCN()
    model.load_weights("Keras_FCN8s_face_seg_YuvalNirkin.h5")

    def preprocess(image):
        """Resize/normalize an RGB image into the network's input tensor."""
        image = cv2.resize(image, (500, 500))
        # Per-channel mean subtraction used by this pretrained model.
        image = image.astype(float) - np.array(
            (122.67891434, 116.66876762, 104.00698793))
        # Add a batch axis and reverse channel order (RGB -> BGR).
        image = image[np.newaxis, :, :, ::-1]
        return image

    # Read the photo as RGB and run it through the segmentation network.
    im = cv2.cvtColor(cv2.imread(photo), cv2.COLOR_BGR2RGB)
    out = model.predict(preprocess(im))

    # Face mask at original resolution; the blur softens the mask boundary.
    out_resized = cv2.resize(np.squeeze(out), (im.shape[1], im.shape[0]))
    out_resized_clipped = np.clip(out_resized.argmax(axis=2), 0, 1).astype(np.float64)
    face_mask = cv2.GaussianBlur(out_resized_clipped, (7, 7), 6)

    # Absolute redness criterion: the red channel must be high.
    # BUG FIX: the original tested `200 > im[:, :, 0]` (red *below* 200),
    # which contradicts the stated intent of selecting strongly red pixels.
    red_absolute = im[:, :, 0] > 200
    # Relative redness criterion: red clearly exceeds green + blue.
    # BUG FIX: widen to int32 first -- uint8 addition silently wraps at 256.
    red_relative = im[:, :, 0] > 0.9 * (
        im[:, :, 1].astype(np.int32) + im[:, :, 2].astype(np.int32))
    # Pixels must satisfy both the absolute and relative criteria.
    red_spots = np.logical_and(red_absolute, red_relative)

    # Red mask: red pixels lying firmly inside the face mask (value == 1).
    red_mask = np.copy(face_mask)
    red_mask[np.invert(red_spots)] = 0
    red_mask[red_mask != 1] = 0

    # Redness score: red pixels per 1000 face pixels (0 if no face found,
    # which also guards against division by zero).
    face_pixels = np.count_nonzero(face_mask > 0)
    red_pixels = np.count_nonzero(red_mask > 0)
    red_score = 1000 * red_pixels / face_pixels if face_pixels else 0

    # Labeled image: paint red regions pure red (RGB), then blend 50/50.
    overlay = np.copy(im)
    overlay[red_mask == 1, 0] = 0
    overlay[red_mask == 1, 1] = 0
    overlay[red_mask == 1, 2] = 255
    im_mask = cv2.addWeighted(im, 0.5, overlay, 0.5, 0)

    # Output image (convert RGB back to BGR for imwrite) and the score.
    cv2.imwrite(photo[:-4] + '_labeled' + photo[-4:], im_mask[:, :, ::-1])
    print('Redness Score: %d' % red_score)
# Gradio demo: face segmentation with a pretrained Keras FCN-8s model.
import gradio as gr
import os, sys

# Make this file's directory importable so the local modules
# (FCN8s_keras, drive) resolve regardless of the working directory.
file_folder = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, file_folder)
from FCN8s_keras import FCN
from PIL import Image
import cv2
from drive import download_file_from_google_drive
import numpy as np

# Fetch the pretrained segmentation weights from Google Drive on first run.
weights = os.path.join(file_folder, "face_seg_model_weights.h5")
if not os.path.exists(weights):
    file_id = "1IerDF2DQqmJWqyvxYZOICJT1eThnG8WR"
    download_file_from_google_drive(file_id, weights)

# Module-level model instance shared by all calls to segment_face.
model1 = FCN()
model1.load_weights(weights)


def segment_face(inp):
    # Segment the face in RGB image array *inp* with the FCN model.
    # NOTE(review): this definition appears truncated in this chunk --
    # no return statement is visible; confirm against the full file.
    im = Image.fromarray(np.uint8(inp))
    im = im.resize((500, 500))
    in_ = np.array(im, dtype=np.float32)
    # Reverse channels (RGB -> BGR), then subtract per-channel means.
    in_ = in_[:, :, ::-1]
    in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
    # Add the leading batch axis expected by the network.
    in_ = in_[np.newaxis, :]
    out = model1.predict(in_)
    # Upsample the prediction back to the input's original size and
    # binarize it (argmax over classes, clipped to {0, 1}).
    out_resized = cv2.resize(np.squeeze(out), (inp.shape[1], inp.shape[0]))
    out_resized_clipped = np.clip(out_resized.argmax(axis=2), 0, 1).astype(np.float64)
#predict facial kepoints detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat') dets = detector(img, 1) parts = predictor(img, dets[0]).parts() dlib_pts = [] for pt in parts: dlib_pts.append(np.array([pt.x, pt.y])) dlib_pts = np.array(dlib_pts) #mask from triangulation face_triangles = make_face_triang(dlib_pts, (H, W)) triang_mask = get_triang_mask(img.shape, face_triangles) # NN segmentation seg_model = FCN() seg_model.load_weights("Keras_FCN8s_face_seg_YuvalNirkin.h5") nn_mask = get_nn_mask(img, seg_model) K.clear_session() #get hairline from NN mask contour = find_contours(nn_mask[:, :, 0], 0.5)[0].astype(np.int16) contour = contour[np.logical_or( np.logical_and( contour[:, 0] < dlib_pts[0][1], np.abs(contour[:, 1] - dlib_pts[0][0]) <= np.abs(contour[:, 1] - dlib_pts[16][0])), np.logical_and( contour[:, 0] < dlib_pts[16][1], np.abs(contour[:, 1] - dlib_pts[0][0]) > np.abs(contour[:, 1] - dlib_pts[16][0])))]
# Face-segmentation utilities built on the Keras FCN-8s model.
from keras.layers import *
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.initializers import RandomNormal
from keras.applications import *
import keras.backend as K
import numpy as np
import os
import matplotlib.pyplot as plt
import cv2
import scipy.ndimage as ndimage
import time
from FCN8s_keras import FCN

# Module-level model instance with pretrained face-segmentation weights.
model = FCN()
model.load_weights("Keras_FCN8s_face_seg_YuvalNirkin.h5")


def vgg_preprocess(im):
    """Convert an RGB image array into the model's input tensor.

    Resizes to 500x500, reverses channels (RGB -> BGR), subtracts the
    VGG-style per-channel means, and adds a leading batch axis.
    """
    im = cv2.resize(im, (500, 500))
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:, :, ::-1]
    in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
    in_ = in_[np.newaxis, :]
    return in_


def auto_downscaling(im):
    # NOTE(review): this definition is truncated at the end of this
    # chunk -- only the width/height reads are visible here.
    w = im.shape[1]
    h = im.shape[0]
# Gradio demo: face segmentation with a pretrained Keras FCN-8s model.
import gradio as gr
import os, sys

# Resolve this file's directory once; the "utils" subfolder holds the
# local helper modules (FCN8s_keras, drive).
# BUG FIX: the original nested the parentheses incorrectly --
# os.path.dirname(os.path.abspath(__file__), "utils") passed "utils" as a
# second argument to dirname (TypeError) -- and it never defined
# file_folder, which is used below.
file_folder = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(file_folder, "utils"))
from FCN8s_keras import FCN
from PIL import Image
import cv2
import tensorflow as tf
from drive import download_file_from_google_drive
import numpy as np

# Fetch the pretrained segmentation weights from Google Drive on first run.
weights = os.path.join(file_folder, "face_seg_model_weights.h5")
if not os.path.exists(weights):
    file_id = "1IerDF2DQqmJWqyvxYZOICJT1eThnG8WR"
    download_file_from_google_drive(file_id, weights)

# Module-level model instance shared by all calls to segment_face.
model1 = FCN()
model1.load_weights(weights)


def segment_face(inp):
    """Segment the face in RGB image array *inp* and return the masked image.

    Background pixels are dimmed to 20% brightness (mask weight
    0.25 / 1.25); face pixels are kept at full brightness.
    """
    im = Image.fromarray(np.uint8(inp))
    im = im.resize((500, 500))
    in_ = np.array(im, dtype=np.float32)
    # Reverse channels (RGB -> BGR), then subtract per-channel means.
    in_ = in_[:, :, ::-1]
    in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
    # Add the leading batch axis expected by the network.
    in_ = in_[np.newaxis, :]
    out = model1.predict(in_)
    # Upsample the prediction back to the input's original size and
    # binarize it (argmax over classes, clipped to {0, 1}).
    out_resized = cv2.resize(np.squeeze(out), (inp.shape[1], inp.shape[0]))
    out_resized_clipped = np.clip(out_resized.argmax(axis=2), 0, 1).astype(np.float64)
    # BUG FIX: the original applied .astype(np.uint8) to inp's float copy
    # (a no-op roundtrip) instead of to the weighted product, and never
    # returned the result.  Convert the final product and return it.
    result = ((out_resized_clipped[:, :, np.newaxis] + 0.25) / 1.25
              * inp.astype(np.float64)).astype(np.uint8)
    return result