Example #1
    def __init__(self, path=DEFAULT_MODEL_PATH):
        logger.info('Loading model from: {}...'.format(path))

        # for face detection
        self.detector = MTCNN()
        try:
            os.mkdir('./img')
        except OSError:
            pass

        # load model and weights
        self.img_size = 64
        self.stage_num = [3, 3, 3]
        self.lambda_local = 1
        self.lambda_d = 1

        # load pre-trained model
        self.model = SSR_net(self.img_size, self.stage_num, self.lambda_local, self.lambda_d)()
        self.model.load_weights(path)
        self.graph = tf.get_default_graph()

        logger.info('Loaded model')
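
This first example is an excerpt of a wrapper constructor, so its imports and module-level names are not shown. A minimal, hypothetical set of imports it relies on (DEFAULT_MODEL_PATH and the SSR_net factory come from the surrounding project, and tf.get_default_graph implies TensorFlow 1.x graph mode) might look like:

# Assumed imports for the excerpt above (a sketch, not part of the original).
import logging
import os

import tensorflow as tf                  # TF 1.x graph mode: tf.get_default_graph()
from mtcnn import MTCNN                  # or the project's bundled MTCNN module

logger = logging.getLogger(__name__)
# Hypothetical default, matching the weight file used in Example #2.
DEFAULT_MODEL_PATH = './assets/ssrnet_3_3_3_64_1.0_1.0.h5'
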
Example #2
# import sys
# from PIL import Image
# import pygame
# from keras import backend
# import tensorflow as tf
# import matplotlib
# from matplotlib import pyplot as plt
# import matplotlib.patches as patches

# from moviepy.editor import *

# weight path
weight_file = "./assets/ssrnet_3_3_3_64_1.0_1.0.h5"

model = SSR_net(64, [3, 3, 3], 1, 1)()
model.load_weights(weight_file)

detector = MTCNN()


def draw_label(image,
               point,
               label,
               font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=1,
               thickness=2):
    size = cv2.getTextSize(label, font, font_scale, thickness)[0]
    x, y = point
    cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0),
                  cv2.FILLED)
    # The original snippet is truncated here; drawing the label text over the
    # filled box is presumably the next step.
    cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness)
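
The snippet above also assumes cv2, MTCNN, and SSR_net are importable. A rough, hypothetical usage of draw_label on one detected face could look like this (the sample image path and the 0.85 confidence cut-off are assumptions, not part of the original example):

# Hypothetical usage of the Example #2 globals and draw_label (paths assumed).
import cv2
import numpy as np

img = cv2.imread('./assets/sample.jpg')              # assumed input image
for face in detector.detect_faces(img):
    if face['confidence'] > 0.85:                    # assumed confidence threshold
        x, y, w, h = face['box']
        x, y = max(x, 0), max(y, 0)                  # MTCNN can return negative coords
        crop = cv2.resize(img[y:y + h, x:x + w], (64, 64))
        batch = np.expand_dims(crop, axis=0).astype('float32')
        age = float(np.squeeze(model.predict(batch)))
        draw_label(img, (x, y), '{:.0f}'.format(age))
cv2.imwrite('./assets/sample_out.jpg', img)
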
Example #3
class ModelWrapper(MAXModelWrapper):
    MODEL_META_DATA = {
        'id': 'ssrnet',
        'name': 'SSR-Net Facial Age Estimator Model',
        'description': 'SSR-Net Facial Recognition and Age Prediction model; trained using Keras on the IMDB-WIKI dataset',
        'type': 'Facial Recognition',
        'source': 'https://developer.ibm.com/exchanges/models/all/max-facial-age-estimator/',
        'license': 'MIT'
    }

    def __init__(self, path=DEFAULT_MODEL_PATH):
        logger.info('Loading model from: {}...'.format(path))

        # for face detection
        self.detector = MTCNN()
        try:
            os.mkdir('./img')
        except OSError:
            pass

        # load model and weights
        self.img_size = 64
        self.stage_num = [3, 3, 3]
        self.lambda_local = 1
        self.lambda_d = 1

        # load pre-trained model
        self.model = SSR_net(self.img_size, self.stage_num, self.lambda_local, self.lambda_d)()
        self.model.load_weights(path)
        self.graph = tf.get_default_graph()

        logger.info('Loaded model')

    def _pre_process(self, input_img):
        ad = 0.4
        img_h, img_w, _ = np.shape(input_img)

        # downscale the image if its width or height exceeds 1024 px
        input_img = img_resize(input_img)

        img_h, img_w, _ = np.shape(input_img)
        detected = self.detector.detect_faces(input_img)
        faces = np.empty((len(detected), self.img_size, self.img_size, 3))

        for i, d in enumerate(detected):
            if d['confidence'] > 0.85:
                x1, y1, w, h = d['box']
                x2 = x1 + w
                y2 = y1 + h
                # Normalized bbx coordinates
                d['box'] = [float(y1) / img_h, float(x1) / img_w, float(y2) / img_h, float(x2) / img_w]
                xw1 = max(int(x1 - ad * w), 0)
                yw1 = max(int(y1 - ad * h), 0)
                xw2 = min(int(x2 + ad * w), img_w - 1)
                yw2 = min(int(y2 + ad * h), img_h - 1)
                faces[i, :, :, :] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (self.img_size, self.img_size))
        return (faces, detected)

    def _predict(self, pre_x):
        faces = pre_x[0]
        with self.graph.as_default():
            predicted_ages = self.model.predict(faces)
        return (predicted_ages, pre_x[1])

    def _post_process(self, post_rst):
        predicted_ages = post_rst[0]
        detected = post_rst[1]
        pred_res = []
        for i, d in enumerate(detected):
            if d['confidence'] > 0.85:
                pre_age = predicted_ages[i].astype(int)
                pred_res.append([{'box': d['box'], 'age': pre_age}])
        return pred_res
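
Together, _pre_process, _predict, and _post_process form the wrapper's prediction pipeline (the MAXModelWrapper base class normally chains them behind a single public predict call). A rough, hypothetical end-to-end run, with the image path as an assumption, would be:

# Hypothetical end-to-end use of the MAX wrapper above (image path assumed).
import cv2

wrapper = ModelWrapper()                     # assumes DEFAULT_MODEL_PATH points at the SSR-Net weights
img = cv2.imread('./assets/sample.jpg')
pre = wrapper._pre_process(img)              # -> (faces, detections)
raw = wrapper._predict(pre)                  # -> (predicted_ages, detections)
results = wrapper._post_process(raw)         # -> [[{'box': [...], 'age': ...}], ...]
print(results)
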
Example #4

class ModelWrapper(object):
    """Model wrapper for SavedModel format"""
    def __init__(self, path=DEFAULT_MODEL_PATH):
        logger.info('Loading model from: {}...'.format(path))

        # for face detection
        self.detector = MTCNN()
        try:
            os.mkdir('./img')
        except OSError:
            pass

        # load model and weights
        self.img_size = 64
        self.stage_num = [3, 3, 3]
        self.lambda_local = 1
        self.lambda_d = 1

        # load pre-trained model
        self.model = SSR_net(self.img_size, self.stage_num, self.lambda_local, self.lambda_d)()
        self.model.load_weights(path)

        self.graph = tf.get_default_graph()

        logger.info('Loaded model')

    def predict(self, x):
        input_img = x
        # python version
        pyFlag = ''
        if len(sys.argv) < 3:
            pyFlag = '2'  # default to use moviepy to show, this can work on python2.7 and python3.5
        elif len(sys.argv) == 3:
            pyFlag = sys.argv[2]  # python version
        else:
            print('Wrong input!')
            sys.exit()

        detected = ''  # defined here so the variable is not local to the detection branch below
        ad = 0.4

        if pyFlag == '3':
            input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)

        img_h, img_w, _ = np.shape(input_img)
        input_img = cv2.resize(input_img, (1024, int(1024 * img_h / img_w)))
        img_h, img_w, _ = np.shape(input_img)

        detected = self.detector.detect_faces(input_img)
        faces = np.empty((len(detected), self.img_size, self.img_size, 3))

        for i, d in enumerate(detected):
            if d['confidence'] > 0.95:
                x1, y1, w, h = d['box']
                x2 = x1 + w
                y2 = y1 + h
                xw1 = max(int(x1 - ad * w), 0)
                yw1 = max(int(y1 - ad * h), 0)
                xw2 = min(int(x2 + ad * w), img_w - 1)
                yw2 = min(int(y2 + ad * h), img_h - 1)
                cv2.rectangle(input_img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                faces[i, :, :, :] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (self.img_size, self.img_size))

        if len(detected) > 0:
            with self.graph.as_default():
                predicted_ages = self.model.predict(faces)

        # prediction results with bounding boxes & ages
        pred_res = []
        for i, d in enumerate(detected):
            # use the same 0.95 threshold as the crop loop above so every
            # reported face has a corresponding entry in `faces`
            if d['confidence'] > 0.95:
                pre_age = predicted_ages[i].astype(int)
                pred_res.append([{'box': d['box'], 'age': pre_age}])
        return pred_res
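
A brief, hypothetical driver for this second wrapper (the paths are assumptions; the sys.argv check inside predict means it is normally run as a command-line script):

# Hypothetical driver for the SavedModel-style wrapper (paths are assumptions).
import cv2

wrapper = ModelWrapper('./assets/ssrnet_3_3_3_64_1.0_1.0.h5')
frame = cv2.imread('./assets/sample.jpg')
for res in wrapper.predict(frame):
    print(res)   # each entry looks like [{'box': [x, y, w, h], 'age': array(...)}]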