Example #1
def LoadModel(where, hyper):
    """Loads a pre-trained model with n curves located at weights = path/to/weights.hdf5"""
    sess = Session()
    with sess.as_default():
        with sess.graph.as_default():
            Ss = hyper[0, :]
            ks = hyper[3, :]
            As = hyper[4, :]
            freezeS = hyper[5, :]

            model = create_model(ks, As, Ss, freezeS)
            prms = [
                model.layers[1].A, model.layers[1].k, model.layers[1].s,
                model.layers[1].B
            ]
            model.compile(optimizer=Adam(), loss='mse')
            model.load_weights(where)

            #Extract optimized weights into a dictionary
            BroutePrms = model.layers[1].get_weights()
            prms = ExtractParams(BroutePrms, freezeS)
            return prms
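A minimal usage sketch for this example, assuming create_model and ExtractParams (used above) are defined in the same module and that hyper is stacked row-wise as Ss, dev, posit, ks, As, freezeS (the layout documented in Example #4 below); the weight path and all numeric values are illustrative.

import numpy as np

# Hypothetical hyperparameter matrix for two curves, one column per curve.
hyper = np.array([
    [0.2, 0.8],    # Ss: centers of the two curves
    [0.05, 0.05],  # dev: allowed deviation of the centers
    [1, 0],        # posit: 1 = positive slope, 0 = negative slope
    [10.0, 10.0],  # ks: suggested steepness values
    [0.5, 0.5],    # As: suggested relative importance values
    [0, 1],        # freezeS: which S values are fixed
])

# Load previously optimized parameters from disk (path is illustrative).
prms = LoadModel('/tmp/W.hdf5', hyper)
print(prms)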
Example #2
    def detect_face(self, Img, image_size):
        minsize = 20
        threshold = [0.6, 0.7, 0.7]
        factor = 0.709
        margin = 44
        gpu_memory_fraction = 1.0

        print('Creating networks and loading parameters')
        with tf.Graph().as_default():
            gpu_options = GPUOptions(
                per_process_gpu_memory_fraction=gpu_memory_fraction)
            sess = Session(config=ConfigProto(gpu_options=gpu_options,
                                              log_device_placement=False))
            with sess.as_default():
                dir_model = "./align"
                pnet, rnet, onet = align.detect_face.create_mtcnn(
                    sess, dir_model)

                Img_size = np.asarray(Img.shape)[0:2]
                bounding_boxes, _ = align.detect_face.detect_face(
                    Img, minsize, pnet, rnet, onet, threshold, factor)
                faces = np.zeros(
                    (len(bounding_boxes), image_size, image_size, 3),
                    dtype="uint8")
                bb = np.zeros((len(bounding_boxes), 4), dtype=np.int32)
                # Expand each detection by the margin and clamp it to the image bounds.
                for i in range(len(bounding_boxes)):
                    det = np.squeeze(bounding_boxes[i, 0:4])
                    bb[i, 0] = np.maximum(det[0] - margin / 2, 0)
                    bb[i, 1] = np.maximum(det[1] - margin / 2, 0)
                    bb[i, 2] = np.minimum(det[2] + margin / 2, Img_size[1])
                    bb[i, 3] = np.minimum(det[3] + margin / 2, Img_size[0])
                    cropped = Img[bb[i, 1]:bb[i, 3], bb[i, 0]:bb[i, 2], :]
                    img_cropped = Image.fromarray(cropped)
                    img_aligned = img_cropped.resize((image_size, image_size),
                                                     Image.BILINEAR)
                    aligned_arr = np.asarray(img_aligned)
                    faces[i, :, :, :] = cv2.cvtColor(aligned_arr,
                                                     cv2.COLOR_BGR2RGB)
        return faces, bb
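A hedged usage sketch for this example; the class that owns detect_face is not shown above, so the wrapper name FaceDetector, the image path, and the target size are illustrative assumptions. It also assumes the ./align directory with the MTCNN model files exists.

import cv2

detector = FaceDetector()          # hypothetical owner of detect_face()
img = cv2.imread('person.jpg')     # BGR image; path is illustrative
faces, boxes = detector.detect_face(img, image_size=160)
print(faces.shape)                 # (num_faces, 160, 160, 3)
print(boxes)                       # int32 bounding boxes, one row per face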
Example #3
class face_utils_cls():
    def __init__(self):
        '''
        Facial landmark model for the dlib library
        '''
        # import pdb
        # pdb.set_trace()
        self.path = './face_models'
        # self.face_landmark_dlib = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
        # self.face_detector_dlib = dlib.get_frontal_face_detector()

        #set sess
        '''If a GPU is used, allocate its memory on demand'''
        gpu_options = GPUOptions(allow_growth=True)
        session_config = ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False,
                                        gpu_options=gpu_options)
        '''
        Initialize the face feature model, face detection model, and face landmark model
        '''

        self.face_feature_sess = Session(graph=tf.Graph(), config=session_config)
        self.face_detection_sess = Session(graph=tf.Graph(), config=session_config)
        self.face_landmark_sess = Session(graph=tf.Graph(), config=session_config)
        self.face_attribute_sess = Session(graph=tf.Graph(), config=session_config)

        self.ff_pb_path = self.path + "/face_recognition_model.pb"
        self.init_feature_face()

        self.detect_pb_path = self.path + "/face_detection_model.pb"
        self.init_detection_face_tf()

        self.landmark_pb_path = self.path + "/landmark.pb"
        self.init_face_landmark_tf()

        self.attribute_pb_path = self.path + "/face_attribute.pb"
        self.init_face_attribute()

    def init_feature_face(self):
        with self.face_feature_sess.as_default():
            with self.face_feature_sess.graph.as_default():
                with GFile(self.ff_pb_path, 'rb') as f:
                    graph_def = self.face_feature_sess.graph_def
                    graph_def.ParseFromString(f.read())
                    tf.import_graph_def(graph_def, name='')
                    self.ff_images_placeholder = get_default_graph().get_tensor_by_name("input:0")
                    self.ff_train_placeholder = get_default_graph().get_tensor_by_name("phase_train:0")
                    self.ff_embeddings = get_default_graph().get_tensor_by_name("embeddings:0")

    def init_detection_face_tf(self):
        with self.face_detection_sess.as_default():
            with self.face_detection_sess.graph.as_default():
                face_detect_od_graph_def = self.face_detection_sess.graph_def
                with GFile(self.detect_pb_path, 'rb') as fid:
                    serialized_graph = fid.read()
                    face_detect_od_graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(face_detect_od_graph_def, name='')
                    ops = get_default_graph().get_operations()
                    all_tensor_names = {output.name for op in ops for output in op.outputs}
                    self.detection_tensor_dict = {}
                    for key in ['num_detections', 'detection_boxes', 'detection_scores','detection_classes']:
                        tensor_name = key + ':0'
                        if tensor_name in all_tensor_names:
                            self.detection_tensor_dict[key] = get_default_graph().get_tensor_by_name(
                                tensor_name)
                    self.detection_image_tensor = get_default_graph().get_tensor_by_name('image_tensor:0')


    def init_face_landmark_tf(self):

        with self.face_landmark_sess.as_default():
            with self.face_landmark_sess.graph.as_default():
                graph_def = self.face_landmark_sess.graph_def
                with GFile(self.landmark_pb_path, 'rb') as fid:
                    serialized_graph = fid.read()
                    graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(graph_def, name='')
                    self.face_landmark_tensor = get_default_graph(). \
                        get_tensor_by_name("fully_connected_9/Relu:0")



    def init_face_attribute(self):

        with self.face_attribute_sess.as_default():
            with self.face_attribute_sess.graph.as_default():
                graph_def = self.face_attribute_sess.graph_def
                with GFile(self.attribute_pb_path, 'rb') as fid:
                    serialized_graph = fid.read()
                    graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(graph_def, name='')
                    self.pred_eyeglasses = get_default_graph().get_tensor_by_name("ArgMax:0")
                    self.pred_young = get_default_graph().get_tensor_by_name("ArgMax_1:0")
                    self.pred_male = get_default_graph().get_tensor_by_name("ArgMax_2:0")
                    self.pred_smiling = get_default_graph().get_tensor_by_name("ArgMax_3:0")
                    self.face_attribute_image_tensor = get_default_graph().get_tensor_by_name("Placeholder:0")


    def detection_face_by_dlib(self, im_data):
        ## Call dlib (requires self.face_detector_dlib, which is commented out in __init__ above)

        sp = im_data.shape
        im_data = cv2.cvtColor(im_data, cv2.COLOR_BGR2GRAY)
        rects = self.face_detector_dlib(im_data, 1)

        if len(rects) == 0:
            return None, None, None, None

        # Take only the first detected face
        x1 = rects[0].left() * 1.0 / sp[1]
        y1 = rects[0].top() * 1.0 / sp[0]
        x2 = rects[0].right() * 1.0 / sp[1]
        y2 = rects[0].bottom() * 1.0 / sp[0]

        '''
        Adjust the face region
        '''
        y1 = int(max(y1 - 0.3 * (y2 - y1), 0))

        return x1, y1, x2, y2

    def detection_face_by_tf(self, im_data):
        im_data_re = cv2.resize(im_data, (256, 256))

        output_dict = self.face_detection_sess.run(self.detection_tensor_dict,
                                            feed_dict={self.detection_image_tensor:
                                                        np.expand_dims(
                                                            im_data_re, 0)})

        # all outputs are float32 numpy arrays, so convert types as appropriate
        output_dict['num_detections'] = int(output_dict['num_detections'][0])
        output_dict['detection_classes'] = output_dict[
            'detection_classes'][0].astype(np.uint8)
        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
        output_dict['detection_scores'] = output_dict['detection_scores'][0]


        for i in range(len(output_dict['detection_scores'])):
            if output_dict['detection_scores'][i] > 0.1:
                bbox = output_dict['detection_boxes'][i]
                y1 = bbox[0]
                x1 = bbox[1]
                y2 = bbox[2]
                x2 = bbox[3]
                return x1, y1, x2, y2

        return None, None, None, None

    # Normalize (prewhiten) the image data
    def prewhiten(self, x):
        mean = np.mean(x)
        std = np.std(x)
        std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
        y = np.multiply(np.subtract(x, mean), 1/std_adj)
        return y

    def face_feature(self, face_data):
        im_data = self.prewhiten(face_data)  # preprocessing
        im_data = cv2.resize(im_data, (160, 160))
        im_data1 = np.expand_dims(im_data, axis=0)
        ## Face feature extraction
        emb1 = self.face_feature_sess.run(self.ff_embeddings,
                                    feed_dict={self.ff_images_placeholder: im_data1,
                                                self.ff_train_placeholder: False})
        return emb1


    def face_landmark_tf(self, face_data):

        print("begin ... landmark")
        pred = self.face_landmark_sess.run(self.face_landmark_tensor, {"Placeholder:0":
                                                            np.expand_dims(face_data, 0)})
        print("success ... landmark")
        pred = pred[0]
        #cv2.imwrite("0_landmark.jpg", face_data)

        return pred

    def face_attribute(self, im_data):
        [eye_glass, young, male, smiling] = self.face_attribute_sess.run(
            [self.pred_eyeglasses, self.pred_young, self.pred_male, self.pred_smiling],
            feed_dict={self.face_attribute_image_tensor: np.expand_dims(im_data, 0)})

        return eye_glass, young, male, smiling

    def load_fea_from_str(self, fea_path):
        with open(fea_path) as f:
            fea_str = f.readlines()
        emb2_str = fea_str[0].split(",")
        emb2 = []
        for ss in emb2_str:
            emb2.append(float(ss))
        emb2 = np.array(emb2)

        return emb2
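A hedged usage sketch for this class, assuming the .pb model files referenced above exist under ./face_models; the image path, the feature file, and the cosine-similarity comparison are illustrative additions rather than part of the class.

import cv2
import numpy as np

fu = face_utils_cls()

im = cv2.imread('face1.jpg')                         # illustrative path
x1, y1, x2, y2 = fu.detection_face_by_tf(im)         # normalized box coordinates
if x1 is not None:
    h, w = im.shape[:2]
    crop = im[int(y1 * h):int(y2 * h), int(x1 * w):int(x2 * w)]
    emb = fu.face_feature(crop)                      # shape (1, embedding_dim)

    # Compare against a stored feature vector via cosine similarity
    # (the class itself only loads features from file).
    ref = fu.load_fea_from_str('face1_feature.txt')  # illustrative path
    sim = np.dot(emb[0], ref) / (np.linalg.norm(emb[0]) * np.linalg.norm(ref))
    print('cosine similarity:', sim)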
Example #4
def Optimize(df,
             hyper,
             peak,
             lr=5e-2,
             epochs=100,
             v=1,
             weights=0,
             where='/tmp/W.hdf5'):
    """df: training data
    hyper: Matrix with hyperparameters. Contains, as rows: Ss,dev,posit,ks,As,freezeS
    Ss: x point around which curves are to be centered
    dev: allowed deviation for the center of the curves with respect to Ss
    posit: slope of the curve: slope==1 -> positive slope, slope==0 -> negative slope
    ks: Suggested k values (steepness of curve)
    As: Suggested A values (relative importance of event, by how much did mass increment with this event?)
    freezeS: Which S values are to be fixed
    lr: learning rate
    epochs: number of epochs
    v: verbosity of output while training
    weights: load pre-trained parameters to restart optimization from there
    where: /path/to/file where model is to be stored. Default is /tmp/W.hdf5"""

    sess = Session()
    with sess.as_default():
        with sess.graph.as_default():
            Ss = hyper[0, :]
            ks = hyper[3, :]
            As = hyper[4, :]
            freezeS = hyper[5, :]

            model = create_model(ks, As, Ss, freezeS)
            prms = [
                model.layers[1].A, model.layers[1].k, model.layers[1].s,
                model.layers[1].B
            ]
            model.compile(loss=CostFunction(prms, hyper, peak=peak),
                          optimizer=Adam(lr))

            if weights:  #Load weights if available
                model.load_weights(weights)

            #Callbacks
            rlr = callbacks.ReduceLROnPlateau(monitor='loss',
                                              factor=0.5,
                                              patience=10,
                                              min_delta=0.004,
                                              min_lr=2.0e-7,
                                              mode='min',
                                              verbose=v)
            ton = EndOnNaN()
            chk = callbacks.ModelCheckpoint(where,
                                            monitor='loss',
                                            save_best_only=True,
                                            mode='min',
                                            period=1)
            earlyStp = callbacks.EarlyStopping(monitor='loss',
                                               min_delta=0.00003,
                                               patience=25,
                                               verbose=1,
                                               mode='min')

            hist = model.fit(df.x,
                             df.y,
                             epochs=epochs,
                             batch_size=128,
                             verbose=v,
                             callbacks=[rlr, ton, chk, earlyStp])

            model.load_weights(where)  #Load weights (best of all epochs)

            #Extract optimized weights into a dictionary
            BroutePrms = model.layers[1].get_weights()
            prms = ExtractParams(BroutePrms, freezeS)
            return prms, min(hist.history['loss'])
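A hedged usage sketch of calling Optimize, assuming df is any object exposing x and y arrays (as the fit call above implies) and that the hyper matrix follows the row order documented in the docstring; the synthetic data, the peak value, and all other numbers are illustrative.

import numpy as np
from types import SimpleNamespace

# Synthetic training data; Optimize only relies on df.x and df.y.
x = np.linspace(0.0, 1.0, 512)
y = 1.0 / (1.0 + np.exp(-20.0 * (x - 0.5)))   # one sigmoid-like step
df = SimpleNamespace(x=x, y=y)

# Hyperparameter matrix, one column per curve, stacked row-wise as
# Ss, dev, posit, ks, As, freezeS (see the docstring above).
hyper = np.array([
    [0.5],     # Ss
    [0.05],    # dev
    [1],       # posit
    [20.0],    # ks
    [1.0],     # As
    [0],       # freezeS
])

prms, best_loss = Optimize(df, hyper, peak=0.5, lr=5e-2, epochs=100,
                           where='/tmp/W.hdf5')
print(prms, best_loss)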
Example #5
from django.shortcuts import render, redirect
from django.shortcuts import get_object_or_404
from django.core.files.storage import FileSystemStorage

import numpy as np
import os
from keras.preprocessing import image
from keras.models import load_model
from tensorflow.compat.v1 import Session
from tensorflow import Graph

model_graph = Graph()

with model_graph.as_default():
	tf_session = Session()
	with tf_session.as_default():


		# model = load_model("classify/first_one.h5")
		# Please use the link below to download the model from Google Drive;
		# it is too large to include here.
		model = "https://drive.google.com/file/d/1uoa7_WWQXaahpnAB6JbqNZV6Kr9RKm2Z/view?usp=sharing"

IMG_WIDTH = 224
IMG_HEIGHT = 224

labels = {0: "Corn Gray leaf spot", 
1: "Common rust Corn Maize", 
2: "Northern Leaf Blight Corn Maize", 
3: "Healthy Maize Corn", 
4: "Early blight Potato leaf",