def __init__(self):
    self.yolo_anchors_path = yaml_cfg.get('yolo_anchors_path')
    self.yolo_model_path = yaml_cfg.get('yolo_model_path')
    self.yolo_classes_path = yaml_cfg.get('yolo_classes_path')
    self.usr_model_type = yaml_cfg.get('usr_model_type')
    if self.usr_model_type == 'yolo':
        logger.info("use yolo model in human detecting")
        self.yolo = YOLO(self.yolo_anchors_path, self.yolo_model_path,
                         self.yolo_classes_path)
    else:
        logger.info("use pose model in human detecting")
        w, h = networks.model_wh(yaml_cfg.get('pose_image_size'))
        self.poseEstimator = TfPoseEstimator(
            networks.get_graph_path('cmu'), target_size=(w, h))
def mod_ins_stuff(self, empe_inf):
    mnt_tp_cd = empe_inf[constants.cons_request_mnt_tp_cd]
    tmp_emb = None
    if mnt_tp_cd == constants.cons_mnt_tp_cd_delete:
        self.__mod_ins_stuff(mnt_tp_cd,
                             empe_inf[constants.cons_request_ccbins_id],
                             empe_inf[constants.cons_request_empe_id],
                             empe_inf[constants.cons_request_usr_nm], None)
    else:
        # Convert the image from OpenCV's BGR to RGB, wuyunzhen.zh, 20190309
        img = cv.cvtColor(empe_inf[constants.cons_request_img], cv.COLOR_BGR2RGB)
        tmp_embs = self.get_embedding(img)
        if len(tmp_embs) > 0:
            tmp_emb = tmp_embs[0]
            self.__mod_ins_stuff(
                mnt_tp_cd, empe_inf[constants.cons_request_ccbins_id],
                empe_inf[constants.cons_request_empe_id],
                empe_inf[constants.cons_request_usr_nm], tmp_emb)
            # Save the image to a file on disk
            img_path = (yaml_cfg.get('ccbins_stuff_pcs_imgs') + "/" +
                        empe_inf[constants.cons_request_ccbins_id] + '_' +
                        empe_inf[constants.cons_request_empe_id] + '_' +
                        empe_inf[constants.cons_request_usr_nm] + ".jpg")
            cv.imwrite(img_path, empe_inf[constants.cons_request_img])
        else:
            raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021011)
    return tmp_emb
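# Illustrative shape of the empe_inf dict this method expects (an assumption
# reconstructed from the keys read above; the real maintenance-type codes and
# calling code are not shown here):
# empe_inf = {
#     constants.cons_request_mnt_tp_cd: "...",   # the delete code skips embedding; any other code upserts
#     constants.cons_request_ccbins_id: "INS001",
#     constants.cons_request_empe_id: "EMP001",
#     constants.cons_request_usr_nm: "zhangsan",
#     constants.cons_request_img: bgr_image,     # OpenCV BGR image, required unless deleting
# }
# emb = srv.mod_ins_stuff(empe_inf)  # returns the face embedding, or None for a delete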
def humanDetect(self, req_data, cv_image_list, path_flag):
    results = []
    write_dbs = []
    cur_date = time.strftime('%Y%m%d', time.localtime(time.time()))
    ccbins_id = req_data.get(constants.cons_request_ccbins_id)
    sys_evt_trace_id = req_data.get(constants.cons_request_trace_id)
    folder_path = yaml_cfg.get('illegal_image_path') + "/" + cur_date + '/' + ccbins_id
    save_all_file = yaml_cfg.get('save_all_file')
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    folder_path = os.path.abspath(folder_path)
    for i in range(0, len(cv_image_list)):
        face_count = 0
        # A head count or illegal-person count of -1 means that check is not required
        human_count = -1
        un_num = -1
        valid_stff_list = []
        for type in req_data.get(constants.cons_request_model_type):
            if type == constants.cons_mdtp_human_detect:
                # Head-count detection
                human_count = self.humanCountSrv.humanCount(cv_image_list[i])
            elif type == constants.cons_mdtp_human_recg:
                # Illegal intrusion
                face_count, un_num, valid_stff_list = self.humanRecognizeSrv.get_invalid_face(
                    cv_image_list[i], ccbins_id)
        results.append([human_count, face_count, un_num, valid_stff_list])
        detect_alarm_flag, recognize_alarm_flag = self.__computeAlarm(human_count, un_num)
        img_path = (req_data.get(constants.cons_request_paths)[i] if path_flag else (
            folder_path + "/" + sys_evt_trace_id + "_" + str(i) + ".jpg"))
        if not path_flag:
            if detect_alarm_flag or recognize_alarm_flag or save_all_file:
                # If the image was not passed as a transaction attachment and an
                # alarm is tentatively raised, save the image to disk
                cv.imwrite(img_path, cv_image_list[i])
        # Collect the result for the database write
        write_dbs.append([human_count, un_num, valid_stff_list, img_path,
                          detect_alarm_flag, recognize_alarm_flag])
    oprt_rcd_dao.add_records(write_dbs, req_data)
    return results
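# Illustrative shape of the humanDetect return value (reconstructed from the
# code above, not from separate documentation): one entry per input image,
# [human_count, face_count, un_num, valid_stff_list], where -1 means that
# particular check was not requested for this transaction.
# example_results = [
#     [3, 2, 1, [...]],   # both head-count and intrusion checks ran
#     [-1, 0, -1, []],    # neither check was requested
# ]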
def __init__(self):
    # Initialize the face-cropping (detection) model
    self.minsize = yaml_cfg.get('facenet_min_size')  # minimum size of face
    # thresholds for the three MTCNN stages
    self.threshold = list(
        map(float, yaml_cfg.get('facenet_threshold').split(',')))
    self.factor = float(yaml_cfg.get('facenet_factor'))  # scale factor
    self.margin = 44
    self.image_size = int(yaml_cfg.get('facenet_image_size'))
    self.graph_find = tf.Graph()
    self.facenet_confidence_level = float(
        yaml_cfg.get('facenet_confidence_level'))
    self.facenet_ingore_small = yaml_cfg.get('facenet_ingore_small')
    self.head_pitch_angle = 35  # maximum head pitch (nod) angle
    self.head_yaw_angle = 35  # maximum head yaw (side-face) angle
    self.head_pitch_yaw_angle = 60  # maximum combined pitch/yaw angle
    self.face_color_percent = 0.6  # minimum share of skin-colored pixels in the face region
    with self.graph_find.as_default():
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
        # self.sess_find = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        self.sess_find = tf.Session()
        with self.sess_find.as_default():
            self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                self.sess_find, None)
    # Initialize the face-recognition model
    self.graph_recognize = tf.Graph()
    with self.graph_recognize.as_default():
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
        # self.sess_recognize = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        self.sess_recognize = tf.Session()
        with self.sess_recognize.as_default():
            # Load the model
            # facenet.load_model("20170512-110547")
            saver = tf.train.import_meta_graph(
                "apps/humanDetectApp/models/facenet/20170512-110547/model-20170512-110547.meta")
            saver.restore(
                tf.get_default_session(),
                "apps/humanDetectApp/models/facenet/20170512-110547/model-20170512-110547.ckpt-250000")
            # saver = tf.train.import_meta_graph(
            #     "apps/humanDetectApp/models/facenet/20180402-114759/model-20180402-114759.meta")
            # saver.restore(tf.get_default_session(),
            #     "apps/humanDetectApp/models/facenet/20180402-114759/model-20180402-114759.ckpt-275")
            # Get input and output tensors
            self.images_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("input:0")
            self.embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            self.phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
    self.legal_ins_stuff = {}
    self.reg_ins_stuff()
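# Minimal sketch (an assumption, not the repository's get_embedding) of how the
# restored tensors above are typically used: a batch of MTCNN-cropped, prewhitened
# face images is fed to "input:0" with phase_train set to False, and "embeddings:0"
# yields one embedding vector per face.
def embed_faces(sess_recognize, images_placeholder, phase_train_placeholder,
                embeddings, face_batch):
    # face_batch: array of shape (n, image_size, image_size, 3)
    feed_dict = {images_placeholder: face_batch,
                 phase_train_placeholder: False}
    return sess_recognize.run(embeddings, feed_dict=feed_dict)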
def __reg_from_file(self):
    stuff_image_dir = yaml_cfg.get('ccbins_stuff_imgs')
    list = os.listdir(stuff_image_dir)  # list all files and directories in the folder
    success = 0
    for i in range(0, len(list)):
        path = os.path.join(stuff_image_dir, list[i])
        if os.path.isfile(path):
            img = cv.imread(path)
            # cv.imwrite("src_file.jpg", img)
            img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
            # cv.imwrite("src_file_rgb.jpg", img)
            embs = self.get_embedding(img)
            if len(embs) > 0:
                success += 1
                ccbinsId, empeId, usrNm = self.__getStuffInfoByPath(path)
                tmp_ccbins = []
                for (key, value) in self.legal_ins_stuff.items():
                    if key == ccbinsId:
                        tmp_ccbins = value
                if tmp_ccbins:
                    tmp_ccbins.append({
                        constants.cons_request_empe_id: empeId,
                        constants.cons_request_usr_nm: usrNm,
                        constants.cons_emb: embs[0]
                    })
                else:
                    self.legal_ins_stuff.update({
                        ccbinsId: [{
                            constants.cons_request_empe_id: empeId,
                            constants.cons_request_usr_nm: usrNm,
                            constants.cons_emb: embs[0]
                        }]
                    })
            else:
                logger.error("No face found in image, path: " + path)
    # print(self.legal_ins_stuff)
    logger.info("Registered faces from files; total images: " + str(len(list)) +
                ", successfully registered: " + str(success))
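# Hedged sketch of the filename convention __reg_from_file appears to rely on.
# __getStuffInfoByPath is not shown here; this illustrative parser assumes the
# "<ccbins_id>_<empe_id>_<usr_nm>.jpg" naming that mod_ins_stuff uses when it
# saves staff images, and may differ from the real helper.
import os

def parse_stuff_filename(path):
    name = os.path.splitext(os.path.basename(path))[0]
    ccbins_id, empe_id, usr_nm = name.split('_', 2)
    return ccbins_id, empe_id, usr_nm

# e.g. parse_stuff_filename("/imgs/INS001_EMP001_zhangsan.jpg")
#      -> ('INS001', 'EMP001', 'zhangsan')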
# coding=utf-8
import apps.humanDetectApp.models.pose_estimation.src.networks as networks
from apps.humanDetectApp.models.pose_estimation.src.estimator import TfPoseEstimator
from networks.yolo.yolo3_predict import YOLO
from PIL import Image
from common import addlog, ai_print
import tensorflow as tf
import logging.config
from keras import backend as K

from apps.humanDetectApp.human_detect_config import yaml_cfg

logger = logging.getLogger()

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = float(
    yaml_cfg.get('per_process_gpu_memory_fraction'))
session = tf.Session(config=config)
K.set_session(session)


class HumanCountSrv(object):
    def __init__(self):
        self.yolo_anchors_path = yaml_cfg.get('yolo_anchors_path')
        self.yolo_model_path = yaml_cfg.get('yolo_model_path')
        self.yolo_classes_path = yaml_cfg.get('yolo_classes_path')
        self.usr_model_type = yaml_cfg.get('usr_model_type')
        if self.usr_model_type == 'yolo':
            logger.info("use yolo model in human detecting")
            self.yolo = YOLO(self.yolo_anchors_path, self.yolo_model_path,
                             self.yolo_classes_path)
        else:
            logger.info("use pose model in human detecting")
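# Illustrative usage sketch (an assumption, not part of the source): driving
# HumanCountSrv directly with an OpenCV image. humanCount is the method the
# detection service calls on this class elsewhere in this app.
# import cv2 as cv
# srv = HumanCountSrv()
# frame = cv.imread('sample.jpg')      # BGR image
# people = srv.humanCount(frame)       # number of people detected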
def __computeAlarm(self, human_count, un_num):
    legal_stuff_num = yaml_cfg.get('legal_stuff_num')
    return (human_count != -1 and human_count != 0
            and human_count < legal_stuff_num), un_num > 0
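# Minimal self-contained sketch (not from the original source) showing how the
# two flags returned by __computeAlarm behave. legal_stuff_num is read from
# yaml_cfg in the real code; here it is a plain argument for illustration.
def compute_alarm_example(human_count, un_num, legal_stuff_num=2):
    # detect alarm: a head count was produced (not -1/0) but is below the
    # configured minimum number of staff
    detect_alarm_flag = (human_count != -1 and human_count != 0
                         and human_count < legal_stuff_num)
    # recognize alarm: at least one unrecognized (illegal) face was found
    recognize_alarm_flag = un_num > 0
    return detect_alarm_flag, recognize_alarm_flag

# e.g. compute_alarm_example(1, 0)  -> (True, False)   single-person anomaly
#      compute_alarm_example(-1, 2) -> (False, True)   intrusion only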
def post(self):
    orgName = get_argument("orgName", required=True, help='Owning organization must not be empty')
    deviceName = get_argument("deviceName", required=True, help='Owning device must not be empty')
    alarmType = get_argument("alarmType", required=True, help='Alarm type must not be empty')
    alarmLevel = get_argument("alarmLevel", required=True, help='Alarm level must not be empty')
    alarmTime = get_argument("alarmTime", required=True, help='Alarm time must not be empty')
    remark = get_argument("remark", required=False)
    # Convert the remark payload back into an image
    img = pickle.loads(eval(remark))
    # json_data = request.form['json_data']
    # dict_data = json.loads(json_data)
    # orgName = dict_data.get('orgName', -1)
    # deviceName = dict_data.get('deviceName', -1)
    # alarmType = dict_data.get('alarmType')
    # alarmLevel = dict_data.get('alarmLevel')
    # alarmTime = dict_data.get('alarmTime')
    # f = request.files['myfile']
    # x = f.read()
    # print('>>>', x)
    # with open('D:/image/b123.jpg', 'ab') as f:
    #     f.write(x)
    # for i in f:
    #     print('@@', type(i))
    #     with open('D:/image/a.jpg', 'ab') as f:
    #         f.write(i)

    # Alarm category 00 maps to Hikvision head-count violations:
    #   '131643' single-person anomaly, '131644' multi-person anomaly
    # Alarm category 01 maps to Hikvision illegal intrusion:
    #   '131585' line-crossing, '131588' region intrusion, '131586' person entering
    if alarmType == '131643':
        alarmType = '00'
        vlt_err_alrm_inf = '单人异常'
    elif alarmType == '131644':
        alarmType = '00'
        vlt_err_alrm_inf = '多人异常'
    elif alarmType == '131585':
        alarmType = '01'
        vlt_err_alrm_inf = '跨域警戒线'
    elif alarmType == '131588':
        alarmType = '01'
        vlt_err_alrm_inf = '区域入侵'
    elif alarmType == '131586':
        alarmType = '01'
        vlt_err_alrm_inf = '人员进入'
    elif alarmType == '150002':
        alarmType = '01'
        vlt_err_alrm_inf = '非法人员入侵'
    else:
        raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021012)

    alarm_info_hk_po = AlarmInfoHk()
    alarm_info_hk_po.id = gen_uuid()
    alarm_info_hk_po.ccbins_id = orgName
    alarm_info_hk_po.eqmt_id = deviceName
    alarm_info_hk_po.bsn_cgycd = alarmType
    alarm_info_hk_po.vlt_err_alrm_inf = vlt_err_alrm_inf
    alarm_info_hk_po.alarm_level = alarmLevel
    alarm_info_hk_po.stdt_tm = datetime.strptime(alarmTime, '%Y%m%d %H:%M:%S')
    if remark and len(remark) > 0:
        try:
            cur_date = time.strftime('%Y%m%d', time.localtime(time.time()))
            folder_path = yaml_cfg.get(
                'illegal_image_path_hk') + "/" + cur_date + '/' + orgName
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            img_path = (folder_path + "/" + alarm_info_hk_po.id + ".jpg")
            cv.imwrite(img_path, img)
            # f.save(img_path)
            alarm_info_hk_po.uploadfiletrgtrfullnm = img_path
        except Exception:
            logger.error("Failed to save Hikvision alarm image, " + alarmType +
                         "," + vlt_err_alrm_inf + "," + alarmTime)
    alarm_hk_dao.add_record(alarm_info_hk_po)
    return make_response(status=HttpStatus.SUCCESS, data={"code": "success"})
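# Compact restatement (illustrative only) of the Hikvision-to-internal alarm
# mapping implemented by the if/elif chain above; the handler itself keeps the
# explicit branches.
HK_ALARM_MAP = {
    '131643': ('00', '单人异常'),      # single-person anomaly
    '131644': ('00', '多人异常'),      # multi-person anomaly
    '131585': ('01', '跨域警戒线'),    # line-crossing
    '131588': ('01', '区域入侵'),      # region intrusion
    '131586': ('01', '人员进入'),      # person entering
    '150002': ('01', '非法人员入侵'),  # illegal person intrusion
}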