def load_face_stat(self):
    """Return sample statistics: cc1 = people collected, cc2 = samples collected."""
    table_dict = TableDict()

    def _count(sql):
        # A count(*) query yields one row with column "cc"; treat a missing
        # or empty result as zero.
        rows = table_dict.list(sql)
        return int(rows[0]["cc"]) if rows else 0

    # Number of distinct people that have at least one collection record.
    people_sql = (
        "select count(1) as cc from"
        " (select distinct fc.uid from face_collection fc, face_collection_users fcu"
        " where fc.isdeleted = '0' and fcu.isdeleted = '0' and fc.uid = fcu.uid) f"
    )
    # Number of collected samples (collection joined with detail rows).
    samples_sql = (
        "select count(1) as cc from"
        " (select fc.uid from face_collection fc, face_collection_detail fcd"
        " where fc.isdeleted = '0' and fc.uid = fcd.uid) f"
    )
    return {"cc1": _count(people_sql), "cc2": _count(samples_sql)}
def list_users_by_un_or_loginid(self, un, loginid):
    """Fuzzy-search users by user name and/or login id.

    :param un: user-name substring to match, or None to skip this filter
    :param loginid: login-id substring to match, or None to skip this filter
    :return: matching non-deleted user rows; [] when both filters are None
    """
    table_dict = TableDict()
    sql = "select * from face_collection_users where isdeleted = '0'"
    clauses = []
    params = {}  # renamed from `dict` to avoid shadowing the builtin
    if un is not None:
        clauses.append("un like %(un)s")
        params["un"] = "%{}%".format(un)
    if loginid is not None:
        clauses.append("loginid like %(loginid)s")
        params["loginid"] = "%{}%".format(loginid)
    if not clauses:
        # No filter supplied: do not return the entire table.
        return []
    if len(clauses) == 1:
        sql += " and {}".format(clauses[0])
    else:
        # Both filters present: either may match.
        sql += " and ({} or {})".format(clauses[0], clauses[1])
    return table_dict.list(sql, params)
def get_by_sn(self, sn):
    """Return the terminal (camera) row with serial number `sn`, or None."""
    table_dict = TableDict()
    sql = "select * from face_collection_camera where sn = %(sn)s"
    rows = table_dict.list(sql, {"sn": sn})  # renamed from `list`: don't shadow the builtin
    return None if len(rows) <= 0 else rows[0]
def get_by_ip(self, ip):
    """Return the terminal (camera) row with ip address `ip`, or None."""
    table_dict = TableDict()
    sql = "select * from face_collection_camera where ip = %(ip)s"
    rows = table_dict.list(sql, {"ip": ip})  # renamed from `list`: don't shadow the builtin
    return None if len(rows) <= 0 else rows[0]
def get_by_cn(self, cn):
    """Return the terminal (camera) row with name `cn`, or None."""
    table_dict = TableDict()
    sql = "select * from face_collection_camera where cn = %(cn)s"
    rows = table_dict.list(sql, {"cn": cn})  # renamed from `list`: don't shadow the builtin
    return None if len(rows) <= 0 else rows[0]
def get_newest_by_uid(self, uid):
    """Return the most recent collection-detail row for one person ({} if none)."""
    sql = (
        "select * from face_collection_detail"
        " where id = (select max(id) from face_collection_detail where uid = %(uid)s)"
    )
    rows = TableDict().list(sql, {"uid": uid})
    return rows[0] if rows else {}
def list_camera(self):
    """List all non-deleted terminals ordered by serial number (cno as str)."""
    sql = "select * from face_collection_camera where isdeleted = '0' order by sn"
    cameras = TableDict().list(sql)
    for camera in cameras:
        camera["cno"] = str(camera["cno"])
    return cameras
def get_camera(self, id):
    """Fetch one terminal row by id; {} when not found, with cno stringified."""
    camera = TableDict().get("face_collection_camera", id)
    if camera is None:
        return {}
    camera["cno"] = str(camera["cno"])
    return camera
def del_camera(self, id_list):
    """Soft-delete terminals by flipping their isdeleted flag.

    :param id_list: comma-separated id string; None or blank is a no-op
    """
    if id_list is None or id_list.strip() == '':
        return
    table_dict = TableDict()
    # NOTE(review): ids are interpolated into SQL text; this is safe only
    # while id_list comes from trusted internal callers — parameterize if
    # batch_exec supports bound parameters.
    sql_list = [
        "update face_collection_camera set isdeleted = '1' where id = '{}'".format(id_)
        for id_ in id_list.split(",")
    ]
    table_dict.batch_exec(sql_list)
def get_train_info(self):
    """Return the latest non-deleted training record ({} if none); dates as str."""
    sql = (
        "select * from face_train"
        " where id = (select max(id) from face_train where isdeleted = '0')"
    )
    rows = TableDict().list(sql)
    if not rows:
        return {}
    # Normalise the start/end timestamps to strings for serialisation.
    for row in rows:
        for key in ("sdt", "edt"):
            row[key] = "" if row[key] is None else str(row[key])
    return rows[0]
def load_face_list3(self, cid):
    """List the face rows of one collection batch, with image URLs attached."""
    rows = TableDict().list(
        "select * from face_collection_detail where cid = %(cid)s", {"cid": cid})
    base_url = Setup.s3_face_dir[6:]
    for row in rows:
        row["dt"] = str(row["dt"])
        # Dataset layout: <base>/<uid>/<iid>.jpg
        row["img_url"] = base_url + "/" + row["uid"] + "/" + row["iid"] + ".jpg"
    return rows
def map_uid_un_label(self):
    """Build user lookup maps from the non-deleted user table.

    :return: (uid->un, uid->label, label->uid) dicts
    """
    uid_to_un = {}
    uid_to_label = {}
    label_to_uid = {}
    users = TableDict().list(
        "select * from face_collection_users where isdeleted = '0'")
    for user in users:
        uid, un, label = user["uid"], user["un"], user["label"]
        uid_to_un[uid] = un
        uid_to_label[uid] = label
        label_to_uid[label] = uid
    return uid_to_un, uid_to_label, label_to_uid
def load_face_list2(self, uid):
    """List every collection record of one person, newest first, with URLs."""
    sql = (
        "select * from face_collection_detail"
        " where uid = %(uid)s order by dt desc"
    )
    rows = TableDict().list(sql, {"uid": uid})
    base_url = Setup.s3_face_dir[6:]
    for row in rows:
        row["dt"] = str(row["dt"])
        # Dataset layout: <base>/<uid>/<iid>.jpg
        row["img_url"] = base_url + "/" + row["uid"] + "/" + row["iid"] + ".jpg"
    return rows
def load_recent_prediction(self, hm):
    """Load the `hm` most recent recognition results, newest first."""
    sql = (
        "select fp.*, fcu.un as pun from face_prediction fp, face_collection_users fcu"
        " where fp.isdeleted = '0' and fcu.isdeleted = '0' and fp.puid = fcu.uid"
        " order by fp.dt desc limit 0, %(hm)s"
    )
    predictions = TableDict().list(sql, {"hm": hm})
    base_url = Setup.s3_face_dir[6:]
    for prediction in predictions:
        prediction["dt"] = str(prediction["dt"])
        uid = str(prediction["puid"])
        # URLs of the probe image and of the matched gallery image.
        prediction["in_img_url"] = base_url + "/" + uid + "/" + prediction["iniid"] + ".jpg"
        prediction["m_img_url"] = base_url + "/" + uid + "/" + prediction["miid"] + ".jpg"
    return predictions
def filter_imgs(self, imgs, fs, uid):
    """Drop images whose features were already extracted for this user.

    :param imgs: loaded images, aligned index-wise with fs
    :param fs: image file names of the form "<iid>.jpg", aligned with imgs
    :param uid: user id whose extraction history is consulted
    :return: (imgs_filtered, fs_filtered) keeping only not-yet-extracted items
    """
    table_dict = TableDict()
    sql = "select * from face_train_feature where isdeleted = '0' and uid = %(uid)s"
    fe_list = table_dict.list(sql, {"uid": uid})
    # Build the already-extracted iid set once; the original rescanned the
    # extraction records for every file (O(len(fs) * len(fe_list))).
    extracted = {fe["iid"] for fe in fe_list}
    imgs_filtered, fs_filtered = [], []
    for img, f in zip(imgs, fs):
        if f.split(".")[0] not in extracted:
            imgs_filtered.append(img)
            fs_filtered.append(f)
    return imgs_filtered, fs_filtered
def load_face_list1(self):
    """One row per collected person, newest first, with latest sample attached."""
    # People who have been collected, most recently collected first.
    sql = (
        "select distinct fc.uid, fcu.un from face_collection fc, face_collection_users fcu"
        " where fc.isdeleted = '0' and fcu.isdeleted = '0' and fc.uid = fcu.uid"
        " order by fc.dt desc"
    )
    people = TableDict().list(sql)
    base_url = Setup.s3_face_dir[6:]
    for person in people:
        newest = self.get_newest_by_uid(person["uid"])
        if len(newest) > 0:
            # Attach the latest collection time and a thumbnail URL.
            person["dt"] = str(newest["dt"])
            person["img_url"] = base_url + "/" + person["uid"] + "/" + newest["iid"] + ".jpg"
    return people
def save_camera(self, id, cn, ip, sn, cno, tips):
    """Create or update a terminal (camera) record.

    :param id: record id; None means create a new record with a generated id
    :param cn: terminal name (must be unique across terminals)
    :param ip: terminal ip address (must be unique)
    :param sn: terminal serial number (must be unique)
    :param cno: camera number
    :param tips: free-form remark text
    :return: (id, "") on success, ("", error-message) on a uniqueness conflict
    """
    # Pre-fetch any existing rows with the same name / ip / serial number so
    # uniqueness can be checked for both the create and the update path.
    camera_cn = self.get_by_cn(cn)
    camera_ip = self.get_by_ip(ip)
    camera_sn = self.get_by_sn(sn)
    table_dict = TableDict()
    table = "face_collection_camera"
    id = id if id is not None else id_generator()
    camera = table_dict.get(table, id)
    if camera is None:
        # Creating: name, ip and serial number must not exist at all.
        if camera_cn is not None:
            return "", "已经有名称为“{}”的终端机,不能重复!".format(cn)
        if camera_ip is not None:
            return "", "已经有ip地址为“{}”的终端机,不能重复!".format(ip)
        if camera_sn is not None:
            return "", "已经有序号为“{}”的终端机,不能重复!".format(sn)
        # Insert the new terminal record.
        camera = {"id": id, "cn": cn, "ip": ip, "sn": sn, "cno": cno, "tips": tips, "isdeleted": "0"}
        table_dict.save(table, camera)
    else:
        # Updating: a clash is allowed only with this record itself.
        if camera_cn is not None and camera_cn["id"] != id:
            return "", "已经有名称为“{}”的终端机,不能重复!".format(cn)
        if camera_ip is not None and camera_ip["id"] != id:
            return "", "已经有ip地址为“{}”的终端机,不能重复!".format(ip)
        if camera_sn is not None and camera_sn["id"] != id:
            return "", "已经有序号为“{}”的终端机,不能重复!".format(sn)
        # Apply the field changes.
        camera["cn"] = cn
        camera["ip"] = ip
        camera["sn"] = sn
        camera["cno"] = cno
        camera["tips"] = tips
        table_dict.update(table, camera)
    return id, ""
def save_to_ds(self, cid, pid, uid, ct, imgs, filenames):
    """Save collected face images into the dataset and record them in the DB.

    :param cid: collection id (primary key of the face_collection row)
    :param pid: terminal id
    :param uid: user id the samples belong to
    :param ct: collection type
    :param imgs: images to persist, aligned index-wise with filenames
    :param filenames: "<iid>.jpg" file names, aligned with imgs
    """
    table_dict = TableDict()
    collection = {  # renamed from `dict` to avoid shadowing the builtin
        "id": cid,
        "pid": pid,
        "uid": uid,
        "ct": ct,
        "dt": now_dt_str(),
        "isdeleted": "0"
    }
    detail_dict_list = []
    # Per-user dataset directory; renamed from `dir` (builtin shadowing) and
    # created idiomatically instead of `os.path.exists(...) == False`.
    target_dir = "{}/{}".format(Setup.s3_face_dir, uid)
    os.makedirs(target_dir, exist_ok=True)
    for img, filename in zip(imgs, filenames):
        # Write the image file into the dataset.
        cv2.imwrite(os.path.join(target_dir, filename), img)
        iid = filename.split(".")[0]
        detail_dict_list.append({
            "id": iid,
            "cid": cid,
            "pid": pid,
            "uid": uid,
            "iid": iid,
            "dt": now_dt_str()
        })
    # Only create the collection header when at least one detail row exists.
    if detail_dict_list:
        table_dict.save("face_collection", collection)
        table_dict.batch_save("face_collection_detail", detail_dict_list)
def feature_extract(self, feature_extractor, batch_size, uid_to_label):
    """Extract face features per user and append them to per-user HDF5 files.

    :param feature_extractor: model exposing extract_features(imgs, batch_size=...)
    :param batch_size: batch size passed to the feature extractor
    :param uid_to_label: {uid: numeric class label} of users to process
    """
    table_dict = TableDict()
    tid = id_generator()
    name = "facenet"
    # Record this training run; "tp" tracks progress in [0, 1].
    train_dict = {
        "id": tid,
        "tt": "1",
        "tp": 0.0,
        "sdt": now_dt_str(),
        "isdeleted": "0"
    }
    table_dict.save("face_train", train_dict)
    for i, uid in enumerate(uid_to_label):
        imgs, fs = Train.load_imgs(uid)
        # Skip images whose features were extracted in an earlier run.
        imgs, fs = self.filter_imgs(imgs, fs, uid)
        info("uid: {}, len: {}, feature extract ...".format(uid, len(imgs)))
        if len(imgs) == 0:
            info("uid: {}, len: {}, feature extract ok".format(uid, len(imgs)))
            continue
        features = feature_extractor.extract_features(imgs, batch_size=batch_size)
        labels = (np.ones(len(features)) * int(uid_to_label[uid])).astype(np.int32)
        # Per-user feature file location; renamed from `dir` (builtin
        # shadowing) and created idiomatically instead of exists()==False.
        feature_dir = os.path.join(Setup.s4_feature_dir, name)
        os.makedirs(feature_dir, exist_ok=True)
        hdf5_file_path = os.path.join(feature_dir, "{}.hdf5".format(uid))
        # In-class sample sequence number (continues after existing samples).
        sn = 0
        if os.path.exists(hdf5_file_path):
            # Feature file exists: read its data and rewrite the file with
            # the old and new samples combined.
            db_exist = h5py.File(hdf5_file_path, "r")
            features_exist = np.copy(db_exist["imgs"])
            labels_exist = np.copy(db_exist["labels"])
            db_exist.close()
            sn += features_exist.shape[0]
            info("uid: {}, feature exist {}, now add ...".format(
                uid, features_exist.shape[0]))
            features_new, labels_new = [], []
            features_new.extend(features_exist)
            features_new.extend(features)
            labels_new.extend(labels_exist)
            labels_new.extend(labels)
            hdf5_writer = Hdf5Writer(np.shape(features_new), hdf5_file_path,
                                     dataKey="imgs")
            hdf5_writer.add(features_new, labels_new)
            hdf5_writer.close()
        else:
            hdf5_writer = Hdf5Writer(np.shape(features), hdf5_file_path,
                                     dataKey="imgs")
            hdf5_writer.add(features, labels)
            hdf5_writer.close()
        # Persist one extraction record per processed image file.
        to_db_list = []
        for f in fs:
            to_db_list.append({
                "id": id_generator(),
                "tid": tid,
                "uid": uid,
                "label": str(uid_to_label[uid]),
                "iid": f.split(".")[0],
                "sn": sn,
                "dt": now_dt_str(),
                "isdeleted": "0"
            })
            sn += 1
        table_dict.batch_save("face_train_feature", to_db_list)
        # Update training progress.
        train_dict["tp"] = (i + 1) / len(uid_to_label)
        table_dict.update("face_train", train_dict)
        info("uid: {}, len: {}, feature extract ok".format(uid, len(imgs)))
    # Mark the run finished.
    train_dict["tp"] = 1.0
    train_dict["edt"] = now_dt_str()
    table_dict.update("face_train", train_dict)
import os
from pathlib import Path

from db.table_dict import TableDict
from main.setup import Setup
from utils.utils_sys import id_generator, now_dt_str

# One-off import script: walk the per-user image directories under the face
# dataset root and register every user/image in the collection tables.
# Fix: `os` and `Path` were used without being imported, and the file name
# was derived via str(file).split("\\") which only works with Windows path
# separators — Path.stem is portable.
pid = "1"
root_dir = Setup.s3_face_dir
dict_list = []
detail_dict_list = []
for user_dir in os.listdir(root_dir):
    print(user_dir)
    cid = id_generator()
    uid = user_dir
    dict_list.append({"id": cid, "pid": pid, "uid": uid, "ct": "1",
                      "dt": now_dt_str(), "isdeleted": "0"})
    for file in Path(os.path.join(root_dir, user_dir)).glob("*.jpg"):
        file_name = file.stem  # "<iid>" from "<iid>.jpg", OS-independent
        print(file_name)
        detail_dict_list.append({"id": file_name, "cid": cid, "pid": pid,
                                 "uid": uid, "iid": file_name, "dt": now_dt_str()})
table_dict = TableDict()
if len(detail_dict_list) > 0:
    table_dict.batch_save("face_collection", dict_list)
    table_dict.batch_save("face_collection_detail", detail_dict_list)
from pathlib import Path

import cv2

from db.table_dict import TableDict
from main.setup import Setup

# Ad-hoc smoke-test script: list the prediction temp images, read recent
# face_prediction rows, and try displaying an image.
face_dir = Setup.s3_face_dir + "_tmp/"
img_urls = [str(p).replace("\\", "/") for p in Path(face_dir).glob("*.jpg")]
# Fix: the original called sorted(img_urls, reverse=True) and discarded the
# result — a dead statement; the in-place sort below is what takes effect.
img_urls.sort(reverse=True)
print(img_urls)

table_dict = TableDict()
hm = 3
sql = "select * from face_prediction where isdeleted = '0' order by id desc limit 0,%(hm)s"
p_list = table_dict.list(sql, {"hm": hm})
print(p_list)

# Fix: cv2.imread does not fetch HTTP URLs — it returns None for them, and
# the original then crashed inside cv2.imshow. Guard the None case.
img = cv2.imread("https://kutikomiya.jp/images/idol/a/asuka-kirara001.W120.jpg")
if img is None:
    print("image could not be read (cv2.imread does not support URLs)")
else:
    cv2.imshow("es", img)
    cv2.waitKey(0)
def predict(self, cid, pid, fs):
    """ Face recognition.

    :param cid: collection id
    :param pid: terminal id
    :param fs: uploaded face image files (objects with a .save(path) method)
    :return: {'uid': recognition result (person id, '0' when no match),
              'cc': number of samples successfully added}
    """
    # Load training artifacts: the feature-based predictor and user lookup
    # maps. (uid_to_un and uid_to_label are loaded but not used below.)
    prediction_feature = self.training_service.get_prediction_feature()
    uid_to_un = self.training_service.get_uid_to_un()
    uid_to_label = self.training_service.get_uid_to_label()
    label_to_uid = self.training_service.get_label_to_uid()
    # Start timing the recognition.
    start_time = dt.datetime.now()
    # Temporary save directory for the uploaded files.
    tmp_dir = Setup.s3_face_dir + "_tmp/"
    if os.path.exists(tmp_dir) == False:
        os.makedirs(tmp_dir)
    # Save the uploads locally, then read them back as images.
    imgs, filenames = [], []
    for f in fs:
        filename = id_generator() + ".jpg"
        file_path = os.path.join(tmp_dir, filename)
        f.save(file_path)
        imgs.append(cv2.imread(file_path))
        filenames.append(filename)
    # Data washing (drop unusable images; keeps imgs/filenames aligned).
    imgs, filenames = self.collection_service.get_data_processor(
    ).data_wash(imgs, filenames)
    # Data preprocessing.
    imgs = self.collection_service.get_data_processor().data_preprocess(
        imgs)
    # Run recognition.
    preds, ps_dist, ps_sim, ps_sn = [], [], [], []
    if Setup.s4_use_feature_extract == 1:
        preds, ps_dist, ps_sim, ps_sn = prediction_feature.prediction(imgs)
    # The highest-similarity candidate is taken as the result.
    index = int(np.argmax(ps_sim))
    pred = preds[index]
    sim = float(np.max(ps_sim))
    filename = filenames[index]
    # pred == -1 means no match; otherwise map the label back to a uid.
    uid = "0" if pred == -1 else label_to_uid[str(pred)]
    # Find the recognized person's newest dataset image (for display).
    miid = ""
    if uid != "0":
        u_newest = self.collection_service.get_newest_by_uid(uid)
        miid = u_newest["iid"]
    # Recognition time cost, converted to milliseconds.
    times = (dt.datetime.now() - start_time)
    tc = float(times.seconds * np.power(10, 3) +
               times.microseconds / np.power(10, 3))
    # Persist the recognition result.
    table_dict = TableDict()
    iniid = filename.split(".")[0]
    dict = {
        "id": id_generator(),
        "cid": cid,
        "pid": pid,
        "puid": uid,
        "tuid": uid,
        "sim": sim,
        "iniid": iniid,
        "miid": miid,
        "tc": tc,
        "dt": now_dt_str(),
        "isdeleted": "0"
    }
    table_dict.save("face_prediction", dict)
    # Add the images matching the predicted label back into the dataset.
    imgs_p, filenames_p = [], []
    for i, p in enumerate(preds):
        if p == pred:
            imgs_p.append(imgs[i])
            filenames_p.append(filenames[i])
    json_r = self.collection_service.collect(cid, pid, uid, None,
                                             ct="2", imgs=imgs_p, filenames=filenames_p)
    json_r["uid"] = uid
    return json_r