Example #1
    def collectX(self, fs):
        """
        不间断地采集人脸
        :param fs: 人脸图文件
        :return: {'cc': 成功添加的样本数}
        """
        # 临时保存目录
        tmp_dir = Setup.s3_face_dir + "_ttt/"
        if os.path.exists(tmp_dir) == False:
            os.makedirs(tmp_dir)

        # 将文件保存到本地
        imgs, filenames = [], []
        for f in fs:
            filename = id_generator() + ".jpg"
            file_path = os.path.join(tmp_dir, filename)
            f.save(file_path)
            #imgs.append(cv2.imread(file_path))
            #filenames.append(filename)

        # 数据清洗
        #imgs, filenames = self.data_processor.data_wash(imgs, filenames)
        # 数据预处理
        #imgs = self.data_processor.data_preprocess(imgs)

        json_r = {}
        json_r["cid"] = now_dt_str()
        #json_r["cc"] = len(filenames)

        return json_r
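The `fs` objects are saved with `f.save(path)`, which matches werkzeug's `FileStorage` API, so these methods are presumably called from a Flask upload endpoint. Below is a minimal sketch of such a caller; the `/face/collect` route and the `_StubService` class are assumptions for illustration only, with the stub standing in for the class that owns `collectX`.

from flask import Flask, request, jsonify

class _StubService:
    """Placeholder for the class that owns collectX in the example above."""
    def collectX(self, fs):
        return {"cid": "demo", "cc": len(fs)}

app = Flask(__name__)
service = _StubService()

@app.route("/face/collect", methods=["POST"])
def collect_faces():
    # request.files.getlist(...) yields FileStorage objects,
    # which provide the .save(path) used inside collectX
    fs = request.files.getlist("faces")
    return jsonify(service.collectX(fs))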
Example #2
    def save_to_ds(self, cid, pid, uid, ct, imgs, filenames):
        """
        保存图像到数据集
        """
        # 记录数据库
        table_dict = TableDict()
        dict = {
            "id": cid,
            "pid": pid,
            "uid": uid,
            "ct": ct,
            "dt": now_dt_str(),
            "isdeleted": "0"
        }
        detail_dict_list = []

        dir = "{}/{}".format(Setup.s3_face_dir, uid)
        if os.path.exists(dir) == False:
            os.makedirs(dir)

        for i, img in enumerate(imgs):
            # 保存到数据集
            cv2.imwrite(os.path.join(dir, filenames[i]), img)

            detail_dict = {
                "id": filenames[i].split(".")[0],
                "cid": cid,
                "pid": pid,
                "uid": uid,
                "iid": filenames[i].split(".")[0],
                "dt": now_dt_str()
            }
            detail_dict_list.append(detail_dict)
        if len(detail_dict_list) > 0:
            table_dict.save("face_collection", collection_dict)
            table_dict.batch_save("face_collection_detail", detail_dict_list)
Example #3
import os
from pathlib import Path

from db.table_dict import TableDict

from main.setup import Setup
from utils.utils_sys import id_generator, now_dt_str

pid = "1"
root_dir = Setup.s3_face_dir
dirs = os.listdir(root_dir)
dict_list = []
detail_dict_list = []
for uid_dir in dirs:
    print(uid_dir)
    cid = id_generator()
    uid = uid_dir
    collection_dict = {"id": cid, "pid": pid, "uid": uid, "ct": "1",
                       "dt": now_dt_str(), "isdeleted": "0"}
    dict_list.append(collection_dict)

    jpg_files = list(Path(os.path.join(root_dir, uid_dir)).glob("*.jpg"))
    for file in jpg_files:
        file_name = file.stem  # filename without extension, portable across OSes
        print(file_name)
        detail_dict = {"id": file_name, "cid": cid, "pid": pid, "uid": uid,
                       "iid": file_name, "dt": now_dt_str()}
        detail_dict_list.append(detail_dict)

table_dict = TableDict()
if len(detail_dict_list) > 0:
    table_dict.batch_save("face_collection", dict_list)
    table_dict.batch_save("face_collection_detail", detail_dict_list)
Example #4
import pandas as pd
import pymysql
from utils.utils_sys import id_generator, now_dt_str

conn = pymysql.connect(host="127.0.0.1", user="es", password="123456",
                       database="es_face")
#print(conn)

sql = 'select * from face_collection'
results = pd.read_sql(sql, conn)
print(results)
#print('{}, {}'.format(results.shape[0], results.at[0, 'width']))

cursor = conn.cursor()
sql = "insert into face_collection(id, uid, pid, width, height, dt, isdeleted)"
sql += " values ('%s', '%s', '%s', %d, %d, '%s', '%s')"
cursor.execute(sql % (id_generator(), "20", "1", 160, 160, now_dt_str(), "0"))
conn.commit()

sql = "update face_collection set pid = '15' where width = %(width)s"
cursor.execute(sql, {"width": 160})
conn.commit()

cursor.close()
conn.close()
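Recent pandas versions warn when `read_sql` is handed a raw DBAPI connection and recommend a SQLAlchemy connectable instead. A small variant using `sqlalchemy.create_engine`, with the URL built from the same host, user, password, and database as above:

import pandas as pd
from sqlalchemy import create_engine

# mysql+pymysql URL mirroring the pymysql.connect(...) arguments above
engine = create_engine("mysql+pymysql://es:123456@127.0.0.1/es_face")
results = pd.read_sql("select * from face_collection", engine)
print(results)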
Example #5
    def feature_extract(self, feature_extractor, batch_size, uid_to_label):
        """
        特征提取
        """
        table_dict = TableDict()
        tid = id_generator()
        name = "facenet"

        # Record this training run
        train_dict = {
            "id": tid,
            "tt": "1",
            "tp": 0.0,
            "sdt": now_dt_str(),
            "isdeleted": "0"
        }
        table_dict.save("face_train", train_dict)

        for i, uid in enumerate(uid_to_label):
            imgs, fs = Train.load_imgs(uid)
            imgs, fs = self.filter_imgs(imgs, fs, uid)
            info("uid: {}, len: {}, feature extract ...".format(
                uid, len(imgs)))
            if len(imgs) == 0:
                info("uid: {}, len: {}, feature extract ok".format(
                    uid, len(imgs)))
                continue

            features = feature_extractor.extract_features(
                imgs, batch_size=batch_size)
            labels = (np.ones(len(features)) * int(uid_to_label[uid])).astype(
                np.int32)

            # Directory for the feature files
            feature_dir = os.path.join(Setup.s4_feature_dir, name)
            if not os.path.exists(feature_dir):
                os.makedirs(feature_dir)
            hdf5_file_path = os.path.join(feature_dir, "{}.hdf5".format(uid))

            # Per-class serial number
            sn = 0

            # If the feature file already exists, read its contents and write
            # them back to the file together with the newly added data
            if os.path.exists(hdf5_file_path):
                db_exist = h5py.File(hdf5_file_path, "r")
                features_exist = np.copy(db_exist["imgs"])
                labels_exist = np.copy(db_exist["labels"])
                db_exist.close()
                sn += features_exist.shape[0]
                info("uid: {}, feature exist {}, now add ...".format(
                    uid, features_exist.shape[0]))

                features_new, labels_new = [], []
                features_new.extend(features_exist)
                features_new.extend(features)
                labels_new.extend(labels_exist)
                labels_new.extend(labels)
                hdf5_writer = Hdf5Writer(np.shape(features_new),
                                         hdf5_file_path,
                                         dataKey="imgs")
                hdf5_writer.add(features_new, labels_new)
                hdf5_writer.close()
            else:
                hdf5_writer = Hdf5Writer(np.shape(features),
                                         hdf5_file_path,
                                         dataKey="imgs")
                hdf5_writer.add(features, labels)
                hdf5_writer.close()

            # Save the extraction records to the database
            to_db_list = []
            for f in fs:
                to_db_list.append({
                    "id": id_generator(),
                    "tid": tid,
                    "uid": uid,
                    "label": str(uid_to_label[uid]),
                    "iid": f.split(".")[0],
                    "sn": sn,
                    "dt": now_dt_str(),
                    "isdeleted": "0"
                })
                sn += 1
            table_dict.batch_save("face_train_feature", to_db_list)

            # Update the training progress
            train_dict["tp"] = (i + 1) / len(uid_to_label)
            table_dict.update("face_train", train_dict)

            info("uid: {}, len: {}, feature extract ok".format(uid, len(imgs)))

        # Update the training completion time
        train_dict["tp"] = 1.0
        train_dict["edt"] = now_dt_str()
        table_dict.update("face_train", train_dict)
Example #6
    def predict(self, cid, pid, fs):
        """
        人脸识别
        :param cid: 采集id
        :param pid: 终端id
        :param fs: 人脸图文件
        :return: {'uid': 识别结果(人员id), 'cc': 成功添加的样本数}
        """
        # 加载训练信息
        prediction_feature = self.training_service.get_prediction_feature()
        uid_to_un = self.training_service.get_uid_to_un()
        uid_to_label = self.training_service.get_uid_to_label()
        label_to_uid = self.training_service.get_label_to_uid()

        # Start timing the recognition
        start_time = dt.datetime.now()

        # Temporary save directory
        tmp_dir = Setup.s3_face_dir + "_tmp/"
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)

        # Save the uploaded files locally
        imgs, filenames = [], []
        for f in fs:
            filename = id_generator() + ".jpg"
            file_path = os.path.join(tmp_dir, filename)
            f.save(file_path)
            imgs.append(cv2.imread(file_path))
            filenames.append(filename)

        # Data cleaning
        data_processor = self.collection_service.get_data_processor()
        imgs, filenames = data_processor.data_wash(imgs, filenames)
        # Data preprocessing
        imgs = data_processor.data_preprocess(imgs)

        # Run recognition
        preds, ps_dist, ps_sim, ps_sn = [], [], [], []
        if Setup.s4_use_feature_extract == 1:
            preds, ps_dist, ps_sim, ps_sn = prediction_feature.prediction(imgs)

        # Take the prediction with the highest similarity as the result
        index = int(np.argmax(ps_sim))
        pred = preds[index]
        sim = float(np.max(ps_sim))
        filename = filenames[index]
        uid = "0" if pred == -1 else label_to_uid[str(pred)]

        # Find the newest image of the recognized person in the dataset
        miid = ""
        if uid != "0":
            u_newest = self.collection_service.get_newest_by_uid(uid)
            miid = u_newest["iid"]

        # Recognition time in milliseconds
        times = dt.datetime.now() - start_time
        tc = times.total_seconds() * 1000.0

        # Record the recognition result
        table_dict = TableDict()
        iniid = filename.split(".")[0]
        prediction_dict = {
            "id": id_generator(),
            "cid": cid,
            "pid": pid,
            "puid": uid,
            "tuid": uid,
            "sim": sim,
            "iniid": iniid,
            "miid": miid,
            "tc": tc,
            "dt": now_dt_str(),
            "isdeleted": "0"
        }
        table_dict.save("face_prediction", prediction_dict)

        # Add the images matching the recognized label to the dataset
        imgs_p, filenames_p = [], []
        for i, p in enumerate(preds):
            if p == pred:
                imgs_p.append(imgs[i])
                filenames_p.append(filenames[i])
        json_r = self.collection_service.collect(cid,
                                                 pid,
                                                 uid,
                                                 None,
                                                 ct="2",
                                                 imgs=imgs_p,
                                                 filenames=filenames_p)
        json_r["uid"] = uid

        return json_r
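`prediction_feature.prediction` is not shown in these examples; from its call site it returns per-image predictions, distances, similarities, and sample serial numbers, with -1 marking an unknown face. Purely as an assumption about how such a step could work, here is a nearest-neighbour sketch over stored feature vectors; the function name, cosine-similarity metric, and threshold are all hypothetical.

import numpy as np

def predict_by_features(query, features, labels, sim_threshold=0.5):
    """Hypothetical nearest-neighbour matcher; -1 mirrors the 'unknown'
    convention used by predict() above."""
    # Cosine similarity between the query embedding and every stored feature
    sims = features @ query / (np.linalg.norm(features, axis=1) *
                               np.linalg.norm(query) + 1e-10)
    sn = int(np.argmax(sims))
    sim = float(sims[sn])
    dist = float(np.linalg.norm(features[sn] - query))
    # Below the similarity threshold the face is treated as unknown
    pred = int(labels[sn]) if sim >= sim_threshold else -1
    return pred, dist, sim, sn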