def collect():
    """
    Face collection endpoint.

    Reads the request parameters, gathers the uploaded face images and either
    adds them to the dataset (collection) or runs recognition on them.

    :return: JSON string with the service result.
    """
    # Collection id (generated when the client does not supply one)
    cid = getParameter("cid", id_generator())
    # Terminal (camera) id, defaults to "1"
    pid = getParameter("pid", "1")
    # Person id
    uid = request.form["uid"]
    # Number of uploaded face images
    img_cc = int(request.form["img_cc"])
    # Whether to run recognition (non-zero) instead of plain collection
    to_rec = int(request.form["to_rec"])

    # Uploaded face image files: file0, file1, ...
    fs = [request.files["file" + str(m)] for m in range(img_cc)]

    if to_rec == 0:
        json_r = collection_service.collect(cid, pid, uid, fs)
    else:
        json_r = prediction_service.predict(cid, pid, fs)

    return json.dumps(json_r)
Example #2
0
    def collectX(self, fs):
        """
        Continuously collect face images.

        Saves every uploaded file into a temporary directory. Washing,
        preprocessing and dataset insertion are disabled in this variant.

        :param fs: uploaded face image files
        :return: {'cid': collection timestamp string}
        """
        # Temporary directory used to persist the uploads
        tmp_dir = Setup.s3_face_dir + "_ttt/"
        os.makedirs(tmp_dir, exist_ok=True)

        # Save each file locally under a freshly generated unique name
        for f in fs:
            filename = id_generator() + ".jpg"
            f.save(os.path.join(tmp_dir, filename))

        json_r = {}
        json_r["cid"] = now_dt_str()

        return json_r
Example #3
0
    def collect(self, cid, pid, uid, fs, ct="1", imgs=None, filenames=None):
        """
        Face collection: add face images to a person's dataset.

        :param cid: collection id
        :param pid: terminal (camera) id
        :param uid: person id
        :param fs: uploaded face image files (ignored when imgs is given)
        :param ct: collection type; "1" triggers washing/preprocessing
        :param imgs: already-decoded images; skips saving/decoding fs
        :param filenames: filenames matching imgs
        :return: {'cid': collection id, 'cc': number of samples added}
        """
        # Temporary directory for the uploaded files
        tmp_dir = Setup.s3_face_dir + "_tmp/"
        os.makedirs(tmp_dir, exist_ok=True)

        # Persist the uploads locally and decode them
        if imgs is None:
            imgs, filenames = [], []
            for f in fs:
                filename = id_generator() + ".jpg"
                file_path = os.path.join(tmp_dir, filename)
                f.save(file_path)
                imgs.append(cv2.imread(file_path))
                filenames.append(filename)

        if ct == "1":
            # Data washing (drops unusable images alongside their filenames)
            imgs, filenames = self.data_processor.data_wash(imgs, filenames)
            # Data preprocessing
            imgs = self.data_processor.data_preprocess(imgs)

        # Drop images that are too similar to ones already in the dataset
        imgs, filenames = self.data_processor.filter_sim_ds(
            uid, imgs, filenames)

        # Save the remaining images into the dataset
        self.save_to_ds(cid, pid, uid, ct, imgs, filenames)

        info("新增人脸样本(uid: {}): {} 份.".format(uid, len(filenames)))

        json_r = {"cid": cid, "cc": len(filenames)}

        return json_r
    def save_camera(self, id, cn, ip, sn, cno, tips):
        """
        Create a new terminal (camera) record or update an existing one.

        Name, ip address and serial number must each be unique across all
        terminals. Returns (id, "") on success, ("", error message) on a
        uniqueness conflict.
        """
        existing_cn = self.get_by_cn(cn)
        existing_ip = self.get_by_ip(ip)
        existing_sn = self.get_by_sn(sn)

        table_dict = TableDict()
        table = "face_collection_camera"
        if id is None:
            id = id_generator()
        camera = table_dict.get(table, id)
        is_new = camera is None

        # Uniqueness checks; on update, a clash with this very record is fine.
        if existing_cn is not None and (is_new or existing_cn["id"] != id):
            return "", "已经有名称为“{}”的终端机,不能重复!".format(cn)
        if existing_ip is not None and (is_new or existing_ip["id"] != id):
            return "", "已经有ip地址为“{}”的终端机,不能重复!".format(ip)
        if existing_sn is not None and (is_new or existing_sn["id"] != id):
            return "", "已经有序号为“{}”的终端机,不能重复!".format(sn)

        if is_new:
            # Insert a fresh record
            camera = {"id": id, "cn": cn, "ip": ip, "sn": sn, "cno": cno,
                      "tips": tips, "isdeleted": "0"}
            table_dict.save(table, camera)
        else:
            # Update the existing record in place
            camera.update({"cn": cn, "ip": ip, "sn": sn,
                           "cno": cno, "tips": tips})
            table_dict.update(table, camera)

        return id, ""
Example #5
0
    def detect(self, image_url, image_file, image_base64, return_landmark,
               factor_mini):
        """
        Face detection.

        :param image_url: image URL
        :param image_file: an image as a binary file
        :param image_base64: base64-encoded binary image data
        :param return_landmark: "1" to also detect and return facial landmarks
        :param factor_mini: down-scale factor applied before detection
        :return: dict with request_id, image_id, faces (rectangles and
                 optional landmarks) and time_used in milliseconds, or an
                 error_message when no image was supplied
        """
        # Response payload
        r = {}
        request_id = id_generator()
        image_id = id_generator()
        r["request_id"] = request_id

        # For the elapsed-time measurement
        start_time = dt.datetime.now()

        # Temporary directory for the incoming image
        tmp_dir = Setup.s3_face_dir + "_api/"
        os.makedirs(tmp_dir, exist_ok=True)

        # Persist the request image locally (whichever source was supplied)
        file_path = self.get_request_img(tmp_dir, image_id, image_url,
                                         image_file, image_base64)

        if file_path is None:
            r["error_message"] = "没有传入图片"
            return r
        img = cv2.imread(file_path)
        r["image_id"] = image_id

        # Scale factor: fall back to the configured default when out of range
        factor_mini = float(factor_mini)
        if factor_mini > 1.0 or factor_mini < 0.1:
            factor_mini = Setup.s3_factor_mini
        if factor_mini < 1.0:
            # Shrink the image to speed up detection
            width, height = img.shape[1], img.shape[0]
            img = cv2.resize(
                img, (int(width * factor_mini), int(height * factor_mini)))

        # Face detection; rescale boxes/points back to original coordinates
        total_boxes, points = self.data_processor.face_detector.detect_face(
            img)
        total_boxes = np.divide(total_boxes, factor_mini)
        points = np.divide(points, factor_mini)

        faces = []
        for i, box in enumerate(total_boxes):
            # Face bounding rectangle (box is [left, top, right, bottom, ...])
            face_rectangle = {
                "left": int(box[0]),
                "top": int(box[1]),
                "width": int(box[2] - box[0]),
                "height": int(box[3] - box[1]),
            }

            face = {"face_token": id_generator(),
                    "face_rectangle": face_rectangle}

            if return_landmark == "1":
                # Landmark coordinates. NOTE(review): the (j, j + 5) pairing
                # assumes points[i] holds 5 x values followed by 5 y values
                # (MTCNN-style) — confirm against the detector.
                landmark = []
                for j in range(int(len(points[i]) / 2)):
                    landmark.append(
                        [int(points[i][j]),
                         int(points[i][j + 5])])
                face["landmark"] = landmark

            faces.append(face)
        r["faces"] = faces

        # Elapsed time in milliseconds
        times = dt.datetime.now() - start_time
        r["time_used"] = times.total_seconds() * 1000

        return r
import os
from pathlib import Path

from db.table_dict import TableDict

from main.setup import Setup
from utils.utils_sys import id_generator, now_dt_str

# One-off migration script: rebuild face_collection rows (and their detail
# rows) from the per-person image directories under Setup.s3_face_dir.
pid = "1"  # terminal id stamped on every rebuilt record
root_dir = Setup.s3_face_dir
dirs = os.listdir(root_dir)
dict_list = []
detail_dict_list = []
for dir in dirs:
    print(dir)
    # One new collection per directory; the directory name is the person id
    cid = id_generator()
    uid = dir
    dict = {"id": cid, "pid": pid, "uid": uid, "ct": "1",
            "dt": now_dt_str(), "isdeleted": "0"}
    dict_list.append(dict)

    # One detail record per jpg in the person's directory
    lFile = list(Path(os.path.join(root_dir, dir)).glob("*.jpg"))
    for file in lFile:
        # NOTE(review): splitting on "\\" is Windows-specific — Path(file).stem
        # would be the portable basename; confirm target platform.
        file_name = str(file).split("\\")[-1].split(".")[0]
        print(file_name)
        detail_dict = {"id": file_name, "cid": cid, "pid": pid, "uid": uid,
                       "iid": file_name, "dt": now_dt_str()}
        detail_dict_list.append(detail_dict)

table_dict = TableDict()
if len(detail_dict_list) > 0:
Example #7
0
import pandas as pd
import pymysql
from utils.utils_sys import id_generator, now_dt_str

# Demo script exercising the es_face database.
# NOTE(review): plain-text credentials in source — move to config/env.
conn = pymysql.connect(host="127.0.0.1", user="es", password="123456",
                       database="es_face")

# Dump the current face_collection table
sql = 'select * from face_collection'
results = pd.read_sql(sql, conn)
print(results)

cursor = conn.cursor()
# Parameterized insert: let the driver quote and escape the values instead
# of building the SQL string by hand.
sql = ("insert into face_collection(id, uid, pid, width, height, dt, isdeleted)"
       " values (%s, %s, %s, %s, %s, %s, %s)")
cursor.execute(sql, (id_generator(), "20", "1", 160, 160, now_dt_str(), "0"))
conn.commit()

# Parameterized update
sql = "update face_collection set pid = '15' where width = %(width)s"
cursor.execute(sql, {"width": 160})
conn.commit()

cursor.close()
conn.close()
Example #8
0
            face = None
        else:
            # 只要最大的图
            face = utils_cv.max_rect(faces)
        last_face = face
    else:
        face = last_face

    if face is not None:
        # 人脸标记框左上点与右下点
        p1 = (int(face[0] / s_factor) - s_border, int(face[1] / s_factor) - s_border)
        p2 = (int((face[0] + face[2]) / s_factor) + s_border, int((face[1] + face[3]) / s_factor) + s_border)
        # 越界的图不要
        if p1[0] > 0 and p1[1] > 0 and p2[0] < width and p2[1] < height:
            # 保存并显示人脸
            face_img = frame[p1[1]:p2[1], p1[0]:p2[0] :]
            cv2.imwrite("face_ttt/{}.jpg".format(utils_sys.id_generator()), face_img)
            cv2.rectangle(frame, p1, p2, [255, 255, 255], 5)

    # 显示图像
    cv2.imshow("es_face", frame)

    # 按esc退出
    if cv2.waitKey(1) == keycode_esc:
        break

    index = (index + 1) % index_max

camera.release()
cv2.destroyAllWindows()
Example #9
0
    def feature_extract(self, feature_extractor, batch_size, uid_to_label):
        """
        Extract face features for every person and append them to per-person
        HDF5 feature files, recording run progress in the face_train table.

        :param feature_extractor: model wrapper exposing extract_features()
        :param batch_size: batch size passed to the feature extractor
        :param uid_to_label: dict mapping person id -> integer class label
        """
        table_dict = TableDict()
        tid = id_generator()
        name = "facenet"

        # Record this training run; tp tracks progress in [0, 1]
        train_dict = {
            "id": tid,
            "tt": "1",
            "tp": 0.0,
            "sdt": now_dt_str(),
            "isdeleted": "0"
        }
        table_dict.save("face_train", train_dict)

        for i, uid in enumerate(uid_to_label):
            # Load and filter this person's images (fs are the filenames)
            imgs, fs = Train.load_imgs(uid)
            imgs, fs = self.filter_imgs(imgs, fs, uid)
            info("uid: {}, len: {}, feature extract ...".format(
                uid, len(imgs)))
            if len(imgs) == 0:
                info("uid: {}, len: {}, feature extract ok".format(
                    uid, len(imgs)))
                continue

            features = feature_extractor.extract_features(
                imgs, batch_size=batch_size)
            # One integer label per extracted feature row
            labels = (np.ones(len(features)) * int(uid_to_label[uid])).astype(
                np.int32)

            # Location of this person's feature file
            dir = os.path.join(Setup.s4_feature_dir, name)
            if os.path.exists(dir) == False:
                os.makedirs(dir)
            hdf5_file_path = os.path.join(dir, "{}.hdf5".format(uid))

            # Per-class sample sequence number (continues from existing rows)
            sn = 0

            # If the feature file already exists, read its contents, merge
            # them with the newly extracted features and rewrite the whole
            # file; otherwise write the new features directly.
            if os.path.exists(hdf5_file_path):
                db_exist = h5py.File(hdf5_file_path, "r")
                features_exist = np.copy(db_exist["imgs"])
                labels_exist = np.copy(db_exist["labels"])
                db_exist.close()
                sn += features_exist.shape[0]
                info("uid: {}, feature exist {}, now add ...".format(
                    uid, features_exist.shape[0]))

                features_new, labels_new = [], []
                features_new.extend(features_exist)
                features_new.extend(features)
                labels_new.extend(labels_exist)
                labels_new.extend(labels)
                hdf5_writer = Hdf5Writer(np.shape(features_new),
                                         hdf5_file_path,
                                         dataKey="imgs")
                hdf5_writer.add(features_new, labels_new)
                hdf5_writer.close()
            else:
                hdf5_writer = Hdf5Writer(np.shape(features),
                                         hdf5_file_path,
                                         dataKey="imgs")
                hdf5_writer.add(features, labels)
                hdf5_writer.close()

            # Persist one extraction record per source image
            to_db_list = []
            for f in fs:
                to_db_list.append({
                    "id": id_generator(),
                    "tid": tid,
                    "uid": uid,
                    "label": str(uid_to_label[uid]),
                    "iid": f.split(".")[0],
                    "sn": sn,
                    "dt": now_dt_str(),
                    "isdeleted": "0"
                })
                sn += 1
            table_dict.batch_save("face_train_feature", to_db_list)

            # Update training progress
            train_dict["tp"] = (i + 1) / len(uid_to_label)
            table_dict.update("face_train", train_dict)

            info("uid: {}, len: {}, feature extract ok".format(uid, len(imgs)))

        # Mark the run complete
        train_dict["tp"] = 1.0
        train_dict["edt"] = now_dt_str()
        table_dict.update("face_train", train_dict)
Example #10
0
    def search(self, image_url, image_file, image_base64, face_rectangle):
        """
        1 vs n face search: identify the person shown in the supplied image.

        :param image_url: image URL
        :param image_file: an image as a binary file
        :param image_base64: base64-encoded binary image data
        :param face_rectangle: optional face rectangle to crop before analysis
        :return: dict with uid/un, distance/confidence/thresholds and
                 time_used in milliseconds, or an error_message
        """
        # Trained model and label lookup tables
        prediction_feature = self.training_service.get_prediction_feature()
        uid_to_un = self.training_service.get_uid_to_un()
        label_to_uid = self.training_service.get_label_to_uid()

        # Response payload
        r = {}
        request_id = id_generator()
        image_id = id_generator()
        r["request_id"] = request_id

        # For the elapsed-time measurement
        start_time = dt.datetime.now()

        # Temporary directory for the incoming image
        tmp_dir = Setup.s3_face_dir + "_api/"
        os.makedirs(tmp_dir, exist_ok=True)

        # Persist the request image locally (whichever source was supplied)
        file_path = self.collection_service.get_request_img(
            tmp_dir, image_id, image_url, image_file, image_base64)

        if file_path is None:
            r["error_message"] = "没有传入图片"
            return r
        img = cv2.imread(file_path)
        r["image_id"] = image_id

        # Optionally crop to the caller-specified face rectangle
        img = self.get_face_by_rectangle(img, face_rectangle)

        # A face filling the whole frame can be missed by the detector;
        # padding with a border greatly reduces such misses.
        img = add_border(img)

        # Data washing
        imgs = [img]
        imgs, _ = self.collection_service.get_data_processor().data_wash(imgs)
        if len(imgs) < 1:
            r["error_message"] = "传入的图片中,找不到人脸来进行分析"
            return r
        # Data preprocessing
        imgs = self.collection_service.get_data_processor().data_preprocess(
            imgs)

        # Recognition
        preds, ps_dist, ps_sim, ps_sn = prediction_feature.prediction(imgs)
        if len(preds) == 0:
            r["error_message"] = "模型尚未训练,暂无法进行识别"
            return r
        uid = "0" if preds[0] == -1 else label_to_uid[preds[0]]
        un = "陌生人" if uid == "0" else uid_to_un[uid]
        r["uid"] = uid
        r["un"] = un

        # Sample distance and similarity
        distance = ps_dist[0]
        confidence = ps_sim[0]
        thresholds = Setup.s4_distance_threshold
        r["distance"] = str(distance)
        r["confidence"] = str(confidence)
        r["thresholds"] = str(thresholds)

        # Elapsed time in milliseconds
        times = dt.datetime.now() - start_time
        r["time_used"] = times.total_seconds() * 1000

        return r
Example #11
0
    def predict(self, cid, pid, fs):
        """
        Face recognition over a batch of captured images.

        :param cid: collection id
        :param pid: terminal (camera) id
        :param fs: uploaded face image files
        :return: {'uid': predicted person id ('0' for stranger),
                  'cc': number of samples added to the dataset, ...}
        """
        # Trained model and label lookup tables
        prediction_feature = self.training_service.get_prediction_feature()
        uid_to_un = self.training_service.get_uid_to_un()
        uid_to_label = self.training_service.get_uid_to_label()
        label_to_uid = self.training_service.get_label_to_uid()

        # For the recognition-time measurement
        start_time = dt.datetime.now()

        # Temporary directory for the uploads
        tmp_dir = Setup.s3_face_dir + "_tmp/"
        if os.path.exists(tmp_dir) == False:
            os.makedirs(tmp_dir)

        # Persist the uploads locally and decode them
        imgs, filenames = [], []
        for f in fs:
            filename = id_generator() + ".jpg"
            file_path = os.path.join(tmp_dir, filename)
            f.save(file_path)
            imgs.append(cv2.imread(file_path))
            filenames.append(filename)

        # Data washing
        imgs, filenames = self.collection_service.get_data_processor(
        ).data_wash(imgs, filenames)
        # Data preprocessing
        imgs = self.collection_service.get_data_processor().data_preprocess(
            imgs)

        # Recognition
        preds, ps_dist, ps_sim, ps_sn = [], [], [], []
        if Setup.s4_use_feature_extract == 1:
            preds, ps_dist, ps_sim, ps_sn = prediction_feature.prediction(imgs)

        # The highest-similarity sample wins.
        # NOTE(review): np.argmax raises ValueError on an empty ps_sim — this
        # crashes when washing removed every image or feature extraction is
        # disabled; confirm callers guarantee non-empty input.
        index = int(np.argmax(ps_sim))
        pred = preds[index]
        sim = float(np.max(ps_sim))
        filename = filenames[index]
        # NOTE(review): label_to_uid indexed with str(pred) here, but with the
        # raw label in search() — confirm the expected key type.
        uid = "0" if pred == -1 else label_to_uid[str(pred)]

        # Latest dataset image of the recognized person
        miid = ""
        if uid != "0":
            u_newest = self.collection_service.get_newest_by_uid(uid)
            miid = u_newest["iid"]

        # Recognition time in milliseconds
        times = (dt.datetime.now() - start_time)
        tc = float(times.seconds * np.power(10, 3) +
                   times.microseconds / np.power(10, 3))

        # Persist the recognition result
        table_dict = TableDict()
        iniid = filename.split(".")[0]
        dict = {
            "id": id_generator(),
            "cid": cid,
            "pid": pid,
            "puid": uid,
            "tuid": uid,
            "sim": sim,
            "iniid": iniid,
            "miid": miid,
            "tc": tc,
            "dt": now_dt_str(),
            "isdeleted": "0"
        }
        table_dict.save("face_prediction", dict)

        # Feed the images matching the predicted label back into the dataset
        imgs_p, filenames_p = [], []
        for i, p in enumerate(preds):
            if p == pred:
                imgs_p.append(imgs[i])
                filenames_p.append(filenames[i])
        json_r = self.collection_service.collect(cid,
                                                 pid,
                                                 uid,
                                                 None,
                                                 ct="2",
                                                 imgs=imgs_p,
                                                 filenames=filenames_p)
        json_r["uid"] = uid

        return json_r
Example #12
0
    def compare(self, image_url1, image_file1, image_base64_1, face_rectangle1,
                image_url2, image_file2, image_base64_2, face_rectangle2):
        """
        1 vs 1 face comparison: measure how similar two faces are.

        :param image_url1: first image URL
        :param image_file1: first image as a binary file
        :param image_base64_1: first image as base64-encoded binary data
        :param face_rectangle1: optional face rectangle for the first image
        :param image_url2: second image URL
        :param image_file2: second image as a binary file
        :param image_base64_2: second image as base64-encoded binary data
        :param face_rectangle2: optional face rectangle for the second image
        :return: dict with distance/confidence/thresholds and time_used in
                 milliseconds, or an error_message
        """
        # Response payload
        r = {}
        request_id = id_generator()
        image_id1 = id_generator()
        image_id2 = id_generator()
        r["request_id"] = request_id

        # For the elapsed-time measurement
        start_time = dt.datetime.now()

        # Temporary directory for the incoming images
        tmp_dir = Setup.s3_face_dir + "_api/"
        os.makedirs(tmp_dir, exist_ok=True)

        # Persist both request images locally
        file_path1 = self.collection_service.get_request_img(
            tmp_dir, image_id1, image_url1, image_file1, image_base64_1)
        file_path2 = self.collection_service.get_request_img(
            tmp_dir, image_id2, image_url2, image_file2, image_base64_2)

        if file_path1 is None:
            r["error_message"] = "没有传入第一张图片"
            return r
        if file_path2 is None:
            r["error_message"] = "没有传入第二张图片"
            return r
        img1 = cv2.imread(file_path1)
        img2 = cv2.imread(file_path2)
        r["image_id1"] = image_id1
        r["image_id2"] = image_id2

        # Optionally crop each image to the caller-specified face rectangle
        img1 = self.get_face_by_rectangle(img1, face_rectangle1)
        img2 = self.get_face_by_rectangle(img2, face_rectangle2)

        # A face filling the whole frame can be missed by the detector;
        # padding with a border greatly reduces such misses.
        img1 = add_border(img1)
        img2 = add_border(img2)

        # Data washing
        imgs = [img1, img2]
        imgs, _ = self.collection_service.get_data_processor().data_wash(imgs)
        if len(imgs) < 2:
            r["error_message"] = "传入的两张图片中,找不到足够两个的人脸来进行分析"
            return r
        # Data preprocessing
        imgs = self.collection_service.get_data_processor().data_preprocess(
            imgs)

        # Feature extraction
        prediction_feature = self.training_service.get_prediction_feature()
        feature_extractor = prediction_feature.get_feature_extractor()
        features = feature_extractor.extract_features(imgs)

        # Sample distance and similarity
        distance = distance_l2(features[0], features[1])
        confidence = prediction_feature.cal_sim([distance])[0]
        thresholds = Setup.s4_distance_threshold
        r["distance"] = str(distance)
        r["confidence"] = str(confidence)
        r["thresholds"] = str(thresholds)

        # Elapsed time in milliseconds
        times = dt.datetime.now() - start_time
        r["time_used"] = times.total_seconds() * 1000

        return r