Example #1
def ko2():
    thread_lock.acquire()

    info("ko2")
    faceTest0.test2()

    thread_lock.release()
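
The acquire()/release() pair above never releases the lock if faceTest0.test2() raises. A minimal sketch of the same critical section written with a context manager, assuming thread_lock is a plain threading.Lock (info and faceTest0 are the names used in the surrounding examples):

import threading

thread_lock = threading.Lock()

def ko2():
    # The with statement releases the lock even if test2() raises.
    with thread_lock:
        info("ko2")
        faceTest0.test2()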
Example #2
    def extract_features(self, imgs, batch_size=256):
        """
        Batch image feature extraction
        """
        # Preprocess the images
        imgs_p = self.preprocess(imgs)

        # Session holding the already-loaded model
        sess = self.sess

        # Process in batches
        imgs_f = []
        index = 0
        imgs_size = len(imgs)
        while index < imgs_size:
            index_end = min(index + batch_size, imgs_size)

            # Forward pass through the network to get the embeddings
            imgs_f_ = sess.run(self.embeddings,
                               feed_dict={
                                   self.images_placeholder:
                                   imgs_p[index:index_end],
                                   self.phase_train_placeholder: False
                               })
            imgs_f.extend(imgs_f_)
            info("extract_features {} - {} ok".format(index, index_end))
            index += batch_size

        return imgs_f
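
A hedged usage sketch for extract_features: the FaceFeature class name, the model path, and the image paths are assumptions (load_model is the method shown in Example #9 below):

import cv2

extractor = FaceFeature()               # hypothetical wrapper class
extractor.load_model("models/facenet")  # hypothetical model path

imgs = [cv2.imread(p) for p in ["a.jpg", "b.jpg", "c.jpg"]]
features = extractor.extract_features(imgs, batch_size=2)
print(len(features))  # one embedding per input image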
Example #3
    def run(self):
        thread_lock.acquire()
        info(self.img_path)
        img = cv2.imread(self.img_path)
        rects = self.detector.detectMultiScale(img,
                                               scaleFactor=1.1,
                                               minNeighbors=5,
                                               minSize=(5, 5),
                                               flags=cv2.CASCADE_SCALE_IMAGE)
        print(rects)
        thread_lock.release()
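
The snippet does not show how self.detector is built. A minimal sketch of a compatible setup using the frontal-face Haar cascade bundled with opencv-python (the image path is a placeholder):

import cv2

detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

img = cv2.imread("face.jpg")
rects = detector.detectMultiScale(img,
                                  scaleFactor=1.1,
                                  minNeighbors=5,
                                  minSize=(5, 5),
                                  flags=cv2.CASCADE_SCALE_IMAGE)
print(rects)  # one (x, y, w, h) box per detected face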
Example #4
    def test2(self):
        thread_lock.acquire()
        info("ko2")
        img2 = cv2.imread("D:/s5/lena/103752.jpg")
        rects2 = self.detector.detectMultiScale(img2,
                                                scaleFactor=1.1,
                                                minNeighbors=5,
                                                minSize=(5, 5),
                                                flags=cv2.CASCADE_SCALE_IMAGE)
        print(rects2)
        thread_lock.release()
Example #5
    def test1(self):
        thread_lock.acquire()
        info("ko1")
        img1 = cv2.imread(self.path)
        rects1 = self.detector.detectMultiScale(img1,
                                                scaleFactor=1.1,
                                                minNeighbors=5,
                                                minSize=(5, 5),
                                                flags=cv2.CASCADE_SCALE_IMAGE)
        print(rects1)
        thread_lock.release()
Example #6
    def prediction(self, samples, batch_size=Setup.s4_facenet_batch_size):
        """
        Classification prediction
        """
        if self.model is None:
            info("training first")
            return [], [], [], []

        features = self.feature_extractor.extract_features(
            samples, batch_size=batch_size)
        preds, ps_dist, ps_sn = self.model.predict(features, return_dist=True)
        ps_sim = self.cal_sim(ps_dist)
        return preds, ps_dist, ps_sim, ps_sn
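
A sketch of how prediction might be consumed; recognizer and face_imgs are hypothetical names, and return_dist=True is specific to this project's classifier wrapper:

preds, dists, sims, sns = recognizer.prediction(face_imgs)
for pred, sim in zip(preds, sims):
    print("label: {}, similarity: {:.3f}".format(pred, sim))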
Example #7
    def run(self):
        global img_path
        while True:
            # Busy-wait until another thread publishes a path.
            if img_path is None:
                continue

            info(img_path)
            img = cv2.imread(img_path)
            rects = self.detector.detectMultiScale(img,
                                                   scaleFactor=1.1,
                                                   minNeighbors=5,
                                                   minSize=(5, 5),
                                                   flags=cv2.CASCADE_SCALE_IMAGE)
            print(rects)

            img_path = None  # mark the shared path as consumed
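
The loop above busy-waits on a shared global, which spins a CPU core and can drop updates between checks. A sketch of the same hand-off using queue.Queue, which blocks until a path arrives (the worker function and queue name are assumptions):

import queue

import cv2

img_paths = queue.Queue()

def worker(detector):
    while True:
        path = img_paths.get()  # blocks instead of spinning
        img = cv2.imread(path)
        rects = detector.detectMultiScale(img,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(5, 5),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
        print(rects)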
Example #8
    def collect(self, cid, pid, uid, fs, ct="1", imgs=None, filenames=None):
        """
        Face sample collection
        :param cid: collection id
        :param pid: terminal id
        :param uid: person id
        :param fs: face image files
        :return: {'cc': number of samples successfully added}
        """
        # Temporary save directory
        tmp_dir = Setup.s3_face_dir + "_tmp/"
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)

        # Save the uploaded files locally
        if imgs is None:
            imgs, filenames = [], []
            for f in fs:
                filename = id_generator() + ".jpg"
                file_path = os.path.join(tmp_dir, filename)
                f.save(file_path)
                imgs.append(cv2.imread(file_path))
                filenames.append(filename)

        if ct == "1":
            # Data cleaning
            imgs, filenames = self.data_processor.data_wash(imgs, filenames)
            # Data preprocessing
            imgs = self.data_processor.data_preprocess(imgs)

        # Filter out images too similar to ones already in the dataset
        imgs, filenames = self.data_processor.filter_sim_ds(
            uid, imgs, filenames)

        # Save the images to the dataset
        self.save_to_ds(cid, pid, uid, ct, imgs, filenames)

        info("Added face samples (uid: {}): {}.".format(uid, len(filenames)))

        json_r = {"cid": cid, "cc": len(filenames)}

        return json_r
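
collect() accepts either uploaded file objects (fs, anything with a werkzeug-style save()) or images that are already decoded. A hedged sketch of the second path; the service object and file names are placeholders:

import cv2

imgs = [cv2.imread("sample1.jpg"), cv2.imread("sample2.jpg")]
names = ["sample1.jpg", "sample2.jpg"]
result = service.collect(cid="c1", pid="p1", uid="u1", fs=None,
                         ct="1", imgs=imgs, filenames=names)
print(result["cc"])  # number of samples actually added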
Example #9
    def load_model(self, model_path):
        """
        Load a pre-trained model
        """
        sess = tf.Session()

        facenet.load_model(model_path)
        info("loaded model ok: %s" % model_path)

        # Get the input and output tensors
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")

        # Keep the session holding the loaded model
        self.sess = sess

        # Keep the input and output tensors
        self.images_placeholder = images_placeholder
        self.embeddings = embeddings
        self.phase_train_placeholder = phase_train_placeholder
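
load_model() follows the TensorFlow 1.x frozen-graph pattern: restore the graph, then look the tensors up by name. A standalone sketch of that pattern with the same tensor names (the model path is a placeholder):

import tensorflow as tf

import facenet  # the FaceNet helper module used by the original code

sess = tf.Session()
with sess.as_default():
    facenet.load_model("models/20180402-114759")  # placeholder path

graph = tf.get_default_graph()
images_placeholder = graph.get_tensor_by_name("input:0")
embeddings = graph.get_tensor_by_name("embeddings:0")
phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")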
Example #10
    def check_model(self):
        """
        Check that a model is loaded and scores acceptably on the held-out test data
        """
        if self.model is None:
            info("training first")
            return False
        else:
            if len(self.datas_test) == 0:
                info("no test data")
                return False

            info("evaluating classifier ...")
            preds = self.model.predict(self.datas_test)
            acc = accuracy_score(self.labels_test, preds)
            info("score: {:.2f}%\n".format(acc * 100))

            self.datas_test.clear()
            self.labels_test.clear()

            return acc > 0.5
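
A sketch of the gate in use; the trainer object and the publish step are hypothetical:

# Retraining only takes effect if the held-out accuracy clears
# the 0.5 threshold enforced by check_model().
if trainer.check_model():
    trainer.publish_model()  # hypothetical follow-up step
else:
    info("model rejected, keeping the previous classifier")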
Example #11
    def feature_extract(self, feature_extractor, batch_size, uid_to_label):
        """
        Feature extraction
        """
        table_dict = TableDict()
        tid = id_generator()
        name = "facenet"

        # Record this training run
        train_dict = {
            "id": tid,
            "tt": "1",
            "tp": 0.0,
            "sdt": now_dt_str(),
            "isdeleted": "0"
        }
        table_dict.save("face_train", train_dict)

        for i, uid in enumerate(uid_to_label):
            imgs, fs = Train.load_imgs(uid)
            imgs, fs = self.filter_imgs(imgs, fs, uid)
            info("uid: {}, len: {}, feature extract ...".format(
                uid, len(imgs)))
            if len(imgs) == 0:
                info("uid: {}, len: {}, feature extract ok".format(
                    uid, len(imgs)))
                continue

            features = feature_extractor.extract_features(
                imgs, batch_size=batch_size)
            labels = (np.ones(len(features)) * int(uid_to_label[uid])).astype(
                np.int32)

            # Path where the feature file is stored (feature_dir avoids
            # shadowing the built-in dir)
            feature_dir = os.path.join(Setup.s4_feature_dir, name)
            if not os.path.exists(feature_dir):
                os.makedirs(feature_dir)
            hdf5_file_path = os.path.join(feature_dir, "{}.hdf5".format(uid))

            # Sequence number within the class (uid)
            sn = 0

            # If the feature file already exists, read its data back and
            # write it out together with the newly added features
            if os.path.exists(hdf5_file_path):
                db_exist = h5py.File(hdf5_file_path, "r")
                features_exist = np.copy(db_exist["imgs"])
                labels_exist = np.copy(db_exist["labels"])
                db_exist.close()
                sn += features_exist.shape[0]
                info("uid: {}, feature exist {}, now add ...".format(
                    uid, features_exist.shape[0]))

                features_new, labels_new = [], []
                features_new.extend(features_exist)
                features_new.extend(features)
                labels_new.extend(labels_exist)
                labels_new.extend(labels)
                hdf5_writer = Hdf5Writer(np.shape(features_new),
                                         hdf5_file_path,
                                         dataKey="imgs")
                hdf5_writer.add(features_new, labels_new)
                hdf5_writer.close()
            else:
                hdf5_writer = Hdf5Writer(np.shape(features),
                                         hdf5_file_path,
                                         dataKey="imgs")
                hdf5_writer.add(features, labels)
                hdf5_writer.close()

            # Save the extraction records to the database
            to_db_list = []
            for f in fs:
                to_db_list.append({
                    "id": id_generator(),
                    "tid": tid,
                    "uid": uid,
                    "label": str(uid_to_label[uid]),
                    "iid": f.split(".")[0],
                    "sn": sn,
                    "dt": now_dt_str(),
                    "isdeleted": "0"
                })
                sn += 1
            table_dict.batch_save("face_train_feature", to_db_list)

            # Update the training progress
            train_dict["tp"] = (i + 1) / len(uid_to_label)
            table_dict.update("face_train", train_dict)

            info("uid: {}, len: {}, feature extract ok".format(uid, len(imgs)))

        # Update the training completion time
        train_dict["tp"] = 1.0
        train_dict["edt"] = now_dt_str()
        table_dict.update("face_train", train_dict)
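
The features written above land in one HDF5 file per uid, with datasets named "imgs" (the embeddings, despite the key name) and "labels". A sketch of reading one back; the path is a placeholder following the layout in the code:

import h5py
import numpy as np

with h5py.File("features/facenet/some_uid.hdf5", "r") as db:
    features = np.copy(db["imgs"])
    labels = np.copy(db["labels"])

print(features.shape, labels.shape)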