def us(self, face_crop):  # face_crop: cropped face image to identify
        # person1_feature = net.encode(person1[None, ...])
        # print(person1.shape)  # torch.Size([3, 112, 112])
        # print(torch.unsqueeze(person1, 0).shape)  # torch.Size([1, 3, 112, 112])
        # print(person1[None, ...].shape)  # torch.Size([1, 3, 112, 112])
        # print(np.shape(face_crop))

        person2 = tf(face_crop).to(self.device)
        # person2 = tf(Image.open("Contrast_data/1/pic1_0.jpg")).to(self.device)  # 需要辨认的人脸图片
        person2_feature = self.net.encode(person2[None, ...])

        # data_path = r"D:\PycharmProjects(2)\arcloss-pytorch\test_img"  # # 人脸数据库
        main_dir = r"D:\PycharmProjects\MTCNN_data\Rocog_face\Contrast_data"
        dicts = {"0": "周杰伦", "1": "迪丽热巴", "2": "黄晓明", "3": "Liu Hui", "4": "目标未识别"}

        lists = []
        lists2 = []
        for face_dir in os.listdir(main_dir):
            for face_filename in os.listdir(os.path.join(main_dir, face_dir)):
                img = Image.open(os.path.join(main_dir, face_dir, face_filename))
                img = img.convert("RGB")

                person1 = tf(img).to(self.device)
                person1_feature = self.net.encode(torch.unsqueeze(person1, 0))

                # TODO: compare in parallel (or use an index/lookup) to speed this up
                siam = compare(person1_feature, person2_feature)  # compare the database face with the face to identify

                # print("余弦相似度值:", max(siam.item(), 0))  # 余弦相似度 tensor([[0.9988]])
                # x = "周杰伦" if round(siam.item()) == 1 else "其他人"
                # x = "迪丽热巴" if round(siam.item()) == 1 else "其他人"
                # print(face_filename)

                # font = ImageFont.truetype("simhei.ttf", 20)

                if siam.item() > 0.8:  # keep only faces whose cosine similarity clears the threshold; everything else is skipped
                    print("cosine similarity:", max(siam.item(), 0))
                    print(face_dir)  # matched class folder
                    value = dicts[str(face_dir)]  # look up the display name for this class
                    lists.append(value)
                    # print(value)  # name taken from the dictionary
                    print(lists)

        # print(lists)  # print the list after each class of face images has been compared

        return lists
    def face_detector(self, img):
        if torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        max_threshold = 0
        threshold = 0.7
        max_threshold_feature = 0
        person1 = tf(img).to(device)
        person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
        kys = self.face_dict.keys()
        kys = list(kys)

        # print(kys[0].shape)
        # a = torch.randn([len(kys),kys[0].shape[0],kys[0].shape[1]])
        # print(a.shape)
        # print(a[0].shape)
        # exit()

        for person_feature in kys:
            # print(person_feature.shape)
            siam = compare(person1_feature, person_feature)
            # print(self.face_dict[person_feature], siam)
            if siam > threshold and siam > max_threshold:
                max_threshold = siam
                max_threshold_feature = person_feature
        print('---------- divider ----------')
        if max_threshold > 0:
            name = self.face_dict[max_threshold_feature]
            y = time.time()
            # print(y - x)
            return name,max_threshold.item()
        return '', 0.0
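Both methods above lean on a `compare` helper that is not included in these snippets; from how it is used (`siam.item()`, thresholds around 0.7-0.8, the `tensor([[0.9988]])` comment) it returns the cosine similarity between two face embeddings. A minimal sketch of such a helper, assuming [1, 512] inputs; the original project may implement it differently (e.g. normalize-and-matmul), which gives the same value.

import torch.nn.functional as F

def compare(feature_a, feature_b):
    # Assumed reimplementation of the `compare` helper used above:
    # cosine similarity between two embeddings of shape [1, 512].
    # Returns a 1-element tensor, so .item() and comparisons with a float work.
    return F.cosine_similarity(feature_a, feature_b, dim=1)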
Example #3
    def __init__(self):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.save_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\params\1.pth"
        self.net = FaceNet().to(self.device)
        self.net.load_state_dict(torch.load(self.save_path))
        self.net.eval()
        self.face_dict = {}

        x = time.time()
        main_dir = r"D:\PycharmProjects\MTCNN_data\Rocog_face\Contrast_data2"
        for face_dir in os.listdir(main_dir):
            for face_filename in os.listdir(os.path.join(main_dir, face_dir)):
                img = Image.open(
                    os.path.join(main_dir, face_dir, face_filename))
                img = trans_square(img)

                # pad the image to a square; the transform then resizes it to 112x112
                person1 = tf(img).to(self.device)
                person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
                self.face_dict[person1_feature] = face_dir  # feature vector as key, class name as value
                # print(self.face_dict[person1_feature])  # class name for this feature vector, e.g. 0
                # exit()

                # lists3.extend(person1_feature)
                # lists3.append([person1_feature, face_dir])  # handy for packing the face database into a feature-vector file

                # TODO: compare in parallel (or use an index/lookup) to speed this up
                # print(person1_feature.shape)  # torch.Size([1, 512])
                # print(person2_feature.shape)  # torch.Size([1, 512])
        y = time.time()
        print(y - x)  # time spent building the face database
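`trans_square` is defined elsewhere in the project; the shape comments in later examples ((342, 258, 3) becoming (342, 342, 3)) suggest it pads a rectangular crop into a square before `tf` resizes it to 112x112. A possible sketch under that assumption, taking and returning a PIL image:

from PIL import Image

def trans_square(img):
    # Assumed helper: pad the shorter side with black so the crop becomes a
    # centered square; the subsequent transform then resizes it to 112x112.
    img = img.convert("RGB")
    w, h = img.size
    side = max(w, h)
    canvas = Image.new("RGB", (side, side), (0, 0, 0))
    canvas.paste(img, ((side - w) // 2, (side - h) // 2))
    return canvas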
Example #4
    def us(self, face_crop):  # face_crop: cropped face image to identify

        # data_path = r"D:\PycharmProjects(2)\arcloss-pytorch\test_img"  # # 人脸数据库

        # dicts = {"0": "周杰伦", "1": "迪丽热巴", "2": "黄晓明", "3": "刘辉", "4": "目标未识别", "5": "小红", "6": "小花"}

        # print(np.shape(face_crop))  # (342, 258, 3)

        face_crop = trans_square(face_crop)

        # print(np.shape(face_crop))  # (342, 342, 3)
        # pad the cropped face to a square; the transform then resizes it to 112x112
        person2 = tf(face_crop).to(self.device)
        # print(np.shape(face_crop))  # (342, 342, 3)

        # person2 = tf(Image.open("Contrast_data/1/pic1_0.jpg")).to(self.device)
        person2_feature = self.net.encode(person2[None, ...])  # embedding of the incoming face crop

        kys = self.face_dict.keys()  # keys of the face database (the face feature vectors)
        # print(self.face_dict)
        # print(kys)
        kys = list(kys)  # put the feature vectors in a list
        # print(kys)

        # siam = compare(person1_feature, person2_feature)  # compare the database face with the face to identify

        # print("cosine similarity:", max(siam.item(), 0))  # e.g. tensor([[0.9988]])
        # x = "周杰伦" if round(siam.item()) == 1 else "other person"
        # x = "迪丽热巴" if round(siam.item()) == 1 else "other person"
        # print(face_filename)

        # font = ImageFont.truetype("simhei.ttf", 20)

        max_threshold = 0
        threshold = 0.7
        max_threshold_feature = 0

        for person1_feature_ in kys:  # iterate over the database feature vectors
            # print(person1_feature_)

            siam = compare(person1_feature_, person2_feature)
            if siam > threshold and siam > max_threshold:
                max_threshold = siam  # the similarity beats the threshold and the current best, so keep it; only the single highest similarity matters
                max_threshold_feature = person1_feature_  # remember the corresponding feature vector

        if max_threshold > 0:
            cls = self.face_dict[max_threshold_feature]  # class name of the best-matching feature vector
            # print(max_threshold_feature)  # feature vector with the highest cosine similarity

            return cls, max_threshold.item()

        return '', 0.0  # default placeholder so the caller always gets a (name, score) pair
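The loop above compares the query embedding against every stored feature one at a time; the "compare in parallel" TODO and the commented-out torch.randn([len(kys), ...]) experiment in face_detector point at the obvious speed-up: stack the database embeddings into one tensor and make a single batched cosine-similarity call. A sketch of that idea (the function name best_match is mine, not from the project), assuming each stored feature is a [1, 512] tensor:

import torch
import torch.nn.functional as F

def best_match(query_feature, face_dict, threshold=0.7):
    # face_dict maps [1, 512] feature tensors to class names, as built in __init__.
    names = list(face_dict.values())
    bank = torch.cat(list(face_dict.keys()), dim=0)         # [N, 512]
    sims = F.cosine_similarity(query_feature, bank, dim=1)  # [N], broadcast over the bank
    best = int(torch.argmax(sims))
    if sims[best] > threshold:
        return names[best], sims[best].item()
    return '', 0.0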
Example #5
    def us(self, face_crop):  # face_crop: cropped face image to identify

        data_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\face_images\database_face_1.jpg"  # # 人脸数据库
        img = Image.open(data_path)
        img = img.convert("RGB")

        person1 = tf(img).to(self.device)
        person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
        # person1_feature = net.encode(person1[None, ...])
        # print(person1.shape)  # torch.Size([3, 112, 112])
        # print(torch.unsqueeze(person1, 0).shape)  # torch.Size([1, 3, 112, 112])
        # print(person1[None, ...].shape)  # torch.Size([1, 3, 112, 112])

        person2 = tf(face_crop).to(self.device)
        # person2 = tf(Image.open("./face_images/recog_face_1.jpg")).to(self.device)  # 需要辨认的人脸图片
        person2_feature = self.net.encode(person2[None, ...])

        siam = compare(person1_feature, person2_feature)
        print("余弦相似度值:", siam.item())  # 余弦相似度 tensor([[0.9988]])
        # x = "周杰伦" if round(siam.item()) == 1 else "其他人"
        # x = "迪丽热巴" if round(siam.item()) == 1 else "其他人"
        x = "Liu Hui" if siam.item() >= 0.8 else "other people"

        return x
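The `tf` transform used throughout these snippets is also defined outside of them; the shape comments (torch.Size([3, 112, 112]) for a single image) imply a resize-to-112x112 plus ToTensor pipeline. A minimal sketch of an equivalent transform; the original may also include a Normalize step.

from torchvision import transforms

# Assumed preprocessing: produces the 3x112x112 tensors seen in the shape comments above.
tf = transforms.Compose([
    transforms.Resize((112, 112)),
    transforms.ToTensor(),
])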
Example #6
    def face_detector(self, img):
        if torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')

        max_threshold = 0
        threshold = 0.7
        max_threshold_feature = 0
        person1 = tf(img).to(device)
        person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
        kys = self.face_dict.keys()
        kys = list(kys)

        # print(kys[0].shape)
        # a = torch.randn([len(kys),kys[0].shape[0],kys[0].shape[1]])
        # print(a.shape)
        # print(a[0].shape)
        # exit()

        for person_feature in kys:  # feature vectors loaded at start-up
            # print(person_feature.shape)
            siam = compare(person1_feature, person_feature)  # compare the cropped face with the database face
            # print(self.face_dict[person_feature], siam)
            # print("cosine similarity", siam)
            if siam > threshold and siam > max_threshold:
                max_threshold = siam  # the similarity beats the threshold and the current best, so keep it; only the single highest similarity matters
                max_threshold_feature = person_feature  # remember the corresponding feature vector

        # print('---------- divider ----------')
        if max_threshold > 0:
            # print(max_threshold_feature)
            name = self.face_dict[max_threshold_feature]  # class name of the best-matching feature vector
            # print(max_threshold_feature)  # feature vector with the highest cosine similarity

            return name, max_threshold.item()

        return '', 0.0
    def __init__(self):
        path = r"Contrast_data"
        # self.save_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\params\1.pth"
        net_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\params\1.pth"
        if torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        self.net = FaceNet().to(device)
        self.net.load_state_dict(torch.load(net_path))
        self.net.eval()
        self.face_dict = {}

        for face_dir in os.listdir(path):
            for face_filename in os.listdir(os.path.join(path, face_dir)):
                person_path = os.path.join(path, face_dir, face_filename)
                img = Image.open(person_path)
                img = img.convert("RGB")
                img = img.resize((112, 112))
                person1 = tf(img).to(device)
                person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
                self.face_dict[person1_feature] = face_dir  # feature vector as key, class name as value
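Example #8 below loads the face database from list_data.npz via npz2list instead of re-encoding every image at start-up. One way such a pack could be written from [feature, class] pairs like the self.lists3 list built in Example #9's constructor; the helper name save_face_pack and the key names features/labels are mine, not from the project.

import numpy as np

def save_face_pack(pairs, out_path="list_data"):
    # pairs: list of [feature_tensor of shape [1, 512], class_name], e.g. self.lists3.
    # Saving features and labels as separate arrays avoids an object-dtype array
    # and lets the pack be reloaded without re-running the encoder.
    feats = np.stack([p[0].detach().cpu().numpy().squeeze(0) for p in pairs])  # [N, 512]
    labels = np.array([str(p[1]) for p in pairs])
    np.savez(out_path, features=feats, labels=labels)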
Example #8
    def face_detector(self, img):
        if torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')

        max_threshold = 0
        threshold = 0.7
        max_threshold_feature = 0
        name = 0
        person1 = tf(img).to(device)  # face to be recognized
        person1_feature = self.net.encode(torch.unsqueeze(person1, 0))

        # load the face-feature pack saved with numpy
        np_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\list_data.npz"
        list_o = npz2list(np_path)  # load the face database saved with numpy
        # print(np.shape(list_o))
        for x in list_o:  # iterate over the database face features
            # print(x)
            siam = compare(x[0], person1_feature)  # compare the database face with the face to identify
            # print("cosine similarity:", max(siam.item(), 0))  # e.g. tensor([[0.9988]])

            if siam > threshold and siam > max_threshold:
                max_threshold = siam  # the similarity beats the threshold and the current best, so keep it; only the single highest similarity matters
                max_threshold_feature = x[0]
                name = x[1]

        # print('---------- divider ----------')
        # if max_threshold > 0:
        #     print(max_threshold_feature)
        #     name = self.face_dict[max_threshold_feature]  # class name of the best-matching feature vector
        # print(max_threshold_feature)  # feature vector with the highest cosine similarity

        # print(name)
        # print(max_threshold_feature)

        return name, max_threshold_feature
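npz2list is another project helper that is not shown here; it turns the saved pack back into [feature, class] pairs so that x[0] is a [1, 512] tensor and x[1] is the class name. A sketch matching the hypothetical save format above; the original format may differ (the commented-out np.savez('list_data', lists3) in Example #9 hints at a single object array instead).

import numpy as np
import torch

def npz2list(np_path):
    # Assumed counterpart to the save sketch: returns a list of
    # [feature_tensor of shape [1, 512], class_name] pairs, matching how
    # x[0] / x[1] are used in face_detector above.
    data = np.load(np_path, allow_pickle=True)
    feats = torch.from_numpy(data["features"]).float()
    labels = data["labels"]
    return [[feats[i].unsqueeze(0), str(labels[i])] for i in range(len(labels))]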
Example #9
    def __init__(self):
        path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\Contrast_data"
        # self.save_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\params\1.pth"
        net_path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\params\1.pth"
        if torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        self.net = FaceNet().to(device)
        self.net.load_state_dict(torch.load(net_path))
        self.net.eval()
        self.face_dict = {}
        self.lists3 = []

        for face_dir in os.listdir(path):
            for face_filename in os.listdir(os.path.join(path, face_dir)):
                img = Image.open(os.path.join(path, face_dir, face_filename))

                img = trans_square(img)
                person1 = tf(img).to(device)
                person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
                self.face_dict[person1_feature] = face_dir  # feature vector as key, class name as value
                # print(self.face_dict[person1_feature])  # class name for this feature vector, e.g. 0
                self.lists3.append([person1_feature, face_dir])  # keep the feature vectors so they can be saved to disk
    def us(self, face_crop):  # face_crop: cropped face image to identify
        # person1_feature = net.encode(person1[None, ...])
        # print(person1.shape)  # torch.Size([3, 112, 112])
        # print(torch.unsqueeze(person1, 0).shape)  # torch.Size([1, 3, 112, 112])
        # print(person1[None, ...].shape)  # torch.Size([1, 3, 112, 112])
        start = time.time()
        # print(np.shape(face_crop))  # (342, 258, 3)
        face_crop = trans_square(face_crop)
        # print(np.shape(face_crop))  # (342, 342, 3)
        # pad the cropped face to a square; the transform then resizes it to 112x112
        person2 = tf(face_crop).to(self.device)
        # print(np.shape(face_crop))  # (342, 342, 3)

        # person2 = tf(Image.open("Contrast_data/1/pic1_0.jpg")).to(self.device)  # 需要辨认的人脸图片
        person2_feature = self.net.encode(person2[None, ...])

        # data_path = r"D:\PycharmProjects(2)\arcloss-pytorch\test_img"  # # 人脸数据库
        main_dir = r"D:\PycharmProjects\MTCNN_data\Rocog_face\Contrast_data"
        dicts = {"0": "周杰伦", "1": "迪丽热巴", "2": "黄晓明", "3": "刘辉", "4": "吴晓斌"}

        lists = []
        lists2 = []
        lists3 = []
        # for face_dir in os.listdir(main_dir):
        #     for face_filename in os.listdir(os.path.join(main_dir, face_dir)):
        #         img = Image.open(os.path.join(main_dir, face_dir, face_filename))
        #         img = trans_square(img)
        #
        #         # pad the image to a square; the transform then resizes it to 112x112
        #         person1 = tf(img).to(self.device)
        #         person1_feature = self.net.encode(torch.unsqueeze(person1, 0))
        #         lists3.append([person1_feature, face_dir])

        # TODO: compare in parallel (or use an index/lookup) to speed this up
        path = r"D:\PycharmProjects\MTCNN_data\Rocog_face\list_data.npz"
        list_o = npz2list(path)  # load the face database saved with numpy
        # print(np.shape(list_o))
        for x in list_o:  # iterate over the database face features
            # print(x)
            # print(x[1])  # 0
            # print(x[0].shape)  # torch.Size([1, 512])
            siam = compare(x[0], person2_feature)  # compare the database face with the face to identify
            print("cosine similarity:", max(siam.item(), 0))  # e.g. tensor([[0.9988]])
            # x = "周杰伦" if round(siam.item()) == 1 else "other person"
            # x = "迪丽热巴" if round(siam.item()) == 1 else "other person"
            # print(face_filename)

            # font = ImageFont.truetype("simhei.ttf", 20)

            if siam.item() > 0.8:  # keep only faces whose cosine similarity clears the threshold; everything else is skipped
                # print(x[1])  # class folder
                value = dicts[str(x[1])]
                lists.append(value)
                # print(lists)  # ['黄晓明', '黄晓明', '黄晓明', '黄晓明', '刘辉', '刘辉']
                # print(value)  # name taken from the dictionary
                # print("*" * 100)

        end = time.time()
        print("对比完所有图片所用时间:", end - start)  # 0.75s
        # np.savez('list_data', lists3)
        # print(lists)  # print the list after each class of images has been compared
        # print(lists3)
        return lists
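This version returns one entry per database image that clears the 0.8 threshold (e.g. ['黄晓明', '黄晓明', '刘辉']), so the caller still has to pick a single name. A simple follow-up, not part of the original code, is a majority vote over that list:

from collections import Counter

def vote(labels, default="目标未识别"):
    # Pick the most frequent matched name; fall back to the "target not
    # recognized" label used in the dictionaries above when nothing matched.
    if not labels:
        return default
    return Counter(labels).most_common(1)[0][0]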