Code example #1
File: face_model.py  Project: JiaHeng-DLUT/IJB-C
    def __init__(self, args):
        self.args = args
        ctx = mx.gpu(args.gpu)
        _vec = args.image_size.split(',')
        assert len(_vec) == 2
        image_size = (int(_vec[0]), int(_vec[1]))
        self.model = None
        self.ga_model = None
        if len(args.model) > 0:
            self.model = get_model(ctx, image_size, args.model, 'fc1')
        if len(args.ga_model) > 0:
            self.ga_model = get_model(ctx, image_size, args.ga_model, 'fc1')

        self.threshold = args.threshold
        self.det_minsize = 50
        self.det_threshold = [0.6, 0.7, 0.8]
        #self.det_factor = 0.9
        self.image_size = image_size
        '''
    mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
    if args.det==0:
      detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=self.det_threshold)
    else:
      detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=[0.0,0.0,0.2])
    self.detector = detector
    '''
        self.detector = FaceDetector()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Code example #2
 def __init__(self, model_path=None):
     """
     :param model_path: XXXX.pth
     """
     self.detector = FaceDetector()
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     self.mobilenet = MobileNetV2(classes=2)
     if model_path:
         self.mobilenet.load_state_dict(
             torch.load(model_path, map_location=device))
Code example #3
File: FaceRec-checkpoint.py  Project: Hhhana/mask
 def __init__(self, model_path=None):  # acts as the constructor
     """
     :param model_path: XXXX.pth
     """
     self.detector = FaceDetector()
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     self.mobilenet = MobileNetV1(classes=2)  # build a two-class (mask / no_mask) network
     if model_path:  # if a weights file is given, load the network parameters
         self.mobilenet.load_state_dict(
             torch.load(model_path, map_location=device))  # load the saved state dict
Code example #4
File: FaceRec-checkpoint.py  Project: Hhhana/mask
class Recognition(object):
    classes = ["mask", "no_mask"]

    # def __init__(self, mobilenet_path="./results/test.pth"):
    def __init__(self, model_path=None):  # acts as the constructor
        """
        :param model_path: XXXX.pth
        """
        self.detector = FaceDetector()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.mobilenet = MobileNetV1(classes=2)  # build a two-class (mask / no_mask) network
        if model_path:  # if a weights file is given, load the network parameters
            self.mobilenet.load_state_dict(
                torch.load(model_path, map_location=device))  # load the saved state dict

    def face_recognize(self, image):
        # draw the face bounding boxes and return the annotated image
        drawn_image = self.detector.draw_bboxes(image)
        return drawn_image

    '''
    # check for masks on the validation set
    def is_mask(self):
        self.mobilenet.eval()
        with torch.no_grad():
            predict_label = self.mobilenet(face).cpu().data.numpy()
        current_class = self.classes[np.argmax(predict_label).item()]
    '''

    def mask_recognize(self, image):
        b_boxes, landmarks = self.detector.detect(image)
        detect_face_img = self.detector.draw_bboxes(image)
        face_num = len(b_boxes)
        mask_num = 0
        for box in b_boxes:
            face = image.crop(tuple(box[:4]))
            face = np.array(face)
            face = transforms.ToTensor()(face).unsqueeze(0)
            self.mobilenet.eval()
            with torch.no_grad():
                predict_label = self.mobilenet(face).cpu().data.numpy()
            current_class = self.classes[np.argmax(predict_label).item()]
            draw = ImageDraw.Draw(detect_face_img)
            if current_class == "mask":
                mask_num += 1
                # font = ImageFont.truetype("consola.ttf", 5, encoding="unic")  # set the font
                draw.text((200, 50), u'yes', 'fuchsia')
            else:
                # font = ImageFont.truetype("consola.ttf", 5, encoding="unic")  # set the font
                draw.text((200, 50), u'no', 'fuchsia')

        return detect_face_img, face_num, mask_num
Code example #5
File: face_model.py  Project: JiaHeng-DLUT/IJB-C
class FaceModel:
    def __init__(self, args):
        self.args = args
        ctx = mx.gpu(args.gpu)
        _vec = args.image_size.split(',')
        assert len(_vec) == 2
        image_size = (int(_vec[0]), int(_vec[1]))
        self.model = None
        self.ga_model = None
        if len(args.model) > 0:
            self.model = get_model(ctx, image_size, args.model, 'fc1')
        if len(args.ga_model) > 0:
            self.ga_model = get_model(ctx, image_size, args.ga_model, 'fc1')

        self.threshold = args.threshold
        self.det_minsize = 50
        self.det_threshold = [0.6, 0.7, 0.8]
        #self.det_factor = 0.9
        self.image_size = image_size
        '''
    mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
    if args.det==0:
      detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=self.det_threshold)
    else:
      detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=[0.0,0.0,0.2])
    self.detector = detector
    '''
        self.detector = FaceDetector()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def get_input(self, face_img):
        # ret = self.detector.detect_face(face_img, det_type = self.args.det)
        ret = self.detector.detect(
            Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)))
        if ret is None:
            return None
        bbox, points = ret
        if len(bbox) == 0:
            return None
        bbox = bbox[0, 0:4]
        points = points[0, :].reshape((2, 5)).T
        # print(bbox)
        # print(points)
        # print(type(face_img))
        # cv2.imshow("", new_img)
        # cv2.waitKey(0)
        nimg = face_preprocess.preprocess(face_img,
                                          bbox,
                                          points,
                                          image_size='112,112')
        nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
        aligned = np.transpose(nimg, (2, 0, 1))
        return aligned

    def get_feature(self, aligned):
        input_blob = np.expand_dims(aligned, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data, ))
        self.model.forward(db, is_train=False)
        embedding = self.model.get_outputs()[0].asnumpy()
        embedding = sklearn.preprocessing.normalize(embedding).flatten()
        return embedding

    def get_ga(self, aligned):
        input_blob = np.expand_dims(aligned, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data, ))
        self.ga_model.forward(db, is_train=False)
        ret = self.ga_model.get_outputs()[0].asnumpy()
        g = ret[:, 0:2].flatten()  # first two outputs are the gender logits
        gender = np.argmax(g)
        a = ret[:, 2:202].reshape((100, 2))  # remaining 200 outputs form 100 two-way age bins
        a = np.argmax(a, axis=1)
        age = int(sum(a))  # predicted age is the count of positive bins

        return gender, age
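
For orientation, here is a brief driver sketch for the FaceModel class above. It is not part of the original project: the args fields, the 'prefix,epoch' model string, and the test image path are placeholder assumptions, shown only to illustrate how get_input and get_feature fit together.

import argparse
import cv2

# Hypothetical driver; the model string and image path below are placeholders.
args = argparse.Namespace(
    gpu=0,                      # GPU id passed to mx.gpu()
    image_size='112,112',       # parsed into the (112, 112) input size
    model='./models/model,0',   # assumed 'prefix,epoch' string consumed by get_model
    ga_model='',                # empty string skips loading the gender/age model
    threshold=1.24,             # stored as self.threshold
)

face_model = FaceModel(args)
img = cv2.imread('./test1.jpg')          # BGR image, as get_input expects
aligned = face_model.get_input(img)      # detect, align, and transpose to CHW
if aligned is not None:
    embedding = face_model.get_feature(aligned)  # L2-normalised identity embedding
    print(embedding.shape)
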
Code example #6
            draw = ImageDraw.Draw(detect_face_img)
            if current_class == "mask":
                mask_num += 1
                # font = ImageFont.truetype("consola.ttf", 5, encoding="unic")  # set the font
                draw.text((box[0], box[1]), u'yes', 'fuchsia')
            #else:
            # font = ImageFont.truetype("consola.ttf", 5, encoding="unic")  # set the font
            #draw.text((200, 50), u'no', 'fuchsia')

        return detect_face_img, face_num, mask_num


"""
检测人脸,返回人脸位置坐标
其中b_boxes是一个n*5的列表、landmarks是一个n*10的列表,n表示检测出来的人脸个数,数据详细情况如下:
bbox:[左上角x坐标, 左上角y坐标, 右下角x坐标, 右下角y坐标, 检测评分]
landmark:[右眼x, 左眼x, 鼻子x, 右嘴角x, 左嘴角x, 右眼y, 左眼y, 鼻子y, 右嘴角y, 左嘴角y]
"""
if __name__ == "__main__":
    torch.set_num_threads(1)
    detector = FaceDetector()
    img = Image.open("./test1.jpg")
    recognize = Recognition()
    """---detect face--"""
    # draw = recognize.face_recognize(img)
    # plot_image(draw)
    """---crop face ---"""
    draw, all_num, mask_nums = recognize.mask_recognize(img)
    plot_image(draw)
    print("all_num:", all_num, "mask_num", mask_nums)