Example #1
import cv2
import numpy as np
import torch
from torchvision.transforms.functional import normalize

from facexlib.detection import init_detection_model
from facexlib.headpose import init_headpose_model
from facexlib.utils.misc import img2tensor
from facexlib.visualization import visualize_headpose


def main(args):
    # initialize model
    det_net = init_detection_model(args.detection_model_name, half=args.half)
    headpose_net = init_headpose_model(args.headpose_model_name,
                                       half=args.half)

    img = cv2.imread(args.img_path)
    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
        # x0, y0, x1, y1, confidence_score, five points (x, y)
        bbox = list(map(int, bboxes[0]))
        # crop face region
        thld = 10
        h, w, _ = img.shape
        top = max(bbox[1] - thld, 0)
        bottom = min(bbox[3] + thld, h)
        left = max(bbox[0] - thld, 0)
        right = min(bbox[2] + thld, w)

        det_face = img[top:bottom, left:right, :].astype(np.float32) / 255.

        # resize
        det_face = cv2.resize(det_face, (224, 224),
                              interpolation=cv2.INTER_LINEAR)
        det_face = img2tensor(np.copy(det_face), bgr2rgb=False)

        # normalize
        normalize(det_face, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                  inplace=True)
        det_face = det_face.unsqueeze(0).cuda()

        yaw, pitch, roll = headpose_net(det_face)
        visualize_headpose(img, yaw, pitch, roll, args.save_path)
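The snippet above expects an argparse namespace. A minimal driver sketch, with argument names inferred from the attributes `main` reads; the default model names are assumptions, not confirmed by the excerpt:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_path', type=str, default='assets/test.jpg')
    parser.add_argument('--save_path', type=str, default='test_headpose.png')
    # model names below are assumed defaults, not taken from the excerpt
    parser.add_argument('--detection_model_name', type=str, default='retinaface_resnet50')
    parser.add_argument('--headpose_model_name', type=str, default='hopenet')
    parser.add_argument('--half', action='store_true', help='use fp16 inference')
    args = parser.parse_args()
    main(args)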
Example #2
import cv2
import torch

from facexlib.detection import init_detection_model
from facexlib.visualization import visualize_detection


def main(args):
    # initialize model
    det_net = init_detection_model(args.model_name, half=args.half)

    img = cv2.imread(args.img_path)
    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
        # x0, y0, x1, y1, confidence_score, five points (x, y)
        print(bboxes)
        visualize_detection(img, bboxes, args.save_path)
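Each row returned by detect_faces packs 15 values in the layout described by the comment above. A small helper to unpack one row, assuming the retinaface-style (x, y) pair layout shown in Example #6's else branch (the helper name is illustrative, not part of facexlib):

def unpack_detection(b):
    # box corners, confidence score, then five (x, y) landmark pairs
    box = tuple(b[0:4])  # x0, y0, x1, y1
    score = b[4]
    landmarks = [(b[i], b[i + 1]) for i in range(5, 15, 2)]
    return box, score, landmarks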
Example #3
    def __init__(self,
                 upscale_factor,
                 face_size=512,
                 crop_ratio=(1, 1),
                 det_model='retinaface_resnet50',
                 save_ext='png',
                 template_3points=False,
                 pad_blur=False,
                 use_parse=False,
                 device=None):
        self.template_3points = template_3points  # improve robustness
        self.upscale_factor = upscale_factor
        # the cropped face ratio based on the square face
        self.crop_ratio = crop_ratio  # (h, w)
        assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ratio only supports >=1'
        self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))

        if self.template_3points:
            self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
        else:
            # standard 5 landmarks for FFHQ faces with 512 x 512
            self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
                                           [201.26117, 371.41043], [313.08905, 371.15118]])
        self.face_template = self.face_template * (face_size / 512.0)
        if self.crop_ratio[0] > 1:
            self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
        if self.crop_ratio[1] > 1:
            self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
        self.save_ext = save_ext
        self.pad_blur = pad_blur
        if self.pad_blur:
            self.template_3points = False

        self.all_landmarks_5 = []
        self.det_faces = []
        self.affine_matrices = []
        self.inverse_affine_matrices = []
        self.cropped_faces = []
        self.restored_faces = []
        self.pad_input_imgs = []

        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device

        # init face detection model
        self.face_det = init_detection_model(det_model, half=False, device=self.device)

        # init face parsing model
        self.use_parse = use_parse
        self.face_parse = init_parsing_model(model_name='parsenet', device=self.device)
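This constructor belongs to the face-restoration helper that Example #5 imports from facexlib.utils.face_restoration_helper. A minimal usage sketch, assuming the class is FaceRestoreHelper:

from facexlib.utils.face_restoration_helper import FaceRestoreHelper

helper = FaceRestoreHelper(upscale_factor=2, face_size=512, crop_ratio=(1, 1),
                           det_model='retinaface_resnet50', save_ext='png')
# with face_size=512 and crop_ratio=(1, 2), the crop would become 1024 wide by
# 512 high, and every template x-coordinate would shift right by
# 512 * (2 - 1) / 2 = 256 so the face stays centered in the wider crop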
Example #4
import cv2
import numpy as np
import os
import torch
import torchvision
from PIL import Image

from facexlib.assessment import init_assessment_model
from facexlib.detection import init_detection_model


def main(args):
    """Scripts about evaluating face quality.
        Two steps:
        1) detect the face region and crop the face
        2) evaluate the face quality by hyperIQA
    """
    # initialize model
    det_net = init_detection_model(args.detection_model_name, half=False)
    assess_net = init_assessment_model(args.assess_model_name, half=False)

    # face transformation specified in the original hyperIQA
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.Resize((512, 384)),
        torchvision.transforms.RandomCrop(size=224),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    ])

    img = cv2.imread(args.img_path)
    img_name = os.path.basename(args.img_path)
    basename, _ = os.path.splitext(img_name)
    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
        box = list(map(int, bboxes[0]))
        pred_scores = []
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        for _ in range(10):  # average the score over 10 random crops
            detect_face = img[box[1]:box[3], box[0]:box[2], :]
            detect_face = Image.fromarray(detect_face)

            detect_face = transforms(detect_face)
            detect_face = detect_face.unsqueeze(0).cuda()

            pred = assess_net(detect_face)
            pred_scores.append(float(pred.item()))
        score = np.mean(pred_scores)
        # quality score ranges from 0-100, a higher score indicates a better quality
        print(f'{basename} {score:.4f}')
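As in Example #1, an argparse driver is implied. A sketch with argument names taken from the attributes used above; the default assess model name is an assumption:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_path', type=str, default='assets/test.jpg')
    parser.add_argument('--detection_model_name', type=str, default='retinaface_resnet50')
    parser.add_argument('--assess_model_name', type=str, default='hypernet')  # assumed name
    args = parser.parse_args()
    main(args)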
Example #5
    # float32, [0, 255]
    return img


if __name__ == '__main__':
    import cv2
    import os
    import torch

    from facexlib.detection import init_detection_model
    from facexlib.utils.face_restoration_helper import get_largest_face
    from facexlib.visualization import visualize_detection

    img_path = '/home/wxt/datasets/ffhq/ffhq_wild/00009.png'
    img_name = os.path.splitext(os.path.basename(img_path))[0]

    # initialize model
    det_net = init_detection_model('retinaface_resnet50', half=False)
    img_ori = cv2.imread(img_path)
    h, w = img_ori.shape[0:2]
    # if larger than 800, scale it down for detection
    scale = max(h / 800, w / 800)
    if scale > 1:
        img = cv2.resize(img_ori, (int(w / scale), int(h / scale)),
                         interpolation=cv2.INTER_LINEAR)
    else:
        img = img_ori

    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
    if scale > 1:
        # map the boxes and landmarks back to the original resolution;
        # the confidence score at index 4 is left untouched
        bboxes[:, 0:4] *= scale
        bboxes[:, 5:] *= scale
    bboxes = get_largest_face(bboxes, h, w)[0]
    visualize_detection(img_ori, [bboxes], f'tmp/{img_name}_det.png')
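The downscale-then-rescale pattern above is worth factoring into a helper. A sketch under the same assumptions (detect_faces returns rows of 15 values with the score at index 4); the helper name is illustrative:

def detect_with_max_side(det_net, img, max_side=800, conf_threshold=0.97):
    # run detection on a downscaled copy, then map coordinates back
    h, w = img.shape[0:2]
    scale = max(h / max_side, w / max_side)
    if scale > 1:
        img = cv2.resize(img, (int(w / scale), int(h / scale)),
                         interpolation=cv2.INTER_LINEAR)
    with torch.no_grad():
        bboxes = det_net.detect_faces(img, conf_threshold)
    if scale > 1:
        bboxes[:, 0:4] *= scale  # box corners
        bboxes[:, 5:] *= scale   # landmark points (score at index 4 untouched)
    return bboxes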
Example #6
            # layout with all x coords at b[5:10] and all y coords at b[10:15]
            cv2.circle(image, (b[5], b[10]), 1, (0, 0, 255), 4)
            cv2.circle(image, (b[6], b[11]), 1, (0, 255, 255), 4)
            cv2.circle(image, (b[7], b[12]), 1, (255, 0, 255), 4)
            cv2.circle(image, (b[8], b[13]), 1, (0, 255, 0), 4)
            cv2.circle(image, (b[9], b[14]), 1, (255, 0, 0), 4)
        else:  # retinaface, centerface
            cv2.circle(image, (b[5], b[6]), 1, (0, 0, 255), 4)
            cv2.circle(image, (b[7], b[8]), 1, (0, 255, 255), 4)
            cv2.circle(image, (b[9], b[10]), 1, (255, 0, 255), 4)
            cv2.circle(image, (b[11], b[12]), 1, (0, 255, 0), 4)
            cv2.circle(image, (b[13], b[14]), 1, (255, 0, 0), 4)
    # save image
    cv2.imwrite(save_path, image)


det_net = init_detection_model('retinaface_resnet50')
half = False

det_net.cuda().eval()
if half:
    det_net = det_net.half()

img_list = sorted(glob.glob('../../BasicSR-private/datasets/ffhq/ffhq_512/*'))


def get_center_landmark(landmarks, center):
    # return the detection whose box center is closest to `center`
    # (the tail of this function was truncated; the selection below is a reconstruction)
    center = np.array(center)
    center_dist = []
    for landmark in landmarks:
        landmark_center = np.array([(landmark[0] + landmark[2]) / 2,
                                    (landmark[1] + landmark[3]) / 2])
        center_dist.append(np.linalg.norm(landmark_center - center))
    return landmarks[center_dist.index(min(center_dist))]
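Typical use is to keep only the detection nearest the frame center when several faces are found. A tiny self-contained check with fake boxes and an assumed 512 x 512 frame:

import numpy as np

dets = [np.array([0, 0, 100, 100]), np.array([200, 200, 320, 320])]
# the second box's center (260, 260) is closest to the image center (256, 256)
picked = get_center_landmark(dets, (256, 256))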
Example #7
import cv2
import glob
import numpy as np
import os
import torch
from tqdm import tqdm

from facexlib.detection import init_detection_model
# the SORT tracker import is not shown in the original excerpt; this path is an assumption
from facexlib.tracking.sort import SORT


def main(args):
    detect_interval = args.detect_interval
    margin = args.margin
    face_score_threshold = args.face_score_threshold

    save_frame = True
    if save_frame:
        colors = np.random.rand(32, 3)

    # init detection model and tracker
    det_net = init_detection_model('retinaface_resnet50', half=False)
    tracker = SORT(max_age=1, min_hits=2, iou_threshold=0.2)
    print('Start tracking...')

    # track over all frames
    frame_paths = sorted(glob.glob(os.path.join(args.input_folder, '*.jpg')))
    pbar = tqdm(total=len(frame_paths), unit='frames', desc='Extract')
    for idx, path in enumerate(frame_paths):
        img_basename = os.path.basename(path)
        frame = cv2.imread(path)
        img_size = frame.shape[0:2]

        # detect face bounding boxes
        with torch.no_grad():
            bboxes = det_net.detect_faces(frame, 0.97)

        additional_attr = []
        face_list = []

        for bbox in bboxes:
            score = bbox[4]
            if score > face_score_threshold:
                bbox = bbox[0:5]
                det = bbox[0:4]

                # face rectangle
                det[0] = np.maximum(det[0] - margin, 0)
                det[1] = np.maximum(det[1] - margin, 0)
                det[2] = np.minimum(det[2] + margin, img_size[1])
                det[3] = np.minimum(det[3] + margin, img_size[0])
                face_list.append(bbox)
            additional_attr.append([score])
        trackers = tracker.update(np.array(face_list), img_size, additional_attr, detect_interval)

        pbar.update(1)
        pbar.set_description(f'{idx}: detect {len(bboxes)} faces in {img_basename}')

        # save frame
        if save_frame:
            for d in trackers:
                d = d.astype(np.int32)
                cv2.rectangle(frame, (d[0], d[1]), (d[2], d[3]), colors[d[4] % 32, :] * 255, 3)
                if len(face_list) != 0:
                    cv2.putText(frame, 'ID : %d  DETECT' % (d[4]), (d[0] - 10, d[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.75, colors[d[4] % 32, :] * 255, 2)
                    cv2.putText(frame, 'DETECTOR', (5, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (1, 1, 1), 2)
                else:
                    cv2.putText(frame, 'ID : %d' % (d[4]), (d[0] - 10, d[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                colors[d[4] % 32, :] * 255, 2)
            save_path = os.path.join(args.save_folder, img_basename)
            cv2.imwrite(save_path, frame)
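A sketch of the argparse driver this script implies, with argument names taken from the attributes main reads and purely illustrative defaults:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_folder', type=str, default='frames')
    parser.add_argument('--save_folder', type=str, default='tracked_frames')
    parser.add_argument('--detect_interval', type=int, default=1)
    parser.add_argument('--margin', type=int, default=20)
    parser.add_argument('--face_score_threshold', type=float, default=0.85)
    args = parser.parse_args()
    os.makedirs(args.save_folder, exist_ok=True)
    main(args)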