Example 1
import cv2
import numpy as np
from retinaface import RetinaFace


class Detector:
    def __init__(self, model_path):
        self.model = RetinaFace(model_path, 0, ctx_id=0)

    def get_face_patch(self, img):
        # Load the image first if a path was passed, so detection runs
        # on pixel data rather than on the path string.
        if isinstance(img, str):
            img = cv2.imread(img)
        bboxes, points = self.model.detect(img,
                                           0.7,
                                           scales=[1.0],
                                           do_flip=False)
        faces_ = []
        key_points_ = []
        bboxes_ = []
        for face, point in zip(bboxes, points):
            bbox = face[0:4].astype(int)  # np.int was removed in NumPy 1.24
            to_add_face = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
            to_add_face = cv2.cvtColor(to_add_face, cv2.COLOR_BGR2RGB)
            faces_.append(to_add_face)
            # Keep this face's own landmarks together with its score.
            key_points_.append((point.astype(int), face[4]))
            bboxes_.append(bbox)

        return faces_, np.array(key_points_), np.array(bboxes_)
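A minimal usage sketch for the class above, assuming the MXNet RetinaFace from the insightface repo; the model prefix and image path are placeholders:

detector = Detector('./models/retinaface-R50/R50')  # hypothetical prefix
faces, key_points, boxes = detector.get_face_patch('group_photo.jpg')
for i, face in enumerate(faces):
    print(i, face.shape)  # each entry is an RGB crop of one detected face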
Example 2
import os

import cv2
import numpy as np
import yaml
from tqdm import tqdm
from retinaface import RetinaFace


def main(config: str = "config.yml"):
    # make_dir_safe, Alinger and rework_landmarks are project-local helpers.
    with open(config) as f:
        cfg = yaml.safe_load(f)

    enter_folder = cfg["ent_folder"]
    out_folder = cfg["out_folder"]
    mode = cfg["mode"]
    make_dir_safe(out_folder)

    detector = RetinaFace(cfg["name_det"], cfg["epoch_det"], cfg["gpu"])
    align = Alinger(cfg["size"])

    for race in tqdm(os.listdir(enter_folder)):
        ent_race = os.path.join(enter_folder, race)
        out_race = os.path.join(out_folder, race)
        make_dir_safe(out_race)
        for gender in os.listdir(ent_race):
            ent_gender = os.path.join(ent_race, gender)
            out_gender = os.path.join(out_race, gender)
            make_dir_safe(out_gender)
            faces = os.listdir(ent_gender)
            faces.sort()
            k = 0
            out_faces = []
            imgs = []
            for face in tqdm(faces):
                k += 1
                ent_face = os.path.join(ent_gender, face)
                out_face = os.path.join(out_gender, face)
                img = cv2.imread(ent_face)
                try:
                    img = cv2.resize(img, (500, 500))
                    det_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    _, dets = detector.detect(det_img)
                except cv2.error:
                    dets = np.empty((0, 1))
                    print(ent_face)
                if dets.shape[0] != 1:
                    if mode == "unknown":
                        continue
                    imgs.append(0)
                else:
                    lands = rework_landmarks(dets[0])
                    rew_imgs = align([img], [lands])
                    if mode == "unknown":
                        cv2.imwrite(out_face, rew_imgs[0][0][0])
                        continue
                    imgs.append(rew_imgs[0][0][0])
                out_faces.append(out_face)
                # In "known" mode, images are written in consecutive pairs,
                # and only when both members produced exactly one detection.
                if k == 2 and mode == "known":
                    if not isinstance(imgs[0], int) and not isinstance(imgs[1], int):
                        for img, path in zip(imgs, out_faces):
                            cv2.imwrite(path, img)
                    imgs = []
                    out_faces = []
                    k = 0
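For reference, main() above expects config.yml to provide the keys it reads; a minimal sketch of the mapping yaml.safe_load would return, with placeholder values:

cfg = {
    "ent_folder": "raw_faces/",      # input tree: race/gender/image files
    "out_folder": "aligned_faces/",  # mirrored output tree
    "mode": "known",                 # "known" or "unknown"
    "name_det": "./models/R50",      # RetinaFace model prefix (placeholder)
    "epoch_det": 0,                  # checkpoint epoch
    "gpu": 0,                        # ctx_id for the detector
    "size": 112,                     # Alinger output size (assumed value)
}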
Example 3
    # Fragment from inside a per-image loop; image_path, face_detector,
    # utils and args are defined by the surrounding script.
    # Read original image
    img = cv2.imread(image_path)

    # Cover unrelated faces
    if "02/frame" in image_path or "04/frame" in image_path:
        img[100:180, :260] = [0, 0, 0]
    if "07/frame" in image_path:
        img[100:180, :100] = [0, 0, 0]
    if "09/frame" in image_path or "10/frame" in image_path:
        img[100:260, :150] = [0, 0, 0]

    # Retina Face
    scales = [1.0]
    flip = False
    faces, landmarks = face_detector.detect(img,
                                            0.8,
                                            scales=scales,
                                            do_flip=flip)

    if faces is None or faces.shape[0] == 0:
        continue

    # Label
    label = {}

    # Crop face
    bbox = faces[0]
    bbox = (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
    bbox_loosen, scale_x, scale_y = utils.get_loosen_bbox(
        bbox, img, (args.input_size, args.input_size))
    crop = img[bbox_loosen[1]:bbox_loosen[3], bbox_loosen[0]:bbox_loosen[2]]
    crop = cv2.resize(crop, (args.input_size, args.input_size))
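utils.get_loosen_bbox is project-local and its implementation isn't shown; a plausible stand-in, assuming it widens the box by a margin, clamps it to the image, and returns the x/y scale factors toward the target size:

def get_loosen_bbox(bbox, img, target_size, loosen=0.3):
    # Hypothetical sketch, not the project's actual helper.
    x1, y1, x2, y2 = bbox
    mx = int((x2 - x1) * loosen / 2)
    my = int((y2 - y1) * loosen / 2)
    lx1, ly1 = max(x1 - mx, 0), max(y1 - my, 0)
    lx2 = min(x2 + mx, img.shape[1])
    ly2 = min(y2 + my, img.shape[0])
    scale_x = target_size[0] / (lx2 - lx1)
    scale_y = target_size[1] / (ly2 - ly1)
    return (lx1, ly1, lx2, ly2), scale_x, scale_y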
Example 4
thresh = 0.8

sum_3 = 0
sum_4 = 0
sum_34 = 0

cap = cv2.VideoCapture(0)
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Read frames from the webcam
while True:
    ret, img = cap.read()
    faces, landmarks = detector.detect(img,
                                       thresh,
                                       scales=[1.0],
                                       do_flip=True)

    for i in range(faces.shape[0]):
        box = faces[i].astype(int)
        color = (0, 0, 255)

        # Mask detection based on landmarks
        if landmarks is not None:
            landmark5 = landmarks[i].astype(int)
            for l in range(landmark5.shape[0]):
                if l == 3:  # right
                    cv2.circle(img, (landmark5[l][0], landmark5[l][1]), 1,
                               (0, 0, 255), 1)
                    # NumPy images index as [row, col], i.e. [y, x]
                    rgb_3 = img[landmark5[l][1], landmark5[l][0]]
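Sampling a pixel at a landmark is easy to get wrong: landmarks are (x, y) while NumPy images index as [row, col]. A small hypothetical helper that also clamps points falling outside the frame:

def sample_at_landmark(img, x, y):
    # Return the BGR pixel at landmark (x, y), clamped to image bounds.
    h, w = img.shape[:2]
    return img[min(max(int(y), 0), h - 1), min(max(int(x), 0), w - 1)]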
Example 5
# Fragment: im_shape, target_size, max_size, count, thresh, img and
# detector are defined earlier in the surrounding script.
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
# Scale the shorter side up to target_size...
im_scale = float(target_size) / float(im_size_min)
# ...but prevent the bigger axis from exceeding max_size.
if np.round(im_scale * im_size_max) > max_size:
    im_scale = float(max_size) / float(im_size_max)

print('im_scale', im_scale)

scales = [im_scale]
flip = False

for c in range(count):
    faces, landmarks = detector.detect(img,
                                       thresh,
                                       scales=scales,
                                       do_flip=flip)
    print(c, faces.shape, landmarks.shape)

if faces is not None:
    print('found', faces.shape[0], 'faces')
    for i in range(faces.shape[0]):
        box = faces[i].astype(int)
        color = (0, 0, 255)
        cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
        if landmarks is not None:
            landmark5 = landmarks[i].astype(int)
            for l in range(landmark5.shape[0]):
                cv2.circle(img, (landmark5[l][0], landmark5[l][1]), 1,
                           color, 2)
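As a concrete check of the scaling logic above, take a hypothetical 720x1280 frame with target_size=1024 and max_size=1980 (the bounds used in insightface's RetinaFace test script):

im_size_min, im_size_max = 720, 1280
im_scale = 1024 / 720                 # ~1.422: short side scaled to 1024
print(round(im_scale * im_size_max))  # 1820, under max_size, so no cap applied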
Example 6
    # Fragment from inside a video-capture loop; cap, frames, last_time,
    # detector, thresh and im_scale are defined earlier in the script.
    ret, frame = cap.read()
    frames += 1
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    if frames % 30 == 0:
        # time.clock() was removed in Python 3.8; use perf_counter().
        print('FPS:', int(1 / (time.perf_counter() - last_time) * 30))
        last_time = time.perf_counter()

    if frames % 3 == 0:
        trackers = []
        texts = []

        detect_tick = time.time()
        bboxes, landmarks0 = detector.detect(frame,
                                             thresh,
                                             scales=[im_scale],
                                             do_flip=False)
        detect_tock = time.time()

        if len(bboxes) != 0:
            reco_tick = time.time()
            for bbox, landmarks in zip(bboxes, landmarks0):
                bbox = bbox.astype(np.int64)
                box = np.array([bbox[0], bbox[1], bbox[2], bbox[3]])

                # bbox = np.array([bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]])
                landmarks = np.array([
                    landmarks[0][0], landmarks[1][0], landmarks[2][0],
                    landmarks[3][0], landmarks[4][0], landmarks[0][1],
                    landmarks[1][1], landmarks[2][1], landmarks[3][1],
                    landmarks[4][1]
                ])  # reorder as [x1..x5, y1..y5]
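The manual reindexing above packs the five (x, y) landmark points as [x1..x5, y1..y5]. Assuming the per-face landmarks form a (5, 2) array, the same flattening is a one-liner:

import numpy as np

pts = np.array([[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]])  # five (x, y) points
print(pts.T.reshape(-1))  # [ 1  2  3  4  5  6  7  8  9 10]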
Example 7
import cv2
import numpy as np
from skimage import transform as trans
from retinaface import RetinaFace


class FaceDetector:
    def __init__(self, model_retina_path, gpu_id):
        self.model = RetinaFace(model_retina_path,
                                0,
                                ctx_id=gpu_id,
                                network='net3')

    def detect(self, img, scale_ratio=1.0):
        ret = self.model.detect(img, 0.5, scales=[scale_ratio], do_flip=False)
        if ret is None:
            return [], []
        bboxes, points = ret
        if len(bboxes) == 0:
            return [], []
        return np.asarray(bboxes), points

    def align(self, img, bbox=None, landmark=None, **kwargs):
        M = None
        image_size = []
        str_image_size = kwargs.get('image_size', '')
        if len(str_image_size) > 0:
            image_size = [int(x) for x in str_image_size.split(',')]
            if len(image_size) == 1:
                image_size = [image_size[0], image_size[0]]
            assert len(image_size) == 2
            assert image_size[0] == 112
            assert image_size[0] == 112 or image_size[1] == 96
        if landmark is not None:
            assert len(image_size) == 2
            # Canonical ArcFace five-point template for a 96x112 crop,
            # shifted 8px right for a 112x112 output.
            src = np.array(
                [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                 [33.5493, 92.3655], [62.7299, 92.2041]],
                dtype=np.float32)
            if image_size[1] == 112:
                src[:, 0] += 8.0
            dst = landmark.astype(np.float32)

            tform = trans.SimilarityTransform()
            tform.estimate(dst, src)
            M = tform.params[0:2, :]
            #M = cv2.estimateRigidTransform( dst.reshape(1,5,2), src.reshape(1,5,2), False)

        if M is None:
            if bbox is None:  #use center crop
                det = np.zeros(4, dtype=np.int32)
                det[0] = int(img.shape[1] * 0.0625)
                det[1] = int(img.shape[0] * 0.0625)
                det[2] = img.shape[1] - det[0]
                det[3] = img.shape[0] - det[1]
            else:
                det = bbox
            margin = kwargs.get('margin', 44)
            bb = np.zeros(4, dtype=np.int32)
            bb[0] = np.maximum(det[0] - margin // 2, 0)
            bb[1] = np.maximum(det[1] - margin // 2, 0)
            bb[2] = np.minimum(det[2] + margin // 2, img.shape[1])
            bb[3] = np.minimum(det[3] + margin // 2, img.shape[0])
            ret = img[bb[1]:bb[3], bb[0]:bb[2], :]
            if len(image_size) > 0:
                ret = cv2.resize(ret, (image_size[1], image_size[0]))
            return ret
        else:  #do align using landmark
            assert len(image_size) == 2
            warped = cv2.warpAffine(img,
                                    M, (image_size[1], image_size[0]),
                                    borderValue=0.0)
            return warped
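A minimal usage sketch for the class above; the model prefix and image path are placeholders:

detector = FaceDetector('./models/retinaface-R50/R50', gpu_id=0)
img = cv2.imread('face.jpg')
bboxes, points = detector.detect(img)
if len(bboxes) > 0:
    aligned = detector.align(img, bbox=bboxes[0][:4],
                             landmark=points[0], image_size='112,112')
    print(aligned.shape)  # (112, 112, 3)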
Example 8
import pickle

import cv2
import matplotlib.pyplot as plt
import numpy as np
import requests
from retinaface import RetinaFace


class Model:
    def __init__(self, retina_path, request_add, det_threshold=0.8):
        self.detector = RetinaFace(retina_path, 0, ctx_id=0)
        self.request_add = request_add
        self.det_threshold = det_threshold

    def check_mask(self, encoded_image):
        # POST the pickled face crops to the mask-classification service.
        r = requests.post(self.request_add, data=encoded_image)
        return r.json()['result']

    def get_face_patch(self, img):
        bboxes, points = self.detector.detect(img,
                                              self.det_threshold,
                                              scales=[1.0],
                                              do_flip=False)
        faces_ = []
        key_points_ = []
        bboxes_ = []
        for face, point in zip(bboxes, points):
            bbox = face[0:4].astype(int)
            to_add_face = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
            # get_padded_image is a project-local helper; the crop is then
            # flipped BGR->RGB and scaled to [0, 1].
            to_add_face = get_padded_image(to_add_face)[..., ::-1] / 255.0
            faces_.append(to_add_face)
            # Keep this face's own landmarks together with its score.
            key_points_.append((point.astype(int), face[4]))
            bboxes_.append(bbox)

        return np.array(faces_), np.array(key_points_), np.array(bboxes_)
    
    def generate_output(self, read_path, write_path, type_='video'):
        if type_ == 'video':
            self.generate_video(read_path, write_path)
        else:
            self.generate_image(read_path, write_path)

    def generate_video(self, read_path, write_path):
        cap = cv2.VideoCapture(read_path)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        fps = cap.get(cv2.CAP_PROP_FPS)
        ret, fr = cap.read()
        org_h, org_w = fr.shape[:2]
        out = cv2.VideoWriter(write_path, fourcc, fps, (org_w, org_h))
        # Run inference on every frame and write it to the output video.
        while ret:
            fr = self._infer_on_frame(fr)
            out.write(fr)
            ret, fr = cap.read()
        out.release()
        cap.release()
        
    def generate_image(self, read_path, write_path):
        fr = cv2.imread(read_path)
        if fr is None:
            print('Invalid image:', read_path)
            return
        fr = self._infer_on_frame(fr)
        # plt.imsave expects RGB, so flip the BGR channels.
        plt.imsave(write_path, fr[:, :, ::-1])

    def _infer_on_frame(self, fr):
        faces, keypoints, bboxes = self.get_face_patch(fr)
        if len(faces) > 0:
            # Classify all crops in a single request to the mask service.
            encoded_arr = pickle.dumps(faces)
            output = self.check_mask(encoded_arr)

            for out, bbox in zip(output, bboxes):
                x1, y1, x2, y2 = bbox
                # Green box for a masked face, red otherwise.
                color = (0, 255, 0) if out == 'mask' else (0, 0, 255)
                fr = cv2.rectangle(fr.copy(), (x1, y1), (x2, y2), color, 2)
        return fr
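A quick usage sketch; the endpoint URL, model prefix and file paths are placeholders for whatever mask-classification service the class is pointed at:

model = Model('./models/retinaface-R50/R50',    # hypothetical model prefix
              'http://localhost:5000/predict')  # hypothetical endpoint
model.generate_output('input.mp4', 'output.avi', type_='video')
model.generate_output('photo.jpg', 'photo_out.jpg', type_='image')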
Example 9
# Fragment: the construction of `net` is truncated in the snippet, and
# config is loaded earlier in the script.
                         backbond=config["model"]["backbond"])
# Load model
net.load_weights(config["test"]["model_file"])

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Unable to connect to camera.")
    exit(-1)

face_detector = RetinaFace('./RetinaFace/retinaface-R50', 0, 0, 'net3')

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        faces, landmarks = face_detector.detect(frame,
                                                0.8,
                                                scales=[1.0],
                                                do_flip=False)

        if len(faces) > 0:

            face_crops = []
            face_boxes = []
            for i in range(len(faces)):
                bbox = faces[i]
                bbox = (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
                face_crop = utils.crop_face_loosely(
                    bbox, frame, (config["model"]["im_width"],
                                  config["model"]["im_height"]))
                face_crop = cv2.resize(face_crop,
                                       (config["model"]["im_width"],
                                        config["model"]["im_height"]))