Example No. 1
def detect_face(data):
    from retinaface.detector import detector
    from utils import align_face

    src_path = data['src_path']
    dst_path = data['dst_path']
    boxB = np.array(data['boxB'])

    img = cv.imread(src_path)
    if img is not None:
        img, ratio = resize(img)
        boxB = boxB * ratio

        try:
            bboxes, landmarks = detector.detect_faces(img)

            if len(bboxes) > 0:
                i = select_face(bboxes, boxB)
                bbox, landms = bboxes[i], landmarks[i]
                img = align_face(img, [landms])
                dirname = os.path.dirname(dst_path)
                os.makedirs(dirname, exist_ok=True)
                cv.imwrite(dst_path, img)
        except ValueError as err:
            print(err)
        except cv.error as err:
            print(err)

    return True
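Example No. 1 leans on module-level imports (os, cv2 as cv, numpy as np) and on helpers such as resize and select_face that the snippet does not show. A minimal sketch of select_face, assuming it returns the index of the detection that best overlaps the reference box boxB by IoU; the criterion is an assumption, not confirmed by the source:

import os

import cv2 as cv
import numpy as np


def select_face(bboxes, boxB):
    # Return the index of the detected box with the highest IoU against boxB.
    best_i, best_iou = 0, -1.0
    for i, box in enumerate(bboxes):
        # intersection of box (x1, y1, x2, y2, ...) with boxB
        ix1, iy1 = max(box[0], boxB[0]), max(box[1], boxB[1])
        ix2, iy2 = min(box[2], boxB[2]), min(box[3], boxB[3])
        inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
        union = ((box[2] - box[0]) * (box[3] - box[1])
                 + (boxB[2] - boxB[0]) * (boxB[3] - boxB[1]) - inter)
        iou = inter / union if union > 0 else 0.0
        if iou > best_iou:
            best_i, best_iou = i, iou
    return best_i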
Example No. 2
def detect_face(data):
    from retinaface.detector import detector
    from utils import align_face

    src_path = data['src_path']
    dst_path = data['dst_path']

    img_raw = cv.imread(src_path)
    if img_raw is not None:
        img = resize(img_raw)
        try:
            bboxes, landmarks = detector.detect_faces(img,
                                                      confidence_threshold=0.9)

            if len(bboxes) > 0:
                bbox, landms = bboxes[0], landmarks[0]
                img = align_face(img, [landms])
                dirname = os.path.dirname(dst_path)
                os.makedirs(dirname, exist_ok=True)
                cv.imwrite(dst_path, img)
                return True

        except ValueError as err:
            print(err)

        img = cv.resize(img, (im_size, im_size))
        cv.imwrite(dst_path, img)
        return False

    return False
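The two snippets disagree on resize: Example No. 1 unpacks (img, ratio) while Example No. 2 takes a single return value. A minimal sketch of the two-value variant, assuming it caps the longer side at a hypothetical max_size and reports the applied scale:

def resize(img, max_size=800):
    # Shrink img so its longer side is at most max_size; return image and scale ratio.
    h, w = img.shape[:2]
    ratio = 1.0
    if max(h, w) > max_size:
        ratio = max_size / max(h, w)
        img = cv.resize(img, (int(round(w * ratio)), int(round(h * ratio))))
    return img, ratio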
Example No. 3
def get_image(full_path, landmarks, transformer):
    img = align_face(full_path, landmarks)  # BGR
    img = img[..., ::-1]  # RGB
    img = Image.fromarray(img, 'RGB')  # RGB
    img = transformer(img)
    img = img.to(device)
    return img
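Examples No. 3 and No. 4 also assume module-level transformer and device objects. A plausible torchvision setup; the 112x112 input size and the ImageNet normalization constants are assumptions:

import torch
from torchvision import transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transformer = transforms.Compose([
    transforms.Resize((112, 112)),   # assumed input size of the embedding network
    transforms.ToTensor(),           # PIL image -> CHW float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])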
Example No. 4
def get_image(filename):
    _, _, landmarks = get_face_all_attributes(filename)
    img = align_face(filename, landmarks)
    img = transforms.ToPILImage()(img)
    img = transformer(img)
    img = img.to(device)
    return img
Example No. 5
def read_image(filename):
    img = cv2.imread(filename)
    rectangles = utils.detect_face(img)
    faces = []
    for rect in rectangles:
        landmarks = utils.align_face(img, rect)
        faces.append(((rect.left(), rect.top(), rect.right(),
                       rect.bottom()), landmarks))
    return img, faces
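utils.detect_face here returns objects with left()/top() accessors, which points at dlib rectangles. A plausible sketch using dlib's HOG detector; the actual utils module is not shown, so this is an assumption:

import cv2
import dlib

hog = dlib.get_frontal_face_detector()


def detect_face(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return hog(gray, 1)  # upsample once; returns dlib rectangles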
Example No. 6
def save_aligned(old_fn, new_fn):
    old_fn = os.path.join(extracted_folder, old_fn)
    _, _, landmarks = get_central_face_attributes(old_fn)

    unaligned_img = read_image_from_file(old_fn)
    img = align_face(unaligned_img, landmarks)  # BGR

    # img = align_face(old_fn, landmarks)
    new_fn = os.path.join('images', new_fn)
    cv.imwrite(new_fn, img)
Example No. 7
def crop_one_image(filepath, oldkey, newkey):
    new_fn = filepath.replace(oldkey, newkey)
    tardir = os.path.dirname(new_fn)
    if not os.path.isdir(tardir):
        os.makedirs(tardir)

    if not os.path.exists(new_fn):
        is_valid, bounding_boxes, landmarks = get_central_face_attributes(filepath)
        if is_valid:
            img = align_face(filepath, landmarks)
            cv.imwrite(new_fn, img)
Example No. 8
def detect_face(data):
    from utils import get_central_face_attributes, align_face
    src_path = data['src_path']
    dst_path = data['dst_path']
    with torch.no_grad():
        has_face, bboxes, landmarks = get_central_face_attributes(src_path)
        if has_face:
            img = align_face(src_path, landmarks)
            cv.imwrite(dst_path, img)

    return True
Example No. 9
def get_image(samples, transformer, file):
    filtered = [sample for sample in samples if file in sample['full_path'].replace('\\', '/')]
    assert (len(filtered) == 1), 'len(filtered): {} file:{}'.format(len(filtered), file)
    sample = filtered[0]
    full_path = sample['full_path']
    landmarks = sample['landmarks']
    img = align_face(full_path, landmarks)
    img = transforms.ToPILImage()(img)
    img = transformer(img)
    img = img.to(device)
    return img
Example No. 10
    def load_image(self, filename):
        img = cv2.imread(filename)
        rectangles = utils.detect_face(img)
        if not rectangles:
            return False
        rect = rectangles[0]
        landmarks = utils.align_face(img, rect)
        self.img_rect = (rect.left(), rect.top(), rect.width(), rect.height())
        self.clear_sequence()
        self.add_to_sequence(img, landmarks)
        return True
Example No. 11
def get_image(samples, file):
    filtered = [
        sample for sample in samples
        if file in sample['full_path'].replace('\\', '/')
    ]
    assert (len(filtered) == 1), 'len(filtered): {} file:{}'.format(
        len(filtered), file)
    sample = filtered[0]
    full_path = sample['full_path']
    landmarks = sample['landmarks']
    img = align_face(full_path, landmarks)  # BGR
    return img
Example No. 12
    def crop(self, folder):
        if os.path.isdir(folder):
            files = os.listdir(folder)
            if not os.path.exists(folder + '_crop'):
                os.makedirs(folder + '_crop')
        else:
            raise ValueError("Folder is not exist")

        for file in files:
            filepath = os.path.join(folder, file)
            new_fn = os.path.join(folder + '_crop', file)
            bounding_boxes, landmarks = get_central_face_attributes(filepath)
            img = align_face(filepath, landmarks)
            cv2.imwrite(new_fn, img)
Example No. 13
def detect_face(data):
    from utils import align_face

    src_path = data['src_path']
    dst_path = data['dst_path']
    with torch.no_grad():
        has_face, bboxes, landmarks = get_central_face_attributes(src_path)
        if has_face:
            img = align_face(src_path, landmarks)
            dirname = os.path.dirname(dst_path)
            os.makedirs(dirname, exist_ok=True)
            cv.imwrite(dst_path, img)

    return True
Example No. 14
def get_model_compatible_input(gray_frame, face):
    img_arr = utils.align_face(gray_frame, face, desiredLeftEye)
    img_arr = utils.preprocess_img(img_arr, resize=False)

    landmarks = shape_predictor(
        gray_frame,
        face,
    )
    roi1, roi2 = utils.extract_roi1_roi2(gray_frame, landmarks)
    roi1 = np.expand_dims(roi1, 0)
    roi2 = np.expand_dims(roi2, 0)
    roi1 = roi1 / 255.
    roi2 = roi2 / 255.

    return [img_arr, roi1, roi2]
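This snippet presumes a module-level dlib shape_predictor and a desiredLeftEye tuple. A typical setup; the landmark model file and the eye-position values are assumptions:

import dlib

# 68-point landmark model, distributed separately by dlib
shape_predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
# desired normalized (x, y) position of the left eye after alignment
desiredLeftEye = (0.35, 0.35)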
Example No. 15
def get_image(samples, file):
    filtered = [
        sample for sample in samples
        if file in sample['full_path'].replace('\\', '/')
    ]
    assert (len(filtered) == 1), 'len(filtered): {} file:{}'.format(
        len(filtered), file)
    sample = filtered[0]
    full_path = sample['full_path']
    landmarks = sample['landmarks']
    img = align_face(full_path, landmarks)  # BGR
    name_path = os.path.dirname(full_path)
    image_name = "aligned_" + os.path.basename(full_path)
    cv.imwrite(os.path.join(name_path, image_name), img)
    return img
Example No. 16
def get_image(samples, transformer, file):
    filtered = [sample for sample in samples if file in sample['full_path'].replace('\\', '/')]
    assert (len(filtered) == 1), 'len(filtered): {} file:{}'.format(len(filtered), file)
    sample = filtered[0]
    full_path = sample['full_path']
    landmarks = sample['landmarks']

    unaligned_img = read_image_from_file(full_path)
    img = align_face(unaligned_img, landmarks)  # BGR
    # img = blur_and_grayscale(img)
    # img = img[..., ::-1]  # RGB
    img = Image.fromarray(img, 'RGB')  # RGB
    img = transformer(img)
    img = img.to(device)
    return img
Example No. 17
def show_align():
    with open(pickle_file, 'rb') as file:
        data = pickle.load(file)

    samples = random.sample(data['samples'], 10)

    for i, sample in enumerate(samples):
        full_path = sample['full_path']
        landmarks = sample['landmarks']
        raw = cv.imread(full_path)
        raw = cv.resize(raw, (224, 224))
        img = align_face(full_path, landmarks)
        filename = 'images/{}_raw.jpg'.format(i)
        cv.imwrite(filename, raw)
        filename = 'images/{}_img.jpg'.format(i)
        cv.imwrite(filename, img)
Example No. 18
    def __getitem__(self, i):
        sample = self.samples[i]
        full_path = sample['full_path']
        landmarks = sample['landmarks']

        try:
            img = align_face(full_path, landmarks)
        except Exception:
            print('full_path: ' + full_path)
            raise

        img = transforms.ToPILImage()(img)
        img = self.transformer(img)

        class_id = sample['class_id']
        return img, class_id
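A sketch of the Dataset class this __getitem__ could live in, assuming the pickle layout from Example No. 17; the class name is hypothetical:

import pickle

from torch.utils.data import Dataset


class FaceDataset(Dataset):
    def __init__(self, pickle_file, transformer):
        with open(pickle_file, 'rb') as file:
            data = pickle.load(file)
        self.samples = data['samples']
        self.transformer = transformer

    def __len__(self):
        return len(self.samples)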
Example No. 19
def haar_detector(frame):
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_frame = np.zeros(gray_frame.shape, dtype="uint8")

    offset = 15
    x_pos, y_pos = 10, 40

    faces = cascade_detector.detectMultiScale(gray_frame, 1.32, 5)
    for idx, face in enumerate(faces):
        if hist_eq:
            gray_frame = cv2.equalizeHist(gray_frame)

        img_arr = utils.align_face(gray_frame, utils.bb_to_rect(face),
                                   desiredLeftEye)
        face_frame = cv2.resize(img_arr, (48, 48),
                                interpolation=cv2.INTER_CUBIC)
        img_arr = utils.preprocess_img(img_arr, resize=False)

        predicted_proba = model.predict(img_arr)
        predicted_label = np.argmax(predicted_proba[0])

        x, y, w, h = face
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        text = f"Person {idx+1}: {label2text[predicted_label]}"
        utils.draw_text_with_backgroud(frame, text, x + 5, y, font_scale=0.4)

        text = f"Person {idx+1} :  "
        y_pos = y_pos + 2 * offset
        utils.draw_text_with_backgroud(frame,
                                       text,
                                       x_pos,
                                       y_pos,
                                       font_scale=0.3,
                                       box_coords_2=(2, -2))
        for k, v in label2text.items():
            text = f"{v}: {round(predicted_proba[0][k]*100, 3)}%"
            y_pos = y_pos + offset
            utils.draw_text_with_backgroud(frame,
                                           text,
                                           x_pos,
                                           y_pos,
                                           font_scale=0.3,
                                           box_coords_2=(2, -2))
    return frame, face_frame
Example No. 20
def dlib_detector(frame_orig):
    frame = frame_orig.copy()
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    offset = 15
    x_pos, y_pos = 10, 40

    faces = hog_detector(gray_frame)
    for idx, face in enumerate(faces):
        if hist_eq:
            gray_frame = cv2.equalizeHist(gray_frame)

        img_arr = utils.align_face(gray_frame, face, desiredLeftEye)
        img_arr = utils.preprocess_img(img_arr, resize=False)

        predicted_proba = model.predict(img_arr)
        predicted_label = np.argmax(predicted_proba[0])

        x, y, w, h = rect_to_bb(face)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        text = f"Person {idx+1}: {label2text[predicted_label]}"
        utils.draw_text_with_backgroud(frame, text, x + 5, y, font_scale=0.4)

        text = f"Person {idx+1} :  "
        y_pos = y_pos + 2 * offset
        utils.draw_text_with_backgroud(frame,
                                       text,
                                       x_pos,
                                       y_pos,
                                       font_scale=0.3,
                                       box_coords_2=(2, -2))
        for k, v in label2text.items():
            text = f"{v}: {round(predicted_proba[0][k]*100, 3)}%"
            y_pos = y_pos + offset
            utils.draw_text_with_backgroud(frame,
                                           text,
                                           x_pos,
                                           y_pos,
                                           font_scale=0.3,
                                           box_coords_2=(2, -2))

    return frame
Example No. 21
def detect_face(data):
    from retinaface.detector import detect_faces
    from utils import select_significant_face, align_face

    src_path = data['src_path']
    dst_path = data['dst_path']

    img_raw = cv.imread(src_path)
    if img_raw is not None:
        img = resize(img_raw)
        bboxes, landmarks = detect_faces(img, top_k=5, keep_top_k=5)
        if len(bboxes) > 0:
            i = select_significant_face(bboxes)
            bbox, landms = bboxes[i], landmarks[i]
            img = align_face(img, [landms])
            dirname = os.path.dirname(dst_path)
            os.makedirs(dirname, exist_ok=True)
            cv.imwrite(dst_path, img)

    return True
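select_significant_face is another helper the snippet does not show; a minimal sketch, assuming it simply picks the largest detection by box area (a common heuristic, not confirmed by the source):

import numpy as np


def select_significant_face(bboxes):
    # choose the detection with the largest box area
    areas = [(b[2] - b[0]) * (b[3] - b[1]) for b in bboxes]
    return int(np.argmax(areas))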
Example No. 22
def extract_oneface(image, margin=16):
    # detecting faces
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    h, w, c = image.shape
    total_boxes, points = detect_face(image, 20, pnet, rnet, onet,
                                      [0.6, 0.7, 0.7], 0.709)
    for idx, (bounding_box, keypoints) in enumerate(zip(total_boxes,
                                                        points.T)):
        bounding_boxes = {
            'box': [
                int(bounding_box[0]),
                int(bounding_box[1]),
                int(bounding_box[2] - bounding_box[0]),
                int(bounding_box[3] - bounding_box[1])
            ],
            'confidence':
            bounding_box[-1],
            'keypoints': {
                'left_eye': (int(keypoints[0]), int(keypoints[5])),
                'right_eye': (int(keypoints[1]), int(keypoints[6])),
                'nose': (int(keypoints[2]), int(keypoints[7])),
                'mouth_left': (int(keypoints[3]), int(keypoints[8])),
                'mouth_right': (int(keypoints[4]), int(keypoints[9])),
            }
        }

        bounding_box = bounding_boxes['box']
        keypoints = bounding_boxes['keypoints']

        # align face and extract it out
        align_image = align_face(image, keypoints)
        align_image = cv2.cvtColor(align_image, cv2.COLOR_RGB2BGR)

        xmin = max(bounding_box[0] - margin, 0)
        ymin = max(bounding_box[1] - margin, 0)
        xmax = min(bounding_box[0] + bounding_box[2] + margin, w)
        ymax = min(bounding_box[1] + bounding_box[3] + margin, h)

        crop_image = align_image[ymin:ymax, xmin:xmax, :]
        # only one face is needed, so return after the first detection
        return crop_image
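A usage sketch for extract_oneface, assuming the MTCNN stages pnet, rnet, and onet and the detect_face function are already loaded at module level; the file names are hypothetical:

image = cv2.imread('person.jpg')
face = extract_oneface(image, margin=16)
if face is not None:  # extract_oneface returns None when no face is found
    cv2.imwrite('person_face.jpg', face)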
Example No. 23
def download(tokens, idx, num):
    url = tokens[0].replace('"', '').strip()
    left = float(tokens[1].strip())
    right = float(tokens[2].strip())
    top = float(tokens[3].strip())
    bottom = float(tokens[4].strip())

    filename = url[url.rfind("/") + 1:].strip()
    fullname = os.path.join(download_folder, filename)
    # if not os.path.isfile(fullname):
    #     process = Popen(["wget", '-N', url, "-P", download_folder], stdout=PIPE)
    #     (output, err) = process.communicate()
    #     exit_code = process.wait()

    filename = '{}_{}.jpg'.format(idx, num)
    filename = os.path.join(image_folder, filename)
    # print(filename)
    if os.path.isfile(filename) and os.path.getsize(filename) > 1000:
        img = cv.imread(filename)
        if img is not None:
            h, w = img.shape[:2]
            if h == 224 and w == 224:
                return filename

    if os.path.isfile(fullname) and os.path.getsize(fullname) > 1000:
        img = cv.imread(fullname)
        if img is not None:
            height, width = img.shape[:2]
            left, right = int(round(left * width)), int(round(right * width))
            top, bottom = int(round(top * height)), int(round(bottom * height))
            img = img[top:bottom, left:right, :]
            _, landmarks = detect_faces(img, top_k=1, keep_top_k=1)
            if len(landmarks) < 1:
                return None
            img = align_face(img, landmarks)
            cv.imwrite(filename, img)
            return filename

    return None
Example No. 24
    def __getitem__(self, i):
        sample = self.samples[i]
        full_path = sample['full_path']
        landmarks = sample['landmarks']
        # img = cv.imread(full_path)
        img = align_face(full_path, landmarks)
        img = transforms.ToPILImage()(img)
        # print('img.size: ' + str(img.size))
        img = self.transformer(img)
        # print('img.size(): ' + str(img.size()))
        # loc = sample['face_location']
        # x1, y1, x2, y2 = loc[0], loc[1], loc[2], loc[3]
        # img = img[y1:y2, x1:x2]
        # img = cv.resize(img, (image_w, image_h))
        # print('img.shape: ' + str(img.shape))
        # img = img.transpose(2, 0, 1)
        # assert img.shape == (3, image_h, image_w)
        # assert np.max(img) <= 255
        # img = torch.FloatTensor(img / 255.)
        age = sample['age']
        gender = sample['gender']
        return img, age, gender
Example No. 25
def detect_face(data):
    src_path = data['src_path']
    dst_path = data['dst_path']
    # print(src_path)

    img_raw = cv.imread(src_path)
    if img_raw is not None:
        img, _ = resize(img_raw)

        try:
            bboxes, landmarks = detector.detect_faces(img)

            if len(bboxes) > 0:
                bbox, landms = bboxes[0], landmarks[0]
                img = align_face(img, [landms])
                dirname = os.path.dirname(dst_path)
                os.makedirs(dirname, exist_ok=True)
                cv.imwrite(dst_path, img)
                return True

        except ValueError as err:
            print(err)

    return False
Example No. 26
for i in range(5, 15, 2):
    # test_face_lndmarks.append([int(bbox_lndmarks[i + 0] - left), int(bbox_lndmarks[i + 1] - top)])
    test_face_lndmarks.append([int(bbox_lndmarks[i + 0]), int(bbox_lndmarks[i + 1])])

test_face_lndmarks = np.array(test_face_lndmarks)

### encoding test_face

# test_face_img = cv2.resize(test_face_img, (96, 96), interpolation=cv2.INTER_CUBIC)
# normalized_face_img, normalized_face_lndmarks = normalize_faces_landmarks(out_img_size=(96, 96),
#                                                                           in_img=test_face_img,
#                                                                           landmarks=test_face_lndmarks)

normalized_face_img, normalized_face_lndmarks = align_face(out_img_size=(96, 96),
                                                           in_img=test_img,
                                                           landmarks=test_face_lndmarks)

aligned_faces_list.append(normalized_face_img)

# test_face_encodings = img_to_encoding(test_face_img, faceNet)
test_face_encodings = img_to_encoding(normalized_face_img, faceNet)
dist, identity, matched_idx = compute_dist_label(reg_faces_encodings, index_dic,
                                                 test_face_encodings, THRESHOLD=0.76)

# cv2.imwrite(identity + ".jpg", test_face_img)

######## drawing faces and labels or person names

if identity == "unknown":
Example No. 27
def save_aligned(old_fn, new_fn):
    old_fn = os.path.join('data/lfw_funneled', old_fn)
    _, landmarks = get_central_face_attributes(old_fn)
    img = align_face(old_fn, landmarks)
    new_fn = os.path.join('images', new_fn)
    cv.imwrite(new_fn, img)
Example No. 28
def dnn_detector(frame):
    frame_height = frame.shape[0]
    frame_width = frame.shape[1]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123],
                                 False, False)

    net.setInput(blob)
    detections = net.forward()
    bboxes = []
    idx = 0
    offset = 15
    x_pos, y_pos = 10, 40

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_frame = np.zeros(gray_frame.shape, dtype="uint8")

    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            idx += 1
            x1 = int(detections[0, 0, i, 3] * frame_width)
            y1 = int(detections[0, 0, i, 4] * frame_height)
            x2 = int(detections[0, 0, i, 5] * frame_width)
            y2 = int(detections[0, 0, i, 6] * frame_height)
            bboxes.append([x1, y1, x2, y2])

            face = [x1, y1, x2 - x1, y2 - y1]

            if hist_eq:
                gray_frame = cv2.equalizeHist(gray_frame)

            img_arr = utils.align_face(gray_frame, utils.bb_to_rect(face),
                                       desiredLeftEye)
            face_frame = cv2.resize(img_arr, (48, 48),
                                    interpolation=cv2.INTER_CUBIC)
            img_arr = utils.preprocess_img(img_arr, resize=False)

            predicted_proba = model.predict(img_arr)
            predicted_label = np.argmax(predicted_proba[0])

            cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
            text = f"Person {idx}: {label2text[predicted_label]}"
            utils.draw_text_with_backgroud(frame,
                                           text,
                                           x1 + 5,
                                           y1,
                                           font_scale=0.4)

            text = f"Person {idx} :  "
            y_pos = y_pos + 2 * offset
            utils.draw_text_with_backgroud(frame,
                                           text,
                                           x_pos,
                                           y_pos,
                                           font_scale=0.3,
                                           box_coords_2=(2, -2))
            for k, v in label2text.items():
                text = f"{v}: {round(predicted_proba[0][k]*100, 3)}%"
                y_pos = y_pos + offset
                utils.draw_text_with_backgroud(frame,
                                               text,
                                               x_pos,
                                               y_pos,
                                               font_scale=0.3,
                                               box_coords_2=(2, -2))
    return frame, face_frame
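Examples No. 19 and No. 28 wrap (x, y, w, h) boxes with utils.bb_to_rect, and Example No. 20 converts back with rect_to_bb. Minimal sketches of both converters, assuming the usual dlib conventions:

import dlib


def bb_to_rect(face):
    # (x, y, w, h) box -> dlib.rectangle
    x, y, w, h = face
    return dlib.rectangle(int(x), int(y), int(x + w), int(y + h))


def rect_to_bb(rect):
    # dlib.rectangle -> (x, y, w, h) box
    return (rect.left(), rect.top(),
            rect.right() - rect.left(), rect.bottom() - rect.top())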
Example No. 29
def save_encoded_faces_pickle(images_path, out_pic_file_path, faceNet):
    '''
    Load face images from images_path, detect and encode the faces, and save
    the results to a pickle file.
    '''
    threshold = [0.3, 0.6, 0.7]
    faceDetector_mtcnn = mtcnn()

    # prepare the registered people data: the images folder contains one
    # subfolder per person, and each image holds only that person's face

    person_subfolders = []

    for p in os.listdir(images_path):
        ppath = os.path.join(images_path, p)
        if os.path.isdir(ppath):
            person_subfolders.append(ppath)

    # map from image file name to the person's subfolder
    name_label_map = {}
    # integer label of each person
    labels = []

    person_image_paths = []

    for i, person_subfolder in enumerate(person_subfolders):
        for p in os.listdir(person_subfolder):
            ppath = os.path.join(person_subfolder, p)
            if p.endswith('jpg'):
                person_image_paths.append(ppath)
                labels.append(i)
                name_label_map[p] = person_subfolder

    # process each person's images one by one and encode them with the faceNet
    # model; encodings go into faces_encoding, labels into the index dictionary

    index_dic = {}
    i = 0
    faces_encoding = None

    for image_path in person_image_paths:
        debug("encoding face detected in : {}".format(image_path))

        image_cv = cv2.imread(image_path, 1)

        img_draw, face_imgs, face_bboxes, faces_landmarks = extract_draw_faces(
            faceDetector_mtcnn, image_cv, threshold)

        cv2.imshow("face image", img_draw)

        if len(face_imgs) == 0:
            continue

        face_lndmrk_draw = face_imgs[0].copy()

        lndmrks = np.array(faces_landmarks[0])

        # label and mark each of the five landmarks on the face crop
        for n in range(5):
            pt = (lndmrks[n][0], lndmrks[n][1])
            cv2.putText(face_lndmrk_draw, str(n + 1), pt,
                        cv2.FONT_ITALIC, 0.4, (0, 0, 255), 1)
            cv2.circle(face_lndmrk_draw, pt, 2, (0, 255, 0))

        cv2.imshow("face", face_lndmrk_draw)

        #face_cv =  cv2.resize(face_imgs[0], (96,96), interpolation=cv2.INTER_CUBIC)

        #cv2.imshow("face 96x96", face_cv)
        #aligned_face_cv, aligned_face_lndmarks = normalize_faces_landmarks(out_img_size=(96,96), in_img=face_imgs[0], landmarks=lndmrks)
        #aligned_face_cv, aligned_face_lndmarks = align_face(out_img_size=(96,96), in_img=face_imgs[0], landmarks=lndmrks)

        aligned_face_cv, aligned_face_lndmarks = align_face(out_img_size=(96,
                                                                          96),
                                                            in_img=image_cv,
                                                            landmarks=lndmrks)

        aligned_face_draw = aligned_face_cv.copy()

        # mark the first two (eye) landmarks on the aligned face
        for n in range(2):
            pt = (aligned_face_lndmarks[n][0], aligned_face_lndmarks[n][1])
            cv2.putText(aligned_face_draw, str(n + 1), pt,
                        cv2.FONT_ITALIC, 0.4, (0, 0, 255), 1)
            cv2.circle(aligned_face_draw, pt, 2, (0, 255, 0))

        cv2.imshow("aligned", aligned_face_draw)

        cv2.waitKey(0)

        # we have assumed that a single image will have only the face of the person

        # compute the encodings

        #f_encoding = img_to_encoding(face_cv, faceNet)

        f_encoding = img_to_encoding(aligned_face_cv, faceNet)

        if faces_encoding is None:
            faces_encoding = f_encoding
        else:
            faces_encoding = np.concatenate((faces_encoding, f_encoding),
                                            axis=0)

        # save the label for this face in index_dic
        # later, this will be used for verification or identification of a person
        index_dic[i] = image_path
        i = i + 1

    # save the face encodings and label index dict

    np.save(os.path.join(out_pic_file_path, 'faces_encoding.npy'),
            faces_encoding)

    with open(os.path.join(out_pic_file_path, 'index_dic.pkl'), 'wb') as f:
        cPickle.dump(index_dic, f)
Example No. 30
def create_hdf5_facedataset(in_images_folder_path,
                            out_hdf5_dataset_fullPath_name, faceNet):
    '''
    Create an hdf5 face dataset by registering each person's face.
    The dataset contains the face images in OpenCV format, each image's
    encoding obtained with faceNet, and the labels/person names.

    Input:
        in_images_folder_path: path to a folder with one subfolder per person;
                        each subfolder's name is the face identity, and its
                        images contain only that individual's face
        out_hdf5_dataset_fullPath_name: full path of the dataset file to save
        faceNet: deep FaceNet Keras model
    '''

    person_subfolders = []

    for p in os.listdir(in_images_folder_path):
        ppath = os.path.join(in_images_folder_path, p)
        if os.path.isdir(ppath):
            person_subfolders.append(ppath)

    face_image_paths = []

    for i, person_subfolder in enumerate(person_subfolders):
        for filename in os.listdir(person_subfolder):
            img_path = os.path.join(person_subfolder, filename)
            if filename.endswith('jpg'):
                face_image_paths.append(img_path)

                #person_name = os.path.splitext(os.path.dirname(img_path).split('/')[-1] )[0]
                #print(person_name)

    print("Total face images", len(face_image_paths))
    image_shape = (len(face_image_paths), 96, 96, 3)
    encod_shape = (len(face_image_paths), 128)

    # open hdf5 file and create dataset
    hdf5_file = h5py.File(out_hdf5_dataset_fullPath_name, mode='w')

    #hdf5_file.create_dataset("faces_encoding", shape=encod_shape, dtype=np.float32)
    # create resizable datasets, since the number of faces in the training images is not known in advance
    hdf5_file.create_dataset("faces_encoding", (3000, 128),
                             maxshape=(None, 128),
                             dtype=np.float32)

    dt = h5py.special_dtype(vlen=str)
    #hdf5_file.create_dataset("index_dic",  shape=(len(face_image_paths),), dtype=dt)
    hdf5_file.create_dataset("index_dic", (3000, ),
                             maxshape=(None, ),
                             dtype=dt)

    #hdf5_file.create_dataset("face_imgs_cv", shape=image_shape,dtype=np.uint8)
    hdf5_file.create_dataset("face_imgs_cv", (3000, 96, 96, 3),
                             maxshape=(None, 96, 96, 3),
                             dtype=np.uint8)

    # define thresholds for face detection and load the mtcnn detector
    threshold = [0.3, 0.6, 0.7]
    faceDetector_mtcnn = mtcnn()

    # process each person's images one by one and encode them with the faceNet
    # model; encodings go into faces_encoding, labels into the index dictionary
    img_cnt = -1
    for image_path in face_image_paths:
        print("encoding face detected in : {}".format(image_path))

        image_cv = cv2.imread(image_path, 1)

        img_draw, face_imgs, face_bboxes, faces_landmarks = extract_draw_faces(
            faceDetector_mtcnn, image_cv, threshold)

        if len(face_imgs) == 0:
            continue

        img_cnt += 1

        lndmrks = np.array(faces_landmarks[0])

        person_name = os.path.splitext(
            os.path.dirname(image_path).split('/')[-1])[0]

        #aligned_face_cv, aligned_face_lndmarks = normalize_faces_landmarks(out_img_size=(96,96), in_img=face_imgs[0], landmarks=lndmrks)
        aligned_face_cv, aligned_face_lndmarks = align_face(out_img_size=(96,
                                                                          96),
                                                            in_img=image_cv,
                                                            landmarks=lndmrks)

        f_encoding = img_to_encoding(aligned_face_cv, faceNet)

        print(f_encoding.shape)

        #im[None].shape ==>(1, 96, 96, 3)

        # writing data into dataset
        print("img_cnt", img_cnt)
        hdf5_file["face_imgs_cv"][img_cnt, ...] = aligned_face_cv[None]
        hdf5_file["faces_encoding"][img_cnt, ...] = f_encoding
        hdf5_file["index_dic"][img_cnt, ...] = person_name

        ## flip the face horizontally (mirror image)
        img_cnt += 1
        print("img_cnt", img_cnt)
        #flip_aligned_face_cv = cv2.flip(aligned_face_cv,1)
        #flip_lndmrks = np.flip(lndmrks, 1)
        print(lndmrks)
        flip_lndmrks = np.multiply(
            np.subtract(lndmrks, [image_cv.shape[1], 0]), [-1, 1])

        # after the flip, the left/right eye and mouth-corner landmarks swap sides
        print(flip_lndmrks)
        tmp = flip_lndmrks[0].copy()
        flip_lndmrks[0] = flip_lndmrks[1].copy()
        flip_lndmrks[1] = tmp.copy()

        tmp = flip_lndmrks[3].copy()
        flip_lndmrks[3] = flip_lndmrks[4].copy()
        flip_lndmrks[4] = tmp.copy()

        print(flip_lndmrks)

        flip_image_cv = cv2.flip(image_cv, 1)

        flip_aligned_face_cv, _ = align_face(out_img_size=(96, 96),
                                             in_img=flip_image_cv,
                                             landmarks=flip_lndmrks)

        flip_f_encoding = img_to_encoding(flip_aligned_face_cv, faceNet)

        # writing data into dataset
        hdf5_file["face_imgs_cv"][img_cnt, ...] = flip_aligned_face_cv[None]
        hdf5_file["faces_encoding"][img_cnt, ...] = flip_f_encoding
        hdf5_file["index_dic"][img_cnt, ...] = person_name

        ## draw on images

        face_lndmrk_draw = face_imgs[0].copy()

        # label and mark each of the five landmarks on the face crop
        for n in range(5):
            pt = (lndmrks[n][0], lndmrks[n][1])
            cv2.putText(face_lndmrk_draw, str(n + 1), pt,
                        cv2.FONT_ITALIC, 0.4, (0, 0, 255), 1)
            cv2.circle(face_lndmrk_draw, pt, 2, (0, 255, 0))

        cv2.imshow("face", face_lndmrk_draw)

        aligned_face_draw = aligned_face_cv.copy()

        # mark the first two (eye) landmarks on the aligned face
        for n in range(2):
            pt = (aligned_face_lndmarks[n][0], aligned_face_lndmarks[n][1])
            cv2.putText(aligned_face_draw, str(n + 1), pt,
                        cv2.FONT_ITALIC, 0.4, (0, 0, 255), 1)
            cv2.circle(aligned_face_draw, pt, 2, (0, 255, 0))

        cv2.imshow("aligned", aligned_face_draw)

        cv2.imshow("face image", img_draw)

        debug("face image of {}".format(person_name))
        if (cv2.waitKey(100) == ord('q')):
            break

    # resize datasets to the number of rows actually written; img_cnt is the
    # index of the last written row, so the row count is img_cnt + 1

    hdf5_file["faces_encoding"].resize((img_cnt + 1, 128))
    hdf5_file["index_dic"].resize((img_cnt + 1, ))
    hdf5_file["face_imgs_cv"].resize((img_cnt + 1, 96, 96, 3))

    hdf5_file.close()
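A sketch of reading the resulting dataset back with h5py; the file name is hypothetical, the layout follows the datasets created above:

import h5py

with h5py.File('face_dataset.h5', 'r') as f:
    encodings = f['faces_encoding'][:]   # (N, 128) float32
    faces = f['face_imgs_cv'][:]         # (N, 96, 96, 3) uint8, aligned crops
    names = [n.decode() if isinstance(n, bytes) else n
             for n in f['index_dic'][:]]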