Example #1
0
def of_dataset(folder="testset", model=None, view=False):
    '''Measure the average landmark error across the given dataset.

    Compares the measured points with the annotated ground truth;
    optionally you can [view] the results.

    Args:
        folder: dataset folder readable by utils.ibug_dataset.
        model: path of the shape-predictor model to evaluate (required).
        view: when True, draw detections and ground truth on each image.

    Raises:
        ValueError: if no model path is given.
    '''
    # explicit validation: `assert` is stripped under `python -O`
    if not model:
        raise ValueError("a shape-predictor model path is required")

    # load face and landmark detectors
    utils.load_shape_predictor(model)
    # utils.init_face_detector(True, 150)

    # accumulate the normalized error over every sample
    err = 0
    num = 0

    for img, lmarks, path in utils.ibug_dataset(folder):
        # detections
        face = utils.prominent_face(utils.detect_faces(img, detector="dlib"))
        measured = utils.detect_landmarks(img, face)

        # get error
        num += 1
        err += normalized_root_mean_square(lmarks, measured)

        # results:
        if view:
            utils.draw_rect(img, face, color=Colors.yellow)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.draw_points(img, measured, color=Colors.red)
            utils.show_image(utils.show_properly(utils.crop_image(img, face)))

    # guard against an empty dataset (original crashed with ZeroDivisionError)
    if num == 0:
        print("no images found in {}".format(folder))
        return

    print(err, num, err / num)
    print("average NRMS Error for {} is {}".format(folder, err / num))
Example #2
0
def build_trainset(input_dir="data", output_dir="trainset", win_size=321):
    '''Scan the input folder and write every detected face with its
    landmark annotations into the output folder.

    Interactive keys: S saves the shown face (plus its augmentations),
    ESC skips the current face, Q quits the program.

    Args:
        input_dir: folder scanned for images containing faces.
        output_dir: destination folder for face crops and .ann files.
        win_size: window size passed to the face detector.
    '''
    utils.init_face_detector(True, win_size)
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    # each saved face produces 8 files on disk, so resume the counter
    # from what is already there (floor division replaces int(x / 8))
    count = utils.count_files_inside(output_dir) // 8
    window = "window"
    cv2.namedWindow(window)

    # `_box` is unused: the face rectangle is recomputed with region(face)
    for face, _box in utils.faces_inside(input_dir, 1, True):
        face_copy = face.copy()
        face_rect = region(face)

        # detections
        points = utils.detect_landmarks(face, face_rect)
        utils.draw_points(face, points)

        # show face
        while True:
            h, w = face.shape[:2]

            if h > 0 and w > 0:
                cv2.imshow(window, show_properly(face))
            else:
                break

            key = cv2.waitKey(20) & 0xFF

            if key == Keys.ESC:
                break  # skip current face

            elif key == Keys.S:
                path = f"{output_dir}/face{count}"

                # save image
                cv2.imwrite(f"{path}.jpg", face_copy)

                # save annotation relative to the current face
                Annotation(f"{path}.ann", face_rect, points).save()

                # generate and save augmentations
                array = augment_data(face_copy, face_rect, points)

                for i, x in enumerate(array):
                    # save image x[0]
                    cv2.imwrite(f"{path}_{i + 1}.jpg", x[0])

                    # save annotations x[1]
                    Annotation(f"{path}_{i + 1}.ann", face_rect, x[1]).save()

                count += 1
                break

            elif key == Keys.Q:
                # quit program
                return cv2.destroyAllWindows()

    cv2.destroyAllWindows()
Example #3
0
def compare_models(folder="testset", m1=None, m2=None, view=False):
    '''Compare the [m1] shape-predictor against the [m2] model,
    optionally you can [view] the results.

    Args:
        folder: dataset folder scanned for faces.
        m1: path of the first shape-predictor model (required).
        m2: path of the second shape-predictor model (required).
        view: when True, draw both models' landmarks on each face.

    Raises:
        ValueError: if either model path is missing.
    '''
    # explicit validation: `assert` is stripped under `python -O`
    if not (m1 and m2):
        raise ValueError("both m1 and m2 model paths are required")

    utils.init_face_detector(True, 150)

    # load models (m2 first so the predictor swap below starts from m1)
    utils.load_shape_predictor(m2)
    sp_m2 = utils.shape_predictor

    utils.load_shape_predictor(m1)
    sp_m1 = utils.shape_predictor

    # init error
    err = 0
    num = 0

    # `face_region` avoids shadowing the module-level region() helper
    for face, face_region in utils.faces_inside(folder):
        h, w = face.shape[:2]
        if h == 0 or w == 0:
            continue

        box = utils.Region(0, 0, face_region.width, face_region.height)
        # detect landmarks with each model on the same face box
        utils.shape_predictor = sp_m1
        lmarks_m1 = utils.detect_landmarks(face, box)

        utils.shape_predictor = sp_m2
        lmarks_m2 = utils.detect_landmarks(face, box)

        # update error (BUG FIX: this line was commented out, so the
        # reported NRMSE was always 0 regardless of the models)
        num += 1
        err += normalized_root_mean_square(lmarks_m1, lmarks_m2)

        # results:
        if view:
            utils.draw_points(face, lmarks_m1, color=Colors.green)
            utils.draw_points(face, lmarks_m2, color=Colors.red)
            utils.show_image(utils.show_properly(face))

    if num != 0:
        err /= num

    print("the NRMSE of m1 aganist m2 is {}".format(err))
Example #4
0
def test(folder="testset", model="dlib/shape_predictor_68_face_landmarks.dat"):
    '''Visually compare the given [model] against the stock dlib
    68-landmark predictor on every face found inside [folder]:
    dlib's landmarks are drawn in green, the model's in red.'''
    utils.init_face_detector(True, 150)
    utils.load_shape_predictor(model)
    my_sp = utils.shape_predictor
    dlib_sp = dlib.shape_predictor(
        "dlib/shape_predictor_68_face_landmarks.dat")

    for face, r in utils.faces_inside(folder):
        box = region(face)

        utils.shape_predictor = my_sp
        lmarks0 = utils.detect_landmarks(face, box)

        # BUG FIX: original referenced the undefined name `dlib_spq`
        # (typo for `dlib_sp`), raising NameError on every iteration
        utils.shape_predictor = dlib_sp
        lmarks1 = utils.detect_landmarks(face, box)

        # draw results
        utils.draw_points(face, lmarks1, color=Colors.green)
        utils.draw_points(face, lmarks0, color=Colors.red)
        utils.show_image(show_properly(face))
Example #5
0
def test_augment():
    '''Visual sanity check of the augmentation pipeline: shows each
    trainset image with its landmarks alongside the +/-30 degree
    rotations and the mirrored variant.  ESC advances, Q quits.'''
    utils.init_face_detector(True, 321)
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    for image in utils.images_inside("trainset"):
        landmarks = utils.detect_landmarks(image, region(image))

        degrees = 30
        height, width = image.shape[:2]
        pivot = (width / 2, height / 2)

        # rotate by +30 degrees (landmarks rotated analytically)
        rotated_pos = utils.rotate_image(image, degrees)
        points_pos = utils.rotate_landmarks(landmarks, pivot, degrees)

        # rotate by -30 degrees
        rotated_neg = utils.rotate_image(image, -degrees)
        points_neg = utils.rotate_landmarks(landmarks, pivot, -degrees)

        # mirroring (landmarks re-detected on the flipped image)
        mirrored = utils.flip_image(image)
        points_mir = utils.detect_landmarks(mirrored, region(mirrored))

        utils.draw_points(image, landmarks)
        utils.draw_points(rotated_pos, points_pos, color=Colors.cyan)
        utils.draw_points(rotated_neg, points_neg, color=Colors.purple)
        utils.draw_points(mirrored, points_mir, color=Colors.green)

        while True:
            cv2.imshow("image", image)
            cv2.imshow("mirrored", mirrored)
            cv2.imshow("rotated30", rotated_pos)
            cv2.imshow("rotated-30", rotated_neg)

            pressed = cv2.waitKey(20) & 0xFF

            if pressed == Keys.ESC:
                break
            elif pressed == Keys.Q:
                return cv2.destroyAllWindows()
Example #6
0
def another_test():
    '''Experiment: run the FAST corner detector on small patches
    centered on a few selected facial landmarks, then draw the
    detected corners back onto the full image.  ESC advances.'''
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    # `_path` renamed from `p`, which the original shadowed below
    for img, _path in utils.images_inside("uffa"):
        fast = cv2.FastFeatureDetector_create()

        # most prominent face and a hand-picked subset of landmarks
        face = utils.detect_faces(img, detector="dlib")[0]

        pts = utils.detect_landmarks(img, face)
        pts = [pts[0], pts[3], pts[6], pts[10], pts[20], pts[22], pts[35]]

        # keypoints[i] holds the FAST corners found inside a 20x20
        # patch centered on pts[i]
        keypoints = []

        for pt in pts:
            roi = Region(pt[0] - 10, pt[1] - 10, 20, 20)
            patch = utils.crop_image(img, roi)
            keypoints.append(fast.detect(patch, None))

        # BUG FIX: the original triple-nested loop paired every keypoint
        # set with every landmark, drawing len(pts) misplaced copies of
        # each corner; each patch's corners belong to its own landmark
        for pt, kps in zip(pts, keypoints):
            for k in kps:
                # NOTE(review): the patch origin is (pt - 10), so mapping
                # back probably needs a -10 offset as well — confirm
                # against crop_image's semantics before changing
                x = int(k.pt[0] + pt[0])
                y = int(k.pt[1] + pt[1])
                utils.draw_point(img, x, y, radius=1)

        while True:
            cv2.imshow("window", show_properly(img))
            key = cv2.waitKey(20) & 0xFF

            if key == Keys.ESC:
                break
Example #7
0
        y1, x1, y2, x2 = int(y - 0.5 * edge), int(x - 0.5 * edge), int(
            y + 0.5 * edge), int(x + 0.5 * edge)

        if y1 < 0 or x1 < 0 or y2 > ishape[0] or x2 > ishape[1]:
            continue

        iobject = pix[y1:y2, x1:x2, :]
        iobject = transform.resize(image=iobject, output_shape=[112, 112])
        igreyobject = np.mean(iobject, axis=-1, keepdims=True)
        igreyobject = igreyobject / np.max(igreyobject)
        igreyobject = igreyobject * 255
        igreyobject = np.array(igreyobject, dtype='int32')

        ya, xa, yb, xb, yc, xc, yd, xd, ye, xe = detect_landmarks(
            interpreter=interpreter_,
            input_details=input_details_,
            output_details=output_details_,
            pix=igreyobject,
            ishape=[112, 112, 1])  # (5, h, w)

        wider_iobject = pix[y1 - int(0.2 * (y2 - y1)):y2 + int(0.2 *
                                                               (y2 - y1)), x1 -
                            int(0.2 * (x2 - x1)):x2 + int(0.2 * (x2 - x1)), :]
        if ya != 0 and yb != 0 and yc != 0 and yd != 0 and ye != 0 and wider_iobject.shape[
                0] == wider_iobject.shape[1] and wider_iobject.shape[0] != 0:
            wider_iobject = transform.resize(image=wider_iobject,
                                             output_shape=[112, 112])
            cv2.imwrite(output_path + '/objects/' + str(face_idx) + '.jpg',
                        np.array(wider_iobject, dtype='uint8'))
            face_idx += 1

        eye = ya != 0 or yb != 0