# Code example #1 (votes: 0)
def build_trainset(input_dir="data", output_dir="trainset", win_size=321):
    '''Interactively scan [input_dir] and write every confirmed face,
    with its annotations, into [output_dir].
    Keys: S = save face (+augmentations), ESC = skip face, Q = quit.'''
    utils.init_face_detector(True, win_size)
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    # 8 files are written per saved face, so resume numbering accordingly
    count = int(utils.count_files_inside(output_dir) / 8)
    window = "window"
    cv2.namedWindow(window)

    for face, box in utils.faces_inside(input_dir, 1, True):
        face_copy = face.copy()
        face_rect = region(face)

        # detect and draw the landmarks on the displayed copy
        points = utils.detect_landmarks(face, face_rect)
        utils.draw_points(face, points)

        # show the face until the user decides what to do with it
        while True:
            h, w = face.shape[:2]
            if not (h > 0 and w > 0):
                break

            cv2.imshow(window, show_properly(face))
            key = cv2.waitKey(20) & 0Xff

            if key == Keys.ESC:
                # skip current face
                break

            if key == Keys.S:
                path = f"{output_dir}/face{count}"

                # save the original image and its annotation
                cv2.imwrite(f"{path}.jpg", face_copy)
                Annotation(f"{path}.ann", face_rect, points).save()

                # generate and save the augmented variants
                for i, x in enumerate(augment_data(face_copy, face_rect, points)):
                    cv2.imwrite(f"{path}_{i + 1}.jpg", x[0])
                    Annotation(f"{path}_{i + 1}.ann", face_rect, x[1]).save()

                count += 1
                break

            if key == Keys.Q:
                # quit program
                return cv2.destroyAllWindows()

    cv2.destroyAllWindows()
# Code example #2 (votes: 0)
# File: error.py — Project: shalevy1/imgann
def compare_models(folder="testset", m1=None, m2=None, view=False):
    '''compare the [m1] shape_predictor against the [m2] model,
    optionally you can [view] the results'''
    # explicit validation instead of assert (asserts are stripped under -O)
    if not (m1 and m2):
        raise ValueError("both m1 and m2 model paths are required")

    utils.init_face_detector(True, 150)

    # load both models up-front, then swap them per-face below
    utils.load_shape_predictor(m2)
    sp_m2 = utils.shape_predictor

    utils.load_shape_predictor(m1)
    sp_m1 = utils.shape_predictor

    # accumulated error and number of compared faces
    err = 0
    num = 0

    for face, region in utils.faces_inside(folder):
        h, w = face.shape[:2]
        if h == 0 or w == 0:
            # skip degenerate crops
            continue

        box = utils.Region(0, 0, region.width, region.height)

        # detect landmarks with each model on the same face/box
        utils.shape_predictor = sp_m1
        lmarks_m1 = utils.detect_landmarks(face, box)

        utils.shape_predictor = sp_m2
        lmarks_m2 = utils.detect_landmarks(face, box)

        # update error:
        num += 1
        # TODO(review): the metric is disabled, so err is always 0 and the
        # printed NRMSE is meaningless; re-enable once the helper exists:
        # err += normalized_root_mean_square(lmarks_m1, lmarks_m2)

        # results: m1 in green, m2 in red
        if view is True:
            utils.draw_points(face, lmarks_m1, color=Colors.green)
            utils.draw_points(face, lmarks_m2, color=Colors.red)
            utils.show_image(utils.show_properly(face))

    if num != 0:
        err /= num

    print("the NRMSE of m1 against m2 is {}".format(err))
# Code example #3 (votes: 0)
def test(folder="testset", model="dlib/shape_predictor_68_face_landmarks.dat"):
    """Visually compare [model] (red points) against the stock dlib
    68-landmark predictor (green points) on every face in [folder]."""
    utils.init_face_detector(True, 150)
    utils.load_shape_predictor(model)
    my_sp = utils.shape_predictor
    dlib_sp = dlib.shape_predictor(
        "dlib/shape_predictor_68_face_landmarks.dat")

    for face, r in utils.faces_inside(folder):
        box = region(face)

        utils.shape_predictor = my_sp
        lmarks0 = utils.detect_landmarks(face, box)

        # bug fix: was `dlib_spq` — an undefined name that raised
        # NameError on the first iteration
        utils.shape_predictor = dlib_sp
        lmarks1 = utils.detect_landmarks(face, box)

        # draw results: green = dlib reference, red = custom model
        utils.draw_points(face, lmarks1, color=Colors.green)
        utils.draw_points(face, lmarks0, color=Colors.red)
        utils.show_image(show_properly(face))
# Code example #4 (votes: 0)
def test_augment():
    """Visual sanity check for the augmentation helpers: shows the
    original, the mirrored and the +/-30 degree rotated landmarks.
    Keys: ESC = next image, Q = quit."""
    utils.init_face_detector(True, 321)
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    for img in utils.images_inside("trainset"):
        points = utils.detect_landmarks(img, region(img))

        angle = 30
        h, w = img.shape[:2]
        center = (w / 2, h / 2)

        # rotations by +angle and -angle degrees
        rot1 = utils.rotate_image(img, angle)
        rot_pts1 = utils.rotate_landmarks(points, center, angle)
        rot2 = utils.rotate_image(img, -angle)
        rot_pts2 = utils.rotate_landmarks(points, center, -angle)

        # mirroring (landmarks are re-detected on the flipped image)
        mir = utils.flip_image(img)
        mir_pts = utils.detect_landmarks(mir, region(mir))

        # overlay each point set on its image
        utils.draw_points(img, points)
        for target, pts, col in ((rot1, rot_pts1, Colors.cyan),
                                 (rot2, rot_pts2, Colors.purple),
                                 (mir, mir_pts, Colors.green)):
            utils.draw_points(target, pts, color=col)

        while True:
            cv2.imshow("image", img)
            cv2.imshow("mirrored", mir)
            cv2.imshow("rotated30", rot1)
            cv2.imshow("rotated-30", rot2)

            key = cv2.waitKey(20) & 0Xff

            if key == Keys.ESC:
                break
            if key == Keys.Q:
                return cv2.destroyAllWindows()
# Code example #5 (votes: 0)
def main():
    """Interactive annotation tool entry point.

    Iterates the images of the CLI-given folder, optionally auto-detects
    faces and landmarks, shows each image in a window, and appends the
    collected boxes/points to the output file.

    Keys: ESC = confirm & next image, S = skip image, R = delete image,
    Q = save state and quit.
    """
    global image, boxes, points

    # getting arguments from cli
    args = utils.cli_arguments()
    out = utils.open_file(args)
    img_dir = args["dir"]
    auto_landm = args["land"] is not None  # auto-landmark only if a model path was given
    auto_faces = args["auto"]
    mirror_points = args["mirror"]

    if args["train"] is True:
        # skip everything and train the model
        return utils.train_model(out.path, args["train"])

    # cnn face detector
    utils.init_face_detector(args)

    # load shape predictor if requested
    if auto_faces and auto_landm:
        global predictor
        predictor = dlib.shape_predictor(args["land"])

    # recover last state (if append is true)
    resume, lastpath = utils.load_state(args["append"])

    for file in os.listdir(img_dir):
        # consider only images
        if not is_image(file):
            continue

        # avoid mirrored images
        if is_mirrored(file):
            continue

        # load image:
        path = os.path.join(img_dir, file)

        # trying to resume from the image located at lastpath:
        # skip every image until the checkpointed path is reached
        if resume:
            if path == lastpath:
                resume = False
            else:
                continue

        image = cv2.imread(path)

        # clear: stack, boxes and points (module-level state shared
        # with the mouse callback / drawing helpers)
        stack.clear()
        boxes.clear()
        points.clear()

        # automatically detect faces
        if auto_faces:
            draw_face(image)

            # and landmarks
            # NOTE(review): assumes draw_face always appends at least one
            # box; otherwise boxes[-1] raises IndexError — confirm
            if auto_landm:
                detect_landmarks(image, boxes[-1])

        stack.append(image)

        # create a window with disabled menu when right-clicking with the mouse
        window = file
        cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE | cv2.WINDOW_GUI_NORMAL)

        # mouse callback to window
        cv2.setMouseCallback(window, mouse_callback)

        # removing or skipping the current image without affecting output file
        skipped = False
        removed = False

        # showing image until esc is pressed
        while (1):
            cv2.imshow(window, image)
            key = cv2.waitKey(20) & 0Xff

            # listen to key events
            if key == KEY_ESC:
                break
            elif key == KEY_S:
                skipped = True
                break
            elif key == KEY_R:
                removed = True
                utils.delete_image(path)
                break
            elif key == KEY_Q:
                # persist progress and stop the whole program
                return quit(path, out)

        if not (skipped or removed):
            # clear point log
            print()

            # write annotations
            add_entry(out, path, boxes, points, mirror_points)

        # close window
        cv2.destroyAllWindows()

    # delete checkpoint file and close output file
    utils.delete_state()
    out.close()
# Code example #6 (votes: 0)
def build_trainset_auto(src="dataset", dst="trainset", debug=False):
    '''build a trainset automatically from an ibug-like dataset,
    the images are taken from [src] folder and saved to [dst] folder.
    With [debug] the results are only displayed, nothing is written.'''
    utils.init_face_detector(True, 150)

    # jpeg compression settings for the saved crops
    quality = [int(cv2.IMWRITE_JPEG_QUALITY), 50]

    # 8 files are written per face, so resume the numbering accordingly
    count = int(utils.count_files_inside(dst) / 8)

    for img, lmarks, path in utils.ibug_dataset(src):
        h, w = img.shape[:2]

        # crop a bigger region around landmarks
        region = utils.points_region(lmarks)
        scaled = region.scale(1.8, 1.8).ensure(w, h)
        img = utils.crop_image(img, scaled)

        # detect faces with the cnn detector first
        face = utils.prominent_face(utils.detect_faces(img))

        # if cnn fails try with dlib
        if face is None:
            faces = utils.detect_faces(img, detector="dlib")

            # bug fix: the original re-tested `face is None` (always True
            # here), so the dlib result was never used and the fallback to
            # the landmark region was taken unconditionally
            if faces:
                face = utils.prominent_face(faces)

            # ..if dlib fails too, take the region around landmarks
            if face is None:
                face = region.copy()

        # edit landmarks according to scaled region
        lmarks = adjust_landmarks(scaled, lmarks)

        # augmentations
        i = 0
        for image, landmarks, box in augment_data(img, face, lmarks):
            i = i + 1

            if debug:
                utils.draw_rect(image, box, color=Colors.yellow)
                utils.draw_points(image, landmarks, color=Colors.purple)
                name = f"image{i}"
                utils.show_image(show_properly(image), window=name)
            else:
                # save annotation and image
                ipath = os.path.join(dst, f"face{count}_{i}.jpg")
                apath = os.path.join(dst, f"face{count}_{i}.ann")
                cv2.imwrite(ipath, image, quality)
                Annotation(apath, box.as_list()[:4], landmarks).save()

        if debug:
            utils.draw_rect(img, face, color=Colors.red)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.show_image(show_properly(img))
        else:
            # save image and annotation
            ipath = os.path.join(dst, f"face{count}.jpg")
            apath = os.path.join(dst, f"face{count}.ann")
            cv2.imwrite(ipath, img, quality)
            Annotation(apath, face.as_list()[:4], lmarks).save()

        count = count + 1

        # info (bug fix: `ipath` is never assigned in debug mode, which
        # made this print raise NameError; show the source path instead)
        print("{} processed: {}\r".format(count, path if debug else ipath))