Example #1
def get_landmark_points(mesh, img_shape=(320, 240), verbose=False):
    fitter = load_balanced_frontal_face_fitter()
    detector = load_dlib_frontal_face_detector()
    camera = perspective_camera_for_template(img_shape)

    # Pre-process - align the mesh roughly with the template
    aligned_mesh = align_mesh_to_template(mesh, load_template()).apply(mesh)

    mesh_in_img = camera.apply(aligned_mesh)

    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError("Expected to find one face - found {}".format(
            len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AAM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape.from_mask(LANDMARK_MASK)

    # test to see if the landmark fell on the 3D surface or not
    occlusion_mask = img.mask.sample(img_lms).ravel()

    img.landmarks['__lsfm_on_surface'] = img_lms.from_mask(occlusion_mask)
    img.landmarks['__lsfm_off_surface'] = img_lms.from_mask(~occlusion_mask)
    return PointCloud(shape_img.sample(img.landmarks['__lsfm_on_surface']).T)
Example #2
    def getResulsFromGrayNdArray(self, fitter, ndArray):

        processor = ImageProcessor()
        grayImage255x255 = processor.processGrayImageForDisplay(ndArray)
        grayMioImage = menpo.image.Image(grayImage255x255)
        image = grayMioImage

        # Load detector
        detect = load_dlib_frontal_face_detector()

        # Detect
        bboxes = detect(image)
        print("{} detected faces.".format(len(bboxes)))

        # No face found - nothing to fit
        if len(bboxes) == 0:
            return []

        # View the detection
        image.view_landmarks(group='dlib_0',
                             line_colour='red',
                             render_markers=False,
                             line_width=4)

        # initial bbox
        initial_bbox = bboxes[0]

        result = fitter.fit_from_bb(image, initial_bbox, max_iters=[15, 5])

        return result
Example #3
def test_model(model, test_images, num_init):
    face_detector = menpodetect.load_dlib_frontal_face_detector()
    test_gt_shapes = util.get_gt_shapes(test_images)
    test_boxes = util.get_bounding_boxes(test_images, test_gt_shapes, face_detector)

    initial_errors = []
    final_errors = []

    initial_shapes = []
    final_shapes = []

    for k, (im, gt_shape, box) in enumerate(zip(test_images, test_gt_shapes, test_boxes)):
        init_shapes, fin_shapes = model.apply(im, ([box], num_init, None))

        init_shape = util.get_median_shape(init_shapes)
        final_shape = fin_shapes[0]

        initial_shapes.append(init_shape)
        final_shapes.append(final_shape)

        initial_errors.append(compute_error(init_shape, gt_shape))
        final_errors.append(compute_error(final_shape, gt_shape))

        print_dynamic('{}/{}'.format(k + 1, len(test_images)))

    return initial_errors, final_errors, initial_shapes, final_shapes
Example #4
def landmark_template(mesh, img_shape=(320, 240), verbose=False):
    fitter = load_balanced_frontal_face_fitter()
    detector = load_dlib_frontal_face_detector()
    camera = perspective_camera_for_template(img_shape)

    # Pre-process - align the mesh roughly with the template
    aligned_mesh = prepare_template_reference_space(mesh)

    mesh_in_img = camera.apply(aligned_mesh)

    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError("Expected to find one face - found {}".format(
            len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AAM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape

    # test to see if the landmark fell on the 3D surface or not
    mesh.landmarks["ibug68"] = PointCloud(Image.sample(shape_img, img_lms).T)
    mask = np.zeros(68, dtype=np.bool)
    mask[30] = True
    mesh.landmarks["nosetip"] = mesh.landmarks["ibug68"].lms.from_mask(mask)
Example #5
    def _preload_dlib_detector_fitter(self):
        from menpofit.dlib import DlibWrapper
        from menpodetect import load_dlib_frontal_face_detector

        from os import path
        dir_ = path.dirname(__file__)
        self._fitter = DlibWrapper(path.join(dir_, '../pretrained/shape_predictor_68_face_landmarks.dat'))
        self._detect = load_dlib_frontal_face_detector()
Example #6
def fit_all(model_builder, train_images, test_images, num_init):
    face_detector = menpodetect.load_dlib_frontal_face_detector()

    train_gt_shapes = util.get_gt_shapes(train_images)
    train_boxes = util.get_bounding_boxes(train_images, train_gt_shapes, face_detector)

    model = model_builder.build(train_images, train_gt_shapes, train_boxes)

    initial_errors, final_errors, initial_shapes, final_shapes = test_model(model, test_images, num_init)

    return initial_errors, final_errors, initial_shapes, final_shapes, model
Example #7
def landmark_mesh(mesh, img_shape=(320, 240), verbose=False, template_fn=None):
    fitter = load_balanced_frontal_face_fitter()
    detector = load_dlib_frontal_face_detector()
    camera = perspective_camera_for_template(img_shape)

    # Pre-process - align the mesh roughly with the template
    aligned_mesh = align_mesh_to_template(mesh, load_template(template_fn)).apply(mesh)

    mesh_in_img = camera.apply(aligned_mesh)

    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError(
            "Expected to find one face - found {}".format(len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AAM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape.from_mask(LANDMARK_MASK)

    # test to see if the landmark fell on the 3D surface or not
    occlusion_mask = img.mask.sample(img_lms).ravel()

    img.landmarks['__lsfm_on_surface'] = img_lms.from_mask(occlusion_mask)
    img.landmarks['__lsfm_off_surface'] = img_lms.from_mask(~occlusion_mask)
    return_dict = {
        'landmarks_2d': img_lms,
        'occlusion_mask': occlusion_mask,
        'landmarks_3d_masked': PointCloud(shape_img.sample(
            img.landmarks['__lsfm_on_surface']).T)
    }

    if (~occlusion_mask).sum() != 0:
        groups = ['dlib_0', '__lsfm_on_surface', '__lsfm_off_surface']
        marker_edge_colours = ['blue', 'yellow', 'red']
    else:
        groups = ['dlib_0', '__lsfm_on_surface']
        marker_edge_colours = ['blue', 'yellow']

    lm_img = img.rasterize_landmarks(group=groups,
                                     line_colour='blue',
                                     marker_edge_colour=marker_edge_colours)
    return_dict['landmarked_image'] = lm_img

    return return_dict
Example #8
def get_landmarks(image_filepath: str,
                  bb_index: int = 0,
                  do_plot: bool = True) -> Image:
    assert os.path.isfile(image_filepath), f"img not found: {image_filepath}"
    img = mio.import_image(image_filepath)

    #

    detector = load_dlib_frontal_face_detector()
    bbs = detector(img)

    assert bb_index <= len(
        bbs) - 1, f"Too few bbs found to satisfy bb_index: {bb_index}"

    # order bbs based on x position

    x_bb = list()
    for bb in bbs:
        x_bb.append(np.mean(bb.points[:, 0]))

    x_bb_index = np.argsort(x_bb)

    #

    fitter = load_balanced_frontal_face_fitter()
    lm_result = fitter.fit_from_bb(img, bbs[x_bb_index[bb_index]])

    tags = [tag for tag in img.landmarks]
    for tag in tags:
        img.landmarks.pop(tag)

    img.landmarks['ibug_0'] = lm_result.final_shape

    #

    if do_plot:
        plt.figure()
        img.view()
        bbs[x_bb_index[bb_index]].view()
        lm_result.view()

    return img
Example #9
    def fittingByUrlWithGtShape(self, fitter, path_to_picture):

        image = mio.import_image(path_to_picture)
        image = image.as_greyscale()

        # print(image)

        # Load detector
        detect = load_dlib_frontal_face_detector()

        # Detect
        bboxes = detect(image)
        print("{} detected faces.".format(len(bboxes)))

        # View
        if len(bboxes) > 0:
            image.view_landmarks(group='dlib_0',
                                 line_colour='red',
                                 render_markers=False,
                                 line_width=4)

        # initial bbox
        initial_bbox = bboxes[0]

        print("len landmarks")
        print(bboxes[0].landmarks)
        print(bboxes[0].landmarks.values())
        print(len(bboxes[0].landmarks))
        print(bboxes[0].landmarks.n_dims)
        print(bboxes[0].landmarks.n_groups)
        print(bboxes[0].landmarks.view_widget)

        len(initial_bbox.landmarks)

        # fit image
        result = fitter.fit_from_bb(image,
                                    initial_bbox,
                                    max_iters=[15, 5],
                                    gt_shape=image.landmarks['PTS'].lms)
        print(result)

        return result
Example #10
    def getResulsFromColorImage(self, fitter, image):

        image = image.as_greyscale()

        # print(image)

        # Load detector
        detect = load_dlib_frontal_face_detector()

        # Detect
        bboxes = detect(image)
        print("{} detected faces.".format(len(bboxes)))

        # View
        if len(bboxes) > 0:
            image.view_landmarks(group='dlib_0',
                                 line_colour='red',
                                 render_markers=False,
                                 line_width=4)

        # initial bbox
        initial_bbox = bboxes[0]

        # fit image (pass gt_shape here if ground truth is available):
        # result = fitter.fit_from_bb(image, initial_bbox, max_iters=[15, 5],
        #                             gt_shape=image.landmarks['PTS'].lms)

        result = fitter.fit_from_bb(image, initial_bbox, max_iters=[15, 5])

        return result
Example #11
def process_data(data_dir):
    dirlist = os.listdir(data_dir)
    dirlist = [d for d in dirlist if not os.path.isfile(os.path.join(data_dir, d))]
    dirlist = sorted(dirlist)
    # load the AAM model
    aam = mio.import_pickle("aam.pkl")
    # create fitter
    fitter = LucasKanadeAAMFitter(aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=16,
                                  n_appearance=104)
    # Load detector
    detector = load_dlib_frontal_face_detector()
    # load the sentences
    for j, subdir in enumerate(dirlist):
        ids = os.listdir(os.path.join(data_dir, subdir, "video/"))
        ids = [i for i in ids if "head" not in i]
        ids = sorted(ids)
        for k, sentence_id in enumerate(ids):
            t = time.time()
            video = os.path.join(data_dir, subdir, "video", sentence_id + '/')
            process_one_sentence(video, fitter, detector)
            print(j, k, time.time() - t)
Example #12
# Load the saved model
fitter = mio.import_pickle('pretrained12131_aam.pkl')()
pred_images = []
# load landmarked images
for i in mio.import_images(image_path_pred, max_images=None, verbose=True):
    # convert it to grayscale if needed
    if i.n_channels == 3:
        i = i.as_greyscale(mode='luminosity')

    # append it to the list
    pred_images.append(i)
png_list = file_name_except_format(image_path_pred)
import numpy as np

# Load detector once, outside the loop
detect = load_dlib_frontal_face_detector()

cnt = 0
for i in pred_images:
    # Detect
    bboxes = detect(i)
    print("{} detected faces.".format(len(bboxes)))
    # initial bbox
    # initial_bbox = bboxes[0]
    # Build a whole-image bounding box as a directed graph over the four
    # image corners
    imHei = i.height
    imWid = i.width
    adjacency_matrix = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1],
                                 [1, 0, 0, 0]])
    points = np.array([[0, 0], [imWid, 0], [imWid, imHei], [0, imHei]])
    graph = PointDirectedGraph(points, adjacency_matrix)
    # fit image
    # result = fitter.fit_from_bb(i, initial_bbox, max_iters=[15, 5],
    #                             gt_shape=None)
Example #13
from utils import mkdir_p, check_if_path, Logger
from utils.path_and_folder_definition import *  # import paths for databases, folders and libraries
from utils.pipeline_aux import (check_img_type, im_read_greyscale,
                                check_initial_path)
from utils.clip import Clip
from dlib import shape_predictor
from menpo.io import export_landmark_file
from menpo.shape import PointCloud
from menpodetect.dlib.conversion import pointgraph_to_rect
from menpodetect import load_dlib_frontal_face_detector
from joblib import Parallel, delayed

dlib_init_detector = load_dlib_frontal_face_detector()
predictor_dlib = shape_predictor(path_shape_pred)
# define a lambda function that accepts the image, along with the bounding box
# and returns the landmark localisation outcome.
f = lambda imp, ln_g: detection_to_pointgraph(
    predictor_dlib(imp, pointgraph_to_rect(ln_g)))


def main_for_generic_detector(path_clips, out_bb_fol, out_landmarks_fol):
    """
    Main function for the generic detection step.
    Processes a batch of clips in the same folder. Creates the dictionary with the paths, calls
    the processing per clip.
    :param path_clips:      str: Base path that contains the frames/lns folders.
    :param out_bb_fol:      str: Folder name for exporting the bounding box of the detection.
    :param out_landmarks_fol: str: Folder name for exporting the landmarks of the predictor.
    :return:
    """
    # define a dictionary for the paths
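The docstring above fixes the call signature; a hypothetical invocation, with illustrative folder names rather than ones from the original pipeline, would look like:

# Hypothetical layout: path_clips contains one sub-folder of frames per clip.
main_for_generic_detector('/data/clips/', '1_detect_bb/', '2_dlib_landmarks/')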
Example #14
import site
import sys
from os import path

dirname = path.dirname(path.abspath(__file__))
site.addsitedir(path.join(dirname, '..'))
from facefit.ert.tree import RegressionTree

import cv2
import menpo
import hickle
import menpodetect

def add_landmarks(mat, shape):
    # menpo points are stored (y, x); cv2.circle expects (x, y)
    for i in range(0, 68):
        cv2.circle(mat,
                   center=(int(shape.points[i][1]), int(shape.points[i][0])),
                   radius=3, color=(0, 255, 0), thickness=-1)

model = hickle.load(sys.argv[1], safe=False)
face_detector = menpodetect.load_dlib_frontal_face_detector()

WIDTH = 640
HEIGHT = 480

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)

# Num of perturbations of the initial shape within a bounding box.
n_inits = 1

ret, orig = cap.read()
orig_menpo = menpo.image.Image(orig.mean(axis=2)/255.0)

while True:
Example #15
def load_dlib_detector():
    from functools import partial

    from menpodetect import load_dlib_frontal_face_detector
    dlib_detector = load_dlib_frontal_face_detector()
    return partial(dlib_detector, greyscale=False)
Example #16
    def Train(self):

        self.face_detector = load_dlib_frontal_face_detector()
Example #17
def load_detector():
    return load_dlib_frontal_face_detector()
Example #18
import sys

import cv2
import menpo
import hickle
import menpodetect


def add_landmarks(mat, shape):
    for i in range(0, 68):
        cv2.circle(mat,
                   center=(int(shape.points[i][1]), int(shape.points[i][0])),
                   radius=3,
                   color=(0, 255, 0),
                   thickness=-1)


model = hickle.load(sys.argv[1], safe=False)
face_detector = menpodetect.load_dlib_frontal_face_detector()

WIDTH = 640
HEIGHT = 480

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)

# Num of perturbations of the initial shape within a bounding box.
n_inits = 1

ret, orig = cap.read()
orig_menpo = menpo.image.Image(orig.mean(axis=2) / 255.0)

while True:
Example #19
    def __init__(self, extract_opts=None, process_opts=None, output_dir=None):
        r"""

        Parameters
        ----------
        extract_opts : `dict` holding the configuration for feature extraction
            For a complete description of some parameters, please refer
            to their upstream documentation in the menpofit project
            Must specify the following options:
            ``warp`` : `holistic` or `patch`;
                chooses between menpofit.aam.HolisticAAM and menpofit.aam.PatchAAM
            ``resolution_scales`` : `tuple` of `floats` between 0.0 and 1.0
                A pyramid of AAMs will be created, one for each element in the tuple
                A value of 1.0 corresponds to the full resolution images, 0.5 to a half and so on
            ``patch_shape`` : `tuple` of `tuple` of two `ints`
                Parameter required when ``warp`` is `patch`
                One tuple per resolution scale
                The patch shape is specified as a window of MxN pixels around each landmark
            ``max_shape_components`` : `int` or `list` of `ints`
                maximum number of eigenvectors (per resolution scale) kept from shape PCA
                The true value can be less than the max, depending on the variance in the training images
            ``max_appearance_components`` : `int` or `list` of `ints`
                maximum number of eigenvectors (per resolution scale) kept from texture PCA
                The true value can be less than the max, depending on the variance in the training images
            ``diagonal`` : `int` serving as the diagonal size of the rescaled training images
            ``features`` : `no_op`, `hog`, `dsift`, `fast_dsift`
                `no_op` uses the image pixels for the texture model
                `hog, dsift, fast_dsift` extract popular image descriptors instead
            ``landmark_dir`` : `str`, directory containing the facial landmarks for the training images
            ``landmark_group`` : `pts_face`, `pts_chin`, `pts_lips`
                `pts_face` constructs a full facial model using all the 68 landmark points
                `pts_chin` uses landmarks [2:15) plus [48:68) to model the chin and lips region
                `pts_lips` uses only [48:68) to model the lip region
            ``confidence_thresh`` : `float` in range [0:1]
                Makes use of the OpenFace average confidence score, keeping only the frames above this threshold
            ``kept_frames`` : `float` in range [0:1]
                Samples the remaining video frames (above the confidence threshold) to keep only a small proportion
                This avoids training the AAM with a large number of consecutive video frames
                Before sampling, the frames from each video are sorted by the amount of lip opening.
                Then sampling is done at evenly spaced intervals
            ``greyscale`` : `boolean`; if ``True``, converts the frames to a single channel of grey / luminance levels
                if ``False``, the model is built on the original RGB channels
            ``model_name`` : `str`; name of the AAM pickle object to be stored offline

        process_opts : `dict` holding the configuration for feature processing
            Must specify the following options:
            ``face_detector`` : `dlib` or `opencv` or `dpm`
                Selects the implementation that detects a face in an image
                `dlib` is the fastest, `dpm` may be more accurate (check G.Chrysos, Feb 2017)
            ``landmark_fitter`` : `aam` or `ert`
                Selects the algorithm that fits the landmarks on a detected face
                `ert` uses a model pre-trained on challenging datasets
                `aam` may use your own model
            ``aam_fitter`` : `str`, full file name storing an AAM pickle to be used for landmark fitting
                Mandatory if ``landmark_fitter`` is AAM
            ``parameters_from`` : `fitting`, `aam_projection`
                If `fitting`, the shape and appearance parameters optimized by the Lucas-Kanade fitting algorithm
                are returned. If `aam_projection`, only the final shape of the fitting process will be used, initializing
                another fitter based on a new AAM specified below
            ``projection_aam`` : `str`, full file name storing an AAM pickle to be used in the process described above
            ``shape`` : `face`, `chin` or `lips`
                Chooses an AAM that may describe an entire face, or sub-parts of it
                If `chin` or `lips`, the associated landmarks will be selected from the face fitting process,
                then a few more iterations of a fitting algorithm will be run using the part AAM specified below
            ``part_aam`` : `None` or a `str` representing the file storing a part AAM pickle (chin or lips)
                Must be different from `None` if `shape` is `chin` or `lips`
                Such part_AAM can be obtained by choosing the ``landmark_group`` parameter accordingly in the
                extraction process
            ``confidence_thresh`` : `float`, DEPRECATED
                It was used to filter out the frames having a confidence threshold for the landmarks lower than
                this value. Their corresponding features were simply arrays of zeros. Now we consider every frame
                where a face is detected.
            ``shape_components`` : `int` or `list` of `ints` (one per resolution scale)
                Selects the number of the kept shape eigenvectors for the projection and fitter AAMs
                The shape feature size will be up to this value
            ``appearance_components`` : `int` or `list` of `ints` (one per resolution scale)
                Selects the number of the kept texture eigenvectors for the projection and fitter AAMs
                The appearance feature size will be up to this value
            ``max_iters`` : `int` or `list` of `ints` (one per resolution scale)
                Selects the number of iterations (per resolution scale) of the optimisation algorithm
                Only used for the fitter AAM, since 0 iterations are used with the projection AAM
            ``landmark_dir`` : `str`, directory containing the ground-truth facial landmarks
                for every frame of each video. Used only to compute an error between prediction and ground-truth.
                Can be `None` if the error log is not necessary
            ``log_errors`` : `boolean`
                If ``True``, generates a log file per video, stating the models used
                and the prediction error for each frame
            ``log_dir`` : `str`, directory to store the error logs above

        output_dir : `str`, absolute path where the features are to be stored
        """
        self._outDir = output_dir
        if extract_opts is not None:
            self._extractOpts = extract_opts

            self._warpType = extract_opts['warp']
            self._landmarkDir = extract_opts['landmark_dir']
            self._landmarkGroup = extract_opts['landmark_group']
            self._max_shape_components = extract_opts['max_shape_components']
            self._max_appearance_components = extract_opts['max_appearance_components']
            self._diagonal = extract_opts['diagonal']
            self._scales = extract_opts['resolution_scales']
            self._confidence_thresh = extract_opts['confidence_thresh']
            self._kept_frames = extract_opts['kept_frames']
            if extract_opts['features'] == 'fast_dsift':
                self._features = fast_dsift
            elif extract_opts['features'] == 'dsift':
                self._features = dsift
            elif extract_opts['features'] == 'hog':
                self._features = hog
            elif extract_opts['features'] == 'no_op':
                self._features = no_op
            else:
                raise Exception('Unknown feature type to extract, did you mean fast_dsift ?')

            self._greyscale = extract_opts.get('greyscale', False)

            self._outModelName = extract_opts['model_name']

        if process_opts is not None:
            # Face detection
            self._face_detect_method = process_opts['face_detector']
            if self._face_detect_method == 'dlib':
                from menpodetect import load_dlib_frontal_face_detector
                detector = load_dlib_frontal_face_detector()
            elif self._face_detect_method == 'opencv':
                from menpodetect import load_opencv_frontal_face_detector
                detector = load_opencv_frontal_face_detector()
            elif self._face_detect_method == 'dpm':
                from menpodetect.ffld2 import load_ffld2_frontal_face_detector
                detector = load_ffld2_frontal_face_detector()
            else:
                raise Exception('unknown detector, did you mean dlib/opencv/dpm?')

            self._face_detect = detector

            self._shape_components = process_opts['shape_components']
            self._appearance_components = process_opts['appearance_components']
            self._max_iters = process_opts['max_iters']

            self._fitter_type = process_opts['landmark_fitter']
            # Landmark fitter (pretrained ERT or AAM), actually loaded later to avoid pickling with Pool
            if self._fitter_type == 'aam':
                self._aam_fitter_file = process_opts['aam_fitter']

            # Parameters source
            # If fitting,
            self._parameters = process_opts['parameters_from']

            if self._parameters == 'aam_projection':
                self._projection_aam_file = process_opts['projection_aam']
                self._projection_aam = mio.import_pickle(self._projection_aam_file)
                self._projection_fitter = LucasKanadeAAMFitter(
                    aam=self._projection_aam,
                    lk_algorithm_cls=WibergInverseCompositional,
                    n_shape=self._shape_components,
                    n_appearance=self._appearance_components)

            self._confidence_thresh = process_opts['confidence_thresh']
            self._landmarkDir = process_opts['landmark_dir']

            self._shape = process_opts['shape']
            self._part_aam = process_opts['part_aam']

            self._log_errors = process_opts['log_errors']
            if self._log_errors is False:
                self._myresolver = None

            self._log_dir = process_opts['log_dir']
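
As a reading aid for the docstring above, here is an illustrative pair of option dicts. Every path and numeric value is a made-up placeholder chosen to show the expected types, not a recommended setting:

# Hypothetical values throughout -- adjust paths, scales and component
# counts to your own data.
extract_opts = {
    'warp': 'holistic',                  # or 'patch' (then add 'patch_shape')
    'resolution_scales': (0.5, 1.0),     # two-level pyramid
    'max_shape_components': [10, 16],    # one entry per scale
    'max_appearance_components': [60, 104],
    'diagonal': 150,
    'features': 'fast_dsift',            # no_op / hog / dsift / fast_dsift
    'landmark_dir': '/data/landmarks/',
    'landmark_group': 'pts_face',
    'confidence_thresh': 0.9,
    'kept_frames': 0.05,
    'greyscale': True,
    'model_name': 'aam_face.pkl',
}
process_opts = {
    'face_detector': 'dlib',             # dlib / opencv / dpm
    'landmark_fitter': 'aam',            # aam / ert
    'aam_fitter': '/models/aam_face.pkl',
    'parameters_from': 'fitting',        # or 'aam_projection' (then set 'projection_aam')
    'shape_components': [10, 16],
    'appearance_components': [60, 104],
    'max_iters': [10, 5],
    'confidence_thresh': 0.9,            # deprecated, but still read
    'landmark_dir': None,
    'shape': 'face',                     # face / chin / lips (then set 'part_aam')
    'part_aam': None,
    'log_errors': False,
    'log_dir': None,
}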
Example #20
from menpo.io import export_landmark_file
from utils import mkdir_p, check_if_path, Logger
from utils.path_and_folder_definition import *  # import paths for databases, folders and libraries
from utils.pipeline_aux import (check_img_type, im_read_greyscale, check_initial_path)
from utils.clip import Clip
from dlib import shape_predictor
from menpodetect.dlib.conversion import pointgraph_to_rect
from menpodetect import load_dlib_frontal_face_detector
from menpo.shape import PointCloud
from menpo.landmark import LandmarkGroup
from joblib import Parallel, delayed

dlib_init_detector = load_dlib_frontal_face_detector()
predictor_dlib = shape_predictor(path_shape_pred)


def main_for_generic_detector(path_clips, out_bb_fol, out_landmarks_fol):
    """
    Main function for the generic detection step.
    Processes a batch of clips in the same folder. Creates the dictionary with the paths, calls
    the processing per clip.
    :param path_clips:      str: Base path that contains the frames/lns folders.
    :param out_bb_fol:      str: Folder name for exporting the bounding box of the detection.
    :param out_landmarks_fol: str: Folder name for exporting the landmarks of the predictor.
    :return:
    """
    # define a dictionary for the paths
    paths = {}
    paths['clips'] = path_clips
    paths['out_bb'] = path_clips + out_bb_fol       # path for bbox of detection
Example #21
    def Train(self):

        self.face_detector = load_dlib_frontal_face_detector()
Example #22
def load_dlib_detector():
    from functools import partial

    from menpodetect import load_dlib_frontal_face_detector
    detector = load_dlib_frontal_face_detector()
    return partial(detector, greyscale=False)