Example #1
def test_import_as_generator():
    import types

    gen = mio.import_images(mio.data_dir_path(), as_generator=True)
    assert isinstance(gen, types.GeneratorType)
    gen = mio.import_landmark_files(mio.data_dir_path(), as_generator=True)
    assert isinstance(gen, types.GeneratorType)
Example #2
def test_import_lazy_list():
    from menpo.base import LazyList
    data_path = mio.data_dir_path()
    ll = mio.import_images(data_path)
    assert isinstance(ll, LazyList)
    ll = mio.import_landmark_files(data_path)
    assert isinstance(ll, LazyList)
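Both importers return a LazyList by default, while as_generator=True (Example #1) yields assets one at a time. A minimal sketch of the difference, using menpo's bundled data directory:

import menpo.io as mio

ll = mio.import_images(mio.data_dir_path())        # LazyList: nothing is loaded yet
first = ll[0]                                      # loads only the first image
all_images = list(ll)                              # forces every image to load

gen = mio.import_images(mio.data_dir_path(), as_generator=True)
for image in gen:                                  # assets are loaded one by one
    pass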
Example #3
def test_import_lazy_list():
    from menpo.base import LazyList
    data_path = mio.data_dir_path()
    ll = mio.import_images(data_path)
    assert isinstance(ll, LazyList)
    ll = mio.import_landmark_files(data_path)
    assert isinstance(ll, LazyList)
Example #4
def generate_frames_max_bbox(frames_path, frames_format, pts_paths, pts_formats, pts_names, save_path,
                             proportion, figure_size, overwrite, save_original,
                             render_options, only_ln=False, verbose=True):
    # find crop offset
    print('Computing max bounding box:')
    bounds_x = []
    bounds_y = []
    # bail out early if the landmark directory is missing or empty
    try:
        if len(os.listdir(pts_paths[0])) == 0:
            raise IndexError()
    except IndexError:
        if len(pts_paths) > 0:
            print('The directory of landmarks (%s) is empty, returning' % pts_paths[0])
        return
    for s in mio.import_landmark_files(pts_paths[0] + '*.pts', verbose=verbose):
        min_b, max_b = s.lms.bounds()
        bounds_x.append(max_b[0] - min_b[0])
        bounds_y.append(max_b[1] - min_b[1])
    off1 = round(max(bounds_x) * (1. + proportion) / 2)
    off2 = round(max(bounds_y) * (1. + proportion) / 2)

    print('\nLoad images, crop and save:')
    try:
        from joblib import Parallel, delayed
        Parallel(n_jobs=-1, verbose=4)(delayed(_aux)(im, pts_paths, pts_names, pts_formats, save_path, save_original,
                                                     off1, off2, figure_size, overwrite, render_options, only_ln=only_ln)
                                       for im in mio.import_images(frames_path + '*' + frames_format, verbose=False))
    except:
        print('Sequential execution')
        for im in mio.import_images(frames_path + '*' + frames_format, verbose=verbose):
            _aux(im, pts_paths, pts_names, pts_formats, save_path, save_original,
                 off1, off2, figure_size, overwrite, render_options, only_ln=only_ln)
Example #5
def generate_frames_max_bbox(frames_path, frames_format, pts_paths, pts_formats, pts_names, save_path,
                             proportion, figure_size, overwrite, save_original,
                             render_options, only_ln=False, verbose=True):
    # find crop offset
    print('Computing max bounding box:')
    bounds_x = []
    bounds_y = []
    # bail out early if the landmark directory is missing or empty
    try:
        if len(os.listdir(pts_paths[0])) == 0:
            raise IndexError()
    except IndexError:
        if len(pts_paths) > 0:
            ms = 'The directory of landmarks {} is empty, returning.'
            print(ms.format(pts_paths[0]))
        return
    for s in mio.import_landmark_files(pts_paths[0] + '*' + pts_formats[0], verbose=verbose):
        min_b, max_b = s.lms.bounds()
        bounds_x.append(max_b[0] - min_b[0])
        bounds_y.append(max_b[1] - min_b[1])
    off1 = round(max(bounds_x) * (1. + proportion) / 2)
    off2 = round(max(bounds_y) * (1. + proportion) / 2)

    print('\nLoad images, crop and save:')
    try:
        from joblib import Parallel, delayed
        Parallel(n_jobs=-1, verbose=4)(delayed(_aux)(im, pts_paths, pts_names, pts_formats, save_path, save_original,
                                                     off1, off2, figure_size, overwrite, render_options, only_ln=only_ln)
                                       for im in mio.import_images(frames_path + '*' + frames_format, verbose=False))
    except:
        print('Sequential execution')
        for im in mio.import_images(frames_path + '*' + frames_format, verbose=verbose):
            _aux(im, pts_paths, pts_names, pts_formats, save_path, save_original,
                 off1, off2, figure_size, overwrite, render_options, only_ln=only_ln)
Example #6
def PDMModel(path, max_components=None):
    training_shapes = []
    for lg in print_progress(
            mio.import_landmark_files(path / '*.pts', verbose=True)):
        training_shapes.append(lg['all'])
    # train source PDM model
    shape_model = OrthoPDM(training_shapes, max_n_components=max_components)
    return shape_model, training_shapes
Example #7
def PointDistributionModel(imgFolder):
    '''LOAD IMAGES'''
    path_to_lfpw = Path(imgFolder)
    training_shapes = []
    for lg in print_progress(
            mio.import_landmark_files(path_to_lfpw / '*.pts', verbose=True)):
        training_shapes.append(lg['all'])
    '''TRAIN PDM MODEL'''
    shape_model = OrthoPDM(training_shapes, max_n_components=None)
    '''MODIFY PARAMETERS'''
    # n_active_components accepts an int (number of components) or a float
    # (proportion of variance to keep); the second assignment overrides the first
    shape_model.n_active_components = 20
    shape_model.n_active_components = 0.95
    return shape_model
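A minimal usage sketch of the returned model; the dataset path is an assumption, and the instance/view calls are the ones shown commented out in Example #8:

shape_model = PointDistributionModel('path/to/lfpw/trainset')   # hypothetical folder of .pts files
# sample a shape from the 4-parameter similarity part of the OrthoPDM
instance = shape_model.similarity_model.instance([100., -300., 0., 0.])
instance.view(render_axes=False)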
Example #8
def pca(path_to_images, max_n_components=None):
    path_to_lfpw = Path(path_to_images)

    training_shapes = []
    for lg in print_progress(
            mio.import_landmark_files(path_to_lfpw / '*.pts', verbose=True)):
        training_shapes.append(lg)  # lg['all']

    shape_model = OrthoPDM(training_shapes, max_n_components=max_n_components)
    print(shape_model)
    # visualize_pointclouds(training_shapes)
    # instance = shape_model.similarity_model.instance([100., -300., 0., 0.])
    # instance.view(render_axes=False)
    return shape_model
Example #9
def get_shapes_from_image_folder(top_dir):
    """
    simple helper function to extract saved .pts shapes from a torchvision-like image folder
    """
    shapes = []
    folders = glob.glob(os.path.join(top_dir, '*'))
    for label, folder in enumerate(tqdm(folders)):
        for lg in print_progress(
                mio.import_landmark_files(os.path.join(folder, '*.pts'),
                                          verbose=False)):
            try:
                shapes.append(lg['all'])
            except:
                # fall back to the 'PTS' group when no 'all' group is present
                shapes.append(lg['PTS'])

    return shapes
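A quick call sketch; the directory name is an assumption, and the collected shapes can feed the PDM training shown in Example #7:

shapes = get_shapes_from_image_folder('datasets/faces_by_identity')   # hypothetical image folder
shape_model = OrthoPDM(shapes, max_n_components=None)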
Example #10
File: data_provider.py  Project: ShownX/mdm
def build_reference_shape(paths, diagonal=200):
    """Builds the reference shape.

    Args:
      paths: paths that contain the ground truth landmark files.
      diagonal: the diagonal of the reference shape in pixels.
    Returns:
      the reference shape.
    """
    landmarks = []
    for path in paths:
        path = Path(path).parent.as_posix()
        landmarks += [
            group.lms
            for group in mio.import_landmark_files(path, verbose=True)
            if group.lms.n_points == 68
        ]

    return compute_reference_shape(landmarks, diagonal=diagonal).points.astype(np.float32)
Example #11
def build_reference_shape(paths, diagonal=200):
    """Builds the reference shape.

    Args:
      paths: paths that contain the ground truth landmark files.
      diagonal: the diagonal of the reference shape in pixels.
    Returns:
      the reference shape.
    """
    landmarks = []
    for path in paths:
        path = Path(path).parent.as_posix()
        landmarks += [
            group.lms
            for group in mio.import_landmark_files(path, verbose=True)
            if group.lms.n_points == 68
        ]

    return compute_reference_shape(landmarks,
                                   diagonal=diagonal).points.astype(np.float32)
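A call sketch; the dataset paths are assumptions (each file's parent directory is scanned for landmark files) and compute_reference_shape is the project's own helper:

paths = ['data/lfpw/trainset/image_0001.pts', 'data/helen/trainset/image_0001.pts']
reference_shape = build_reference_shape(paths, diagonal=200)
print(reference_shape.shape)   # expected to be a (68, 2) float32 array of landmark coordinates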
Example #12
    def get_keys(self):
        path = self.root

        lms = mio.import_landmark_files(self.root)
        ignore_list = [l.path.stem for l in lms if l.lms.n_points != 13]

        def check_valid(x):
            if x in ignore_list:
                return False

            return all([(path / '{}+svs_dark+{:02d}.pkl'.format(x, i)).exists()
                        for i in [0, 1, 2, 4]])

        keys = [str(x.stem) for x in path.glob('*.jpg') if check_valid(x.stem)][:18000]
        self._keys = keys

        print('Found {} files.'.format(len(keys)))

        if len(keys) == 0:
            raise RuntimeError('No images found in {}'.format(path))
        return tf.constant(keys, tf.string)
Example #13
def process_lns_path(process, shapes=None, p_in=None, p_out=None, overwrite=None):
    """
    Processes a list of landmark files. The processing is performed per shape (file)
    and depends on the process function defined.
    Can be provided either the shapes directly or an import path.
    If an exporting path is provided, the bounding boxes will be
    exported there.

    :param process: (function) Process function that accepts a landmark (menpo.landmark)
                    and returns the same type processed.
    :param shapes:  (list, optional) List of shapes.
    :param p_in:    (string, optional) Input path for shapes if shapes is not provided.
    :param p_out:   (string, optional) Output path for the processed landmarks.
    :param overwrite: (bool, optional) Whether to overwrite existing files in p_out.
    :return:
    """
    if p_out is not None:
        assert(isdir(p_out))

    if shapes is None:
        # import the shapes from p_in.
        assert(isdir(p_in))
        shapes = list(mio.import_landmark_files(p_in))

    ln_out = []
    # dummy image
    im = mio.import_builtin_asset.lenna_png()
    # loop over the shapes to convert to bounding boxes.
    for ln in shapes:
        # process each shape by utilising the process function.
        im.landmarks['g'] = process(ln)

        if p_out is not None:
            # if path is provided, export it.
            mio.export_landmark_file(im.landmarks['g'], p_out + ln.path.name, 
                                     overwrite=overwrite)
        ln_out.append(im.landmarks['g'])
        
    return ln_out
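A minimal usage sketch, assuming the input and output directories below exist; the process function converts each shape to its bounding box, matching the in-code comment:

import menpo.io as mio

def to_bounding_box(lg):
    # reduce a landmark group to its 4-point bounding box
    return lg.lms.bounding_box()

bboxes = process_lns_path(to_bounding_box,
                          p_in='landmarks/',    # hypothetical input directory
                          p_out='bboxes/',      # hypothetical output directory
                          overwrite=True)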
Example #14
def test_import_as_generator():
    import types
    gen = mio.import_images(mio.data_dir_path(), as_generator=True)
    assert isinstance(gen, types.GeneratorType)
    gen = mio.import_landmark_files(mio.data_dir_path(), as_generator=True)
    assert isinstance(gen, types.GeneratorType)
Example #15
@raises(ValueError)
def test_import_landmark_files_wrong_path_raises_value_error():
    list(mio.import_landmark_files('asldfjalkgjlaknglkajlekjaltknlaekstjlakj'))
Example #16
def test_import_landmark_files_wrong_path_raises_value_error():
    with raises(ValueError):
        list(
            mio.import_landmark_files(
                'asldfjalkgjlaknglkajlekjaltknlaekstjlakj'))
Example #17
def deformabel(path_to_images):
    from pathlib import Path
    import menpo.io as mio

    path_to_lfpw = Path(path_to_images)

    image = mio.import_image(path_to_lfpw / 'image_0004.png')
    image = image.crop_to_landmarks_proportion(0.5)

    template = mio.import_image(path_to_lfpw / 'image_0018.png')
    template = template.crop_to_landmarks_proportion(0.5)

    template.view_landmarks(1,
                            marker_face_colour='white',
                            marker_edge_colour='black',
                            marker_size=4)

    from menpo.visualize import print_progress

    training_shapes = []
    for lg in print_progress(
            mio.import_landmark_files(path_to_lfpw / '*.pts', verbose=True)):
        training_shapes.append(lg)

    from menpofit.atm import HolisticATM
    from menpo.feature import igo

    atm = HolisticATM(template,
                      training_shapes,
                      group='PTS',
                      diagonal=180,
                      scales=(0.25, 1.0),
                      holistic_features=igo,
                      verbose=True)
    from menpofit.atm import LucasKanadeATMFitter, InverseCompositional

    fitter = LucasKanadeATMFitter(atm,
                                  lk_algorithm_cls=InverseCompositional,
                                  n_shape=[5, 15])

    # from menpodetect import load_dlib_frontal_face_detector
    #
    # # Load detector
    # detect = load_dlib_frontal_face_detector()
    #
    # # Detect
    # bboxes = detect(image)
    # print("{} detected faces.".format(len(bboxes)))

    initial_bbox = image.landmarks['PTS'].bounding_box()

    # # View
    # if len(bboxes) > 0:
    #     image.view_landmarks(group='dlib_0', line_colour='white',
    #                          render_markers=False, line_width=3)

    # initial bbox
    # initial_bbox = bboxes[0]

    # fit image
    result = fitter.fit_from_bb(image,
                                initial_bbox,
                                max_iters=20,
                                gt_shape=image.landmarks['PTS'].lms)

    # print result
    print(result)

    result.view(2, render_initial_shape=True)
Example #18
import cv2
import menpo.io as mio
from menpo.visualize import print_progress
from menpo.landmark import labeller, face_ibug_68_to_face_ibug_68_trimesh
from pathlib import Path

path_to_images = '/home/ice/Documents/Micro-Expression/SAMM/006/006_1_2/'
training_images = []

path_to_lfpw = Path(path_to_images)

training_shapes = []
for lg in print_progress(
        mio.import_landmark_files(path_to_lfpw / '*.pts', verbose=True)):
    training_shapes.append(lg['all'])

for img in print_progress(mio.import_images(path_to_images, verbose=True)):
    # convert to greyscale
    if img.n_channels == 3:
        img = img.as_greyscale()
    # crop to landmarks bounding box with an extra 20% padding
    img = img.crop_to_landmarks_proportion(0.2)
    # rescale image if its diagonal is bigger than 400 pixels
    d = img.diagonal()
    if d > 400:
        img = img.rescale(400.0 / d)
    # define a TriMesh which will be useful for Piecewise Affine Warp of HolisticAAM
    labeller(img, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
    # append to list