Example #1
def test_str_mock(mock_stdout):
    print(aam)
    fitter = LucasKanadeAAMFitter(aam,
                                  algorithm=AlternatingInverseCompositional)
    print(fitter)
    print(aam2)
    fitter = LucasKanadeAAMFitter(aam2, algorithm=ProbabilisticForwardAdditive)
    print(fitter)
Example #2
    def fit(self, images, landmarks):
        num_samples = len(landmarks[0])

        self.num_landmarks_by_shape = []
        for i in range(len(landmarks)):
            self.num_landmarks_by_shape.append(len(landmarks[i][0]))
        
        aam_images = self.prepare_data_for_aam(images, landmarks)
        aam = PatchAAM(aam_images, group=None,
                       patch_shape=[(self.scales[0], self.scales[0]),
                                    (self.scales[1], self.scales[1])],
                       diagonal=150, scales=(0.5, 1.0),
                       holistic_features=fast_dsift,
                       max_shape_components=20, max_appearance_components=150,
                       verbose=True)

        self.fitter = LucasKanadeAAMFitter(aam,
                                           lk_algorithm_cls=WibergInverseCompositional,
                                           n_shape=[5, 20], n_appearance=[30, 150])
        
        pc = []
        for img in aam_images:
            pc.append(img.landmarks[None].lms)
            
        self.mean_shape = mean_pointcloud(pc)
        self.image_shape = images[0].shape

        fitting_results = []
        for img in aam_images[:10]:
            fr = self.fitter.fit_from_shape(img, self.mean_shape, gt_shape=img.landmarks[None].lms) 
            fitting_results.append(fr)
Example #3
 def train(self, train_img_path):
     train_imgs = self.load_database(train_img_path)
     train_imgs = self.equalize_hist(train_imgs)
     if self.algo == 'aam':
         trainer = HolisticAAM(train_imgs,
                               group='PTS',
                               verbose=True,
                               diagonal=120,
                               scales=(0.5, 1.0))
         self.fitter = LucasKanadeAAMFitter(trainer,
                                            n_shape=[6, 12],
                                            n_appearance=0.5)
         mio.export_pickle(self.fitter, self.model_filename)
         print('aam model trained and exported!')
     elif self.algo == 'asm':
         trainer = CLM(train_imgs,
                       group='PTS',
                       verbose=True,
                       diagonal=120,
                       scales=(0.5, 1.0))
         self.fitter = GradientDescentCLMFitter(trainer, n_shape=[6, 12])
         mio.export_pickle(self.fitter, self.model_filename)
         print('asm model trained and exported!')
     else:
         raise ValueError('algorithm must be aam or asm!')
Example #4
def train():
    path_to_images = 'lfpw/ceph_trainset/'
    training_images = []
    for img in mio.import_images(path_to_images, verbose=True):
        # if img.n_channels == 3:
        # img = img.as_greyscale()
        img = img.crop_to_landmarks_proportion(0.2)
        d = img.diagonal()
        if d > 400:
            img = img.rescale(400.0 / d)
        training_images.append(img)
    # patch_aam = PatchAAM(training_images, group='PTS', patch_shape=[(15, 15), (23, 23)],
    #                      diagonal=200, scales=(0.5, 1.0), holistic_features=fast_dsift,
    #                      max_shape_components=60, max_appearance_components=200,
    #                      verbose=True)
    patch_aam = PatchAAM(training_images,
                         group='PTS',
                         patch_shape=[(16, 19), (19, 16)],
                         diagonal=200,
                         scales=(0.5, 1),
                         holistic_features=fast_dsift,
                         max_shape_components=74,
                         max_appearance_components=175,
                         verbose=True)
    fitter = LucasKanadeAAMFitter(patch_aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=[10, 30],
                                  n_appearance=[40, 160])
    mio.export_pickle(fitter, '26_img_35_pnt.pkl', overwrite=True)
    return fitter
Example #5
    def lucasKanadeAAMFitter(self, aam):
        patch_aam = aam
        fitter = LucasKanadeAAMFitter(patch_aam,
                                      lk_algorithm_cls=WibergInverseCompositional,
                                      n_shape=[5, 20],
                                      n_appearance=[30, 150])

        return fitter
Example #6
def fitter_AAM(aam):
    from menpofit.aam import LucasKanadeAAMFitter, WibergInverseCompositional

    fitter = LucasKanadeAAMFitter(aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=[5, 20],
                                  n_appearance=[30, 150])
    return fitter
Example #7
def test_perturb_shape():
    fitter = LucasKanadeAAMFitter(aam)
    s = fitter.perturb_shape(training_images[0].landmarks[None].lms,
                             noise_std=0.08,
                             rotation=False)
    assert (s.n_dims == 2)
    assert (s.n_landmark_groups == 0)
    assert (s.n_points == 68)
Example #8
def test_obtain_shape_from_bb():
    fitter = LucasKanadeAAMFitter(aam)
    s = fitter.obtain_shape_from_bb(
        np.array([[53.916, 1.853], [408.469, 339.471]]))
    assert ((np.around(s.points) == np.around(initial_shape[0].points)).all())
    assert (s.n_dims == 2)
    assert (s.n_landmark_groups == 0)
    assert (s.n_points == 68)
Example #9
    def _load_landmark_fitter(self):
        if self._fitter_type == 'aam':
            self._aam_fitter = mio.import_pickle(self._aam_fitter_file)
            fitter = LucasKanadeAAMFitter(self._aam_fitter, lk_algorithm_cls=WibergInverseCompositional,
                                          n_shape=self._shape_components, n_appearance=self._appearance_components)
        elif self._fitter_type == 'ert':
            fitter = DlibWrapper(
                path.join(current_path, '../pretrained/shape_predictor_68_face_landmarks.dat'))
        else:
            raise Exception('unknown fitter, did you mean aam/ert?')

        self._landmark_fitter = fitter
Example #10
def train(img_generator):
    # clean up the images with the standard menpo pre-processing
    images = [menpo_img_process(img) for img in img_generator]
    # build the AAM
    # TODO implement settings for strongest AAM we can for menpofit
    aam = AAMBuilder(features=fast_dsift,
                     normalization_diagonal=120).build(images, verbose=True,
                                                       group='gt')
    fitter = LucasKanadeAAMFitter(aam, n_shape=[3, 6, 12],
                                  n_appearance=0.9)

    # return a callable that wraps the menpo fitter in order to integrate with
    # menpobench
    return MenpoFitWrapper(fitter)
Example #11
def aam_helper(aam, algorithm, im_number, max_iters, initial_error,
               final_error, error_type):
    fitter = LucasKanadeAAMFitter(aam, algorithm=algorithm)
    fitting_result = fitter.fit(
        training_images[im_number],
        initial_shape[im_number],
        gt_shape=training_images[im_number].landmarks[None].lms,
        max_iters=max_iters)
    assert_allclose(
        np.around(fitting_result.initial_error(error_type=error_type), 5),
        initial_error)
    assert_allclose(
        np.around(fitting_result.final_error(error_type=error_type), 5),
        final_error)
Example #12
def train_aam(path_to_images):
    training_images = mio.import_images(path_to_images, verbose=True)
    training_images = training_images.map(process)
    aam = HolisticAAM(training_images,
                      group='PTS',
                      diagonal=150,
                      scales=(0.5, 1.0),
                      verbose=True,
                      # holistic_features=double_igo,
                      max_shape_components=40,
                      max_appearance_components=300)
    fitter = LucasKanadeAAMFitter(aam,
                                  lk_algorithm_cls=AlternatingInverseCompositional,
                                  n_shape=[10, 40],
                                  n_appearance=[60, 300])
    return aam, fitter
Example #13
    def _load_landmark_fitter(self):
        if self._fitter_type == 'aam':
            self._aam_fitter = mio.import_pickle(self._aam_fitter_file)
            fitter = LucasKanadeAAMFitter(
                self._aam_fitter,
                lk_algorithm_cls=WibergInverseCompositional,
                n_shape=self._shape_components,
                n_appearance=self._appearance_components)
        elif self._fitter_type == 'ert':
            _, _, fitter68 = maybe_download_models()
            fitter = DlibWrapper(fitter68)
        else:
            raise Exception('unknown fitter, did you mean aam/ert?')

        self._landmark_fitter = fitter
Example #14
def eval_aams(aam_file, images: List[Image]):
    aam = joblib.load(aam_file)

    img_size = int(aam_file.split("resolution_")[1].split("_", 1)[0])
    img_size = (img_size, img_size // 4)

    fitter = LucasKanadeAAMFitter(aam)

    for i in tqdm(images):
        i = i.resize(img_size)
        fitting_result = fitter.fit_from_bb(
            i,
            i.landmarks[i.landmarks.group_labels[-1]].bounding_box(),
            max_iters=50,
            gt_shape=i.landmarks[i.landmarks.group_labels[-1]],
        )
        print("")
Example #15
    def Train(self,
              i_diag=150,
              i_scale=[0.5, 1.0],
              i_max_greyscale_dims=200,
              i_max_shape_dims=20):

        # laterals tuned for performance gain - Sacrifice mouth modes
        self.model = HolisticAAM(
            self.LoadDataset(),
            group='PTS',
            verbose=True,
            holistic_features=float32_fast_dsift,
            diagonal=i_diag,
            scales=i_scale,
            max_appearance_components=i_max_greyscale_dims,
            max_shape_components=i_max_shape_dims)

        self.fitter = LucasKanadeAAMFitter(self.model,
                                           n_shape=[5, 15],
                                           n_appearance=[50, 150])
Example #16
def process_data(dir):
    dirlist = os.listdir(dir)
    dirlist = [d for d in dirlist if not os.path.isfile(os.path.join(dir, d))]
    dirlist = sorted(dirlist)
    # load the aam model
    aam = mio.import_pickle("aam.pkl")
    # create fitter
    fitter = LucasKanadeAAMFitter(aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=16,
                                  n_appearance=104)
    # Load detector
    detector = load_dlib_frontal_face_detector()
    # load the sentences
    for j, subdir in enumerate(dirlist):
        ids = os.listdir(os.path.join(dir, subdir, "video/"))
        ids = [i for i in ids if "head" not in i]
        ids = sorted(ids)
        for k, id in enumerate(ids):
            t = time.time()
            video = os.path.join(dir, subdir, "video", id + '/')
            process_one_sentence(video, fitter, detector)
            print(j, k, time.time() - t)
Example #17
from menpofit.aam import HolisticAAM

aam = HolisticAAM(training_images,
                  group='PTS',
                  verbose=True,
                  holistic_features=custom_double_igo,
                  diagonal=120,
                  scales=(0.5, 1.0))

print(aam)

aam.view_aam_widget()

from menpofit.aam import LucasKanadeAAMFitter

fitter = LucasKanadeAAMFitter(aam, n_shape=[6, 12], n_appearance=0.5)

from menpofit.fitter import noisy_shape_from_bounding_box

fitting_results = []

for i in test_images:
    # obtain original landmarks
    gt_s = i.landmarks['PTS'].lms

    # generate perturbed landmarks
    s = noisy_shape_from_bounding_box(gt_s, gt_s.bounding_box())

    # fit image
    fr = fitter.fit_from_shape(i, s, gt_shape=gt_s)
    fitting_results.append(fr)
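A short follow-up sketch: summarizing the fits collected above. It assumes the result objects expose initial_error()/final_error(), as the test examples elsewhere on this page do (older menpofit API; newer versions may differ).

import numpy as np

# mean point-to-point errors before and after fitting (hypothetical follow-up)
initial_errors = [fr.initial_error() for fr in fitting_results]
final_errors = [fr.final_error() for fr in fitting_results]
print('mean initial error: {:.4f}'.format(np.mean(initial_errors)))
print('mean final error:   {:.4f}'.format(np.mean(final_errors)))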
Example #18
def fit(path_to_images, path_to_test, c, r, w):
    training_images = []
    for img in print_progress(mio.import_images(path_to_images, verbose=True)):
        # convert to greyscale
        if img.n_channels == 3:
            img = img.as_greyscale()
        # crop to landmarks bounding box with an extra 20% padding
        img = img.crop_to_landmarks_proportion(0.2)
        # rescale image if its diagonal is bigger than 1000 pixels
        d = img.diagonal()
        if d > 1000:
            img = img.rescale(1000.0 / d)
        # define a TriMesh which would be useful for the Piecewise Affine Warp of HolisticAAM
        # labeller(img, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
        # append to list
        training_images.append(img)

    # ## Training ribcage - Patch
    # from menpofit.aam import PatchAAM
    # from menpo.feature import fast_dsift
    #
    # patch_aam = PatchAAM(training_images, group='PTS', patch_shape=[(15, 15), (23, 23)],
    #                      diagonal=500, scales=(0.5, 1.0), holistic_features=fast_dsift,
    #                      max_shape_components=20, max_appearance_components=150,
    #                      verbose=True)

    ## Training ribcage - Holistic

    holistic_aam = HolisticAAM(training_images,
                               group='PTS',
                               diagonal=500,
                               scales=(0.5, 1.0),
                               holistic_features=fast_dsift,
                               verbose=True,
                               max_shape_components=20,
                               max_appearance_components=150)

    ## Prediction

    fitter = LucasKanadeAAMFitter(holistic_aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=[5, 20],
                                  n_appearance=[30, 150])

    image = mio.import_image(path_to_test)

    # initialize box

    adjacency_matrix = np.array([
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
        [1, 0, 0, 0],
    ])
    # points = np.array([[0,0], [0,2020], [2020, 2020], [2020, 0]])
    points = np.array([[r - w / 2, c - w / 2], [r - w / 2, c + w / 2],
                       [r + w / 2, c + w / 2], [r + w / 2, c - w / 2]])
    graph = PointDirectedGraph(points, adjacency_matrix)
    box = graph.bounding_box()

    # initial bbox
    initial_bbox = box

    # fit image
    result = fitter.fit_from_bb(image, initial_bbox, max_iters=[15, 5])

    pts = result.final_shape.points
    return pts
Example #19
def test_max_iters_exception():
    fitter = LucasKanadeAAMFitter(aam,
                                  algorithm=AlternatingInverseCompositional)
    fitter.fit(training_images[0],
               initial_shape[0],
               max_iters=[10, 20, 30, 40])
Example #20
        if d > 400:
            img = img.rescale(400.0 / d)
        # append to list
        training_images.append(img)

    patch_aam = PatchAAM(training_images,
                         group='PTS',
                         patch_shape=[(16, 19), (19, 16)],
                         diagonal=200,
                         scales=(0.5, 1.0),
                         holistic_features=fast_dsift,
                         max_shape_components=60,
                         max_appearance_components=200,
                         verbose=True)
    fitter = LucasKanadeAAMFitter(patch_aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=[10, 30],
                                  n_appearance=[40, 160])

adjacency_matrix = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1],
                             [1, 0, 0, 0]])


def chen_get_bbx(txt_path):
    coordinate = np.loadtxt(txt_path, comments='\n', delimiter=',')
    y, x = coordinate.T
    max_x = max(x)
    min_x = min(x)
    max_y = max(y)
    min_y = min(y) - 18

    # close the rectangle and build the bounding box, as in fit() above
    points = np.array([[min_x, min_y], [min_x, max_y], [max_x, max_y],
                       [max_x, min_y]])
    graph = PointDirectedGraph(points, adjacency_matrix)
    return graph.bounding_box()
Example #21
def auto_construct(pdm, images, trilist=None,
    fit_group='init', train_group='final',
    models=[], errors=[], costs=[], isplot=False,
    feature=[igo] * 10,
    diagonal=200,
    scales=(0.5, 1.0),
    n_shape=[2, 4],
    n_appearance=[20, 30],
    max_iters=10,
    generative_iter=30,
    discriminative_iter = 10,
    n_processes=24,
    inc_appearance=0,
    model_class=HolisticAAM,
    increament=False,
    update_shape=False,
    shape_forgetting_factor=1.0,
    appearance_forgetting_factor=1.0,
    export_path=None
):

    # initialisation
    DB_size = len(images) // 2
    DB1 = images[:DB_size]
    DB2 = images[DB_size:]


    init_shape = pdm.shape_models[-1].model.mean()
    n_iteration = 0

    if trilist is None:
        trilist = TriMesh(init_shape.points).trilist

    for j in range(discriminative_iter):
        i_appearance = np.array(n_appearance) + np.array(inc_appearance)
        if (i_appearance > 1).any():
            i_appearance = i_appearance.astype(int).tolist()
        else:
            i_appearance = i_appearance.tolist()
        # ------------ generative iterations -------------
        for i in range(generative_iter):
            print('Discriminative Iter: {}, Generative Iter: {}'.format(j, i))

            aam_fitter = LucasKanadeAAMFitter(pdm, n_shape=n_shape, n_appearance=i_appearance)

            pdm, error = generative_construct(
                DB1,
                aam_fitter, trilist,
                fit_group=fit_group,
                train_group=train_group,
                label='iteration_{:03d}'.format(n_iteration),
                feature=feature[j],
                diagonal=diagonal,
                scales=scales,
                original_shape_model=None if update_shape else pdm.shape_models,
                n_processes=n_processes,
                model_class=model_class,
                increament_model=pdm if increament else None,
                shape_forgetting_factor=shape_forgetting_factor,
                appearance_forgetting_factor=appearance_forgetting_factor,
                max_iters=max_iters
            )

            n_iteration += 1
            models.append(pdm)
            errors.append(error)

            if export_path:
                mio.export_pickle([images, models, errors], export_path, overwrite=True)

            if isplot:
                plot(errors)

        # ----------- discriminative iterations ------------
        aam_fitter = LucasKanadeAAMFitter(pdm, n_shape=n_shape, n_appearance=i_appearance)

        frs = mp_fit(DB2, aam_fitter, group=fit_group, n_processes=n_processes, max_iters=max_iters)
        for img, fr in zip(DB2, frs):
            img.landmarks[train_group] = fr.final_shape

        sdm = RegularizedSDM(
            DB2,
            diagonal=diagonal,
            alpha=100,
            group=train_group,
            n_iterations=4,
            scales=(0.5,0.5,1.0,1.0),
            patch_features=patch_features,
            n_perturbations=30,
            patch_shape=[(25, 25), (15, 15), (15,15), (9,9)],
            verbose=True
        )

        pdm, error = generative_construct(
            DB1,
            sdm, trilist,
            fit_group=fit_group,
            train_group=train_group,
            label='discriminative_{:02d}'.format(j),
            original_shape_model=None if update_shape else pdm.shape_models,
            feature=feature[j],
            diagonal=diagonal,
            scales=scales,
            n_processes=n_processes,
            model_class=model_class,
            increament_model=pdm if increament else None,
            shape_forgetting_factor=shape_forgetting_factor,
            appearance_forgetting_factor=appearance_forgetting_factor,
            max_iters=max_iters
        )
        models.append(pdm)
        errors.append(error)

        if export_path:
            mio.export_pickle([images, models, errors], export_path, overwrite=True)

        if isplot:
            plot(errors)

    return models[-2]
Example #22
def test_n_appearance_exception():
    fitter = LucasKanadeAAMFitter(aam, n_appearance=[10, 20])
Example #23
def test_n_shape_exception():
    fitter = LucasKanadeAAMFitter(aam, n_shape=[3, 6, 'a'])
Example #24
    def __init__(self, extract_opts=None, process_opts=None, output_dir=None):
        r"""

        Parameters
        ----------
        extract_opts : `dict` holding the configuration for feature extraction
            For a complete description of some parameters, please refer
            upstream to their documentation in the menpofit project.
            Must specify the following options:
            ``warp`` : `holistic` or `patch`;
                chooses between menpofit.aam.HolisticAAM and menpofit.aam.PatchAAM
            ``resolution_scales`` : `tuple` of `floats` between 0.0 and 1.0
                A pyramid of AAMs will be created, one for each element in the tuple
                A value of 1.0 corresponds to the full resolution images, 0.5 to a half and so on
            ``patch_shape`` : `tuple` of `tuple` of two `ints`
                Parameter required when ``warp`` is `patch`
                One tuple per resolution scale
                The patch shape is specified as a window of MxN pixels around each landmark
            ``max_shape_components`` : `int` or `list` of `ints`
                maximum number of eigenvectors (per resolution scale) kept from shape PCA
                The true value can be less than the max, depending on the variance in the training images
            ``max_appearance_components`` : `int` or `list` of `ints`
                maximum number of eigenvectors (per resolution scale) kept from texture PCA
                The true value can be less than the max, depending on the variance in the training images
            ``diagonal`` : `int` serving as the diagonal size of the rescaled training images
            ``features`` : `no_op`, `hog`, `dsift`, `fast_dsift`
                `no_op` uses the image pixels for the texture model
                `hog, dsift, fast_dsift` extract popular image descriptors instead
            ``landmark_dir`` : `str`, directory containing the facial landmarks for the training images
            ``landmark_group`` : `pts_face`, `pts_chin`, `pts_lips`
                `pts_face` constructs a full facial model using all the 68 landmark points
                `pts_chin` uses landmarks [2:15) plus [48:68) to model the chin and lips region
                `pts_lips` uses only [48:68) to model the lip region
            ``confidence_thresh`` : `float` in range [0:1]
                Makes use of the OpenFace average confidence score, keeping only the frames above this threshold
            ``kept_frames`` : `float` in range [0:1]
                Samples the remaining video frames (above the confidence threshold) to keep only a small proportion
                This avoids training the AAM with a large number of consecutive video frames
                Before sampling, the frames from each video are sorted by the amount of lip opening.
                Then sampling is done at evenly spaced intervals
            ``greyscale`` : `boolean`; if ``True``, converts the frames to a single channel of grey / luminance levels
                if ``False``, the model is built on the original RGB channels
            ``model_name`` : `str`; name of the AAM pickle object to be stored offline

        process_opts: `dict` holding the configuration for feature processing
            Must specify the following options:
            ``face_detector`` : `dlib` or `opencv` or `dpm`
                Selects the implementation that detects a face in an image
                `dlib` is the fastest, `dpm` may be more accurate (check G.Chrysos, Feb 2017)
            ``landmark_fitter`` : `aam` or `ert`
                Selects the algorithm that fits the landmarks on a detected face
                `ert` uses a model pre-trained on challenging datasets
                `aam` may use your own model
            ``aam_fitter`` : `str`, full file name storing an AAM pickle to be used for landmark fitting
                Mandatory if ``landmark_fitter`` is AAM
            ``parameters_from`` : `fitting`, `projection`
                If `fitting`, the shape and appearance parameters optimized by the Lucas-Kanade fitting algorithm
                are returned. If `projection`, only the final shape of the fitting process will be used, initializing
                another fitter based on a new AAM specified below
            ``projection_aam`` : `str`, full file name storing an AAM pickle to be used in the process described above
            ``shape`` : `face`, `chin` or `lips`
                Chooses an AAM that may describe an entire face, or sub-parts of it
                If `chin` or `lips`, the associated landmarks will be selected from the face fitting process,
                then a few more iterations of a fitting algorithm will be run using the part AAM specified below
            ``part_AAM`` : `None` or a `str` representing the file storing a part AAM pickle (chin or lips)
                Must be different from `None` if `shape` is `chin` or `lips`
                Such part_AAM can be obtained by choosing the ``landmark_group`` parameter accordingly in the
                extraction process
            ``confidence_thresh`` : `float`, DEPRECATED
                It was used to filter out the frames having a confidence threshold for the landmarks lower than
                this value. Their corresponding features were simply arrays of zeros. Now we consider every frame
                where a face is detected.
            ``shape_components`` : `int` or `list` of `ints` (one per resolution scale)
                Selects the number of the kept shape eigenvectors for the projection and fitter AAMs
                The shape feature size will be up to this value
            ``appearance_components`` : `int` or `list` of `ints` (one per resolution scale)
                Selects the number of the kept texture eigenvectors for the projection and fitter AAMs
                The appearance feature size will be up to this value
            ``max_iters`` : `int` or `list` of `ints` (one per resolution scale)
                Selects the number of iterations (per resolution scale) of the optimisation algorithm
                Only used for the fitter AAM, since 0 iterations are used with the projection AAM
            ``landmark_dir`` : `str`, directory containing the ground-truth facial landmarks
                for every frame of each video. Used only to compute an error between prediction and ground-truth.
                Can be `None` if the error log is not necessary
            ``log_errors`` : `boolean`
                If ``True``, generates a log file per video, stating the models used
                and the prediction error for each frame
            ``log_dir`` : `str`, directory to store the error logs above

        output_dir : `str`, absolute path where the features are to be stored
        """
        self._outDir = output_dir
        if extract_opts is not None:
            self._extractOpts = extract_opts

            self._warpType = extract_opts['warp']
            self._landmarkDir = extract_opts['landmark_dir']
            self._landmarkGroup = extract_opts['landmark_group']
            self._max_shape_components = extract_opts['max_shape_components']
            self._max_appearance_components = extract_opts['max_appearance_components']
            self._diagonal = extract_opts['diagonal']
            self._scales = extract_opts['resolution_scales']
            self._confidence_thresh = extract_opts['confidence_thresh']
            self._kept_frames = extract_opts['kept_frames']
            if extract_opts['features'] == 'fast_dsift':
                self._features = fast_dsift
            elif extract_opts['features'] == 'dsift':
                self._features = dsift
            elif extract_opts['features'] == 'hog':
                self._features = hog
            elif extract_opts['features'] == 'no_op':
                self._features = no_op
            else:
                raise Exception('Unknown feature type to extract, did you mean fast_dsift ?')

            if 'greyscale' in extract_opts.keys():
                self._greyscale = extract_opts['greyscale']
            else:
                self._greyscale = False

            self._outModelName = extract_opts['model_name']

        if process_opts is not None:
            # Face detection
            self._face_detect_method = process_opts['face_detector']
            if self._face_detect_method == 'dlib':
                from menpodetect import load_dlib_frontal_face_detector
                detector = load_dlib_frontal_face_detector()
            elif self._face_detect_method == 'opencv':
                from menpodetect import load_opencv_frontal_face_detector
                detector = load_opencv_frontal_face_detector()
            elif self._face_detect_method == 'dpm':
                from menpodetect.ffld2 import load_ffld2_frontal_face_detector
                detector = load_ffld2_frontal_face_detector()
            else:
                raise Exception('unknown detector, did you mean dlib/opencv/dpm?')

            self._face_detect = detector

            self._shape_components = process_opts['shape_components']
            self._appearance_components = process_opts['appearance_components']
            self._max_iters = process_opts['max_iters']

            self._fitter_type = process_opts['landmark_fitter']
            # Landmark fitter (pretrained ERT or AAM), actually loaded later to avoid pickling with Pool
            if self._fitter_type == 'aam':
                self._aam_fitter_file = process_opts['aam_fitter']

            # Parameters source
            # If fitting,
            self._parameters = process_opts['parameters_from']

            if self._parameters == 'aam_projection':
                self._projection_aam_file = process_opts['projection_aam']
                self._projection_aam = mio.import_pickle(self._projection_aam_file)
                self._projection_fitter = LucasKanadeAAMFitter(
                    aam=self._projection_aam,
                    lk_algorithm_cls=WibergInverseCompositional,
                    n_shape=self._shape_components,
                    n_appearance=self._appearance_components)
            else:
                pass

            self._confidence_thresh = process_opts['confidence_thresh']
            self._landmarkDir = process_opts['landmark_dir']

            self._shape = process_opts['shape']
            self._part_aam = process_opts['part_aam']

            self._log_errors = process_opts['log_errors']
            if self._log_errors is False:
                self._myresolver = None

            self._log_dir = process_opts['log_dir']
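A minimal configuration sketch for the constructor documented above. The class name AAMFeatureExtractor is an assumption (only __init__ is shown here), all paths are placeholders, and the keys follow the docstring.

# AAMFeatureExtractor is a hypothetical name for the class owning __init__ above
extract_opts = {
    'warp': 'patch',                      # use menpofit.aam.PatchAAM
    'resolution_scales': (0.5, 1.0),
    'patch_shape': ((15, 15), (23, 23)),  # one tuple per resolution scale
    'max_shape_components': [20, 20],
    'max_appearance_components': [150, 150],
    'diagonal': 150,
    'features': 'fast_dsift',
    'landmark_dir': '/data/landmarks/',   # placeholder
    'landmark_group': 'pts_face',
    'confidence_thresh': 0.9,
    'kept_frames': 0.05,
    'greyscale': True,
    'model_name': 'aam_face.pkl',
}
extractor = AAMFeatureExtractor(extract_opts=extract_opts,
                                process_opts=None,
                                output_dir='/data/features/')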
Example #25
def process_clip(clip_name,
                 paths,
                 training_images,
                 img_type,
                 loop,
                 svm_params,
                 mi=110,
                 d_aam=130,
                 n_s=None,
                 n_a=None):
    """
    Processes a clip. Accepts a clip (along with its params and paths), trains a person-specific
    part based AAM (pbaam) and then fits it to all the frames.
    :param clip_name:   str: Name of the clip.
    :param paths:       dict: Required paths for training/fitting/exporting data.
    :param training_images: list: List of menpo images (generic images) appended to the person specific ones.
    :param img_type:    str: Suffix (extension) of the frames, e.g. '.png'.
    :param loop:        bool: Declares whether this is a 2nd fit for AAM (loop).
    :param svm_params:  dict: Required params for SVM classification. If 'apply' is False,
    the rest are not used. Otherwise, requires reference frame and classifier loaded.
    :param mi:          int: (optional) Max images of the clip loaded for the pbaam.
    :param d_aam:       int: (optional) Diagonal of AAM (param in building it).
    :param n_s:         int/list/None: (optional) Number of shapes for AAM (as expected in menpofit).
    :param n_a:         int/list/None: (optional) Number of appearances for AAM (as expected in menpofit).
    :return:
    """
    global fitter
    # paths and list of frames
    frames_path = paths['clips'] + frames + clip_name + sep
    if not check_path_and_landmarks(frames_path, clip_name,
                                    paths['in_lns'] + clip_name):
        return False
    list_frames = sorted(os.listdir(frames_path))
    pts_p = mkdir_p(paths['out_lns'] + clip_name + sep)
    svm_p = mkdir_p(paths['out_svm'] + clip_name + sep)  # svm path

    # loading images from the clip
    training_detector = load_images(list(list_frames),
                                    frames_path,
                                    paths['in_lns'],
                                    clip_name,
                                    training_images=list(training_images),
                                    max_images=mi)

    print('\nBuilding Part based AAM for the clip {}.'.format(clip_name))
    aam = PatchAAM(training_detector,
                   verbose=True,
                   holistic_features=features,
                   patch_shape=patch_shape,
                   diagonal=d_aam,
                   scales=(.5, 1))
    del training_detector

    fitter = LucasKanadeAAMFitter(aam, n_shape=n_s, n_appearance=n_a)
    # save the AAM model (requires plenty of disk space for each model).
    aam.features = None
    export_pickle(aam, paths['out_model'] + clip_name + '.pkl', overwrite=True)
    del aam

    clip = Clip(clip_name, paths['clips'], frames,
                [paths['in_lns'], paths['in_fit_lns']], [pts_p, svm_p])
    # [process_frame(frame_name, clip, img_type, svm_params,loop) for frame_name in list_frames];
    Parallel(n_jobs=-1, verbose=4)(
        delayed(process_frame)(frame_name, clip, img_type, svm_params, loop)
        for frame_name in list_frames)
    fitter = []  # reset fitter
    return True
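A hypothetical invocation of process_clip. The paths dict keys mirror the ones read inside the function; every directory value is a placeholder, and svm_params only needs 'apply' when classification is disabled.

paths = {
    'clips': '/data/clips/',            # placeholders throughout
    'in_lns': '/data/landmarks_in/',
    'in_fit_lns': '/data/landmarks_fit/',
    'out_lns': '/data/landmarks_out/',
    'out_svm': '/data/svm_out/',
    'out_model': '/data/models/',
}
svm_params = {'apply': False}  # remaining keys are unused when 'apply' is False
process_clip('clip_001', paths, training_images=[], img_type='.png',
             loop=False, svm_params=svm_params, n_s=[5, 13], n_a=[50, 100])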
Example #26
    def get_feature(self, file, process_opts=None):
        r"""
        Computes the AAM features, according to the `process_opts`
        Parameters
        ----------
        file : `str`, path of the video file to process
        process_opts : `dict`, the processing options (see the constructor documentation)

        Returns
        -------
        A dictionary of five elements, each representing a variation of the computed features
        (shape and appearance alone or concatenated, with or without derivatives)
        """

        self._maybe_start_logging(file)
        self._load_landmark_fitter()

        frames = mio.import_video(file,
                                  landmark_resolver=self._myresolver,
                                  normalize=True,
                                  exact_frame_count=True)

        feat_shape = []
        feat_app = []
        feat_shape_app = []

        for frameIdx, frame in enumerate(frames):

            bounding_boxes = self._face_detect(frame)
            if len(bounding_boxes) > 0:
                initial_bbox = bounding_boxes[0]
                if self._log_errors is True:
                    gt_shape = frame.landmarks['pts_face']
                else:
                    gt_shape = None

                if isinstance(self._landmark_fitter, LucasKanadeAAMFitter):
                    result = self._landmark_fitter.fit_from_bb(
                        frame, initial_bbox,
                        max_iters=self._max_iters,
                        gt_shape=gt_shape)

                elif isinstance(self._landmark_fitter, DlibWrapper):  # DLIB fitter, doesn't have max_iters
                    result = self._landmark_fitter.fit_from_bb(
                        frame,
                        initial_bbox,
                        gt_shape=gt_shape)
                else:
                    raise Exception('incompatible landmark fitter')

                self._maybe_append_to_log(file, frameIdx, result)

                if self._shape == 'face':

                    if self._parameters == 'lk_fitting':
                        # skip the first 4 similarity params, probably not useful for classification
                        shape_param_frame = result.shape_parameters[-1][4:]
                        app_param_frame = result.appearance_parameters[-1]
                    elif self._parameters == 'aam_projection':
                        result_aam = self._projection_fitter.fit_from_shape(
                            frame,
                            result.final_shape,
                            max_iters=[0, 0, 0])

                        # TODO: analyse the case when aam true components are less than max components
                        shape_param_frame = result_aam.shape_parameters[-1][4:]
                        app_param_frame = result_aam.appearance_parameters[-1]

                    feat_shape.append(shape_param_frame)
                    feat_app.append(app_param_frame)
                    feat_shape_app.append(np.concatenate((shape_param_frame, app_param_frame)))

                elif self._shape == 'lips':

                    # extract lips landmarks from the final face fitting to initialize the part model fitting

                    aam_lips = mio.import_pickle(self._part_aam)
                    fitter_lips = LucasKanadeAAMFitter(aam_lips, lk_algorithm_cls=WibergInverseCompositional,
                                                       n_shape=[10, 20], n_appearance=[20, 150])

                    result_lips = fitter_lips.fit_from_shape(
                        image=frame,
                        initial_shape=_pointcloud_subset(result.final_shape, 'lips'),
                        max_iters=[5, 5])

                    shape_param_frame_lips = result_lips.shape_parameters[-1][4:]
                    app_param_frame_lips = result_lips.appearance_parameters[-1]

                    feat_shape.append(shape_param_frame_lips)
                    feat_app.append(app_param_frame_lips)
                    feat_shape_app.append(np.concatenate((shape_param_frame_lips, app_param_frame_lips)))

                elif self._shape == 'chin':

                    # extract chin and lips landmarks from the final face fitting to initialize the part model fitting

                    aam_chin = mio.import_pickle(self._part_aam)
                    fitter_chin = LucasKanadeAAMFitter(aam_chin, lk_algorithm_cls=WibergInverseCompositional,
                                                       n_shape=[10, 20, 25], n_appearance=[20, 50, 150])

                    result_chin = fitter_chin.fit_from_shape(
                        image=frame,
                        initial_shape=_pointcloud_subset(result.final_shape, 'chin'),
                        max_iters=[10, 10, 5])

                    shape_param_frame_mchin = result_chin.shape_parameters[-1][4:]
                    app_param_frame_mchin = result_chin.appearance_parameters[-1]

                    feat_shape.append(shape_param_frame_mchin)
                    feat_app.append(app_param_frame_mchin)
                    feat_shape_app.append(np.concatenate((shape_param_frame_mchin, app_param_frame_mchin)))

                else:
                    raise Exception('Unknown shape model, currently supported are: face, lips, chin')

            else:  # we did not detect any face

                zero_feat_shape = np.zeros(process_opts['shape_components'][-1])
                zero_feat_app = np.zeros(process_opts['appearance_components'][-1])
                zero_feat_shape_app = np.zeros(
                    process_opts['shape_components'][-1] + process_opts['appearance_components'][-1])

                feat_shape.append(zero_feat_shape)
                feat_app.append(zero_feat_app)
                feat_shape_app.append(zero_feat_shape_app)

        npfeat_shape = np.array(feat_shape)
        npfeat_app = np.array(feat_app)
        npfeat_shape_app = np.array(feat_shape_app)

        npfeat_app_delta = vsrmath.accurate_derivative(npfeat_app, 'delta')
        npfeat_shape_app_delta = vsrmath.accurate_derivative(npfeat_shape_app, 'delta')

        return {'shape': npfeat_shape,
                'app': npfeat_app,
                'shape_app': npfeat_shape_app,
                'app_delta': npfeat_app_delta,
                'shape_app_delta': npfeat_shape_app_delta}
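A usage sketch for the feature dictionary returned above, assuming the owning class was configured as in Example #24 (extractor and process_opts are placeholders).

# extractor / process_opts are assumed to exist, as in Example #24
feats = extractor.get_feature('/data/video/speaker1.mp4',
                              process_opts=process_opts)
shape_app = feats['shape_app']              # (n_frames, n_shape + n_appearance)
shape_app_delta = feats['shape_app_delta']  # frame-to-frame derivative, same layout
print(shape_app.shape, shape_app_delta.shape)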
Example #27
	def getAAMFitter(self, aam):
		aam_fitter = LucasKanadeAAMFitter(aam, n_shape=[5, 15], n_appearance=[50, 150])

		return aam_fitter
Example #28
    return image
# training set
path_to_images = 'D:/电信研究院/人脸矫正/labelme-master/examples/transfer'
training_images = []
for img in print_progress(mio.import_images(path_to_images, verbose=True)):
    training_images.append(process(img))
# %matplotlib inline; this step can only be visualized inside Jupyter
# from menpowidgets import visualize_images
# visualize_images(training_images)

aam = HolisticAAM(training_images, reference_shape=None,
                  diagonal=150, scales=(0.5, 1.0),
                  holistic_features=igo, verbose=True)

fitter = LucasKanadeAAMFitter(aam,
                              lk_algorithm_cls=WibergInverseCompositional,
                              n_shape=[3, 20], n_appearance=[30, 150])
# % matplotlib inline


# method to load a database
def load_database(path_to_images, crop_percentage, max_images=None):
    images = []
    # load landmarked images
    for i in mio.import_images(path_to_images, max_images=max_images, verbose=True):
        # crop image
        i = i.crop_to_landmarks_proportion(crop_percentage)

        # convert it to grayscale if needed
        if i.n_channels == 3:
            i = i.as_greyscale(mode='luminosity')

        # append to the list of images
        images.append(i)

    return images
Example #29
def aam_fit_benchmark(fitting_images,
                      aam,
                      fitting_options=None,
                      perturb_options=None,
                      verbose=False):
    r"""
    Fits a trained AAM model to a database.

    Parameters
    ----------
    fitting_images: list of :class:MaskedImage objects
        A list of the fitting images.
    aam: :class:menpo.fitmultilevel.aam.AAM object
        The trained AAM object. It can be generated from the
        aam_build_benchmark() method.
    fitting_options: dictionary, optional
        A dictionary with the parameters that will be passed in the
        LucasKanadeAAMFitter (:class:menpo.fitmultilevel.aam.base).
        If None, the default options will be used.
        This is an example of the dictionary with the default options:
            fitting_options = {'algorithm': AlternatingInverseCompositional,
                               'md_transform': OrthoMDTransform,
                               'global_transform': AlignmentSimilarity,
                               'n_shape': None,
                               'n_appearance': None,
                               'max_iters': 50,
                               'error_type': 'me_norm'
                               }
        For an explanation of the options, please refer to the
        LucasKanadeAAMFitter documentation.

        Default: None
    bounding_boxes: list of (2, 2) ndarray, optional
        If provided, fits will be initialized from a bounding box,
        interpreted as [[min_x, min_y], [max_x, max_y]]. If None,
        perturbation of the ground truth will be used instead.
    perturb_options: dictionary, optional
        A dictionary with parameters that control the perturbation on the
        ground truth shape with noise of specified std. Note that if
        bounding_box is provided perturb_options is ignored and not used.
        If None, the default options will be used.
        This is an example of the dictionary with the default options:
            initialization_options = {'noise_std': 0.04,
                                      'rotation': False
                                      }
        For an explanation of the options, please refer to the perturb_shape()
        method documentation of :map:`MultilevelFitter`.
    verbose: bool, optional
        If True, it prints information regarding the AAM fitting including
        progress bar, current image error and percentage of images with errors
        less or equal than a value.

        Default: False

    Returns
    -------
    fitting_results: :map:`FittingResultList`
        A list with the :map:`FittingResult` object per image.
    """
    if verbose:
        print('AAM Fitting:')
        perc1 = 0.
        perc2 = 0.

    # parse options
    if fitting_options is None:
        fitting_options = {}
    if perturb_options is None:
        perturb_options = {}

    # extract some options
    group = fitting_options.pop('gt_group', 'PTS')
    max_iters = fitting_options.pop('max_iters', 50)
    error_type = fitting_options.pop('error_type', 'me_norm')

    # create fitter
    fitter = LucasKanadeAAMFitter(aam, **fitting_options)

    # fit images
    n_images = len(fitting_images)
    fitting_results = []
    for j, i in enumerate(fitting_images):
        # perturb shape
        gt_s = i.landmarks[group].lms
        if 'bbox' in i.landmarks:
            # shape from bounding box
            s = fitter.obtain_shape_from_bb(i.landmarks['bbox'].lms.points)
        else:
            # shape from perturbation
            s = fitter.perturb_shape(gt_s, **perturb_options)
        # fit
        fr = fitter.fit(i, s, gt_shape=gt_s, max_iters=max_iters)
        fitting_results.append(fr)

        # print
        final_error = fr.final_error(error_type=error_type)
        initial_error = fr.initial_error(error_type=error_type)
        if verbose:
            if error_type == 'me_norm':
                if final_error <= 0.03:
                    perc1 += 1.
                if final_error <= 0.04:
                    perc2 += 1.
            elif error_type == 'rmse':
                if final_error <= 0.05:
                    perc1 += 1.
                if final_error <= 0.06:
                    perc2 += 1.
            print_dynamic('- {0} - [<=0.03: {1:.1f}%, <=0.04: {2:.1f}%] - '
                          'Image {3}/{4} (error: {5:.3f} --> {6:.3f})'.format(
                              progress_bar_str(float(j + 1) / n_images,
                                               show_bar=False),
                              perc1 * 100. / n_images, perc2 * 100. / n_images,
                              j + 1, n_images, initial_error, final_error))
    if verbose:
        print_dynamic('- Fitting completed: [<=0.03: {0:.1f}%, <=0.04: '
                      '{1:.1f}%]\n'.format(perc1 * 100. / n_images,
                                           perc2 * 100. / n_images))

    return fitting_results
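A minimal sketch of calling the benchmark above. The AAM and image list are assumed to come from aam_build_benchmark() and a database loader, as the docstring describes; the option values are only illustrative.

# fitting_images and aam are assumed to exist (see the docstring above)
fitting_options = {'n_shape': [3, 12], 'n_appearance': 0.5, 'max_iters': 50}
perturb_options = {'noise_std': 0.04, 'rotation': False}
results = aam_fit_benchmark(fitting_images, aam,
                            fitting_options=fitting_options,
                            perturb_options=perturb_options,
                            verbose=True)
final_errors = [fr.final_error(error_type='me_norm') for fr in results]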