from typing import List

import joblib
from menpo.image import Image
from menpofit.aam import LucasKanadeAAMFitter
from tqdm import tqdm


def eval_aams(aam_file, images: List[Image]):
    aam = joblib.load(aam_file)

    # Recover the training resolution encoded in the filename
    # (e.g. "..._resolution_400_..."); the second dimension is a
    # quarter of the parsed value.
    img_size = int(aam_file.split("resolution_")[1].split("_", 1)[0])
    img_size = (img_size, img_size // 4)

    fitter = LucasKanadeAAMFitter(aam)

    for i in tqdm(images):
        i = i.resize(img_size)
        # Initialise from the bounding box of the last landmark group and
        # pass the same group as ground truth for error reporting
        fitting_result = fitter.fit_from_bb(
            i,
            i.landmarks[i.landmarks.group_labels[-1]].bounding_box(),
            max_iters=50,
            gt_shape=i.landmarks[i.landmarks.group_labels[-1]],
        )
        print(fitting_result)
Example #2
import os

import menpo
import menpo.io as menpoio
import numpy
from menpofit.aam import HolisticAAM, LucasKanadeAAMFitter

# FaceDetectViola, FaceDetectMenpo, GreyscaleConversionMenpo, GetLargestROI,
# ConvertRectToMenpoBoundingBox and float32_fast_dsift are assumed to be
# project-local helpers importable from the surrounding package.


class AAM:
    '''
    Initialise members and train AAM

    \param[in]  i_dataset   Full filepath to training and test dataset
    \param[in]  i_debug     True to display debug info
    '''
    def __init__(self, i_dataset, i_debug=False):

        self.debug = i_debug
        self.dataset = i_dataset

        if not os.path.exists(self.dataset):
            raise RuntimeError('Dataset directory does not exist: ' +
                               self.dataset)

        self.Train()

        self.viola_face_detector = FaceDetectViola(False)
        self.menpo_face_detector = FaceDetectMenpo()

        if self.debug:
            self.PrintDebug()

    '''
    Load training images and annotated landmarks from a training set in the file system
    '''

    def LoadDataset(self):

        trainset = os.path.join(self.dataset, 'trainset', '*')
        training_images = [
            self.LoadImage(img)
            for img in menpoio.import_images(trainset, verbose=True)
        ]

        return training_images

    '''
    Crop the image around its landmarks (0.1 referenced from AAMs Basics) and convert it to greyscale

    \param[in]  i_img   Menpo image to process
    \return processed menpo image
    '''

    def LoadImage(self, i_img, i_landmark_crop=0.5):

        img = i_img.crop_to_landmarks_proportion(i_landmark_crop)
        img = GreyscaleConversionMenpo(img)

        return img

    '''
    Train an Active Appearance Model and build the Lucas-Kanade fitter

    \param[in]  i_diag                  Diagonal (in pixels) to which training images are normalised
    \param[in]  i_scale                 Pyramid scales: (fit) || (initial, fit)
    \param[in]  i_max_greyscale_dims    Dimensionality limit for PCA appearance model
    \param[in]  i_max_shape_dims        Dimensionality limit for PCA shape model
    '''

    def Train(self,
              i_diag=150,
              i_scale=[0.5, 1.0],
              i_max_greyscale_dims=200,
              i_max_shape_dims=20):

        # laterals tuned for performance gain - Sacrifice mouth modes
        self.model = HolisticAAM(
            self.LoadDataset(),
            group='PTS',
            verbose=True,
            holistic_features=float32_fast_dsift,
            diagonal=i_diag,
            scales=i_scale,
            max_appearance_components=i_max_greyscale_dims,
            max_shape_components=i_max_shape_dims)

        self.fitter = LucasKanadeAAMFitter(self.model,
                                           n_shape=[5, 15],
                                           n_appearance=[50, 150])

    '''
    Fit an appearance model to an image with annotated landmarks

    \return Converged candidate fit
    '''

    def FitAnnotatedImage(self, i_img):

        gt = i_img.landmarks['PTS'].lms
        initial_shape = self.fitter.perturb_from_bb(gt, gt.bounding_box())

        return self.fitter.fit_from_shape(i_img, initial_shape, gt_shape=gt)

    '''
    Fit an appearance model to an image without annotations using Menpo Face Detection

    \return Converged landmarks
    '''

    def FitWildImageMenpo(self, i_img, i_initial_guess=None, i_max_iters=10):

        # Convert menpo image to greyscale
        i_img = GreyscaleConversionMenpo(i_img)

        ret = None
        if i_initial_guess is not None:
            pts = menpo.shape.PointCloud(i_initial_guess, False)
            ret = self.fitter.fit_from_shape(i_img, pts,
                                             i_max_iters).final_shape.points
        else:
            bb = self.menpo_face_detector.Detect(i_img)
            if bb is not None:
                ret = self.fitter.fit_from_bb(i_img, bb,
                                              i_max_iters).final_shape.points

        return ret

    '''
    Fit an appearance model to an image without annotations using Viola Face Detection

    \return Converged landmarks
    '''

    def FitWildImageViola(self, i_img, i_initial_guess=None, i_max_iters=10):

        # Convert menpo image to greyscale
        i_img = GreyscaleConversionMenpo(i_img)

        # Convert to the 8-bit array expected by OpenCV
        img = i_img.pixels[0] * 255
        img = numpy.array(img, dtype=numpy.uint8)

        # Detector parameters tuned experimentally on the lfpw testset
        ret = None

        if i_initial_guess is None:
            faces = self.viola_face_detector.Detect(img, 3, 1.1, 0.125, 1.0)

            # Fit candidate model to the largest detected face
            if len(faces) > 0:
                faces = [GetLargestROI(faces)]

                faces = ConvertRectToMenpoBoundingBox(faces)
                fit = self.fitter.fit_from_bb(i_img, faces[0], i_max_iters)
                ret = fit.final_shape.points
        else:
            pts = menpo.shape.PointCloud(i_initial_guess, False)
            ret = self.fitter.fit_from_shape(i_img, pts,
                                             i_max_iters).final_shape.points

        return ret

    '''
    Print debug information for the AAM class
    '''

    def PrintDebug(self):

        print('Dataset', self.dataset)
        print(self.model)
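
A minimal usage sketch, assuming a dataset root that contains the "trainset" subdirectory expected by LoadDataset; the paths below are hypothetical.

import menpo.io as menpoio

# Hypothetical dataset root with trainset/* images and PTS annotations.
model = AAM("datasets/lfpw", i_debug=True)

# Fit one annotated image and inspect the converged shape.
test_img = model.LoadImage(menpoio.import_image("datasets/lfpw/testset/image_0001.png"))
result = model.FitAnnotatedImage(test_img)
print(result.final_shape)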
Example #3
    # Fragment: assumes an enclosing loop that defines `image_road`/`img`,
    # plus `coordinate_path`, `fitter` and `chen_get_bbx` from earlier code.
    # image_road = R'C:\Users\chen\Desktop\img_above_1k_resolutoin\img_red_dot/' + str(i) + '.jpg'
    image = mio.import_image(image_road)
    resolution = image.shape[1]
    # image = image.as_greyscale()

    # note_base_road = R"D:\labwork\ao's_menpo\landmark_dateset\AnnotationsByMD\chen_format"
    # current_note_road = note_base_road + '/' + str(i) + '.txt'
    # bboxes, ground_truth_np = chen_get_bbx(current_note_road)

    txt_path = os.path.join(coordinate_path, img.split(".")[0] + ".txt")
    print(txt_path)
    # Parse the annotation file once for both the box and the ground truth
    bboxes, ground_truth_np = chen_get_bbx(txt_path)

    result = fitter.fit_from_bb(image, bboxes, max_iters=[30, 5])

    # 35 landmarks; swap (row, col) into (x, y) order before saving
    pre_landmarks = result.final_shape.as_vector().copy()
    pre_landmarks.resize((35, 2))
    pre_landmarks[:, [0, 1]] = pre_landmarks[:, [1, 0]]

    # chen_comput_error(ground_truth_np, pre_landmarks)

    # scale = 940 / resolution
    # chen_comput_relative_error(ground_truth_np * scale, pre_landmarks * scale)

    # # save
    save_path = R'C:\Users\chen\Desktop\250\predict'
    tmp_path = os.path.join(save_path, img.split(".")[0] + ".txt")
    print(tmp_path)
    np.savetxt(tmp_path, pre_landmarks, fmt='%d', newline='\r\n')
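
chen_get_bbx is a project-local helper; a hypothetical sketch of what it is assumed to do here - read an N x 2 landmark text file and return a menpo bounding box plus the raw ground-truth array:

import numpy as np
from menpo.shape import bounding_box

def chen_get_bbx(txt_path):
    # Assumed format: one "row col" landmark pair per line.
    pts = np.loadtxt(txt_path)
    # Build the box spanning the landmark extrema ((row, col) order).
    bbox = bounding_box(pts.min(axis=0), pts.max(axis=0))
    return bbox, pts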
Example #5
import numpy as np

import menpo.io as mio
from menpo.feature import fast_dsift
from menpo.shape import PointDirectedGraph
from menpo.visualize import print_progress
from menpofit.aam import (HolisticAAM, LucasKanadeAAMFitter,
                          WibergInverseCompositional)


def fit(path_to_images, path_to_test, c, r, w):
    training_images = []
    for img in print_progress(mio.import_images(path_to_images, verbose=True)):
        # convert to greyscale
        if img.n_channels == 3:
            img = img.as_greyscale()
        # crop to landmarks bounding box with an extra 20% padding
        img = img.crop_to_landmarks_proportion(0.2)
        # rescale image if its diagonal is bigger than 1000 pixels
        d = img.diagonal()
        if d > 1000:
            img = img.rescale(1000.0 / d)
        # define a TriMesh which would be useful for the Piecewise Affine Warp of HolisticAAM
        # labeller(img, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
        # append to list
        training_images.append(img)

    # ## Training ribcage - Patch
    # from menpofit.aam import PatchAAM
    # from menpo.feature import fast_dsift
    #
    # patch_aam = PatchAAM(training_images, group='PTS', patch_shape=[(15, 15), (23, 23)],
    #                      diagonal=500, scales=(0.5, 1.0), holistic_features=fast_dsift,
    #                      max_shape_components=20, max_appearance_components=150,
    #                      verbose=True)

    ## Training ribcage - Holistic

    holistic_aam = HolisticAAM(training_images,
                               group='PTS',
                               diagonal=500,
                               scales=(0.5, 1.0),
                               holistic_features=fast_dsift,
                               verbose=True,
                               max_shape_components=20,
                               max_appearance_components=150)

    ## Prediction

    fitter = LucasKanadeAAMFitter(holistic_aam,
                                  lk_algorithm_cls=WibergInverseCompositional,
                                  n_shape=[5, 20],
                                  n_appearance=[30, 150])

    image = mio.import_image(path_to_test)

    # initialise a square box of side w centred at (r, c), built as a
    # directed 4-cycle graph (menpo indexes points as (row, col))

    adjacency_matrix = np.array([
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
        [1, 0, 0, 0],
    ])
    # points = np.array([[0,0], [0,2020], [2020, 2020], [2020, 0]])
    points = np.array([[r - w / 2, c - w / 2], [r - w / 2, c + w / 2],
                       [r + w / 2, c + w / 2], [r + w / 2, c - w / 2]])
    graph = PointDirectedGraph(points, adjacency_matrix)
    box = graph.bounding_box()

    # initial bbox
    initial_bbox = box

    # fit image
    result = fitter.fit_from_bb(image, initial_bbox, max_iters=[15, 5])

    pts = result.final_shape.points
    return pts
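
A minimal call sketch; the training glob, test path, and box parameters below are hypothetical (the commented-out corner points above suggest a 2020-pixel box).

# (r, c) is the box centre in (row, col) order, w its side length.
landmarks = fit("datasets/ribcage/train/*.png",
                "datasets/ribcage/test/chest_001.png",
                c=1010, r=1010, w=2020)
print(landmarks.shape)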