Example #1
def build_coarse_lmdet_transform(deterministic):
    """Build the image transform pipeline for coarse landmark detection."""
    if deterministic:
        # Evaluation: no augmentation, only a center crop.
        transforms = [
            fp.CenterCrop(cfg.INPUT_SIZE)
        ]
    else:
        # Training: random flip and affine jitter before the center crop.
        transforms = [
            fp.RandomHorizontalFlip(0.5),
            fp.RandomAffine(40, translate=[0.15, 0.15], scale=[0.70, 2.25], shear=15, keep_aspect=False),
            fp.CenterCrop(cfg.INPUT_SIZE)
        ]

    transforms += [fp.ToTensor(),
                   fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1]),  # VGGFace(2) channel means
                   ]
    return tf.Compose(transforms)
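For reference, here is a standalone torchvision equivalent of the deterministic branch plus the shared tail. This is only a sketch: it uses torchvision's own transforms on PIL images instead of the repo's dict-based fp transforms, and assumes a value for cfg.INPUT_SIZE:

from torchvision import transforms as tv

INPUT_SIZE = 256  # stand-in for cfg.INPUT_SIZE (assumed value)

eval_transform = tv.Compose([
    tv.CenterCrop(INPUT_SIZE),
    tv.ToTensor(),                                   # HWC uint8 -> CHW float in [0, 1]
    tv.Normalize([0.518, 0.418, 0.361], [1, 1, 1]),  # subtract VGGFace(2) channel means
])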
Example #2
    def __getitem__(self, idx):
        sample = self.annotations.iloc[idx]
        filename, id = sample.iloc[0], sample.ID
        bb = None
        landmarks_for_crop = None

        if self.crop_source == 'bb_ground_truth':
            # Crop from the ground-truth bounding box; pose is unknown here.
            bb = self.get_bounding_box(sample)
            pose = np.zeros(3, dtype=np.float32)
        else:
            # Crop from the OpenFace landmarks and use the annotated pose.
            of_conf, landmarks_for_crop = sample.of_conf, sample.landmarks_of
            pose = sample.pose

        try:
            crop, landmarks, pose, cropper = self.face_extractor.get_face(filename+'.jpg', self.fullsize_img_dir,
                                                                          self.cropped_img_dir, landmarks=landmarks_for_crop,
                                                                          bb=bb, pose=pose, use_cache=self.use_cache,
                                                                          detect_face=False, crop_type='tight',
                                                                          aligned=self.align_face_orientation)
        except Exception:
            print(filename)
            raise

        try:
            # Map the ground-truth landmarks into the crop's coordinate frame.
            landmarks, _ = cropper.apply_to_landmarks(sample.landmarks)
        except AttributeError:
            # No ground-truth landmarks for this sample; use a zero placeholder.
            landmarks = np.zeros((68, 2))

        cropped_sample = {'image': crop, 'landmarks': landmarks.astype(np.float32), 'pose': pose}

        item = self.transform(cropped_sample)

        transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
        transforms += [fp.ToTensor()]
        transforms += [fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2) channel means
        transforms = tf.Compose(transforms)

        result = transforms(item)

        result.update({
            'id': id,
            'fnames': filename,
            'expression': np.array([[0,0,0]], dtype=np.float32),
        })

        if self.return_modified_images:
            # Additionally return a randomly occluded version of the crop.
            mod_transforms = tf.Compose([fp.RandomOcclusion()])
            crop_occ = mod_transforms(item['image'])
            crop_occ = transforms(crop_occ)
            result['image_mod'] = crop_occ

        # Add Gaussian landmark heatmaps if requested.
        if self.return_landmark_heatmaps:
            result['lm_heatmaps'] = create_landmark_heatmaps(item['landmarks'], self.landmark_sigma, self.landmark_ids)
        return result
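create_landmark_heatmaps itself is not shown on this page. Below is a minimal self-contained sketch of what such a Gaussian heatmap generator typically computes; it is an illustration, not the repo's implementation:

import numpy as np

def gaussian_heatmaps(landmarks, size, sigma=9.0):
    # One heatmap per landmark: a 2D Gaussian centered on its (x, y) position.
    ys, xs = np.mgrid[0:size, 0:size].astype(np.float32)
    maps = np.zeros((len(landmarks), size, size), dtype=np.float32)
    for i, (x, y) in enumerate(landmarks):
        maps[i] = np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2))
    return maps

# Example: 68 random landmarks in a 128 x 128 crop.
heatmaps = gaussian_heatmaps(np.random.rand(68, 2) * 128, size=128)
assert heatmaps.shape == (68, 128, 128)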
Example #3
    def __init__(self, root_dir=cfg.AFLW_ROOT, train=True, color=True, start=None,
                 max_samples=None, deterministic=None, use_cache=True,
                 daug=0, return_modified_images=False, test_split='full', align_face_orientation=True,
                 return_landmark_heatmaps=False, landmark_sigma=9, landmark_ids=range(19), **kwargs):

        assert test_split in ['full', 'frontal']
        from utils.face_extractor import FaceExtractor
        self.face_extractor = FaceExtractor()

        self.use_cache = use_cache
        self.align_face_orientation = align_face_orientation

        self.return_landmark_heatmaps = return_landmark_heatmaps
        self.return_modified_images = return_modified_images
        self.landmark_sigma = landmark_sigma
        self.landmark_ids = landmark_ids

        self.mode = TRAIN if train else VAL

        self.root_dir = root_dir
        root_dir_local = cfg.AFLW_ROOT_LOCAL
        self.fullsize_img_dir = os.path.join(root_dir, 'data/flickr')
        self.cropped_img_dir = os.path.join(root_dir_local, 'crops')
        self.feature_dir = os.path.join(root_dir_local, 'features')
        self.color = color

        annotation_filename = os.path.join(cfg.AFLW_ROOT_LOCAL, 'alfw.pkl')
        self.annotations_original = pd.read_pickle(annotation_filename)
        print("Number of images: {}".format(len(self.annotations_original)))

        self.frontal_only = test_split == 'frontal'
        self.make_split(train, self.frontal_only)

        # Limit samples to the window [start, start + max_samples).
        st, nd = 0, None
        if start is not None:
            st = start
        if max_samples is not None:
            nd = st + max_samples
        self.annotations = self.annotations[st:nd]

        if deterministic is None:
            deterministic = self.mode != TRAIN
        self.transform = ds_utils.build_transform(deterministic, True, daug)

        transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
        transforms += [fp.ToTensor()]
        transforms += [fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2) channel means
        self.crop_to_tensor = tf.Compose(transforms)

        print("Number of images: {}".format(len(self)))
Example #4
    def __init__(self, root_dir, fullsize_img_dir, root_dir_local=None, train=True, color=True, start=None,
                 max_samples=None, deterministic=None, use_cache=True, detect_face=False, align_face_orientation=True,
                 return_modified_images=False, return_landmark_heatmaps=True, landmark_sigma=9, landmark_ids=range(68),
                 daug=0, **kwargs):

        from utils.face_extractor import FaceExtractor
        self.face_extractor = FaceExtractor()

        self.train = train
        self.mode = TRAIN if train else VAL
        self.use_cache = use_cache
        self.detect_face = detect_face
        self.align_face_orientation = align_face_orientation
        self.start = start
        self.max_samples = max_samples
        self.daug = daug
        self.return_modified_images = return_modified_images

        self.return_landmark_heatmaps = return_landmark_heatmaps
        self.landmark_sigma = landmark_sigma
        self.landmark_ids = landmark_ids

        self.deterministic = deterministic
        if self.deterministic is None:
            self.deterministic = self.mode != TRAIN

        self.fullsize_img_dir = fullsize_img_dir

        self.root_dir = root_dir
        self.root_dir_local = root_dir_local if root_dir_local is not None else self.root_dir

        self.cropped_img_dir = os.path.join(self.root_dir_local, 'crops')
        self.feature_dir = os.path.join(self.root_dir_local, 'features')
        self.color = color

        self.transform = ds_utils.build_transform(self.deterministic, self.color, daug)

        print("Loading annotations... ")
        self.annotations = self.create_annotations()
        print("  Number of images: {}".format(len(self.annotations)))

        self.init()
        self.select_samples()

        transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
        transforms += [fp.ToTensor()]
        transforms += [fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2) channel means
        self.crop_to_tensor = tf.Compose(transforms)
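create_annotations(), init(), and select_samples() read like template-method hooks for concrete dataset subclasses. A minimal sketch of filling in the annotation hook; the base class name and file layout are assumptions:

import os
import pandas as pd

class MyFaceDataset(FaceDataset):  # base class name assumed
    def create_annotations(self):
        # Load a pre-built annotation table from the local root directory.
        return pd.read_pickle(os.path.join(self.root_dir_local, 'annotations.pkl'))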
Example #5
import os   # needed by load_image below
import cv2  # needed for the color-space conversions below

import utils.nn
from networks import saae
import config as cfg
import landmarks.lmconfig as lmcfg
import landmarks.lmutils as lmutils

from torchvision import transforms as tf
from utils import face_processing as fp
from utils import nn, vis, face_processing

snapshot_dir = '.'


transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
transforms += [fp.ToTensor()]
transforms += [fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2) channel means
crop_to_tensor = tf.Compose(transforms)

def load_image(im_dir, fname):
    from skimage import io

    img_path = os.path.join(im_dir, fname)
    img = io.imread(img_path)
    if img is None:
        raise IOError("\tError: Could not load image {}!".format(img_path))
    # Normalize to a 3-channel RGB image regardless of the source format.
    if len(img.shape) == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    if img.shape[2] == 4:
        print(fname, "converting RGBA to RGB...")
        img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
    assert img.shape[2] == 3, "{}, invalid format: {}".format(img_path, img.shape)
    return img
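The channel handling in load_image can be verified in isolation; a quick self-contained check of the grayscale-to-RGB path using the same cv2 call:

import numpy as np
import cv2

gray = np.zeros((32, 32), dtype=np.uint8)     # single-channel input
rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)  # replicated into three channels
assert rgb.shape == (32, 32, 3)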
Example #6
    def __init__(self,
                 root_dir=cfg.AFFECTNET_ROOT,
                 train=True,
                 transform=None,
                 crop_type='tight',
                 color=True,
                 start=None,
                 max_samples=None,
                 outlier_threshold=None,
                 deterministic=None,
                 use_cache=True,
                 detect_face=False,
                 align_face_orientation=False,
                 min_conf=cfg.MIN_OPENFACE_CONFIDENCE,
                 daug=0,
                 return_landmark_heatmaps=False,
                 landmark_sigma=9,
                 landmark_ids=range(68),
                 return_modified_images=False,
                 crop_source='lm_openface',
                 **kwargs):
        assert crop_type in ['fullsize', 'tight', 'loose']
        assert crop_source in [
            'bb_ground_truth', 'lm_ground_truth', 'lm_cnn', 'lm_openface'
        ]

        self.face_extractor = FaceExtractor()

        self.mode = TRAIN if train else VAL

        self.crop_source = crop_source
        self.use_cache = use_cache
        self.detect_face = detect_face
        self.align_face_orientation = align_face_orientation
        self.return_landmark_heatmaps = return_landmark_heatmaps
        self.return_modified_images = return_modified_images
        self.landmark_sigma = landmark_sigma
        self.landmark_ids = landmark_ids

        self.start = start
        self.max_samples = max_samples

        self.root_dir = root_dir
        self.crop_type = crop_type
        self.color = color
        self.outlier_threshold = outlier_threshold
        self.transform = transform
        self.fullsize_img_dir = os.path.join(self.root_dir,
                                             'cropped_Annotated')
        self.cropped_img_dir = os.path.join(self.root_dir, 'crops',
                                            crop_source)
        self.feature_dir = os.path.join(self.root_dir, 'features')

        annotation_filename = 'training' if train else 'validation'
        path_annotations_mod = os.path.join(root_dir,
                                            annotation_filename + '.mod.pkl')
        if os.path.isfile(path_annotations_mod):
            print('Reading pickle file...')
            self._annotations = pd.read_pickle(path_annotations_mod)
        else:
            print('Reading CSV file...')
            self._annotations = pd.read_csv(
                os.path.join(root_dir, annotation_filename + '.csv'))
            print('done.')

            # Keep only the eight expression classes; AffectNet codes >= 8
            # denote none/uncertain/non-face samples.
            self._annotations = self._annotations[
                self._annotations.expression < 8]

            # Samples in annotation file are somewhat clustered by expression.
            # Shuffle to create a more even distribution.
            # NOTE: deterministic, always creates the same order
            if train:
                from sklearn.utils import shuffle
                self._annotations = shuffle(self._annotations, random_state=2)

                # remove samples with inconsistent expression<->valence/arousal values
                self._remove_outliers()

            poses = []
            confs = []
            landmarks = []
            for cnt, filename in enumerate(
                    self._annotations.subDirectory_filePath):
                if cnt % 1000 == 0:
                    print(cnt)
                filename_noext = os.path.splitext(filename)[0]
                conf, lms, pose = ds_utils.read_openface_detection(
                    os.path.join(self.feature_dir, filename_noext))
                poses.append(pose)
                confs.append(conf)
                landmarks.append(lms)
            self._annotations['pose'] = poses
            self._annotations['conf'] = confs
            self._annotations['landmarks_of'] = landmarks
            self._annotations.to_pickle(path_annotations_mod)

        # Optional training-set filters (by head-pose range, OpenFace
        # confidence, and minimum valence/arousal) were present here but
        # disabled; only the sample count is printed.
        only_good_image_for_training = True
        if train and only_good_image_for_training:
            print(len(self._annotations))

        # There is (at least) one missing image in the dataset. Remove by checking face width:
        self._annotations = self._annotations[self._annotations.face_width > 0]

        self.rebalance_classes()

        if deterministic is None:
            deterministic = self.mode != TRAIN
        self.transform = ds_utils.build_transform(deterministic, self.color,
                                                  daug)

        transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
        transforms += [fp.ToTensor()]
        transforms += [fp.Normalize([0.518, 0.418, 0.361],
                                    [1, 1, 1])]  # VGGFace(2) channel means
        self.crop_to_tensor = tf.Compose(transforms)
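The deterministic shuffle used above to de-cluster the expression labels can be demonstrated standalone; with a fixed random_state, sklearn.utils.shuffle returns the same permutation on every run:

import pandas as pd
from sklearn.utils import shuffle

df = pd.DataFrame({'expression': [0, 0, 1, 1, 2, 2]})
print(shuffle(df, random_state=2))  # identical order on every call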
Example #7
    def __init__(self, root_dir=cfg.W300_ROOT, train=True,
                 transform=None, color=True, start=None, max_samples=None,
                 deterministic=None, align_face_orientation=cfg.CROP_ALIGN_ROTATION,
                 crop_type='tight', test_split='challenging', detect_face=False, use_cache=True,
                 crop_source='bb_detector', daug=0, return_modified_images=False,
                 return_landmark_heatmaps=False, landmark_sigma=3, landmark_ids=range(68), **kwargs):

        assert crop_type in ['fullsize', 'tight', 'loose']
        test_split = test_split.lower()
        assert test_split in ['common', 'challenging', '300w', 'full']
        assert crop_source in W300.CROP_SOURCES
        lmcfg.config_landmarks('300w')

        self.start = start
        self.max_samples = max_samples
        self.use_cache = use_cache
        self.crop_source = crop_source
        self.return_landmark_heatmaps = return_landmark_heatmaps
        self.return_modified_images = return_modified_images
        self.landmark_sigma = landmark_sigma
        self.landmark_ids = landmark_ids

        self.root_dir = root_dir
        self.local_root_dir = cfg.W300_ROOT_LOCAL
        self.color = color
        self.transform = transform
        self.fullsize_img_dir = os.path.join(self.root_dir, 'images')
        self.align_face_orientation = align_face_orientation
        self.detect_face = detect_face
        self.crop_type = crop_type
        self.cropped_img_dir = os.path.join(cfg.W300_ROOT_LOCAL, 'crops', crop_source)

        self.feature_dir_cnn = os.path.join(cfg.W300_ROOT_LOCAL, 'features_cnn')
        self.feature_dir_of = os.path.join(cfg.W300_ROOT_LOCAL, 'features_of')

        self.bounding_box_dir = os.path.join(cfg.W300_ROOT, 'Bounding Boxes')

        self.split = 'train' if train else test_split
        self.build_annotations(self.split)
        print("Num. images: {}".format(len(self)))

        # Limit samples to the window [start, start + max_samples).
        st, nd = 0, None
        if start is not None:
            st = start
        if max_samples is not None:
            nd = st + max_samples
        self.annotations = self.annotations[st:nd]

        if deterministic is None:
            deterministic = not train
        if self.crop_type == 'tight':
            self.transform = ds_utils.build_transform(deterministic, True, daug)
        elif self.crop_type == 'fullsize':
            # Full-size crops are passed through unchanged.
            self.transform = lambda x: x

        from utils.face_extractor import FaceExtractor
        self.face_extractor = FaceExtractor()

        transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
        transforms += [fp.ToTensor()]
        transforms += [fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2) channel means
        self.crop_to_tensor = tf.Compose(transforms)
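The same three-step crop_to_tensor pipeline is rebuilt in every example above. It could be factored into a shared helper; a sketch against the fp, cfg, and tf names used throughout this page:

def build_crop_to_tensor():
    # Shared center-crop -> tensor -> mean-subtraction pipeline (VGGFace(2) means).
    return tf.Compose([
        fp.CenterCrop(cfg.INPUT_SIZE),
        fp.ToTensor(),
        fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1]),
    ])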