Example #1
    parser.add_argument('--train-coords', type=bool_str, default=False, help='train landmark coordinate regression')
    parser.add_argument('--lr-lm-coords', default=0.001, type=float, help='learning rate for landmark coordinate regression')

    # visualization
    parser.add_argument('--show-random-faces', default=False, action='store_true')
    parser.add_argument('--wait', default=10, type=int)

    args = parser.parse_args()

    args.color = True

    args.dataset_train = args.dataset
    args.dataset_val = args.dataset

    lmcfg.config_landmarks(args.dataset[0])

    if args.eval:
        log.info('Switching to evaluation mode...')
        args.train_ae = False
        args.batchsize_eval = 10
        args.wait = 0
        args.workers = 0
        args.print_freq_eval = 1
        args.epochs = 1

    if args.benchmark:
        log.info('Switching to benchmark mode...')
        args.eval = True
        args.train_ae = False
        args.batchsize_eval = 50
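
The --train-coords option above uses a bool_str converter that is not shown in this excerpt. Below is a minimal sketch of such an argparse type helper, assuming the usual string-to-boolean parsing; the repository's own implementation may differ.

import argparse

def bool_str(value):
    """Parse common string spellings of booleans for argparse ('1'/'true'/'yes', ...)."""
    if isinstance(value, bool):
        return value
    if value.lower() in ('1', 'true', 'yes', 'y'):
        return True
    if value.lower() in ('0', 'false', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value, got {!r}'.format(value))

With such a helper, --train-coords true and --train-coords 0 both parse into a Python bool rather than a truthy non-empty string.
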
Example #2
            result['image_mod'] = crop_occ

        if self.return_landmark_heatmaps and self.crop_type != 'fullsize':
            result['lm_heatmaps'] = create_landmark_heatmaps(
                result['landmarks'], self.landmark_sigma, self.landmark_ids)

        return result


if __name__ == '__main__':
    import torch.utils.data as td
    from utils.nn import Batch, to_numpy
    import utils.common
    from landmarks import lmconfig as lmcfg

    utils.common.init_random(3)
    lmcfg.config_landmarks('wflw')

    ds = WFLW(train=True, deterministic=True, use_cache=True, daug=0)
    # ds.filter_labels({'pose':0, 'blur':0, 'occlusion':1})
    dl = td.DataLoader(ds, batch_size=1, shuffle=False, num_workers=0)

    cfg.WITH_LANDMARK_LOSS = False

    for data in dl:
        batch = Batch(data, gpu=False)
        images = vis._to_disp_images(batch.images, denorm=True)
        # lms = lmutils.convert_landmarks(to_numpy(batch.landmarks), lmutils.LM98_TO_LM68)
        lms = batch.landmarks
        images = vis.add_landmarks_to_images(images, lms, draw_wireframe=False)
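
Example #2 attaches per-landmark heatmaps to each sample via create_landmark_heatmaps(result['landmarks'], self.landmark_sigma, self.landmark_ids). For reference, here is a minimal sketch of how such Gaussian heatmaps are commonly generated; the output resolution and the absence of peak normalization are assumptions, and the repository's helper may differ.

import numpy as np

def gaussian_heatmaps(landmarks, sigma, landmark_ids, size=128):
    """Return one (size, size) float32 Gaussian heatmap per selected landmark."""
    ys, xs = np.mgrid[0:size, 0:size]
    maps = []
    for i in landmark_ids:
        x, y = landmarks[i]  # landmark given as (x, y) pixel coordinates
        maps.append(np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2)))
    return np.stack(maps).astype(np.float32)

For a (98, 2) WFLW landmark array, gaussian_heatmaps(lms, sigma=3, landmark_ids=range(98)) yields a (98, size, size) array with one peak per landmark.
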
Example #3
    def __init__(self, root_dir=cfg.W300_ROOT, train=True,
                 transform=None, color=True, start=None, max_samples=None,
                 deterministic=None, align_face_orientation=cfg.CROP_ALIGN_ROTATION,
                 crop_type='tight', test_split='challenging', detect_face=False, use_cache=True,
                 crop_source='bb_detector', daug=0, return_modified_images=False,
                 return_landmark_heatmaps=False, landmark_sigma=3, landmark_ids=range(68), **kwargs):

        assert crop_type in ('fullsize', 'tight', 'loose')
        test_split = test_split.lower()
        assert test_split in ('common', 'challenging', '300w', 'full')
        assert crop_source in W300.CROP_SOURCES
        lmcfg.config_landmarks('300w')

        self.start = start
        self.max_samples = max_samples
        self.use_cache = use_cache
        self.crop_source = crop_source
        self.return_landmark_heatmaps = return_landmark_heatmaps
        self.return_modified_images = return_modified_images
        self.landmark_sigma = landmark_sigma
        self.landmark_ids = landmark_ids

        self.root_dir = root_dir
        self.local_root_dir = cfg.W300_ROOT_LOCAL
        self.color = color
        self.transform = transform
        self.fullsize_img_dir = os.path.join(self.root_dir, 'images')
        self.align_face_orientation = align_face_orientation
        self.detect_face = detect_face
        self.crop_type = crop_type
        self.cropped_img_dir = os.path.join(cfg.W300_ROOT_LOCAL, 'crops', crop_source)

        self.feature_dir_cnn = os.path.join(cfg.W300_ROOT_LOCAL, 'features_cnn')
        self.feature_dir_of = os.path.join(cfg.W300_ROOT_LOCAL, 'features_of')

        self.bounding_box_dir = os.path.join(cfg.W300_ROOT, 'Bounding Boxes')

        self.split = 'train' if train else test_split
        self.build_annotations(self.split)
        print("Num. images: {}".format(len(self)))

        # limit number of samples
        st, nd = 0, None
        if start is not None:
            st = start
        if max_samples is not None:
            nd = st + max_samples
        self.annotations = self.annotations[st:nd]

        if deterministic is None:
            deterministic = not train
        if self.crop_type == 'tight':
            self.transform = ds_utils.build_transform(deterministic, True, daug)
        elif self.crop_type == 'fullsize':
            self.transform = lambda x: x

        from utils.face_extractor import FaceExtractor
        self.face_extractor = FaceExtractor()

        transforms = [fp.CenterCrop(cfg.INPUT_SIZE),
                      fp.ToTensor(),
                      fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2)
        self.crop_to_tensor = tf.Compose(transforms)
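
A hypothetical usage sketch for the W300 dataset defined above, mirroring the WFLW __main__ block from Example #2; the argument values are illustrative only, and the printed fields depend on what __getitem__ actually returns.

import torch.utils.data as td

ds = W300(train=False, test_split='challenging', deterministic=True, use_cache=True, daug=0)
dl = td.DataLoader(ds, batch_size=1, shuffle=False, num_workers=0)

for data in dl:
    # print the shape (or type) of every field in the first batch, then stop
    print({k: getattr(v, 'shape', type(v)) for k, v in data.items()})
    break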