args.scale_range),
                                                    ratio=(1, 1))
    cropper['val'] = cropper['train']
else:
    cropper['train'] = transforms.RandomCrop((args.im_dim, args.im_dim))
    cropper['val'] = transforms.CenterCrop((args.im_dim, args.im_dim))

# we will train with data augmentation using random crops, flips, and jittering
# we will validate with central crops
data_transforms = {
    'train':
    transforms.Compose([
        transforms.Resize(int((256 / 224) * args.im_dim)), cropper['train'],
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=.5,
                               contrast=.5,
                               saturation=.5,
                               hue=.5),
        transforms.ToTensor(), normalize
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(int((256 / 224) * args.im_dim)), cropper['val'],
        transforms.ToTensor(), normalize
    ])
}
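
# Quick sanity check of the composed 'train' pipeline on a single image.
# Illustrative sketch only: 'sample.jpg' is a placeholder path, not part of the
# snippet above; the output size follows args.im_dim.
from PIL import Image

_img = Image.open('sample.jpg').convert('RGB')
_x = data_transforms['train'](_img)
print(_x.shape)  # torch.Size([3, args.im_dim, args.im_dim]) after resize + crop
print(_x.dtype)  # torch.float32, normalized with the ImageNet mean/std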

# load the entry-level info for ImageNet
with open(f'{DATA_DIR}/imagesets/entrylevel-all_imagenet_info.pkl', 'rb') as f:
    entry_dict = pickle.load(f)

Example #2

if 'subset-' in opt['dataset']:
    data_transforms['test'] = data_transforms['val']
    phases = ['train', 'val', 'test']
else:
    phases = ['train', 'val']

id_phases = ['train', 'val']
if 'gfmt' in opt['dataset'].lower():
    id_phases = []
    data_transforms = {
        'val':
        transforms.Compose([
            transforms.RandomRotation(degrees=30),
            transforms.ColorJitter(brightness=.5,
                                   contrast=.5,
                                   saturation=.5,
                                   hue=.5),
            transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
            transforms.Resize((opt['im_dim'], opt['im_dim'])),
            transforms.ToTensor(), normalize
        ])
    }
    image_datasets = {
        x: MyImageFolder(os.path.join(data_dir), data_transforms[x])
        for x in ['val']
    }
    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=opt['batch_size'],
                                       shuffle=False,
                                       num_workers=args.num_workers)
        for x in ['val']
    }

Example #3

import numbers
import platform
from contextlib import contextmanager
from matplotlib import colors

from familiarity.transforms import functional as F
import familiarity.transforms as transforms

NORMALIZE = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
)

DATA_TRANSFORM = transforms.Compose([
    transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    NORMALIZE,
])
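
# Example application of DATA_TRANSFORM to one image (sketch). 'face.jpg' is a
# placeholder path; 'square_constant' is a custom padding mode provided by
# familiarity.transforms, assumed here to pad the image to a square before the
# 224x224 resize.
from PIL import Image

_img = Image.open('face.jpg').convert('RGB')
_tensor = DATA_TRANSFORM(_img)   # float tensor of shape (3, 224, 224)
_batch = _tensor.unsqueeze(0)    # add a batch dimension for a forward pass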

# on a cluster unless the platform string indicates local macOS (Darwin) or Ubuntu
pltv = platform.version()
ON_CLUSTER = not ('Darwin' in pltv or 'Ubuntu' in pltv)


def get_layers_of_interest(net):
    if 'vgg16' in net:
        layers_of_interest = [0, 5, 10, 17, 24, 31, 33, 36, 38, 39]
        layer_names = ['image', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8', 'prob']
    elif 'vgg_m_face_bn_dag' in net:
        layers_of_interest = [0, 4, 8, 11, 14, 18, 21, 24, 25]  # 0th layer is the image
        layer_names = ['image', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'FC6', 'FC7', 'class-prob']
    elif 'vgg_face_dag' in net:
        ...  # truncated in the original snippet
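
# Hypothetical usage of get_layers_of_interest, assuming it returns the pair
# (layers_of_interest, layer_names) built above; the snippet is cut off before
# its return statement.
_idx, _names = get_layers_of_interest('vgg16')
for _i, _name in zip(_idx, _names):
    print(f'{_name:>10s} -> module index {_i}')  # index 0 refers to the input image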

Example #4

    if not opt['no_gpu']:
        torch.cuda.manual_seed_all(1)
        torch.backends.cudnn.deterministic = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # we will train with data augmentation using random crops, flips, and jittering
    # we will validate with central crops
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop((224, 224)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=.5,
                                   contrast=.5,
                                   saturation=.5,
                                   hue=.5),
            transforms.ToTensor(), normalize
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop((224, 224)),
            transforms.ToTensor(), normalize
        ]),
        'test':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop((224, 224)),
            transforms.ToTensor(), normalize
        ])
    }

Example #5

            raise NotImplementedError('net:{} not implemented yet'.format(
                opt['net']))

        # reproducible results
        torch.random.manual_seed(1)
        np.random.seed(1)
        if not opt['no_gpu']:
            torch.cuda.manual_seed_all(1)
            torch.backends.cudnn.deterministic = True

        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        data_transform = transforms.Compose([
            transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
            transforms.Resize((opt['im_dim'], opt['im_dim'])),
            transforms.ToTensor(),
            normalize,
        ])
        data_dir, _ = get_data_dir(opt['dataset'],
                                   opt['datasubset'],
                                   exclude_keys,
                                   id_thresh=opt['id_thresh'],
                                   n_val=opt['n_val'])
        image_dataset = MyImageFolder(data_dir, data_transform)
        dataloader = torch.utils.data.DataLoader(image_dataset,
                                                 batch_size=opt['batch_size'],
                                                 shuffle=False,
                                                 num_workers=4)
        dataset_size = len(image_dataset)
        class_names = image_dataset.classes
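
        # Minimal evaluation loop over the dataloader (sketch): assumes `model` is an
        # already-loaded network and `device` the target torch.device; neither is
        # defined in the snippet above.
        model.eval()
        all_preds = []
        with torch.no_grad():
            for inputs, labels in dataloader:
                outputs = model(inputs.to(device))  # forward pass on one batch
                all_preds.append(outputs.argmax(dim=1).cpu())
        preds = torch.cat(all_preds)  # one predicted class index per image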