    print('using rescaled images')
    cropper['train'] = transforms.RandomResizedCrop(args.im_dim,
                                                    scale=tuple(
                                                        args.scale_range),
                                                    ratio=(1, 1))
    cropper['val'] = cropper['train']
else:
    cropper['train'] = transforms.RandomCrop((args.im_dim, args.im_dim))
    cropper['val'] = transforms.CenterCrop((args.im_dim, args.im_dim))

# we will train with data augmentation using random crops, flips, and jittering
# we will validate with central crops
data_transforms = {
    'train':
    transforms.Compose([
        transforms.Resize(int((256 / 224) * args.im_dim)), cropper['train'],
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=.5,
                               contrast=.5,
                               saturation=.5,
                               hue=.5),
        transforms.ToTensor(), normalize
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(int((256 / 224) * args.im_dim)), cropper['val'],
        transforms.ToTensor(), normalize
    ])
}
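
# --- Usage sketch (not part of the original snippet): one common way to wire
# these transforms into loaders is via one ImageFolder dataset per phase.
# `args.data_dir`, `args.batch_size`, and the train/val folder layout are
# assumptions for illustration, not names taken from this code.
import os
import torch
from torchvision import datasets

image_datasets = {
    phase: datasets.ImageFolder(os.path.join(args.data_dir, phase),
                                data_transforms[phase])
    for phase in ['train', 'val']
}
dataloaders = {
    phase: torch.utils.data.DataLoader(image_datasets[phase],
                                       batch_size=args.batch_size,
                                       shuffle=(phase == 'train'),
                                       num_workers=4)
    for phase in ['train', 'val']
}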

# load the entry level info for imagenet
Example #2
        degrees=30) if args.data_aug else None
rotator_val = FixedRotation(
    180) if args.inverted or args.inverted_test else None
cropper_train = transforms.RandomCrop(opt['im_dim'])
cropper_val = transforms.FiveCrop(
    opt['im_dim']) if args.crop5 else transforms.CenterCrop(opt['im_dim'])
tensor_maker = transforms.Lambda(
    lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops]
                              )) if args.crop5 else transforms.ToTensor()
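
# Sketch (assumption, not part of the original code): when args.crop5 is set,
# the val loader yields tensors of shape (batch, n_crops, C, H, W) because of
# FiveCrop + tensor_maker above. A common eval-time pattern is to fold the crop
# dimension into the batch and average the per-crop outputs; the helper name
# below is illustrative only.
def forward_five_crop(model, inputs):
    # inputs: (batch, n_crops, C, H, W)
    bs, n_crops, c, h, w = inputs.size()
    outputs = model(inputs.view(-1, c, h, w))           # (bs * n_crops, n_classes)
    return outputs.view(bs, n_crops, -1).mean(dim=1)    # average over the crops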

data_transforms = {
    'train':
    FlexibleCompose([
        rotator_train, flipper, jitterer,
        transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
        transforms.Resize((int(
            (256 / 224) * opt['im_dim']), int((256 / 224) * opt['im_dim']))),
        cropper_train,
        transforms.ToTensor(), normalize
    ]),
    'val':
    FlexibleCompose([
        rotator_val,
        transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
        transforms.Resize((opt['im_dim'], opt['im_dim'])), cropper_val,
        tensor_maker, normalize
    ])
}
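
# Sketch (assumption): FlexibleCompose is defined elsewhere in the repo and is
# not shown in this excerpt. Given that rotator_train, rotator_val, flipper and
# jitterer can be None, a minimal version would behave like torchvision's
# Compose but silently skip None entries:
class FlexibleCompose:
    def __init__(self, transforms_list):
        # keep only the transforms that are actually enabled
        self.transforms_list = [t for t in transforms_list if t is not None]

    def __call__(self, img):
        for t in self.transforms_list:
            img = t(img)
        return img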

if 'subset-' in opt['dataset']:
    data_transforms['test'] = data_transforms['val']
    phases = ['train', 'val', 'test']
Example #3
import numbers
import platform
from contextlib import contextmanager

import numpy as np
import torch
from matplotlib import colors

from familiarity.transforms import functional as F
import familiarity.transforms as transforms

NORMALIZE = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

DATA_TRANSFORM = transforms.Compose([
    transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    NORMALIZE,
])
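
# Usage sketch (illustrative, not part of the module): DATA_TRANSFORM maps a PIL
# image to a normalized (3, 224, 224) tensor; the file path is a placeholder.
from PIL import Image

example_img = Image.open('example.jpg').convert('RGB')     # placeholder path
example_tensor = DATA_TRANSFORM(example_img).unsqueeze(0)  # (1, 3, 224, 224)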

pltv = platform.version()
ON_CLUSTER = not ('Darwin' in pltv or 'Ubuntu' in pltv)


def get_layers_of_interest(net):
    if 'vgg16' in net:
        layers_of_interest = [0, 5, 10, 17, 24, 31, 33, 36, 38, 39]
        layer_names = ['image', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8', 'prob']
    elif 'vgg_m_face_bn_dag' in net:
        layers_of_interest = [0, 4, 8, 11, 14, 18, 21, 24, 25] #0th layer is the image
        layer_names = ['image', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'FC6', 'FC7', 'class-prob']
    #reproducible results
    torch.random.manual_seed(1)
    np.random.seed(1)
    if not opt['no_gpu']:
        torch.cuda.manual_seed_all(1)
        torch.backends.cudnn.deterministic = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # we will train with data augmentation using random crops, flips, and jittering
    # we will validate with central crops
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop((224, 224)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=.5,
                                   contrast=.5,
                                   saturation=.5,
                                   hue=.5),
            transforms.ToTensor(), normalize
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop((224, 224)),
            transforms.ToTensor(), normalize
        ]),
        'test':
Example #5
            raise NotImplementedError('net:{} not implemented yet'.format(
                opt['net']))

        #reproducible results
        torch.random.manual_seed(1)
        np.random.seed(1)
        if not opt['no_gpu']:
            torch.cuda.manual_seed_all(1)
            torch.backends.cudnn.deterministic = True

        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        data_transform = transforms.Compose([
            transforms.Pad(padding=(0, 0), padding_mode='square_constant'),
            transforms.Resize((opt['im_dim'], opt['im_dim'])),
            transforms.ToTensor(),
            normalize,
        ])
        data_dir, _ = get_data_dir(opt['dataset'],
                                   opt['datasubset'],
                                   exclude_keys,
                                   id_thresh=opt['id_thresh'],
                                   n_val=opt['n_val'])
        image_dataset = MyImageFolder(data_dir, data_transform)
        dataloader = torch.utils.data.DataLoader(image_dataset,
                                                 batch_size=opt['batch_size'],
                                                 shuffle=False,
                                                 num_workers=4)
        dataset_size = len(image_dataset)
        class_names = image_dataset.classes
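
        # Sketch (assumption, not shown in this excerpt): with shuffle=False the
        # loader is typically walked once in eval mode to collect model outputs
        # in dataset order; `model` and `device` below are illustrative
        # parameters, not names taken from this code.
        def collect_outputs(model, dataloader, device):
            model.eval()
            all_outputs, all_labels = [], []
            with torch.no_grad():
                for inputs, labels in dataloader:
                    all_outputs.append(model(inputs.to(device)).cpu())
                    all_labels.append(labels)
            return torch.cat(all_outputs), torch.cat(all_labels)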