# Example 1
def main_worker(args):
    """Train (or evaluate) a backbone network on CIFAR-100.

    Builds the model from a config file, optionally resumes from a
    checkpoint, then runs the train/validate loop, checkpointing the best
    top-1 accuracy seen so far.

    Args:
        args: namespace providing lr, momentum, weight_decay, batch_size,
            resume, start_epoch, epochs and evaluate. The best top-1
            accuracy is tracked in the module-global ``best_acc1``.
    """
    global best_acc1
    # Default so the `acc1 > best_acc1` comparison below is safe even when
    # we are not resuming from a checkpoint (previously a NameError risk if
    # the module-level global was never initialised).
    best_acc1 = 0

    # Create the model from the config file. Several alternative backbones
    # were tried here historically; see version history.
    argss = default_argument_parser().parse_args()
    argss.config_file = 'mv_to_new_home/configs/RearrNet_50.yaml'
    cfg = setup(argss)
    model = build_oinet_backbone_pretrain(cfg, 3, 100)
    model = torch.nn.DataParallel(model.cuda())

    # Define loss function (criterion) and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            # NOTE: the original called `best_acc1.to()` with no arguments,
            # which is a no-op, so it has been removed.
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading: CIFAR-100, augmented for training, deterministic for
    # validation. Images are upscaled then centre-cropped to `input_size`.
    input_size = 128
    cifar_data_path = '/ws/data/open_datasets/classification/cifar100'
    train_dataset = datasets.CIFAR100(
        cifar_data_path,
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(30),
            transforms.Resize((int(input_size * 1.4), int(input_size * 1.4))),
            transforms.CenterCrop((input_size, input_size)),
            transforms.ToTensor(),
            transforms.RandomErasing(),
            transforms.Normalize((0.5, ), (0.5, ))
        ]))
    # The original val pipeline had a second Resize to (input_size,
    # input_size) directly after a CenterCrop of the same size — a no-op,
    # removed here.
    val_dataset = datasets.CIFAR100(
        cifar_data_path,
        train=False,
        download=True,
        transform=transforms.Compose([
            transforms.Resize((int(input_size * 1.4), int(input_size * 1.4))),
            transforms.CenterCrop((input_size, input_size)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, ), (0.5, )),
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=1)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=1)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # Train for one epoch.
        train(train_loader, model, criterion, optimizer, epoch, args)

        # Evaluate on the validation set.
        acc1 = validate(val_loader, model, criterion, args)

        # Remember best acc@1 and save a checkpoint.
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            filename='/ws/data/deformed/rp_all_ckpt.pt')
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)

# Data science tools
import numpy as np
import pandas as pd
import os
from timeit import default_timer as timer

# Image transformations
image_transforms = {
    # Train uses data augmentation
    'train':
    transforms.Compose([
        transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
        transforms.RandomRotation(degrees=15),
        transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        transforms.CenterCrop(size=224),  # Image net standards
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])  # Imagenet standards
    ]),
    # Validation does not use augmentation
    'val':
    transforms.Compose([
        transforms.Resize(size=256),
        transforms.CenterCrop(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
# Example 3
def main():
    """Train a flower classifier (vgg13 or densenet121) and checkpoint it.

    Reads hyper-parameters from the command line, builds data loaders for
    the ``flowers`` directory, freezes a pretrained backbone, attaches a
    fresh classifier head, trains it, then saves a checkpoint.

    Raises:
        ValueError: if ``args.arch`` is not 'vgg13' or 'densenet121'.
    """
    # Show progress immediately so the user knows something is running.
    print("Please wait while I train")

    args = parse_args()
    # Paths of the data directories.
    data_dir = 'flowers'
    train_dir = data_dir + '/train'
    val_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    # Transformations applied to each split. The val and test pipelines
    # were identical, so they share one deterministic transform.
    train_transforms = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
    eval_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])

    # Load the datasets with ImageFolder.
    train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
    test_datasets = datasets.ImageFolder(test_dir, transform=eval_transforms)
    val_datasets = datasets.ImageFolder(val_dir, transform=eval_transforms)

    # Dataloaders (the test loader is kept for parity with the original
    # even though this function only consumes train/val).
    trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)
    valloader = torch.utils.data.DataLoader(val_datasets, batch_size=64, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_datasets, batch_size=64, shuffle=True)

    # Build the pretrained backbone once (the original pre-loaded a model
    # via getattr and then loaded it a second time inside each branch).
    if args.arch == 'vgg13':
        model = models.vgg13(pretrained=True)
        print(model)
        classifier = nn.Sequential(nn.Linear(25088, 4096),
                                   nn.Dropout(p=0.2),
                                   nn.ReLU(),
                                   nn.Linear(4096, 4096),
                                   nn.ReLU(),
                                   nn.Dropout(p=0.2),
                                   nn.Linear(4096, 102),
                                   nn.LogSoftmax(dim=1))
    elif args.arch == 'densenet121':
        model = models.densenet121(pretrained=True)
        print(model)
        classifier = nn.Sequential(nn.Linear(1024, 512),
                                   nn.Dropout(p=0.6),
                                   nn.ReLU(),
                                   nn.Linear(512, 256),
                                   nn.ReLU(),
                                   nn.Dropout(p=0.6),
                                   nn.Linear(256, 102),
                                   nn.LogSoftmax(dim=1))
    else:
        # The original fell through here and later crashed with a NameError
        # on `classifier`; fail fast with a clear message instead.
        raise ValueError(
            "Unsupported arch '{}': expected 'vgg13' or 'densenet121'".format(
                args.arch))

    # Freeze the feature extractor; only the new head (assigned after the
    # freeze, so it stays trainable) is optimised.
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = classifier

    criterion = nn.NLLLoss()
    epochs = int(args.epochs)
    learning_rate = float(args.learning_rate)
    print_every = int(args.print_every)
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    train(model, criterion, epochs, optimizer, print_every, trainloader, valloader)
    model.class_to_idx = train_datasets.class_to_idx
    path = args.save_dir
    save_checkpoint(args, model, optimizer, learning_rate, epochs, path)
# Example 4
# Shared ImageNet channel statistics; Normalize is stateless so one
# instance can be reused across pipelines.
_imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

# Deterministic pipeline: resize to the configured size, convert
# [0, 255] pixels to [0.0, 1.0], then normalise.
data_transform = transforms.Compose([
    transforms.Resize((args.img_h, args.img_w)),
    transforms.ToTensor(),
    _imagenet_norm,
])

# Augmented pipeline: random flip/colour-jitter/rotation, a proportional
# random crop, then resize to the bilinear target size before tensor
# conversion and normalisation.
data_transform_resize = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(brightness=0.2,
                           contrast=0.2,
                           saturation=0.2,
                           hue=0.1),
    transforms.RandomRotation(10),
    my_transforms.RandomCrop(range=(0.70, 0.95)),
    transforms.Resize((args.img_bi_h, args.img_bi_w)),
    transforms.ToTensor(),
    _imagenet_norm,
])

data_transform_resize2 = transforms.Compose([
    #transforms.Resize((args.img_tri_h, args.img_tri_w)),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(brightness=0.2,
                           contrast=0.2,
                           saturation=0.2,
                           hue=0.1),
    transforms.RandomRotation(10),
# Example 5
    def build_transforms(self):
        """Build the train/eval transform pipelines.

        Returns:
            tuple: ``(train_transform, val_test_transform)`` where the
            train transform is the strong or basic augmentation depending
            on ``self.strong``.

        Raises:
            ValueError: if ``self.normalize`` is neither ``'imagenet'``
                nor ``None``.
        """
        # Pad-then-crop variant: reflect-pad, rotate/shear, then crop back
        # to the target size.
        padding = transforms.Compose([
            transforms.Pad(self.padding, padding_mode='reflect'),
            transforms.RandomRotation((-self.rotation, self.rotation)),
            transforms.RandomApply(
                [transforms.RandomAffine(0, shear=self.shear)]),
            transforms.RandomCrop(self.resize)
        ])

        # Plain rescale variant with the same shear/rotation jitter.
        rescaling = transforms.Compose([
            transforms.Resize((self.resize, self.resize)),
            transforms.RandomApply(
                [transforms.RandomAffine(0, shear=self.shear)]),
            transforms.RandomRotation((-self.rotation, self.rotation))
        ])

        # Crop 80% of the target size, then rescale back up. RandomCrop
        # expects an int size; the original passed a float product and
        # relied on torchvision's implicit truncation — made explicit here.
        crop_rescaling = transforms.Compose([
            transforms.RandomCrop(int(self.resize * 0.8)),
            transforms.Resize((self.resize, self.resize)),
            transforms.RandomRotation((-self.rotation, self.rotation))
        ])

        basic_augmentation_transform = transforms.Compose([
            transforms.RandomChoice([padding, rescaling]),
            transforms.RandomHorizontalFlip(p=self.flip),
            transforms.ToTensor()
        ])

        strong_augmentation_transform = transforms.Compose([
            transforms.RandomChoice([padding, rescaling, crop_rescaling]),
            transforms.RandomHorizontalFlip(p=self.flip),
            transforms.RandomApply([
                transforms.ColorJitter(brightness=0.05,
                                       contrast=0.1,
                                       saturation=0.05,
                                       hue=0.1)
            ]),
            transforms.ToTensor(),
        ])

        val_test_transform = transforms.Compose([
            transforms.Resize((128, 128)),
            transforms.ToTensor(),
        ])

        if self.normalize == 'imagenet':
            normalization = transforms.Normalize([0.485, 0.456, 0.406],
                                                 [0.229, 0.224, 0.225])
            strong_augmentation_transform = transforms.Compose(
                [strong_augmentation_transform, normalization])

            basic_augmentation_transform = transforms.Compose(
                [basic_augmentation_transform, normalization])

            val_test_transform = transforms.Compose(
                [val_test_transform, normalization])
        elif self.normalize is None:
            pass
        else:
            # ValueError (a subclass of Exception, so existing broad
            # handlers still catch it) is more precise than bare Exception.
            raise ValueError('Currently only support `normalize=\'imagenet\'`!')

        if self.strong:
            return strong_augmentation_transform, val_test_transform
        else:
            return basic_augmentation_transform, val_test_transform
# Example 6
def digit_load(args):
    """Build train/test DataLoaders for a digit domain-adaptation task.

    ``args.dset`` selects the source -> target pair:
      * ``'s2m'``: SVHN -> MNIST
      * ``'u2m'``: USPS -> MNIST
      * ``'m2u'``: MNIST -> USPS

    Returns:
        dict: loaders keyed ``source_tr``, ``source_te``, ``target``
        (index-tracking target train split, shuffled), ``target_te`` (same
        split, unshuffled) and ``test`` (target test split).

    Raises:
        ValueError: if ``args.dset`` is not one of the three pairs.
    """
    train_bs = args.batch_size
    if args.dset == 's2m':
        train_source = svhn.SVHN('./data/svhn/',
                                 split='train',
                                 download=True,
                                 transform=transforms.Compose([
                                     transforms.Resize(32),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5),
                                                          (0.5, 0.5, 0.5))
                                 ]))
        test_source = svhn.SVHN('./data/svhn/',
                                split='test',
                                download=True,
                                transform=transforms.Compose([
                                    transforms.Resize(32),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5),
                                                         (0.5, 0.5, 0.5))
                                ]))
        # Grayscale MNIST is converted to RGB to match SVHN's 3 channels.
        train_target = mnist.MNIST_idx(
            './data/mnist/',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(32),
                transforms.Lambda(lambda x: x.convert("RGB")),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ]))
        test_target = mnist.MNIST(
            './data/mnist/',
            train=False,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(32),
                transforms.Lambda(lambda x: x.convert("RGB")),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ]))
    elif args.dset == 'u2m':
        train_source = usps.USPS('./data/usps/',
                                 train=True,
                                 download=True,
                                 transform=transforms.Compose([
                                     transforms.RandomCrop(28, padding=4),
                                     transforms.RandomRotation(10),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, ), (0.5, ))
                                 ]))
        test_source = usps.USPS('./data/usps/',
                                train=False,
                                download=True,
                                transform=transforms.Compose([
                                    transforms.RandomCrop(28, padding=4),
                                    transforms.RandomRotation(10),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, ), (0.5, ))
                                ]))
        train_target = mnist.MNIST_idx('./data/mnist/',
                                       train=True,
                                       download=True,
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.5, ),
                                                                (0.5, ))
                                       ]))
        test_target = mnist.MNIST('./data/mnist/',
                                  train=False,
                                  download=True,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.5, ), (0.5, ))
                                  ]))
    elif args.dset == 'm2u':
        train_source = mnist.MNIST('./data/mnist/',
                                   train=True,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, ), (0.5, ))
                                   ]))
        test_source = mnist.MNIST('./data/mnist/',
                                  train=False,
                                  download=True,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.5, ), (0.5, ))
                                  ]))
        train_target = usps.USPS_idx('./data/usps/',
                                     train=True,
                                     download=True,
                                     transform=transforms.Compose([
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.5, ), (0.5, ))
                                     ]))
        test_target = usps.USPS('./data/usps/',
                                train=False,
                                download=True,
                                transform=transforms.Compose([
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, ), (0.5, ))
                                ]))
    else:
        # Previously an unknown dset fell through and crashed later with a
        # NameError on `train_source`; fail fast with a clear message.
        raise ValueError(
            "Unknown dset '{}': expected 's2m', 'u2m' or 'm2u'".format(
                args.dset))

    dset_loaders = {}
    dset_loaders["source_tr"] = DataLoader(train_source,
                                           batch_size=train_bs,
                                           shuffle=True,
                                           num_workers=args.worker,
                                           drop_last=False)
    dset_loaders["source_te"] = DataLoader(test_source,
                                           batch_size=train_bs * 2,
                                           shuffle=True,
                                           num_workers=args.worker,
                                           drop_last=False)
    dset_loaders["target"] = DataLoader(train_target,
                                        batch_size=train_bs,
                                        shuffle=True,
                                        num_workers=args.worker,
                                        drop_last=False)
    # NOTE(review): target_te deliberately wraps the *train* target split
    # without shuffling (typical for pseudo-label evaluation) — confirm.
    dset_loaders["target_te"] = DataLoader(train_target,
                                           batch_size=train_bs,
                                           shuffle=False,
                                           num_workers=args.worker,
                                           drop_last=False)
    dset_loaders["test"] = DataLoader(test_target,
                                      batch_size=train_bs * 2,
                                      shuffle=False,
                                      num_workers=args.worker,
                                      drop_last=False)
    return dset_loaders
# Example 7
def _imagenet_folder_datasets(root, input_image_size=224, scale=256 / 224):
    """Return (train, val) ImageFolder datasets rooted at *root* with the
    standard ImageNet augmentation/evaluation transforms."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        os.path.join(root, 'train'),
        transforms.Compose([
            transforms.RandomResizedCrop(input_image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    val_dataset = datasets.ImageFolder(
        os.path.join(root, 'val'),
        transforms.Compose([
            # Resize so the short side is slightly larger than the crop.
            transforms.Resize(int(input_image_size * scale)),
            transforms.CenterCrop(input_image_size),
            transforms.ToTensor(),
            normalize,
        ]))
    return train_dataset, val_dataset


def get_dataset(args):
    """Return ``(train_dataset, val_dataset)`` for ``args.dataset``.

    Supported names: 'mnist', 'cifar10', 'cifar100', 'imagenet',
    'imagenet_32_noise', 'imagenet_32_noise_2'. Any other name prints a
    message and exits the process.

    (The original docstring said "return given network", which was wrong.)
    """
    if args.dataset == 'mnist':
        train_dataset = datasets.MNIST(MNIST_PATH,
                                       train=True,
                                       download=True,
                                       transform=transforms.Compose([
                                           transforms.Resize((32, 32)),
                                           transforms.ToTensor(),
                                       ]))
        val_dataset = datasets.MNIST(MNIST_PATH,
                                     train=False,
                                     download=True,
                                     transform=transforms.Compose([
                                         transforms.Resize((32, 32)),
                                         transforms.ToTensor(),
                                     ]))
    elif args.dataset == 'cifar10':
        train_dataset = datasets.CIFAR10(
            CIFAR10_PATH,
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.RandomRotation(15),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.49139968, 0.48215827, 0.44653124],
                                     std=[0.24703233, 0.24348505, 0.26158768]),
            ]))
        val_dataset = datasets.CIFAR10(
            CIFAR10_PATH,
            train=False,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.49139968, 0.48215827, 0.44653124],
                                     std=[0.24703233, 0.24348505, 0.26158768]),
            ]))
    elif args.dataset == 'cifar100':
        train_dataset = datasets.CIFAR100(
            CIFAR100_PATH,
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.RandomRotation(15),
                transforms.ToTensor(),
                transforms.Normalize(mean=[
                    0.5070751592371323, 0.48654887331495095, 0.4409178433670343
                ],
                                     std=[
                                         0.2673342858792401,
                                         0.2564384629170883,
                                         0.27615047132568404
                                     ]),
            ]))
        val_dataset = datasets.CIFAR100(
            CIFAR100_PATH,
            train=False,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[
                    0.5070751592371323, 0.48654887331495095, 0.4409178433670343
                ],
                                     std=[
                                         0.2673342858792401,
                                         0.2564384629170883,
                                         0.27615047132568404
                                     ]),
            ]))
    # The three ImageNet-style branches were byte-identical apart from the
    # dataset root; they now share one helper.
    elif args.dataset == 'imagenet':
        train_dataset, val_dataset = _imagenet_folder_datasets(ILSVRC2012_path)
    elif args.dataset == 'imagenet_32_noise':
        train_dataset, val_dataset = _imagenet_folder_datasets(
            IMAGENET_32_NOISE_PATH)
    elif args.dataset == 'imagenet_32_noise_2':
        train_dataset, val_dataset = _imagenet_folder_datasets(
            IMAGENET_32_NOISE_PATH_2)
    else:
        print('the dataset name you have entered is not supported yet')
        sys.exit()

    return train_dataset, val_dataset
# Example 8
import torch
from torchvision import datasets, transforms
import PIL

# Root folders for the train/val/test splits.
data_root = './data/'
train_root = data_root + 'train'
val_root = data_root + 'val'
test_root = data_root + 'test'

# Normalisation mapping each channel from [0, 1] to [-1, 1]; Normalize is
# stateless, so one instance is shared by both pipelines.
_normalize = transforms.Normalize([0.5] * 3, [0.5] * 3)

# Evaluation pipeline: tensor conversion + normalisation only.
base_transform = transforms.Compose([transforms.ToTensor(), _normalize])

# Training pipeline adds light geometric and colour augmentation.
aug_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    # NOTE(review): `resample` was renamed `interpolation` in newer
    # torchvision releases — confirm the pinned version still accepts it.
    transforms.RandomRotation(20, resample=PIL.Image.BILINEAR),
    transforms.ColorJitter(hue=.05, saturation=.05),
    transforms.ToTensor(),
    _normalize,
])

train_dataset = datasets.ImageFolder(root=train_root, transform=aug_transform)
val_dataset = datasets.ImageFolder(root=val_root, transform=base_transform)
test_dataset = datasets.ImageFolder(root=test_root, transform=base_transform)


def get_data_loaders(batch_size):
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)
# Example 9
def main():
    """Train a flower classifier from command-line arguments.

    Loads a pretrained torchvision backbone (vgg13 or densenet121), freezes
    its feature extractor, attaches a fresh classifier head, trains it and
    saves a checkpoint to ``args.save_dir``.

    Raises:
        ValueError: if ``args.arch`` is not 'vgg13' or 'densenet121'.
    """
    args = parse_args()
    data_dir = args.data_dir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    training_transforms = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Validation and testing were identical pipelines; they share one
    # deterministic transform.
    eval_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # [train, valid, test] — index 0 must stay the training split, since
    # class_to_idx is read from it below.
    image_datasets = [
        datasets.ImageFolder(train_dir, transform=training_transforms),
        datasets.ImageFolder(valid_dir, transform=eval_transforms),
        datasets.ImageFolder(test_dir, transform=eval_transforms)
    ]

    # NOTE(review): the original shuffles the validation/test loaders too;
    # kept for behavioural parity, though shuffling is unnecessary there.
    dataloaders = [
        torch.utils.data.DataLoader(ds, batch_size=64, shuffle=True)
        for ds in image_datasets
    ]

    model = getattr(models, args.arch)(pretrained=True)

    hidden_units = int(args.hidden_units)

    # Freeze the pretrained feature extractor; only the new head trains.
    for param in model.parameters():
        param.requires_grad = False

    if args.arch == "vgg13":
        feature_num = model.classifier[0].in_features
        classifier = nn.Sequential(
            OrderedDict([('fc1', nn.Linear(feature_num, hidden_units)),
                         ('drop', nn.Dropout(p=0.5)), ('relu', nn.ReLU()),
                         ('fc2', nn.Linear(hidden_units, 102)),
                         ('output', nn.LogSoftmax(dim=1))]))
    elif args.arch == "densenet121":
        classifier = nn.Sequential(
            OrderedDict([('fc1', nn.Linear(1024, hidden_units)),
                         ('drop', nn.Dropout(p=0.6)), ('relu', nn.ReLU()),
                         ('fc2', nn.Linear(hidden_units, 102)),
                         ('output', nn.LogSoftmax(dim=1))]))
    else:
        # Previously an unsupported arch fell through and crashed with a
        # NameError on `classifier`; fail fast with a clear message.
        raise ValueError(
            f"Unsupported arch {args.arch!r}: expected 'vgg13' or 'densenet121'")

    model.classifier = classifier
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(),
                           lr=float(args.learning_rate))
    epochs = int(args.epochs)
    class_index = image_datasets[0].class_to_idx
    gpu = args.gpu  # whether to train on the GPU
    train(model, criterion, optimizer, dataloaders, epochs, gpu)
    model.class_to_idx = class_index
    path = args.save_dir  # checkpoint destination
    save_checkpoint(path, model, optimizer, args, classifier)
def main():
    """Parse CLI arguments, then build a transfer-learning classifier.

    Validates the data/save directories, loads the category-id -> name
    mapping, builds the augmented training dataloader, and replaces the
    pre-trained backbone's classifier with a user-configured
    feed-forward head ready for training.
    """
    parser = train_args.get_args()
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__ + ' by ' + __author__)
    cli_args = parser.parse_args()

    # The data directory must already exist; bail out early if it does not.
    if not os.path.isdir(cli_args.data_directory):
        print(f'Data directory {cli_args.data_directory} not found.')
        exit(1)

    # The checkpoint directory is created on demand.
    if not os.path.isdir(cli_args.save_dir):
        print(f'Directory {cli_args.save_dir} does not exist. Creating...')
        os.makedirs(cli_args.save_dir)

    # Category-id -> name mapping; its size defines the output layer width.
    with open(cli_args.categories_json, 'r') as f:
        cat_to_name = json.load(f)

    output_size = len(cat_to_name)

    # ImageNet normalisation statistics expected by the pre-trained backbones.
    expected_means = [0.485, 0.456, 0.406]
    expected_std = [0.229, 0.224, 0.225]
    max_image_size = 224
    batch_size = 32

    # Training-time augmentation pipeline.
    tr_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=0.25),
                                       transforms.RandomRotation(25),
                                       transforms.RandomGrayscale(p=0.02),
                                       transforms.RandomResizedCrop(max_image_size),
                                       transforms.ToTensor(),
                                       transforms.Normalize(expected_means, expected_std)])
    tr_dataset = datasets.ImageFolder(cli_args.data_directory, transform=tr_transform)
    tr_dataloader = torch.utils.data.DataLoader(tr_dataset, batch_size=batch_size, shuffle=True)

    # Only torchvision VGG* and DenseNet* backbones are supported.
    if not cli_args.arch.startswith("vgg") and not cli_args.arch.startswith("densenet"):
        print("Only supporting VGG and DenseNet")
        exit(1)

    print(f"Using a pre-trained {cli_args.arch} network.")
    my_model = models.__dict__[cli_args.arch](pretrained=True)

    # Classifier input width for each DenseNet variant (VGG is introspected).
    densenet_input = {
        'densenet121': 1024,
        'densenet169': 1664,
        'densenet161': 2208,
        'densenet201': 1920
    }

    input_size = 0

    if cli_args.arch.startswith("vgg"):
        input_size = my_model.classifier[0].in_features

    if cli_args.arch.startswith("densenet"):
        input_size = densenet_input[cli_args.arch]

    # Freeze the pre-trained feature extractor; only the new head trains.
    for param in my_model.parameters():
        param.requires_grad = False

    od = OrderedDict()
    # BUG FIX: copy the list — the original mutated cli_args.hidden_units
    # in place (insert below), which also made the log line misreport the
    # hidden layer count by including the input width.
    hidden_sizes = list(cli_args.hidden_units)

    print(f"Building a {len(hidden_sizes)} hidden layer classifier with inputs {hidden_sizes}")

    hidden_sizes.insert(0, input_size)

    # One (Linear -> ReLU -> Dropout) group per hidden layer.
    for i in range(len(hidden_sizes) - 1):
        od['fc' + str(i + 1)] = nn.Linear(hidden_sizes[i], hidden_sizes[i + 1])
        od['relu' + str(i + 1)] = nn.ReLU()
        od['dropout' + str(i + 1)] = nn.Dropout(p=0.15)

    od['output'] = nn.Linear(hidden_sizes[i + 1], output_size)
    od['softmax'] = nn.LogSoftmax(dim=1)

    classifier = nn.Sequential(od)

    # Replace the pre-trained classifier with the freshly built head.
    my_model.classifier = classifier

    # BUG FIX: this line had a 6-space indent in the original, which is an
    # IndentationError at import time.
    my_model.zero_grad()
Esempio n. 11
0
def create_cifar_experiment(num_targets: int, num_reps: int, target_dir: str, sleep: float = 0.0):
    """Build the CIFAR-10 convolutional autoencoder tuning experiment.

    Parameters:
    num_targets -- number of hyper-parameter configurations to tune
    num_reps -- number of repetitions of the experiment
    target_dir -- directory where experiment results are written
    sleep -- artificial per-step delay in seconds forwarded to each
             strategy (the original accepted this but hard-coded 0.0)

    Returns: a configured Experiment instance.
    """
    # Augmentation + tensor conversion applied to both splits.
    transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomRotation(degrees=45),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])

    # Download the training and test datasets.
    train_data = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform)
    val_data = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform)

    # Prepare data loaders.
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, num_workers=0, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=32, num_workers=0, shuffle=True)

    parameter_dict = {
        "lr": [0.0005, 0.001, 0.005, 0.01],
        "num_filters": [4, 6, 8, 10, 12]
    }
    # One hyper-parameter configuration per tuning target. (The original
    # applied the same slice twice; once is enough.)
    grid = list(ParameterGrid(parameter_dict))[:num_targets]

    iterations = 1
    baseline_iterations = [1, 3, 8]
    burn_in_phase_length = 3
    m_max = 10000

    def _fresh_algorithms():
        # Each strategy gets its own independently-initialised models.
        return [
            ConvolutionalAEAlg(num_channels=3, num_filters=params["num_filters"], learning_rate=params["lr"])
            for params in grid
        ]

    strategies = []
    # Round-robin baselines, one per iteration budget.
    for it in baseline_iterations:
        strategies.append(Baseline("Baseline (round robin, m={})".format(it),
                                   algorithms=_fresh_algorithms(),
                                   iterations=it,
                                   burn_in_phase_length=burn_in_phase_length,
                                   sleep=sleep))
    strategies.append(AnygradSelectAll("Anygrad (no target selection)",
                                       algorithms=_fresh_algorithms(),
                                       iterations=iterations,
                                       burn_in_phase_length=burn_in_phase_length,
                                       sleep=sleep))
    strategies.append(AnygradOnlySelection("Anygrad (m={})".format(150),
                                           algorithms=_fresh_algorithms(),
                                           iterations=3,
                                           burn_in_phase_length=burn_in_phase_length,
                                           sleep=sleep))
    strategies.append(Anygrad("Anygrad (full)", algorithms=_fresh_algorithms(),
                              iterations=iterations,
                              burn_in_phase_length=burn_in_phase_length,
                              sleep=sleep))
    return Experiment(name="Convolutional on Cifar", strategies=strategies,
                      train_data=[train_loader], val_data=[val_loader],
                      targets=[i for i in range(num_targets)],
                      num_reps=num_reps, parallel=False,
                      target_dir=target_dir, m_max=m_max)
Esempio n. 12
0
    def log(self, strdata):
        """Append *strdata* to the log file, UTF-8 encoded and newline-terminated."""
        self.file.write((strdata + '\n').encode("utf-8"))

    def __del__(self):
        # Best-effort cleanup: close the log file when the logger is garbage
        # collected. NOTE(review): __del__ is not guaranteed to run at
        # interpreter shutdown; an explicit close() method would be safer.
        self.file.close()


# Heavy augmentation for the jigsaw pretext task: upscale, random flip,
# rotation anywhere in [0, 360] degrees, a 300px random crop, and a very
# mild colour jitter before tensor conversion.
_jigsaw_steps = [
    t.Resize(600, Image.BILINEAR),
    t.RandomHorizontalFlip(0.5),
    t.RandomRotation([0, 360]),
    t.RandomCrop(300),
    t.ColorJitter(hue=0.01, saturation=0.01, brightness=0.01, contrast=0.01),
    t.ToTensor(),
]
jigsaw_image_transform = t.Compose(_jigsaw_steps)

# Lighter pipeline for the rotation pretext task: resize, crop to the 96px
# network input, mild colour jitter, tensor conversion.
_rotation_steps = [
    t.Resize(150, Image.BILINEAR),
    t.RandomCrop(96),
    t.ColorJitter(hue=0.01, saturation=0.01, brightness=0.01, contrast=0.01),
    t.ToTensor(),
]
rotation_image_transform = t.Compose(_rotation_steps)

tile_transform = t.Compose([
    t.Resize((100, 100)),
    t.RandomCrop(96),
# Just normalization for validation
data_transforms = {
    # 'train': transforms.Compose([
    #     transforms.RandomResizedCrop(size=input_size, scale=(0.8, 1.0)),
    #     transforms.RandomRotation(degrees=15),
    #     transforms.ColorJitter(),
    #     transforms.RandomHorizontalFlip(),
    #     transforms.CenterCrop(size=input_size),  # Image net standards
    #     transforms.ToTensor(),
    #     transforms.Normalize([0.485, 0.456, 0.406],
    #                          [0.229, 0.224, 0.225])  # Imagenet standards
    # ]),
    'train':
    transforms.Compose([
        transforms.Resize(input_size + 20),
        transforms.RandomRotation(15, expand=True),
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4,
                               contrast=0.4,
                               saturation=0.4,
                               hue=0.2),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
Esempio n. 14
0
def get_transforms():
    """Return ten augmentation pipelines sharing the dataset normalisation.

    Returns a tuple (t0..t9): identity, brightness jitter, saturation
    jitter, contrast jitter, hue jitter, horizontal flip, shear,
    translation, perspective, and rotation — each ending with
    ToTensor + Normalize using the dataset channel statistics.
    (Refactor: the mean/std constants were duplicated ten times in the
    original; they are now defined once.)
    """
    # Dataset channel statistics shared by every pipeline.
    mean = (0.4054, 0.3780, 0.3547)
    std = (0.2221, 0.2151, 0.2112)

    def _with_norm(*augs):
        # Append the common ToTensor + Normalize tail to the augmentations.
        return transforms.Compose(
            list(augs) + [transforms.ToTensor(),
                          transforms.Normalize(mean=mean, std=std)])

    t0 = _with_norm()                                         # keep the same
    t1 = _with_norm(transforms.ColorJitter(brightness=2.5))   # brightness jitter
    t2 = _with_norm(transforms.ColorJitter(saturation=2))     # saturation jitter
    t3 = _with_norm(transforms.ColorJitter(contrast=1.5))     # contrast jitter
    t4 = _with_norm(transforms.ColorJitter(hue=0.2))          # hue jitter
    t5 = _with_norm(transforms.RandomHorizontalFlip())        # random flips
    t6 = _with_norm(transforms.RandomAffine(degrees=20, shear=3))               # shearing
    t7 = _with_norm(transforms.RandomAffine(degrees=10, translate=(0.2, 0.2)))  # translation
    t8 = _with_norm(transforms.RandomPerspective())           # perspective change
    t9 = _with_norm(transforms.RandomRotation(20))            # rotation

    return t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
Esempio n. 15
0
                    ha="center",
                    va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax


# # DataLoader
imgSize = 512
rotateAngle = 15

# Shared preprocessing pipeline: resize, random flips/rotation, tensor
# conversion, and dataset-specific channel normalisation.
preprocess = transforms.Compose([
    # BUG FIX: transforms.Scale was deprecated and later removed from
    # torchvision; transforms.Resize is the drop-in replacement.
    transforms.Resize(imgSize),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(rotateAngle),
    transforms.ToTensor(),
    transforms.Normalize((0.3749, 0.2601, 0.1856), (0.2526, 0.1780, 0.1291)),
])


def getData(mode):
    """Load image file names and labels for the given dataset split.

    Parameters:
    mode -- 'train' or 'test', selecting which CSV pair to read

    Returns: (images, labels) as 1-D numpy arrays.

    Raises: ValueError on an unknown mode (the original silently
    returned None, which crashed later at the call site).
    """
    import os
    if mode not in ('train', 'test'):
        raise ValueError(f"mode must be 'train' or 'test', got {mode!r}")
    # BUG FIX: os.path.join keeps the paths portable; the original
    # hard-coded Windows-style backslashes, which are literal filename
    # characters on POSIX systems.
    base = os.path.join('data', 'csv')
    img = pd.read_csv(os.path.join(base, f'{mode}_img.csv'))
    label = pd.read_csv(os.path.join(base, f'{mode}_label.csv'))
    return np.squeeze(img.values), np.squeeze(label.values)
Esempio n. 16
0
    # 中心裁剪
    for name in os.listdir(path):
        img = Image.open(os.path.join(path, name))
        size_scale = (int(img.size[1] * 1.2), int(img.size[0] * 1.2))
        img_resize = tfs.Resize(size_scale, interpolation=2)(img)
        img_crop = tfs.RandomCrop((img.size[1], img.size[0]),
                                  padding=0,
                                  pad_if_needed=False)(img_resize)
        img_crop.save(os.path.join(path, 'C_' + name))

    # 旋转
    for name in os.listdir(path):
        img = Image.open(os.path.join(path, name))
        img_rot_1 = tfs.RandomRotation(30,
                                       resample=False,
                                       expand=False,
                                       center=None)(img)
        img_rot_2 = tfs.RandomRotation(30,
                                       resample=False,
                                       expand=False,
                                       center=None)(img)
        img_rot_1.save(os.path.join(path, 'R0_' + name))
        img_rot_2.save(os.path.join(path, 'R1_' + name))

    # 亮度
    for name in os.listdir(path):
        img = Image.open(os.path.join(path, name))
        img_clj_1 = tfs.ColorJitter(brightness=0.8,
                                    contrast=0,
                                    saturation=0,
                                    hue=0)(img)
Esempio n. 17
0
def initialize_dataloaders(data_dir):
    """Build train/valid/test datasets and their dataloaders.

    Parameters:
    data_dir -- root directory with train, valid, and test subdirectories

    Returns: (data_loaders, image_datasets), each a dict keyed by split.
    """
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    # Training images get random augmentation; valid/test are deterministic.
    augment = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])
    evaluate = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])
    split_transforms = {'train': augment, 'valid': evaluate, 'test': evaluate}

    # One ImageFolder dataset per split, rooted at <data_dir>/<split>.
    image_datasets = {
        split: datasets.ImageFolder(data_dir + '/' + split,
                                    transform=split_transforms[split])
        for split in ('train', 'valid', 'test')
    }

    # Only the training loader shuffles; it also uses a larger batch size.
    data_loaders = {
        'train': torch.utils.data.DataLoader(image_datasets['train'],
                                             batch_size=64,
                                             shuffle=True),
        'valid': torch.utils.data.DataLoader(image_datasets['valid'],
                                             batch_size=32),
        'test': torch.utils.data.DataLoader(image_datasets['test'],
                                            batch_size=32)
    }
    return data_loaders, image_datasets
Esempio n. 18
0
        if self.transform:
            image = self.transform(image)

        return image, label, p_path


def listToJson(data, json_save):
    """Serialize *data* to JSON and write it to the file *json_save*.

    Parameters:
    data -- any JSON-serializable object (typically a list)
    json_save -- path of the output file
    """
    # A context manager guarantees the handle is closed even if
    # serialization or the write raises (the original leaked the
    # handle on error).
    with open(json_save, 'w') as fileObject:
        json.dump(data, fileObject)


# Training-time augmentation: random flip, a wide random rotation, and
# ImageNet channel normalisation.
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation((-120, 120)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Stratified 70/30 train/validation split on the SCORE column so class
# proportions are preserved in both subsets (fixed seed for reproducibility).
train_df, val_df = train_test_split(train_csv,
                                    test_size=0.3,
                                    random_state=2018,
                                    stratify=train_csv.SCORE)
#train_df = train_csv
#val_df = val_csv

# Random sampling
#train_labels=train_df.values[:,1]
#sampler_count=[len(np.where(train_labels==i)[0])  for i in range(num_classes)]
#weight = np.array(1./np.array(sampler_count))
#weights = [weight[train_label[1]] for train_label in train_df.values ]
Esempio n. 19
0
def main():
    """Train a 102-class flower classifier on a frozen pre-trained backbone.

    Loads train/valid/test ImageFolder datasets from the module-level
    `data_dir`, attaches a new feed-forward head to a vgg16 or densenet161
    backbone (selected by the module-level `arch`), trains for
    `epoch_count` epochs on `dev` at `learnrate`, prints per-epoch
    metrics, and saves a checkpoint plus the state dict.
    """
    ### define transformations for the data
    train_transforms = transforms.Compose([
        transforms.RandomRotation(60),
        transforms.Resize(255),
        transforms.CenterCrop(224),
        # BUG FIX: RandomHorizontalFlip takes a probability in [0, 1]; the
        # original passed 30 (i.e. "always flip"), apparently confusing the
        # argument with degrees. Restore the standard 50% flip chance.
        transforms.RandomHorizontalFlip(0.5),
        transforms.ColorJitter(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    valid_transforms = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    test_transforms = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    ### define paths to the train, validation, and test data sets
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    ### load in the datasets
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_transforms)

    ### set up dataloaders
    trainloader = torch.utils.data.DataLoader(train_data,
                                              batch_size=64,
                                              shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data,
                                              batch_size=64,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=64)

    ### define processor
    device = torch.device(dev)
    print("using device '{}'".format(device))

    ### define model architecture and optimizer
    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
        # BUG FIX: vgg16's classifier is an nn.Sequential container, so
        # `model.classifier.in_features` raised AttributeError; the input
        # width (25088) lives on its first Linear layer.
        class_in = model.classifier[0].in_features
    else:
        model = models.densenet161(pretrained=True)
        # densenet161's classifier is a single Linear layer (2208 inputs).
        class_in = model.classifier.in_features

    # Freeze the pre-trained feature extractor; only the new head trains.
    for param in model.parameters():
        param.requires_grad = False

    model.classifier = nn.Sequential(nn.Linear(class_in, 2000), nn.ReLU(),
                                     nn.Dropout(p=0.2), nn.Linear(2000, 512),
                                     nn.ReLU(), nn.Dropout(p=0.2),
                                     nn.Linear(512, 102), nn.LogSoftmax(dim=1))
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learnrate)
    model = model.to(device)

    ### train the network
    epochs = epoch_count
    training_losses = []
    validation_losses = []
    model.train()
    for e in range(epochs):
        running_loss = 0
        for images, labels in trainloader:
            images = images.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            log_ps = model.forward(images)
            loss = criterion(log_ps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        else:
            # The for/else always runs here: one validation pass per epoch.
            valid_loss = 0
            accuracy = 0

            with torch.no_grad():
                model.eval()
                for images, labels in validloader:
                    images, labels = images.to(device), labels.to(device)
                    logps = model.forward(images)
                    # .item() keeps a plain float instead of accumulating
                    # device tensors across batches.
                    valid_loss += criterion(logps, labels).item()

                    ps = torch.exp(logps)
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(
                        torch.FloatTensor)).item()

            model.train()
            training_losses.append(running_loss / len(trainloader))
            validation_losses.append(valid_loss / len(validloader))

            print("Epoch: {}/{}.. ".format(e + 1, epochs),
                  "Training Loss: {:.3f}.. ".format(training_losses[-1]),
                  "Test Loss: {:.3f}.. ".format(validation_losses[-1]),
                  "Test Accuracy: {:.3f}".format(accuracy / len(validloader)))

    ### map from integer values to flower names
    with open('cat_to_name.json', 'r') as f:
        cat_to_name = json.load(f)

    ### attach map as a parameter to the model
    model.class_to_idx = train_data.class_to_idx

    ### save model parameters
    checkpoint = {
        # BUG FIX: record the actual classifier input width instead of the
        # hard-coded vgg value 25088 (wrong for densenet161).
        'input size': class_in,
        'output size': 102,
        'epochs': epochs,
        'model': model,
        # BUG FIX: the original stored a freshly initialised (untrained)
        # nn.Sequential here; save the trained classifier instead.
        'classifier': model.classifier,
        'optimizer': optimizer.state_dict(),
        'class_to_idx': model.class_to_idx
    }
    torch.save(checkpoint, 'checkpoint.pth')

    # save the state dict
    torch.save(model.state_dict(), 'state_dict.pth')
Esempio n. 20
0
from torchvision import transforms

#Random crop + rotation are the most commonly used augmentations
#torch has no built-in noise-injection transform
#Data augmentation helps, but not much (samples are all one class, low variance)
# Demo augmentation pipeline: resize, random crop, flips, and rotations.
transform = transforms.Compose([
    transforms.Resize([32, 32]),
    # (transforms.Scale was removed here: it is a deprecated alias of
    # Resize and merely duplicated the line above.)
    transforms.RandomCrop([28, 28]),
    transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.RandomVerticalFlip(),  # random vertical flip
    transforms.RandomRotation(15),  # random rotation (-15 < x < 15 degrees)
    # BUG FIX: RandomRotation only accepts a number or a (min, max) pair;
    # the original 4-element list [0, 90, 180, 270] raises ValueError.
    # RandomChoice of fixed right-angle rotations matches the intent.
    transforms.RandomChoice([
        transforms.RandomRotation((d, d)) for d in (0, 90, 180, 270)
    ])
])
Esempio n. 21
0
        if self.transform_target is not None:
            target=self.transform_target(target)
        target=np.array(target)
        target=target.reshape(1,128,128)

        return data,data2,target
    def __len__(self):
        # Dataset size equals the number of input samples.
        return len(self.data)

# Input augmentation: a horizontal flip applied with p=1 so it is
# deterministic and can stay aligned with the target pipeline below.
transform=transforms.Compose([
    # transforms.RandomVerticalFlip(p=1),
    transforms.RandomHorizontalFlip(p=1),
    # transforms.ToTensor(),
])
# Target pipeline: a fixed 90-degree rotation followed by the same
# deterministic flip.
transform_target=transforms.Compose([
    transforms.RandomRotation(degrees=(90,90)),
    transforms.RandomHorizontalFlip(p=1),
    # transforms.ToTensor()
])

# Wrap the raw arrays in datasets and process them with DataLoaders.
# NOTE(review): the pipelines above are defined but NOT passed to the
# datasets (transform=None below) — confirm whether that is intended.
train_data=MyDataset(data,data2,target,transform=None,transform_target=None)
valid_data=MyDataset(val_data,val_data2,val_target,transform=None,transform_target=None)
# train_data2=MyDataset(data,target,transform=transform,transform_target=transform_target)
# valid_data2=MyDataset(val_data,val_target,transform=transform,transform_target=transform_target)
BATCH_SIZE=32
# Positional args: DataLoader(dataset, batch_size, shuffle).
train_loader=DataLoader(train_data,BATCH_SIZE,True)
valid_loader=DataLoader(valid_data,BATCH_SIZE,True)
# train_loader=DataLoader(train_data + train_data2,BATCH_SIZE,True)
# valid_loader=DataLoader(valid_data + valid_data2,BATCH_SIZE,True)
Esempio n. 22
0
import copy
import PIL

from utils import UpperAndLowerCenterCrop, TargetCenterCrop, CircleToRectangle

# Training hyper-parameters.
epochs = 12
batch_size = 16
lr = 0.001
# Prefer the first CUDA device when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

data_transforms = {
    # Training: crop to the target region, then rotation/flip/colour
    # augmentation before resizing to the 448px network input.
    'train':
    transforms.Compose([
        # transforms.CenterCrop(1200),
        TargetCenterCrop(),
        transforms.RandomRotation(180),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.Resize(448),
        transforms.ToTensor(),
    ]),
    # Validation: deterministic crop + resize only.
    'val':
    transforms.Compose([
        # transforms.CenterCrop(1200),
        TargetCenterCrop(),
        transforms.Resize(448),
        transforms.ToTensor()
    ]),
}

data_dir = os.path.join('data', 'upper')
Esempio n. 23
0
                    'gpu': args.gpu,
                    'input_layers': args.input_layers,
                    'hidden_layers': args.hidden_layers,
                    'output_layers': args.output_layers,
                    'drop': args.drop_rate,
                    'topk':args.topk
}


########## Step 2: Get Data
# Standard split layout: <data_dir>/{train,valid,test}.
train_dir = h_params['data_dir'] + '/train'
valid_dir = h_params['data_dir'] + '/valid'
test_dir = h_params['data_dir'] + '/test'

# ImageNet normalisation statistics shared by all three pipelines.
_mean = [0.485, 0.456, 0.406]
_std = [0.229, 0.224, 0.225]

data_transforms = {
    # Training pipeline adds random augmentation.
    'train': transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(_mean, _std)
    ]),
    # Test and validation use a deterministic resize + centre crop.
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(_mean, _std)
    ]),
    'valid': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(_mean, _std)
    ])
}
Esempio n. 24
0
	if dataset_name == 'DeepFashion':
		path_to_data = 'dataset_files/df_train/V_list_eval_partition.txt'


	#############################
	#
	# Dataloader
	#
	#############################
	# net.preprocess
	#  {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'input_size': 224}
	output_size_ = 224

	transform_train_1 = transforms.Compose([
		transforms.RandomRotation(45),
		transforms.Resize(256),
		transforms.RandomCrop(output_size_),
		transforms.RandomHorizontalFlip(),
		transforms.ToTensor(),
		transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
	])

	transform_train_2 = transforms.Compose([
		transforms.RandomRotation(45),
		transforms.Resize(256),
		transforms.RandomCrop(output_size_),
		transforms.RandomHorizontalFlip(),
		transforms.ToTensor(),
		transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
	])
def main(args):
    """Train a 3-class segmentation network on the CARLA dataset.

    Splits the dataset 80/20 into train/validation subsets, builds a
    class-weighted cross-entropy objective with an LR-on-plateau
    scheduler, optionally restores saved weights, and hands off to
    train().
    """
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    torch.cuda.set_device(0)
    train_batch_size = args.batch_size

    # FIXME(review): input_transform and target_transform draw their random
    # flip/rotation parameters independently, so image and label can be
    # augmented differently and end up misaligned. Joint (paired)
    # transforms are needed for correct segmentation augmentation.
    input_transform = Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        #transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0),
        ToTensor(),
        #Normalize([0.35676643, 0.33378336, 0.31191254], [0.24681774, 0.23830362, 0.2326341 ]),
    ])

    target_transform = Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
    ])

    datadir = args.datadir
    dataset = carla(datadir,
                    input_transform=input_transform,
                    target_transform=target_transform)
    # Validation reuses the same directory without augmentation.
    val_dataset = carla(datadir,
                        input_transform=ToTensor(),
                        target_transform=None)

    dataset_len = len(dataset)
    dataset_idx = list(range(dataset_len))

    # Split into training & validation sets (80/20, disjoint indices).
    train_ratio = 0.8
    split = int(np.floor(train_ratio * dataset_len))
    train_idx = np.random.choice(dataset_idx, size=split, replace=False)
    val_idx = list(set(dataset_idx) - set(train_idx))

    train_sampler = SubsetRandomSampler(train_idx)
    val_sampler = SubsetRandomSampler(val_idx)

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=train_batch_size,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             sampler=val_sampler)

    print('Total images = ', dataset_len)
    # NOTE: this over-counts when the last batch is smaller than batch_size.
    print('Number of images in train set = ',
          train_batch_size * len(train_loader))
    print('Number of images in validation set = ', len(val_loader))

    net = Net(num_classes=3)
    net = net.to(device)

    # Per-class weights compensate for class imbalance in the labels.
    weights = [0.1, 0.5, 2.0]
    weights = torch.FloatTensor(weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=weights)
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     factor=0.5,
                                                     patience=3,
                                                     verbose=True)

    # Optionally resume from a saved state dict (fix: identity comparison
    # with None instead of `!=`).
    if args.loadfile is not None:
        net.load_state_dict(
            torch.load(args.loadfile, map_location={'cuda:1': 'cuda:0'}))
        print("Loaded saved model: ", args.loadfile)

    train(train_loader, val_loader, optimizer, scheduler, criterion, net, args,
          device)
Esempio n. 26
0
def load_data(where="./flowers"):
    """Build the flower datasets and dataloaders.

    Parameters:
    where -- root directory containing train/, valid/, and test/ subdirs

    Returns: (image_datasets, dataloaders), each ordered as
    [train, validation, test].
    """
    train_dir = where + '/train'
    valid_dir = where + '/valid'
    test_dir = where + '/test'

    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    # Random augmentation is applied to the training split only.
    train_transforms = transforms.Compose([
        transforms.RandomRotation(50),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])

    # Validation and test use the same deterministic resize + centre crop.
    eval_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    validation_data = datasets.ImageFolder(valid_dir, transform=eval_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=eval_transforms)

    image_datasets = [train_data, validation_data, test_data]

    dataloaders = [
        torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True),
        torch.utils.data.DataLoader(validation_data, batch_size=32, shuffle=True),
        torch.utils.data.DataLoader(test_data, batch_size=20, shuffle=True)
    ]

    return image_datasets, dataloaders
Esempio n. 27
0
                             list(classifier.parameters()),
                             lr=0.0001)
# Step LR schedule: multiply the learning rate by gamma=0.1 once, at epoch 35.
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[35], gamma=0.1)

# Measured per-channel statistics (std then mean) for the two image domains;
# the 'herb' values are used by the Normalize step in data_transforms below.
#herb std-mean
#tensor([0.0808, 0.0895, 0.1141])
#tensor([0.7410, 0.7141, 0.6500])
#photo std-mean
#tensor([0.1399, 0.1464, 0.1392])
#tensor([0.2974, 0.3233, 0.2370])

# Per-split preprocessing pipelines, keyed by split name.
# NOTE(review): as scraped, the 'val' entry was missing its
# transforms.Compose([ wrapper and the dict was closed with "])" instead of
# "})" — a syntax error. Both are reconstructed minimally here; confirm
# against the upstream source.
data_transforms = {
    'train':
    transforms.Compose([
        #transforms.Resize((img_size, img_size)),
        transforms.RandomRotation(15),
        #transforms.RandomCrop((img_size, img_size)),
        #transforms.RandomResizedCrop((img_size, img_size)),
        #transforms.CenterCrop((img_size, img_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(),
        transformations.TileCircle(),
        #transformations.ScaleChange(),
        transforms.CenterCrop((img_size, img_size)),
        transforms.ToTensor(),
        # Domain-specific mean/std (see the 'herb std-mean' values above).
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        transforms.Normalize([0.7410, 0.7141, 0.6500],
                             [0.0808, 0.0895, 0.1141])
        #transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ]),
    'val':
    transforms.Compose([
        #transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
}

# Test-time pipeline: fixed square resize, horizontal-flip augmentation,
# then tensor conversion (no normalization at this stage).
_test_steps = [
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
]
test_transforms = transforms.Compose(_test_steps)

# Fine-tuning augmentation (comment out the pipeline above if using this one):
# flips, small rotations, slight affine translation, then normalization with
# the standard ImageNet channel statistics.
# NOTE(review): RandomRotation's `resample=` kwarg was renamed
# `interpolation` and later removed in recent torchvision — confirm the
# pinned torchvision version supports it.
_finetune_steps = [
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10, resample=Image.BILINEAR),
    transforms.RandomAffine(8, translate=(.15, .15)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
train_transforms = transforms.Compose(_finetune_steps)

# Validation pipeline for the fine-tuning setup.
# NOTE(review): this applies *random* flips/rotations/affines at validation
# time, which makes evaluation metrics non-deterministic. It mirrors
# train_transforms so it may be deliberate test-time augmentation — confirm.
val_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10, resample=Image.BILINEAR),
    transforms.RandomAffine(8, translate=(.15, .15)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

test_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(),
Esempio n. 29
0
# Split directories under data_dir (assumes data_dir is defined earlier in
# this script — TODO confirm; the train_dir counterpart is not visible here).
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

# Standard ImageNet per-channel mean/std normalization, shared by all splits.
standard_normalization = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

# Alternative: symmetric [-1, 1] normalization, kept for reference.
#standard_normalization = transforms.Normalize(mean=[0.5, 0.5, 0.5],
#                                             std=[0.5, 0.5, 0.5])

# Per-split preprocessing. Training gets random crops/flips/rotations for
# augmentation; val and test use a deterministic 224x224 resize. All splits
# share the same final normalization.
data_transforms = dict(
    train=transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(30),
        transforms.ToTensor(),
        standard_normalization,
    ]),
    val=transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        standard_normalization,
    ]),
    test=transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        standard_normalization,
    ]),
)

train_data = datasets.ImageFolder(train_dir,
Esempio n. 30
0
def construct_transforms(n_in: int,
                         mode: str,
                         mean: tuple = (0.0, 0.0, 0.0),
                         std: tuple = (1.0, 1.0, 1.0),
                         augment: bool = False,
                         rotation: bool = False,
                         num_channels: int = 3,
                         jitter: float = 0.0):
    """Build the torchvision preprocessing pipeline for one data split.

    :param n_in: target spatial size; output images are n_in x n_in
    :param mode: one of 'train', 'eval', 'ood'
    :param mean: per-channel mean passed to transforms.Normalize
    :param std: per-channel std passed to transforms.Normalize
    :param augment: enable augmentation; for mode='eval' the geometry is
        deterministic regardless of this flag
    :param rotation: additionally apply random +/-15 degree rotations
        (only effective for augmented 'train'/'ood' pipelines)
    :param num_channels: source channel count; inputs with fewer than 3
        channels are expanded to 3-channel grayscale
    :param jitter: ColorJitter strength, used for all four of
        brightness/contrast/saturation/hue; must be >= 0
    :return: a transforms.Compose assembling the pipeline
    """
    assert mode in ['train', 'eval', 'ood']
    assert not jitter < 0.0
    transf_list = []
    # TODO: make mean/std automatic instead of caller-supplied (temporary).
    #mean = (0.4914, 0.4823, 0.4465)
    #std = (0.247, 0.243, 0.261)

    if augment and mode != 'eval':
        # 'train' and 'ood' share the same augmentation stack; 'ood' only
        # adds a random vertical flip before the final random crop.
        # (The original duplicated this whole branch for the two modes.)
        transf_list.extend([
            transforms.Resize(n_in, Image.BICUBIC),
            transforms.Pad(4, padding_mode='reflect')
        ])
        if rotation:
            transf_list.append(
                transforms.RandomRotation(degrees=15,
                                          resample=Image.BICUBIC))
        transf_list.extend([
            torchvision.transforms.ColorJitter(jitter, jitter, jitter,
                                               jitter),
            transforms.RandomHorizontalFlip()
        ])
        if mode == 'ood':
            transf_list.append(transforms.RandomVerticalFlip())
        transf_list.append(transforms.RandomCrop(n_in))
    else:
        # Deterministic geometry: resize then center crop. Used for
        # mode='eval' and whenever augment is False.
        transf_list.extend([
            transforms.Resize(n_in, Image.BICUBIC),
            transforms.CenterCrop(n_in)
        ])

    if num_channels < 3:
        # Replicate single-channel input to 3 channels so the same
        # 3-channel Normalize (and downstream model) can be used.
        transf_list.append(transforms.Grayscale(num_output_channels=3))

    transf_list.extend(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])

    return transforms.Compose(transf_list)