Example no. 1
    def add_cutout(self, thetadeg, pt, d, drill_offset_deg=0.,
                   clearance_factor=0.75):
        r"""Add a cutout

        Parameters
        ----------
        thetadeg : float
            Circumferential position of the dimple.
        pt : float
            Normalized meridional position.
        d : float
            Diameter of the drilling machine.
        drill_offset_deg : float
            Angular offset when the drilling is not normal to the shell
            surface. A positive offset means a positive rotation about the
            `\theta` axis, along the meridional plane.
        clearance_factor : float
            Fraction of the diameter to apply as clearance around the cutout.
            This clearance is partitioned and meshed separately from the rest of
            the cone / cylinder.

        Returns
        -------
        cutout : :class:`.Cutout` object.

        """
        cutout = Cutout(thetadeg, pt, d, drill_offset_deg, clearance_factor)
        cutout.impconf = self
        self.cutouts.append(cutout)
        return cutout
Example no. 2
    def add_cutout(self,
                   thetadeg,
                   pt,
                   d,
                   drill_offset_deg=0.,
                   clearance_factor=0.75):
        r"""Add a cutout

        Parameters
        ----------
        thetadeg : float
            Circumferential position of the dimple.
        pt : float
            Normalized meridional position.
        d : float
            Diameter of the drilling machine.
        drill_offset_deg : float
            Angular offset when the drilling is not normal to the shell
            surface. A positive offset means a positive rotation about the
            `\theta` axis, along the meridional plane.
        clearance_factor : float
            Fraction of the diameter to apply as clearance around the cutout.
            This clearance is partitioned and meshed separately from the rest of
            the cone / cylinder.

        Returns
        -------
        cutout : :class:`.Cutout` object.

        """
        cutout = Cutout(thetadeg, pt, d, drill_offset_deg, clearance_factor)
        cutout.impconf = self
        self.cutouts.append(cutout)
        return cutout
Example no. 3
    def add_cutout(self, thetadeg, pt, d, drill_offset_deg=0.,
                   clearance_factor=0.75, numel_radial_edge=4,
                   prop_around_hole=None):
        r"""Add a cutout

        Parameters
        ----------
        thetadeg : float
            Circumferential position of the dimple.
        pt : float
            Normalized meridional position.
        d : float
            Diameter of the drilling machine.
        drill_offset_deg : float, optional
            Angular offset when the drilling is not normal to the shell
            surface. A positive offset means a positive rotation about the
            `\theta` axis, along the meridional plane.
        clearance_factor : float, optional
            Fraction of the diameter to apply as clearance around the cutout.
            This clearance is partitioned and meshed separately from the rest of
            the cone / cylinder.
        numel_radial_edge : int, optional
            Number of elements along the radial edges about the cutout center.
            This parameter affects the aspect ratio of the elements inside the
            cutout area.
        prop_around_hole : dict, optional
            Dictionary with keys:

            - 'radius': float
            - 'stack': list of floats
            - 'plyts': list of floats
            - 'mat_names': list of strings

            Example::

                prop_around_hole = {
                    'radius': 10.,
                    'stack': [0, 90, 0],
                    'plyts': [0.125, 0.125, 0.125],
                    'mat_names': ['Alum', 'Alum', 'Alum'],
                }

            .. note:: ``mat_names`` must be a list of materials already created
                      in the current model in Abaqus

        Returns
        -------
        cutout : :class:`.Cutout` object.

        """
        cutout = Cutout(thetadeg, pt, d, drill_offset_deg, clearance_factor,
                        numel_radial_edge, prop_around_hole)
        cutout.impconf = self
        self.cutouts.append(cutout)
        return cutout
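A short usage sketch for the method above; ``impconf`` stands for whatever
imperfection-configuration object exposes ``add_cutout``, and all values are
illustrative::

    cutout = impconf.add_cutout(thetadeg=45.,           # circumferential position [deg]
                                pt=0.5,                 # mid-height meridional position
                                d=10.,                  # drill diameter
                                drill_offset_deg=5.,    # drilling tilted 5 deg from the normal
                                clearance_factor=0.75)  # clearance region sized as 0.75*d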
Example no. 4
    def __init__(self, args):
        self.images = list()
        self.targets = list()
        self.args = args

        # for path, _, image_set in os.walk(os.path.join(args.data_dir, 'train')):
        #     if os.path.isdir(path):
        #         for image in image_set:
        lines = open(args.train_list).readlines()
        for line in lines:
            path, label = line.strip().split(' ')
            self.images.append(os.path.join(args.data_dir, path))
            self.targets.append(int(label))

        self.mean = [0.485, 0.456, 0.406]
        self.dev = [0.229, 0.224, 0.225]

        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=self.mean, std=self.dev),
        ])

        if args.cutout:
            self.transform.transforms.append(Cutout(n_holes=args.n_holes, length=args.length))
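The snippets in this collection import ``Cutout`` from elsewhere; for reference,
a minimal sketch consistent with the ``Cutout(n_holes, length)`` signature used
here (applied after ``ToTensor()``), following DeVries & Taylor (2017):

import numpy as np
import torch

class Cutout(object):
    """Zero out `n_holes` square patches of side `length` in a (C, H, W) tensor."""

    def __init__(self, n_holes=1, length=16):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        for _ in range(self.n_holes):
            # pick a random center; patches may be clipped at the borders
            y = np.random.randint(h)
            x = np.random.randint(w)
            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)
            mask[y1:y2, x1:x2] = 0.
        return img * torch.from_numpy(mask).expand_as(img)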
Example no. 5
    def __getitem__(self, i):

        image = cv2.imread(self.images_dir + self.images[i], cv2.IMREAD_COLOR)
        image = cv2.resize(image,
                           self.base_size,
                           interpolation=cv2.INTER_LINEAR)
        if self.applyAutoAug:
            image = Image.fromarray(image.astype('uint8'))
            #policy = ImageNetPolicy()
            policy = CIFAR10Policy()
            #policy = SVHNPolicy()
            image = policy(image)
            image = np.array(image)
            #img = np.asarray(image)
        if self.applyCutout:
            image = Cutout(image)  ## writing it this way is wrong
        #size = image.shape
        ##normalize and change it to a tensor
        #image = self.input_transform(image)
        #image = image.transpose((2, 0, 1))

        label = cv2.imread(self.labels_dir + self.labels[i],
                           cv2.IMREAD_GRAYSCALE)
        label = cv2.resize(label,
                           self.base_size,
                           interpolation=cv2.INTER_NEAREST)

        #some operations needed here
        ## depends on the range of label values
        #label = self.convert_label(label)

        image, label = self.gen_sample(image, label, self.multi_scale,
                                       self.flip)
        label = np.expand_dims(label, axis=0)

        return image.copy(), label.copy()
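A possible fix for the line flagged above, assuming the Cutout(n_holes, length)
convention used elsewhere in this collection (the exact fix is an assumption):

    # in __init__: build the transform once
    self.cutout = Cutout(n_holes=1, length=16)
    # in __getitem__: apply it to the image
    image = self.cutout(image)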
Example no. 6
def do_test(flag_augmentation=False,
            flag_cutout=False,
            n_holes=1,
            length=16,
            depth=18,
            epochs=100,
            lr=0.1,
            mixup_enable=True,
            alpha=0.1
           ):
    model_checkpoint = "resnet" + str(depth) 
    if flag_augmetation:
        model_checkpoint += '+'
    if flag_cutout:
        model_checkpoint += "cutout"
    model_checkpoint += ".pt"
    
    normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

    train_transform = transforms.Compose([])
    if flag_augmentation:
        train_transform.transforms.append(transforms.RandomCrop(32, padding=4))
        train_transform.transforms.append(transforms.RandomHorizontalFlip())
    train_transform.transforms.append(transforms.ToTensor())
    train_transform.transforms.append(normalize)
    if flag_cutout:
        train_transform.transforms.append(Cutout(n_holes=n_holes, length=length))


    train_set = torchvision.datasets.CIFAR10(
        root='./data/cifar10',
        train=True,
        download=True,
        transform=train_transform)

    test_set = torchvision.datasets.CIFAR10(
        root='./data/cifar10',
        train=False,
        download=True,
        transform=transforms.Compose([transforms.ToTensor(), normalize]))
    
    acc_train, acc_test = train_(train_set, test_set, lr, depth, mixup_enable,
                                 alpha, model_checkpoint, epochs=epochs)
    return (acc_train, acc_test)
Example no. 7
def my_transform(train=True,
                 resize=224,
                 use_cutout=False,
                 n_holes=1,
                 length=8,
                 auto_aug=False,
                 rand_aug=False):
    transforms = []
    interpolations = [
        PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.HAMMING,
        PIL.Image.BICUBIC, PIL.Image.LANCZOS
    ]

    if train:
        # transforms.append(T.RandomRotation(90))
        transforms.append(
            T.RandomResizedCrop(resize + 5,
                                scale=(0.2, 2.0),
                                interpolation=PIL.Image.BICUBIC))
        transforms.append(T.RandomHorizontalFlip())
        # transforms.append(T.RandomVerticalFlip())
        transforms.append(T.ColorJitter(0.2, 0.2, 0.3, 0.))
        transforms.append(T.CenterCrop(resize))
        if auto_aug:
            transforms.append(AutoAugment())
        if rand_aug:
            transforms.append(Rand_Augment())
    else:
        transforms.append(T.Resize(resize, interpolation=PIL.Image.BICUBIC))
        transforms.append(T.CenterCrop(resize))

    transforms.append(T.ToTensor())
    transforms.append(
        # T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
        # T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        T.Normalize(mean=[0.507, 0.522, 0.500], std=[0.213, 0.207, 0.212]))

    if train and use_cutout:
        transforms.append(Cutout())

    return T.Compose(transforms)
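A brief usage sketch for the helper above (AutoAugment and Rand_Augment come
from outside this snippet; the dataset call is illustrative):

    train_tf = my_transform(train=True, resize=224, use_cutout=True)
    val_tf = my_transform(train=False, resize=224)
    # e.g. torchvision.datasets.ImageFolder('data/train', transform=train_tf)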
Example no. 8
    def __init__(self, dataset, label, cutout=False, i=1, j=18):
        self.dataset = []
        self.label = []
        self.cutout = cutout
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        if self.cutout:
            self.transform = transforms.Compose(
                [transforms.ToTensor(), normalize,
                 Cutout(i, j)])
        else:
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        for data in dataset:
            self.dataset.append(data)

        for lab in label:
            self.label.append(lab)
Example no. 9
def my_transform(train=True,
                 resize=224,
                 use_cutout=False,
                 n_holes=1,
                 length=8,
                 auto_aug=False,
                 raug=False,
                 N=0,
                 M=0):
    transforms = []

    if train:
        transforms.append(T.RandomRotation(90))
        transforms.append(
            T.RandomResizedCrop(resize + 20,
                                scale=(0.2, 1.0),
                                interpolation=PIL.Image.BICUBIC))
        transforms.append(T.RandomHorizontalFlip())
        # transforms.append(T.RandomVerticalFlip())
        transforms.append(T.ColorJitter(0.3, 0.2, 0.2, 0.2))
        transforms.append(T.CenterCrop(resize))
        if auto_aug:
            transforms.append(AutoAugment())
        if raug:
            transforms.append(Randaugment(N, M))

    else:
        transforms.append(T.Resize(resize, interpolation=PIL.Image.BICUBIC))
        transforms.append(T.CenterCrop(resize))

    transforms.append(T.ToTensor())
    transforms.append(
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    if train and use_cutout:
        transforms.append(Cutout())

    return T.Compose(transforms)
Example no. 10
print(args)

# Image Preprocessing
normalize = transforms.Normalize(
    mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
    std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

train_transform = transforms.Compose([])
if args.data_augmentation:
    train_transform.transforms.append(transforms.RandomCrop(32, padding=4))
    train_transform.transforms.append(transforms.RandomHorizontalFlip())
train_transform.transforms.append(transforms.ToTensor())
train_transform.transforms.append(normalize)
if args.cutout:
    train_transform.transforms.append(
        Cutout(n_holes=args.n_holes, length=args.length))

test_transform = transforms.Compose([transforms.ToTensor(), normalize])

if args.dataset == 'cifar10':
    num_classes = 10
    train_dataset = datasets.CIFAR10(root='data/',
                                     train=True,
                                     transform=train_transform,
                                     download=True)

    test_dataset = datasets.CIFAR10(root='data/',
                                    train=False,
                                    transform=test_transform,
                                    download=True)
elif args.dataset == 'cifar100':
Example no. 11
parser.add_argument('--teacher', default="resnet18", type=str)
parser.add_argument('--t',
                    default=3.0,
                    type=float,
                    help="temperature for logit distillation ")
args = parser.parse_args()
print(args)

BATCH_SIZE = 128
LR = 0.1

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4, fill=128),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    Cutout(n_holes=1, length=16),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset, testset = None, None
if args.dataset == 'cifar100':
    trainset = torchvision.datasets.CIFAR100(root='data',
                                             train=True,
                                             download=True,
                                             transform=transform_train)
    testset = torchvision.datasets.CIFAR100(root='data',
                                            train=False,
Example no. 12
    def add_cutout(self,
                   thetadeg,
                   pt,
                   d,
                   drill_offset_deg=0.,
                   clearance_factor=0.75,
                   numel_radial_edge=4,
                   prop_around_cutout=None):
        r"""Add a cutout

        Parameters
        ----------
        thetadeg : float
            Circumferential position of the dimple.
        pt : float
            Normalized meridional position.
        d : float
            Diameter of the drilling machine.
        drill_offset_deg : float, optional
            Angular offset when the drilling is not normal to the shell
            surface. A positive offset means a positive rotation about the
            `\theta` axis, along the meridional plane.
        clearance_factor : float, optional
            Fraction of the diameter to apply as clearance around the cutout.
            This clearance is partitioned and meshed separately from the rest of
            the cone / cylinder.
        numel_radial_edge : int, optional
            Number of elements along the radial edges about the cutout center.
            This parameter affects the aspect ratio of the elements inside the
            cutout area.
        prop_around_cutout : dict, optional
            Dictionary with keys:

            - 'mode': str ('radius' or 'partition')
            - 'radius': float
            - 'stack': list of floats
            - 'plyts': list of floats
            - 'mat_names': list of strings

            Examples:

            - Defining a property with ``'mode'='radius'``::

                prop_around_cutout = {
                    'mode': 'radius',
                    'radius': 10.,
                    'stack': [0, 90, 0],
                    'plyts': [0.125, 0.125, 0.125],
                    'mat_names': ['Alum', 'Alum', 'Alum'],
                }

            - Defining a property with ``'mode'='partition'``::

                prop_around_cutout = {
                    'mode': 'partition',
                    'stack': [0, 90, 0],
                    'plyts': [0.125, 0.125, 0.125],
                    'mat_names': ['Alum', 'Alum', 'Alum'],
                }

            .. note:: ``mat_names`` must be a list of materials already created in
                      the current model in Abaqus

        Returns
        -------
        cutout : :class:`.Cutout` object.

        """
        cutout = Cutout(thetadeg, pt, d, drill_offset_deg, clearance_factor,
                        numel_radial_edge, prop_around_cutout)
        cutout.impconf = self
        self.cutouts.append(cutout)
        return cutout
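As above, a short illustrative call, here exercising the 'partition' mode from
the docstring (all values are made up)::

    cutout = impconf.add_cutout(thetadeg=90., pt=0.4, d=8.,
                                prop_around_cutout={
                                    'mode': 'partition',
                                    'stack': [0, 90, 0],
                                    'plyts': [0.125, 0.125, 0.125],
                                    'mat_names': ['Alum', 'Alum', 'Alum'],
                                })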
Example no. 13
test_id = dataset + '_' + model

# Image Preprocessing
normalize = transforms.Normalize(
    mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
    std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

train_transform = transforms.Compose([])
train_transform.transforms.append(transforms.Resize(256))
if data_augmentation:
    train_transform.transforms.append(transforms.RandomCrop(256, padding=12))
    train_transform.transforms.append(transforms.RandomHorizontalFlip())
train_transform.transforms.append(transforms.ToTensor())
train_transform.transforms.append(normalize)
if cutout:
    train_transform.transforms.append(Cutout(n_holes=n_holes, length=length))
train_transform.transforms.append(transforms.Resize(32))

test_transform = transforms.Compose([transforms.ToTensor(), normalize])

if dataset == 'cifar10':
    num_classes = 10
    train_dataset = datasets.CIFAR10(root='data/',
                                     train=True,
                                     transform=train_transform,
                                     download=True)

    test_dataset = datasets.CIFAR10(root='data/',
                                    train=False,
                                    transform=test_transform,
                                    download=True)
Example no. 14
        else torch.device('cuda:{}'.format(options.device))
    normalize = transforms.Normalize(
        mean=[x / 255.0 for x in [123.675, 116.28, 103.53]],
        std=[x / 255.0 for x in [58.395, 57.12, 57.375]])
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ]) if not options.cutout else \
    transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
        Cutout(n_holes=1, length=options.length)
    ])

    test_transform = transforms.Compose([transforms.ToTensor(), normalize])
    if options.dataset == 'cifar10':
        train_set = torchvision.datasets.CIFAR10(root=root,
                                                 train=True,
                                                 download=True,
                                                 transform=train_transform)
        test_set = torchvision.datasets.CIFAR10(root=root,
                                                train=False,
                                                download=True,
                                                transform=test_transform)
        num_classes = 10
    elif options.dataset == 'svhn':
        normalize = transforms.Normalize(
Example no. 15
def main_worker(args):
    global best_acc
    global logger

    start_epoch = args.start_epoch

    if not os.path.exists(args.checkpoint) and not args.resume:
        mkdir_p(args.checkpoint)

    if args.gpu is not None:
        print("Use GPU: {} for experiments".format(args.gpu))

    model = models.__dict__[args.arch](args.num_classes)
    if args.pretrained:
        print("==> Load pretrained model")
        model_dict = model.state_dict()
        pretrained_dict = model_zoo.load_url(model_urls[args.arch])
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)

    torch.cuda.set_device(args.gpu)
    model = model.cuda()

    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    title = 'CIFAR100-' + args.arch
    if args.resume:
        if os.path.isfile(args.resume):
            print('==> Resuming from checkpoint..')
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            args.checkpoint = os.path.dirname(args.resume)
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            exit(-1)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Val Loss', 'Val Acc', 'Val Side1 Acc', 'Val Side2 Acc',
                          'Val Side3 Acc'])

    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.2023, 0.1994, 0.2010])
    tra_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        # CIFAR10Policy(),
        transforms.ToTensor(),
        Cutout(n_holes=1, length=16),
        normalize,
    ])
    tra_test = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # Set download=True to download data
    train_dataset = tv.datasets.CIFAR100(root=args.data, train=True, transform=tra_train, download=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers
    )

    val_dataset = tv.datasets.CIFAR100(root=args.data, train=False, transform=tra_test, download=False)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers
    )

    if args.evaluate:
        m, s1, s2, s3 = test(val_loader, model)
        print(m)
        print("\n")
        print(s1)
        print("\n")
        print(s2)
        print("\n")
        print(s3)

        return

    writer = SummaryWriter(args.checkpoint)

    for epoch in range(start_epoch, args.epochs):
        total_loss = train(train_loader, model, criterion, epoch, optimizer, args)
        writer.add_scalar('Train loss', total_loss, epoch + 1)

        # evaluate on validation set
        val_total_loss, val_main_acc, val_side1_acc, val_side2_acc, val_side3_acc = validate(
            val_loader, model,
            criterion, args)
        lr = optimizer.param_groups[0]['lr']
        logger.append(
            [lr, total_loss, val_total_loss, val_main_acc, val_side1_acc, val_side2_acc, val_side3_acc])

        writer.add_scalar('Learning rate', lr, epoch + 1)
        writer.add_scalar('Val Loss', val_total_loss, epoch + 1)
        writer.add_scalar('Main Acc', val_main_acc, epoch + 1)
        writer.add_scalar('Side1 Acc', val_side1_acc, epoch + 1)
        writer.add_scalar('Side2 Acc', val_side2_acc, epoch + 1)
        writer.add_scalar('Side3 Acc', val_side3_acc, epoch + 1)

        is_best = best_acc < val_main_acc
        best_acc = max(best_acc, val_main_acc)

        if args.save:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)

    logger.close()
    writer.close()

    print('Best accuracy:')
    print(best_acc)

    torch.cuda.empty_cache()
Example no. 16
    # obviously flipping is a bad idea, and it makes some sense not to
    # crop because there are a lot of distractor digits in the edges of the
    # image
    transform_train = transforms.ToTensor()

if args.autoaugment or args.cutout:
    assert (args.dataset == 'cifar10')
    transform_list = [
        transforms.RandomCrop(32, padding=4, fill=128),
        # fill parameter needs torchvision installed from source
        transforms.RandomHorizontalFlip()]
    if args.autoaugment:
        transform_list.append(CIFAR10Policy())
    transform_list.append(transforms.ToTensor())
    if args.cutout:
        transform_list.append(Cutout(n_holes=1, length=16))

    transform_train = transforms.Compose(transform_list)
    logger.info('Applying aggressive training augmentation: %s'
                % transform_train)

transform_test = transforms.Compose([
    transforms.ToTensor()])
# ------------------------------------------------------------------------------

# ----------------- DATASET WITH AUX PSEUDO-LABELED DATA -----------------------
trainset = SemiSupervisedDataset(base_dataset=args.dataset,
                                 add_svhn_extra=args.svhn_extra,
                                 root=args.data_dir, train=True,
                                 download=True, transform=transform_train,
                                 aux_data_filename=args.aux_data_filename,
Example no. 17
],
                        p=1)

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose([
    #transforms.Resize(input_size, 3),
    #transforms.RandomResizedCrop(input_size, scale=(0.8, 1.0), ratio=(0.8, 1.2), interpolation=3),
    #transforms.RandomHorizontalFlip(),
    #transforms.RandomVerticalFlip(),
    #transforms.RandomRotation(90),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])
if CUTOUT:
    train_transform.transforms.append(Cutout(n_holes=1, length=160))
val_transform = transforms.Compose([
    transforms.Resize(input_size, 3),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])
tta_transform = transforms.Compose([
    transforms.Resize(input_size, 3),
    transforms.Lambda(lambda image: torch.stack([
        transforms.ToTensor()(image),
        transforms.ToTensor()(image.rotate(90, resample=0)),
        transforms.ToTensor()(image.rotate(180, resample=0)),
        transforms.ToTensor()(image.rotate(270, resample=0)),
        transforms.ToTensor()(image.transpose(method=Image.FLIP_TOP_BOTTOM)),
        transforms.ToTensor()
        (image.transpose(method=Image.FLIP_TOP_BOTTOM).rotate(90, resample=0)),
Example no. 18
def find_best_cutout_size(range_to_test, nb_evaluation_per_size, epochs):
    result = dict()
    seed = 12
    for size in range_to_test:
        print('\n', 'size : ', size, '\n')
        n_holes = 1
        hole_length = size

        labels_text = [
            "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal",
            "Shirt", "Sneaker", "Bag", "Ankle boot"
        ]

        transform_training = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.2867], std=[0.3205]),
            Cutout(n_holes, hole_length)
        ])

        transform_validation = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.2867], std=[0.3205]),
        ])

        full_trainset = torchvision.datasets.FashionMNIST(
            root='data',
            train=True,
            download=True,
            transform=transform_training)
        trainset, _ = torch.utils.data.random_split(
            full_trainset, (10000, 50000),
            generator=torch.Generator().manual_seed(seed))

        full_validset = torchvision.datasets.FashionMNIST(
            root='data',
            train=False,
            download=True,
            transform=transform_validation)  # was transform_training (bug): Cutout must not run at validation
        validset, _ = torch.utils.data.random_split(
            full_validset, (1000, 9000),
            generator=torch.Generator().manual_seed(seed))

        trainloader = torch.utils.data.DataLoader(trainset,
                                                  batch_size=64,
                                                  shuffle=True,
                                                  num_workers=2)
        validloader = torch.utils.data.DataLoader(validset,
                                                  batch_size=64,
                                                  shuffle=False,
                                                  num_workers=2)

        DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        train_stats = np.zeros((nb_evaluation_per_size, 4))
        for i in range(nb_evaluation_per_size):
            print(f"try : {i +1}")
            train_stats[i] = iterate_on_model(epochs, DEVICE, trainloader,
                                              validloader)
            print('*')

        avg_training_accuracy = np.mean(train_stats[:, 0])
        avg_validation_accuracy = np.mean(train_stats[:, 1])
        avg_training_loss = np.mean(train_stats[:, 2])
        avg_validation_loss = np.mean(train_stats[:, 3])

        std_training_accuracy = np.std(train_stats[:, 0])
        std_validation_accuracy = np.std(train_stats[:, 1])
        std_training_loss = np.std(train_stats[:, 2])
        std_validation_loss = np.std(train_stats[:, 3])
        result[size] = [
            avg_training_accuracy, avg_validation_accuracy, avg_training_loss,
            avg_validation_loss, std_training_accuracy,
            std_validation_accuracy, std_training_loss, std_validation_loss
        ]

    return result
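A hedged usage sketch (iterate_on_model is defined outside this snippet; the
sweep values are illustrative):

    results = find_best_cutout_size(range(4, 17, 4),
                                    nb_evaluation_per_size=3,
                                    epochs=10)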
Example no. 19
    test_id = 'cifar10' + '_' + model

    normalize = transforms.Normalize(
        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
        std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

    train_transform = transforms.Compose([])
    if data_augmentation:
        train_transform.transforms.append(transforms.RandomCrop(32, padding=4))
        train_transform.transforms.append(transforms.RandomHorizontalFlip())
    train_transform.transforms.append(transforms.ToTensor())
    train_transform.transforms.append(normalize)

    # cutout augmentation (applied unconditionally in this variant)
    train_transform.transforms.append(Cutout(n_holes=n_holes, length=length))

    test_transform = transforms.Compose([transforms.ToTensor(), normalize])

    train_dataset = datasets.CIFAR10(root='data/',
                                     train=True,
                                     transform=train_transform,
                                     download=True)

    test_dataset = datasets.CIFAR10(root='data/',
                                    train=False,
                                    transform=test_transform,
                                    download=True)

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
Example no. 20
transform_ori = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    #transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784)),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

transform_aug = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    CIFAR10Policy(),
    transforms.ToTensor(),
    #transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784)),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    Cutout(n_holes=args.n_holes, length=args.cutout_size),
])

#transform_aug = transform_ori
#transform_train_aug.transforms.append()

#if args.cutout:
#    transform_aug.transforms.append(Cutout(n_holes=args.n_holes, length=args.cutout_size))

#if args.autoaugment:
#    transform_aug.transforms.append(CIFAR10Policy())

transform_test = transforms.Compose([
    transforms.ToTensor(),
    #transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784)),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
Example no. 21
    print('==> Preparing data..')
    if args.a:  #TANNER: this was backwards before, switched in v2
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
        ])
    else:
        transform_train = transforms.Compose([
            # transforms.RandomRotation(15),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
        ])
    if args.c:
        transform_train.transforms.append(Cutout(n_holes=1, length=8))
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
    ])
    # CIFAR10 normalization for reference
    # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))

    trainset = torchvision.datasets.CIFAR100(root='./data',
                                             train=True,
                                             download=True,
                                             transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=128,
                                              shuffle=True,
                                              num_workers=2)
Example no. 22
    def __init__(self,
                 p1,
                 operation1,
                 magnitude_idx1,
                 p2,
                 operation2,
                 magnitude_idx2,
                 fillcolor=(128, 128, 128)):
        cutout_op = Cutout(1, fillcolor)
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10,
            "cutout": [i * 2 for i in range(1, 11)]  # from 2 to 20 pixel
        }

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot, Image.new("RGBA", rot.size,
                                                  (128, ) * 4),
                                   rot).convert(img.mode)

        func = {
            "shearX":
            lambda img, magnitude: img.transform(img.size,
                                                 Image.AFFINE,
                                                 (1, magnitude * random.choice(
                                                     [-1, 1]), 0, 0, 1, 0),
                                                 Image.BICUBIC,
                                                 fillcolor=fillcolor),
            "shearY":
            lambda img, magnitude: img.transform(img.size,
                                                 Image.AFFINE,
                                                 (1, 0, 0, magnitude * random.
                                                  choice([-1, 1]), 1, 0),
                                                 Image.BICUBIC,
                                                 fillcolor=fillcolor),
            "translateX":
            lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice(
                    [-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY":
            lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.
                               choice([-1, 1])),
                fillcolor=fillcolor),
            "rotate":
            lambda img, magnitude: rotate_with_fill(img, magnitude),
            "color":
            lambda img, magnitude: ImageEnhance.Color(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "posterize":
            lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize":
            lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast":
            lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness":
            lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness":
            lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast":
            lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize":
            lambda img, magnitude: ImageOps.equalize(img),
            "invert":
            lambda img, magnitude: ImageOps.invert(img),
            'cutout':
            lambda img, magnitude: cutout_op(img, magnitude)
        }

        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]
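The snippet ends before the policy is applied; a sketch of the customary
__call__ for this kind of sub-policy, indented as it would sit inside the class
above (an assumption, following common AutoAugment implementations; `random` is
already used by the class):

    def __call__(self, img):
        # apply each stored operation with its own probability
        if random.random() < self.p1:
            img = self.operation1(img, self.magnitude1)
        if random.random() < self.p2:
            img = self.operation2(img, self.magnitude2)
        return img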
Example no. 23
    softmax_targets = F.softmax(targets/3.0, dim=1)
    return -(log_softmax_outputs * softmax_targets).sum(dim=1).mean()

def clip_grads(params):
    params = list(
        filter(lambda p: p.requires_grad and p.grad is not None, params))
    if len(params) > 0:
        return torch.nn.utils.clip_grad_norm_(params, max_norm=args.clip_grad, norm_type=2)


BATCH_SIZE = 128
LR = 0.1

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4, fill=128),
    transforms.RandomHorizontalFlip(),
    CIFAR10Policy(),
    transforms.ToTensor(),
    Cutout(n_holes=1, length=16),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset, testset = None, None
if args.class_num == 100:
    print("dataset: CIFAR100")
    trainset = torchvision.datasets.CIFAR100(
        root='/home/lthpc/datasets/data',
        train=True,
        download=False,
        transform=transform_train
Example no. 24
def main():

    global best_prec1
    best_prec1 = 0

    global val_acc
    val_acc = []

    global class_num

    class_num = 10 if args.dataset == 'cifar10' else 100

    normalize = transforms.Normalize(
        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
        std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

    if args.augment:
        if args.autoaugment:
            print('Autoaugment')
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: F.pad(x.unsqueeze(0), (4, 4, 4, 4),
                                                  mode='reflect').squeeze()),
                transforms.ToPILImage(),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                CIFAR10Policy(),
                transforms.ToTensor(),
                Cutout(n_holes=args.n_holes, length=args.length),
                normalize,
            ])

        elif args.cutout:
            print('Cutout')
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: F.pad(x.unsqueeze(0), (4, 4, 4, 4),
                                                  mode='reflect').squeeze()),
                transforms.ToPILImage(),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                Cutout(n_holes=args.n_holes, length=args.length),
                normalize,
            ])

        else:
            print('Standard Augmentation!')
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: F.pad(x.unsqueeze(0), (4, 4, 4, 4),
                                                  mode='reflect').squeeze()),
                transforms.ToPILImage(),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

    transform_test = transforms.Compose([transforms.ToTensor(), normalize])

    kwargs = {'num_workers': 1, 'pin_memory': True}
    assert (args.dataset == 'cifar10' or args.dataset == 'cifar100')
    train_loader = torch.utils.data.DataLoader(
        datasets.__dict__[args.dataset.upper()]('../data',
                                                train=True,
                                                download=True,
                                                transform=transform_train),
        batch_size=training_configurations[args.model]['batch_size'],
        shuffle=True,
        **kwargs)
    val_loader = torch.utils.data.DataLoader(
        datasets.__dict__[args.dataset.upper()]('../data',
                                                train=False,
                                                transform=transform_test),
        batch_size=training_configurations[args.model]['batch_size'],
        shuffle=True,
        **kwargs)

    # create model
    if args.model == 'resnet':
        model = eval('networks.resnet.resnet' + str(args.layers) +
                     '_cifar')(dropout_rate=args.droprate)
    elif args.model == 'se_resnet':
        model = eval('networks.se_resnet.resnet' + str(args.layers) +
                     '_cifar')(dropout_rate=args.droprate)
    elif args.model == 'wideresnet':
        model = networks.wideresnet.WideResNet(args.layers,
                                               class_num,
                                               args.widen_factor,
                                               dropRate=args.droprate)
    elif args.model == 'se_wideresnet':
        model = networks.se_wideresnet.WideResNet(
            args.layers,
            class_num,
            args.widen_factor,
            dropRate=args.droprate)

    elif args.model == 'densenet_bc':
        model = networks.densenet_bc.DenseNet(
            growth_rate=args.growth_rate,
            block_config=(int((args.layers - 4) / 6), ) * 3,
            compression=args.compression_rate,
            num_init_features=24,
            bn_size=args.bn_size,
            drop_rate=args.droprate,
            small_inputs=True,
            efficient=False)
    elif args.model == 'shake_pyramidnet':
        model = networks.shake_pyramidnet.PyramidNet(dataset=args.dataset,
                                                     depth=args.layers,
                                                     alpha=args.alpha,
                                                     num_classes=class_num,
                                                     bottleneck=True)

    elif args.model == 'resnext':
        if args.cardinality == 8:
            model = networks.resnext.resnext29_8_64(class_num)
        if args.cardinality == 16:
            model = networks.resnext.resnext29_16_64(class_num)

    elif args.model == 'shake_shake':
        if args.widen_factor == 112:
            model = networks.shake_shake.shake_resnet26_2x112d(class_num)
        if args.widen_factor == 32:
            model = networks.shake_shake.shake_resnet26_2x32d(class_num)
        if args.widen_factor == 96:
            # the original reused the 2x32d constructor here, which looks like
            # a copy-paste slip; assuming a 2x96d variant exists in networks.shake_shake
            model = networks.shake_shake.shake_resnet26_2x96d(class_num)

    elif args.model == 'shake_shake_x':

        model = networks.shake_shake.shake_resnext29_2x4x64d(class_num)

    if not os.path.isdir(check_point):
        mkdir_p(check_point)

    fc = Full_layer(int(model.feature_num), class_num)

    print('Number of final features: {}'.format(int(model.feature_num)))

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()]) +
        sum([p.data.nelement() for p in fc.parameters()])))

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    isda_criterion = ISDALoss(int(model.feature_num), class_num).cuda()
    ce_criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(
        [{
            'params': model.parameters()
        }, {
            'params': fc.parameters()
        }],
        lr=training_configurations[args.model]['initial_learning_rate'],
        momentum=training_configurations[args.model]['momentum'],
        nesterov=training_configurations[args.model]['nesterov'],
        weight_decay=training_configurations[args.model]['weight_decay'])

    model = torch.nn.DataParallel(model).cuda()
    fc = nn.DataParallel(fc).cuda()

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        fc.load_state_dict(checkpoint['fc'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        isda_criterion = checkpoint['isda_criterion']
        val_acc = checkpoint['val_acc']
        best_prec1 = checkpoint['best_acc']
        np.savetxt(accuracy_file, np.array(val_acc))
    else:
        start_epoch = 0

    for epoch in range(start_epoch,
                       training_configurations[args.model]['epochs']):

        adjust_learning_rate(optimizer, epoch + 1)

        # train for one epoch
        train(train_loader, model, fc, isda_criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, fc, ce_criterion, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'fc': fc.state_dict(),
                'best_acc': best_prec1,
                'optimizer': optimizer.state_dict(),
                'isda_criterion': isda_criterion,
                'val_acc': val_acc,
            },
            is_best,
            checkpoint=check_point)
        print('Best accuracy: ', best_prec1)
        np.savetxt(accuracy_file, np.array(val_acc))

    print('Best accuracy: ', best_prec1)
    print('Average accuracy', sum(val_acc[len(val_acc) - 10:]) / 10)
    # val_acc.append(sum(val_acc[len(val_acc) - 10:]) / 10)
    # np.savetxt(val_acc, np.array(val_acc))
    np.savetxt(accuracy_file, np.array(val_acc))
Example no. 25
def prepare(args):
    global trainloader
    global testloader
    global net
    global criterion
    global optimizer
    global scheduler

    # Data
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        CIFAR10Policy(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
        Cutout(n_holes=args.n_holes, length=args.length)
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=512,
                                              shuffle=True,
                                              num_workers=4)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=512,
                                             shuffle=False,
                                             num_workers=4)

    #classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # Model
    print('==> Building model..')
    net = CXH()  #CXH_Squeeze_Excitation() #CXH()

    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss()
    # criterion = CrossEntropyLabelSmooth(10)
    # optimizer = optim.SGD(net.parameters(), lr=0.1,
    #                       momentum=0.9, weight_decay=5e-4)

    optimizer = SAM(net.parameters(), torch.optim.SGD, lr=0.1, momentum=0.9)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, 1, 2)
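SAM needs a two-pass update rather than a plain optimizer.step(); a hedged
training-step sketch, assuming the widely used first_step/second_step interface
of the popular PyTorch SAM implementation (not shown in the snippet above):

    for inputs, targets in trainloader:
        inputs, targets = inputs.to(device), targets.to(device)
        criterion(net(inputs), targets).backward()
        optimizer.first_step(zero_grad=True)   # ascend to the local worst case
        criterion(net(inputs), targets).backward()
        optimizer.second_step(zero_grad=True)  # real update at the perturbed weights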