import os

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms


def main():
    # `parser` (the argparse setup) is assumed to be defined at module
    # level, as in the standard PyTorch ImageNet example.
    global args, best_prec1
    args = parser.parse_args()

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

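    # AlexNet and VGG keep most of their parameters in the fully connected
    # classifier; parallelizing only the convolutional features avoids
    # replicating those large layers across GPUs.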
    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=0.5,
                                weight_decay=args.weight_decay)

    best_prec1 = 0
    # optionally resume from a checkpoint; a loaded checkpoint overrides
    # best_prec1 and start_epoch
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

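    # Let cuDNN benchmark convolution algorithms; this helps when input
    # sizes are fixed across iterations.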
    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    if not os.path.exists(traindir):
        os.mkdir(traindir)
    valdir = os.path.join(args.data, 'val')
    if not os.path.exists(valdir):
        os.mkdir(valdir)
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MYDATA(
            '/home/hankeji/Desktop/cifar12',
            train=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                #transforms.Normalize((0.1307,), (0.3081,))
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)

    val_loader = torch.utils.data.DataLoader(
        datasets.MYDATA(
            '/home/hankeji/Desktop/cifar12',
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                #transforms.Normalize((0.1307,), (0.3081,))
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    # per-epoch record of (prec@1, best prec@1), plotted after training
    c = np.zeros((args.epochs, 2))
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        print(epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        c[epoch, 0] = prec1
        c[epoch, 1] = best_prec1

        if is_best:
            torch.save(
                model,
                '/home/hankeji/Desktop/papercode/tmp/DeseNet_netbest_GTSRB_40.pkl'
            )
        else:
            torch.save(
                model,
                '/home/hankeji/Desktop/papercode/tmp/DeseNet_GTSRB_40.pkl')
        '''
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best)
        '''
    # blue: per-epoch prec@1; red: running best prec@1
    import matplotlib.pyplot as plt
    plt.plot(np.arange(args.epochs), c[:, 0], 'b',
             np.arange(args.epochs), c[:, 1], 'r')
    plt.show()
    return 0
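

# `adjust_learning_rate`, `train`, `validate`, and `save_checkpoint` are
# called in main() but not defined in this snippet. A minimal sketch of the
# learning-rate schedule, assuming the step decay from the standard PyTorch
# ImageNet example (divide the initial LR by 10 every 30 epochs):
def adjust_learning_rate(optimizer, epoch):
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
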
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
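
# Worker subprocesses speed up data loading; pinned host memory speeds up
# host-to-GPU copies.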
kwargs = {'num_workers': 1, 'pin_memory': True}

train_loader = torch.utils.data.DataLoader(
    datasets.MYDATA(
        '/home/hankeji/Desktop/cifar12',
        train=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            #transforms.Normalize((0.1307,), (0.3081,))
        ])),
    batch_size=128,
    shuffle=True,
    **kwargs)

val_loader = torch.utils.data.DataLoader(
    datasets.MYDATA(
        '/home/hankeji/Desktop/cifar12',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            #transforms.Normalize((0.1307,), (0.3081,))
        ])),
    batch_size=1024,
    shuffle=True,
    **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(
        '../tmp',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ])),
    batch_size=100,
    shuffle=True,
    **kwargs)
adv_data = torch.utils.data.DataLoader(
    datasets.MYDATA(
        '../tmp',
        train=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ])),
    batch_size=100,
    shuffle=True,
    **kwargs)
#pth0='/home/hankeji/Desktop/Adversarial Examples/Train-FGSM-0.2.npy'
#adv_example=torch.load('/home/hankeji/Desktop/Adversarial Examples/Cat_Train-FGSM-0.2.pkl')


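# Small convolutional target model; con1's single input channel implies
# 1-channel (MNIST-style) images.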
class target_m(nn.Module):
    def __init__(self):
        super(target_m, self).__init__()
        self.con1 = nn.Conv2d(1, 20, 5)
        self.con2 = nn.Conv2d(20, 10, 5)
        self.con3 = nn.Conv2d(10, 20, 5)