Example #1
def main():
    # Scale and initialize the parameters
    best_prec1 = 0
    configs.TRAIN.epochs = int(
        math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))
    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value

    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))

    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    # Create the model
    if configs.pretrained:
        print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch]()
    # Move the model to the GPU (DataParallel wrapping happens after AMP initialization below)
    model.cuda()

    # Reverse mapping from each parameter to the name of the module that owns it
    param_to_moduleName = {}
    for m in model.modules():
        for p in m.parameters(recurse=False):
            param_to_moduleName[p] = str(type(m).__name__)

    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()

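    # Apply weight decay to all parameters except those of BatchNorm layers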
    group_decay = [
        p for p in model.parameters()
        if 'BatchNorm' not in param_to_moduleName[p]
    ]
    group_no_decay = [
        p for p in model.parameters() if 'BatchNorm' in param_to_moduleName[p]
    ]
    groups = [
        dict(params=group_decay),
        dict(params=group_no_decay, weight_decay=0)
    ]
    optimizer = torch.optim.SGD(groups,
                                configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)

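    # Optionally enable Apex AMP mixed-precision training before wrapping the model in DataParallel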
    if configs.TRAIN.half and not configs.evaluate:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    model = torch.nn.DataParallel(model)

    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))

    # Initiate data loaders
    traindir = os.path.join(configs.data, 'train')
    valdir = os.path.join(configs.data, 'val')

    resize_transform = []

    if configs.DATA.img_size > 0:
        resize_transform = [transforms.Resize(configs.DATA.img_size)]

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose(resize_transform + [
            transforms.RandomResizedCrop(configs.DATA.crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=configs.DATA.batch_size,
        shuffle=True,
        num_workers=configs.DATA.workers,
        pin_memory=True,
        sampler=None)

    normalize = transforms.Normalize(mean=configs.TRAIN.mean,
                                     std=configs.TRAIN.std)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            valdir,
            transforms.Compose(resize_transform + [
                transforms.CenterCrop(configs.DATA.crop_size),
                transforms.ToTensor(),
            ])),
        batch_size=configs.DATA.batch_size,
        shuffle=False,
        num_workers=configs.DATA.workers,
        pin_memory=True)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(val_loader, model, criterion, pgd_param[0],
                         pgd_param[1], configs, logger)
        validate(val_loader, model, criterion, configs, logger)
        return

    # Piecewise-linear learning-rate schedule interpolated from (epoch, lr) control points
    lr_schedule = lambda t: np.interp(
        [t], configs.TRAIN.lr_epochs, configs.TRAIN.lr_values)[0]

    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, lr_schedule,
              configs.TRAIN.half)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, configs, logger)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': configs.TRAIN.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, os.path.join('trained_models', configs.output_name),
            epoch + 1)
Example #2
def train_free():
    # Scale and initialize the parameters
    best_prec1 = 0
    configs.TRAIN.epochs = int(
        math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))
    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value

    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))

    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    # Create the model
    if configs.pretrained:
        print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch]()

    # Wrap the model into DataParallel
    model = torch.nn.DataParallel(model).cuda()

    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer:
    optimizer = torch.optim.SGD(model.parameters(),
                                configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)

    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))

    # setup data loader
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(configs.DATA.cifar10_mean,
                             configs.DATA.cifar10_std)
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(configs.DATA.cifar10_mean,
                             configs.DATA.cifar10_std)
    ])
    train_dataset = torchvision.datasets.CIFAR10(root='../data',
                                                 train=True,
                                                 download=True,
                                                 transform=transform_train)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=configs.DATA.batch_size,
        shuffle=True,
        num_workers=configs.DATA.workers,
        pin_memory=True,
        sampler=None)

    testset = torchvision.datasets.CIFAR10(root='../data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    val_loader = torch.utils.data.DataLoader(
        testset,
        batch_size=configs.DATA.batch_size,
        shuffle=False,
        num_workers=configs.DATA.workers,
        pin_memory=True)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(val_loader, model, criterion, pgd_param[0],
                         pgd_param[1], configs, logger)
        validate(val_loader, model, criterion, configs, logger)
        return

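    # Stop training early if validation accuracy does not improve for 15 consecutive epochs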
    early_stopping = EarlyStopping(patience=15, verbose=True)

    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):
        adjust_learning_rate(configs.TRAIN.lr, optimizer, epoch,
                             configs.ADV.n_repeats)

        # train for one epoch
        do_train_free(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1, stopped, early_stopping = validate(val_loader, model, criterion,
                                                  configs, logger,
                                                  early_stopping)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': configs.TRAIN.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, os.path.join('trained_models', configs.output_name))
        if stopped:
            break

    # Automatically perform PGD Attacks at the end of training
    logger.info(pad_str(' Performing PGD Attacks '))
    for pgd_param in configs.ADV.pgd_attack:
        validate_pgd(val_loader, model, criterion, pgd_param[0], pgd_param[1],
                     configs, logger)
Example #3
def main():
    # Scale and initialize the parameters
    best_prec1 = 0
    if not configs.full_epoch:
        configs.TRAIN.epochs = int(math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))

    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value
    
    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))
    
    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    
    # Create the model
    # if configs.pretrained:
    #     print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
    #     model = models.__dict__[configs.TRAIN.arch](pretrained=True)
    # else:
    #     print("=> creating model '{}'".format(configs.TRAIN.arch))
    #     model = models.__dict__[configs.TRAIN.arch]()

    print('Loading ARMA model:')
    if configs.model == 'res':
        model = ResNet_('ResNet18', True, 'CIFAR10', 0, 3, 3)
    else:
        model = VGG('VGG16', True, 'CIFAR10', 0, 3, 3)

    # Wrap the model into DataParallel
    model = torch.nn.DataParallel(model).cuda()
    
    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()
    
    # Optimizer:
    optimizer = torch.optim.SGD(model.parameters(), configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)
    
    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))


    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])


    train_dataset = datasets.CIFAR10('../data', train=True, download=True,
                                     transform=transform_train)
    test_dataset = datasets.CIFAR10('../data', train=False,
                                    transform=transform_test)

    train_loader = torch.utils.data.DataLoader(
                train_dataset, batch_size=256, shuffle=True,
                num_workers=16, pin_memory=True, drop_last=True)

    val_loader = torch.utils.data.DataLoader(test_dataset, batch_size=256,
                                             shuffle=False, num_workers=16,
                                             pin_memory=True)


    # Initiate data loaders
    # traindir = os.path.join(configs.data, 'train')
    # valdir = os.path.join(configs.data, 'val')
    
    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(configs.DATA.crop_size),
    #         transforms.RandomHorizontalFlip(),
    #         transforms.ToTensor(),
    #     ]))

    # train_loader = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=configs.DATA.batch_size, shuffle=True,
    #     num_workers=configs.DATA.workers, pin_memory=True, sampler=None)
    
    normalize = transforms.Normalize(mean=configs.TRAIN.mean,
                                    std=configs.TRAIN.std)

    # val_loader = torch.utils.data.DataLoader(
    #     datasets.ImageFolder(valdir, transforms.Compose([
    #         transforms.Resize(configs.DATA.img_size),
    #         transforms.CenterCrop(configs.DATA.crop_size),
    #         transforms.ToTensor(),
    #     ])),
    #     batch_size=configs.DATA.batch_size, shuffle=False,
    #     num_workers=configs.DATA.workers, pin_memory=True)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(val_loader, model, criterion, pgd_param[0], pgd_param[1], configs, logger)
        validate(val_loader, model, criterion, configs, logger)
        return
    
    save_folder = os.path.join('trained_models', configs.output_name)
    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):
        adjust_learning_rate(configs.TRAIN.lr, optimizer, epoch,
                             configs.ADV.n_repeats,
                             adjust_epoch_factor=configs.adj_lr_factor)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, configs, logger)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': configs.TRAIN.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, os.path.join('trained_models', configs.output_name))
        
    # Automatically perform PGD Attacks at the end of training
    logger.info(pad_str(' Performing PGD Attacks '))

    f = open(os.path.join(save_folder, 'log.txt'), 'w')

    for pgd_param in configs.ADV.pgd_attack:
        acc = validate_pgd(val_loader, model, criterion, pgd_param[0],
                           pgd_param[1], configs, logger)
        f.write('{} {}\n'.format(pgd_param, acc))

    f.close()
Example #4
def main():
    global name_all
    global para_all
    global mean_per_epoch_cle
    global std_per_epoch_cle
    global mean_per_epoch_adv
    global std_per_epoch_adv
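    # The globals above hold per-layer gradient statistics collected when configs.print_gradients is enabled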
    # Scale and initialize the parameters
    best_prec1 = 0
    configs.TRAIN.epochs = int(
        math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))
    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value

    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))

    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    # Create the model
    if configs.pretrained:
        print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](pretrained=True,
                                                    useFN=configs.FN,
                                                    useWN=configs.WN)
    else:
        print("=> creating model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](useFN=configs.FN,
                                                    useWN=configs.WN)

    # Wrap the model into DataParallel
    model = torch.nn.DataParallel(model).cuda()

    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer:
    optimizer = torch.optim.SGD(model.parameters(),
                                configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)

    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))

    # Initiate data loaders
    traindir = os.path.join(configs.data, 'train')
    valdir = os.path.join(configs.data, 'val')

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(configs.DATA.crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=configs.DATA.batch_size,
        shuffle=True,
        num_workers=configs.DATA.workers,
        pin_memory=True,
        sampler=None)

    normalize = transforms.Normalize(mean=configs.TRAIN.mean,
                                     std=configs.TRAIN.std)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(configs.DATA.img_size),
                transforms.CenterCrop(configs.DATA.crop_size),
                transforms.ToTensor(),
            ])),
        batch_size=configs.DATA.batch_size,
        shuffle=False,
        num_workers=configs.DATA.workers,
        pin_memory=True)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(val_loader, model, criterion, pgd_param[0],
                         pgd_param[1], configs, logger)
        validate(val_loader, model, criterion, configs, logger)
        return

    # Collect conv and bn parameters so per-epoch gradient statistics can be tracked
    if configs.print_gradients:
        pattern_bn = re.compile(r'bn')
        pattern_conv = re.compile(r'conv')
        for name, param in model.named_parameters():
            if pattern_bn.search(name) or pattern_conv.search(name):
                name_all.append(name)
                para_all[name] = param
                mean_per_epoch_cle[name] = []
                mean_per_epoch_adv[name] = []
                std_per_epoch_cle[name] = []
                std_per_epoch_adv[name] = []

    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):
        adjust_learning_rate(configs.TRAIN.lr, optimizer, epoch,
                             configs.ADV.n_repeats)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, configs, logger)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': configs.TRAIN.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, os.path.join('trained_models', configs.output_name))

    # Automatically perform PGD Attacks at the end of training
    logger.info(pad_str(' Performing PGD Attacks '))
    for pgd_param in configs.ADV.pgd_attack:
        validate_pgd(val_loader, model, criterion, pgd_param[0], pgd_param[1],
                     configs, logger)
Example #5
def main():

    print(configs.TRAIN.epochs)

    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))

    # Create weights folder
    if not os.path.isdir(os.path.join('train_fgsm_output',
                                      configs.output_name)):
        os.makedirs(os.path.join('train_fgsm_output', configs.output_name))

    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    train_loader, test_loader = get_loaders(args.data_dir,
                                            configs.DATA.batch_size,
                                            configs.DATA.workers,
                                            configs.DATA.crop_size)

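    # Convert the attack budget and step sizes from pixel units (0-255) into normalized-image units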
    epsilon = (args.epsilon / 255.) / std
    alpha = (args.alpha / 255.) / std
    pgd_alpha = (2 / 255.) / std

    print("=> creating model '{}'".format(configs.TRAIN.arch))
    # model = models.__dict__[configs.TRAIN.arch]()

    model = WideResNet().to(device)

    # model = PreActResNet18().cuda()
    # resnet50 = models.resnet50()
    # model =  resnet18

    # if(configs.load_weights):
    #     logger.info(pad_str("LOADING WEIGHTS"))
    #     model_path = "cifar_model_weights_30_epochs.pth"
    #     state_dict = torch.load(model_path)
    #     model.load_state_dict(state_dict)
    #     model = model.eval()

    # Use GPU or CPU
    model = model.to(device)

    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            logger.info(pad_str("PGD-" + str(pgd_param[0])))
            pgd_loss, pgd_acc = evaluate_pgd(test_loader, model, pgd_param[0],
                                             10)
            test_loss, test_acc = evaluate_standard(test_loader, model)

            logger.info('Test Loss \t Test Acc \t PGD Loss \t PGD Acc')
            logger.info('%.4f \t \t %.4f \t %.4f \t %.4f', test_loss, test_acc,
                        pgd_loss, pgd_acc)
        return

    model.train()

    opt = torch.optim.SGD(model.parameters(),
                          lr=args.lr_max,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    amp_args = dict(opt_level=args.opt_level,
                    loss_scale=args.loss_scale,
                    verbosity=False)
    if args.opt_level == 'O2':
        amp_args['master_weights'] = args.master_weights
    model, opt = amp.initialize(model, opt, **amp_args)
    criterion = nn.CrossEntropyLoss()

    if args.delta_init == 'previous':
        delta = torch.zeros(configs.DATA.batch_size, 3, 32, 32).cuda()

    lr_steps = configs.TRAIN.epochs * len(train_loader)
    if args.lr_schedule == 'cyclic':
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            opt,
            base_lr=args.lr_min,
            max_lr=args.lr_max,
            step_size_up=lr_steps / 2,
            step_size_down=lr_steps / 2)
    elif args.lr_schedule == 'multistep':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            opt, milestones=[lr_steps / 2, lr_steps * 3 / 4], gamma=0.1)

    # Training
    prev_robust_acc = 0.
    # Cache one minibatch (moved to the device) for the early-stopping PGD robustness check below
    first_batch = next(iter(train_loader))
    first_batch = tuple(t.to(device) for t in first_batch)
    start_train_time = time.time()

    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):

        # Train
        train(train_loader, model, criterion, epoch, epsilon, opt, alpha,
              scheduler)

        prec1 = validate(test_loader, model, criterion, configs, logger)
        # prec1 = 100
        if args.early_stop:
            # Check current PGD robustness of model using random minibatch
            X, y = first_batch
            pgd_delta = attack_pgd(model, X, y, epsilon, pgd_alpha, 5, 1, opt)
            with torch.no_grad():
                output = model(
                    clamp(X + pgd_delta[:X.size(0)], lower_limit, upper_limit))
            robust_acc = (output.max(1)[1] == y).sum().item() / y.size(0)
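            # Treat a sharp drop in robust accuracy as catastrophic overfitting and stop training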
            if robust_acc - prev_robust_acc < -0.2:
                break
            prev_robust_acc = robust_acc
            best_state_dict = copy.deepcopy(model.state_dict())

    train_time = time.time()
    if not args.early_stop:
        best_state_dict = model.state_dict()
    torch.save(best_state_dict, os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes',
                (train_time - start_train_time) / 60)

    # Automatically perform PGD Attacks at the end of training
    logger.info(pad_str(' Performing PGD Attacks '))
    for pgd_param in configs.ADV.pgd_attack:
        validate_pgd(test_loader, model, criterion, pgd_param[0], pgd_param[1],
                     configs, logger)

Example #6
def main():
    global name_all
    global para_all
    global mean_per_epoch_cle
    global std_per_epoch_cle
    global mean_per_epoch_adv
    global std_per_epoch_adv
    # Scale and initialize the parameters
    best_prec1 = 0
    configs.TRAIN.epochs = int(math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))
    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value
    
    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))
    
    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    
    # Create the model
    if configs.pretrained:
        print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](pretrained=True, useFN=configs.FN, useWN=configs.WN)
    else:
        print("=> creating model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](useFN=configs.FN, useWN=configs.WN)

    # Wrap the model into DataParallel
    model = torch.nn.DataParallel(model).cuda()
    
    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()
    
    # Optimizer:
    optimizer = torch.optim.SGD(model.parameters(), configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)
    
    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))

            
    # Initiate data loaders
    normalize = transforms.Normalize(mean=configs.TRAIN.mean, std=configs.TRAIN.std)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        valdir = os.path.join(configs.data, 'val')
        #fix random seed for save images and perturbation
        #torch.manual_seed(1234)
        #np.random.seed(1234)
        val_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(configs.DATA.img_size),
                transforms.CenterCrop(configs.DATA.crop_size),
                transforms.ToTensor(),
            ])),
            batch_size=configs.DATA.batch_size, shuffle=False,
            num_workers=configs.DATA.workers, pin_memory=True)
        if configs.FN or configs.WN:
            print('Using HE for evaluation and image saving')
            HEs = True
        else:
            print('NOT using HE for evaluation and image saving')
            HEs = False
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(val_loader, model, criterion, pgd_param[0],
                         pgd_param[1], configs, logger, save_image=True,
                         HE=HEs)
        validate(val_loader, model, criterion, configs, logger)
        return


    
    # If evaluate on ImageNet-C
    if configs.eva_on_imagenet_c:
        if configs.FN or configs.WN:
            print('Using HE for evaluation and image saving')
        else:
            print('NOT using HE for evaluation and image saving')
        logger.info(pad_str(' Performing evaluation on ImageNet-C'))

        files_names = [
            'gaussian_noise', 'shot_noise', 'impulse_noise',  # noise
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur',  # blur
            'frost', 'snow', 'fog', 'brightness',  # weather
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'  # digital
        ]

        for file_n in files_names:
            print('Processing: ', file_n)
            validate_ImagetNet_C(file_n, model, criterion, configs, logger)
        return
Example #7
def main():
    # Scale and initialize the parameters
    best_prec1 = 0
    configs.TRAIN.epochs = int(
        math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))
    configs.ADV.fgsm_step /= configs.DATA.max_color_value
    configs.ADV.clip_eps /= configs.DATA.max_color_value

    # Create output folder
    if not os.path.isdir(os.path.join('trained_models', configs.output_name)):
        os.makedirs(os.path.join('trained_models', configs.output_name))

    # Log the config details
    logger.info(pad_str(' ARGUMENTS '))
    for k, v in configs.items():
        print('{}: {}'.format(k, v))
    logger.info(pad_str(''))

    # Create the model
    if configs.pretrained:
        print("=> using pre-trained model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(configs.TRAIN.arch))
        model = models.__dict__[configs.TRAIN.arch]()

    # Use GPU or CPU
    model = model.to(device)

    # Criterion:
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer:
    optimizer = torch.optim.SGD(model.parameters(),
                                configs.TRAIN.lr,
                                momentum=configs.TRAIN.momentum,
                                weight_decay=configs.TRAIN.weight_decay)

    # Resume if a valid checkpoint path is provided
    if configs.resume:
        if os.path.isfile(configs.resume):
            print("=> loading checkpoint '{}'".format(configs.resume))
            checkpoint = torch.load(configs.resume)
            configs.TRAIN.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))

    # Data
    print('==> Preparing data..')

    trainloader, testloader = get_loaders("./data", configs.DATA.batch_size,
                                          configs.DATA.workers,
                                          configs.DATA.crop_size)

    # If in evaluate mode: perform validation on PGD attacks as well as clean samples
    if configs.evaluate:
        logger.info(pad_str(' Performing PGD Attacks '))
        for pgd_param in configs.ADV.pgd_attack:
            validate_pgd(testloader, model, criterion, pgd_param[0],
                         pgd_param[1], configs, logger)
        validate(testloader, model, criterion, configs, logger)
        return

    start_train_time = time.time()
    for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):
        adjust_learning_rate(configs.TRAIN.lr, optimizer, epoch,
                             configs.ADV.n_repeats)

        # train for one epoch
        train(trainloader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(testloader, model, criterion, configs, logger)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': configs.TRAIN.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, os.path.join('trained_models', configs.output_name))

    train_time = time.time()
    print('Total train time: %.4f minutes' %
          ((train_time - start_train_time) / 60))

    # Automatically perform PGD Attacks at the end of training
    logger.info(pad_str(' Performing PGD Attacks '))
    '''
    for pgd_param in configs.ADV.pgd_attack:
        validate_pgd(testloader, model, criterion, pgd_param[0], pgd_param[1], configs, logger)
    '''
    train_time = time.time()

    best_state_dict = model.state_dict()
    torch.save(best_state_dict, os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes',
                (train_time - start_train_time) / 60)
    # Evaluation

    model_test = models.__dict__[configs.TRAIN.arch]().to(device)
    model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()

    for pgd_param in configs.ADV.pgd_attack:
        pgd_loss, pgd_acc = evaluate_pgd(testloader, model_test, pgd_param[0],
                                         10)
    test_loss, test_acc = evaluate_standard(testloader, model_test)

    logger.info('Test Loss \t Test Acc \t PGD Loss \t PGD Acc')
    logger.info('%.4f \t \t %.4f \t %.4f \t %.4f', test_loss, test_acc,
                pgd_loss, pgd_acc)