Example #1
def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    model = Model(i_c=1, n_c=10)

    attack = FastGradientSignUntargeted(model,
                                        args.epsilon,
                                        args.alpha,
                                        min_val=0,
                                        max_val=1,
                                        max_iters=args.k,
                                        _type=args.perturbation_type)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, attack)

    if args.todo == 'train':
        tr_dataset = tv.datasets.MNIST(args.data_root,
                                       train=True,
                                       transform=tv.transforms.ToTensor(),
                                       download=True)

        tr_loader = DataLoader(tr_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=4)

        # evaluation during training
        te_dataset = tv.datasets.MNIST(args.data_root,
                                       train=False,
                                       transform=tv.transforms.ToTensor(),
                                       download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=4)

        trainer.train(model, tr_loader, te_loader, args.adv_train)

    elif args.todo == 'test':
        pass
    else:
        raise NotImplementedError
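This and the following examples all read their configuration from an `args` namespace built elsewhere in their projects. As a point of reference only, here is a minimal sketch of how such a namespace could be assembled with argparse; the option names are taken from the attributes read above, but the defaults and choices are illustrative assumptions, not the original parser.

import argparse

def get_args():
    # Hypothetical parser; it covers only the attributes read by main() above.
    # Defaults are illustrative and not the original project's values.
    parser = argparse.ArgumentParser()
    parser.add_argument('--todo', default='train', choices=['train', 'test'])
    parser.add_argument('--dataset', default='mnist')
    parser.add_argument('--affix', default='default')
    parser.add_argument('--data_root', default='./data')
    parser.add_argument('--log_root', default='./log')
    parser.add_argument('--model_root', default='./checkpoint')
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--epsilon', type=float, default=0.3)
    parser.add_argument('--alpha', type=float, default=0.01)
    parser.add_argument('--k', type=int, default=40)
    parser.add_argument('--perturbation_type', default='linf')
    parser.add_argument('--adv_train', action='store_true')
    return parser.parse_args()

if __name__ == '__main__':
    main(get_args())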
Example #2
def main(args):
    if args.todo == 'train' or args.todo == 'valid':
        folder_name = '%dway_%dshot_%s_%s' % (args.nway, args.shots,
                                              args.model_type, args.affix)
        model_folder = os.path.join(args.model_root, folder_name)
        log_folder = os.path.join(args.log_root, folder_name)

        mkdir(args.model_root)
        mkdir(args.log_root)
        mkdir(model_folder)
        mkdir(log_folder)
        setattr(args, 'model_folder', model_folder)
        setattr(args, 'log_folder', log_folder)
        logger = create_logger(log_folder, args.todo)
        print_args(args, logger)

        tr_dataloader = self_DataLoader(args.data_root,
                                        train=True,
                                        dataset=args.dataset,
                                        seed=args.seed,
                                        nway=args.nway)

        trainer_dict = {
            'args': args,
            'logger': logger,
            'tr_dataloader': tr_dataloader
        }

        trainer = Trainer(trainer_dict)

        ###########################################
        ## pretrain CNN embedding

        if args.pretrain:
            if args.pretrain_dir != '':
                pretrain_path = os.path.join(args.pretrain_dir,
                                             'pretrain_model.pth')
                trainer.load_pretrain(pretrain_path)
            else:
                # get_full_data_list() returns already-shuffled data
                pretr_tr_data, pretr_tr_label = tr_dataloader.get_full_data_list()

                va_size = int(0.1 * len(pretr_tr_data))

                pretr_tr_dataset = self_Dataset(pretr_tr_data[va_size:],
                                                pretr_tr_label[va_size:])
                pretr_va_dataset = self_Dataset(pretr_tr_data[:va_size],
                                                pretr_tr_label[:va_size])

                logger.info('start pretraining...')

                trainer.pretrain(pretr_tr_dataset, pretr_va_dataset)

                logger.info('finish pretraining...')

        ###########################################
        ## load the model trained before

        if args.load:
            model_path = os.path.join(args.load_dir, 'model.pth')
            trainer.load_model(model_path)

        ###########################################
        ## start training

        trainer.train()

    elif args.todo == 'test':

        print(args.load_dir)

        logger = create_logger('', args.todo)
        print_args(args, logger)

        te_dataloader = self_DataLoader(args.data_root,
                                        train=False,
                                        dataset=args.dataset,
                                        seed=args.seed,
                                        nway=args.nway)

        trainer_dict = {'args': args, 'logger': logger}

        trainer = Trainer(trainer_dict)

        test_data_list, test_label_list = te_dataloader.get_few_data_list()

        test_data_array = np.stack(test_data_list)
        test_label_array = np.hstack(test_label_list)

        if args.load:
            model_path = os.path.join(args.load_dir, 'model.pth')
            trainer.load_model(model_path)

        test_pred = trainer.test(test_data_array, te_dataloader)

        print(test_pred.shape, test_label_array.shape)

        correct = (test_pred == test_label_array).sum()
        test_acc = (test_pred == test_label_array).mean() * 100.0

        print('test_acc: %.4f %%, correct: %d / %d' %
              (test_acc, correct, len(test_label_array)))
Example #3
def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    model = WideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)

    attack = FastGradientSignUntargeted(model, 
                                        args.epsilon, 
                                        args.alpha, 
                                        min_val=0, 
                                        max_val=1, 
                                        max_iters=args.k, 
                                        _type=args.perturbation_type)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, attack)

    if args.todo == 'train':
        transform_train = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Lambda(lambda x: F.pad(
                x.unsqueeze(0),
                (4, 4, 4, 4), mode='constant', value=0).squeeze()),
            tv.transforms.ToPILImage(),
            tv.transforms.RandomCrop(32),
            tv.transforms.RandomHorizontalFlip(),
            tv.transforms.ToTensor(),
        ])
        tr_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=True,
                                         transform=transform_train,
                                         download=True)

        tr_loader = DataLoader(tr_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)

        # evaluation during training
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4)

        trainer.train(model, tr_loader, te_loader, args.adv_train)
    elif args.todo == 'test':
        pass
    else:
        raise NotImplementedError
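The ToTensor / F.pad / ToPILImage / RandomCrop(32) sequence above implements the usual 4-pixel zero-padded random crop for CIFAR-10. As a side note, torchvision can express the same augmentation directly through RandomCrop's padding argument; a minimal sketch (not the original code):

transform_train = tv.transforms.Compose([
    # padding=4 zero-pads every side before cropping back to 32x32,
    # matching the F.pad(..., (4, 4, 4, 4)) + RandomCrop(32) pipeline above
    tv.transforms.RandomCrop(32, padding=4),
    tv.transforms.RandomHorizontalFlip(),
    tv.transforms.ToTensor(),
])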
Example #4
import torch

import models
from models.projector import Projector

from argument import linear_parser, print_args
from utils import progress_bar, checkpoint
from collections import OrderedDict
from attack_lib import FastGradientSignUntargeted
from loss import pairwise_similarity, NT_xent

args = linear_parser()
use_cuda = torch.cuda.is_available()
# device_count() is only meaningful with CUDA; fall back to 1 so the
# local_rank check below does not raise a NameError on CPU-only machines
ngpus_per_node = torch.cuda.device_count() if use_cuda else 1

if args.local_rank % ngpus_per_node == 0:
    print_args(args)


def print_status(string):
    if args.local_rank % ngpus_per_node == 0:
        print(string)


print_status(torch.cuda.device_count())
print_status('Using CUDA..')

best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

if args.seed != 0:
    torch.manual_seed(args.seed)
Example #5
def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)
    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)
    makedirs(log_folder)
    makedirs(model_folder)
    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)
    logger = create_logger(log_folder, args.todo, 'info')
    print_args(args, logger)

    # model = WideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)
    model = models.resnet50(pretrained=args.pretrain)
    num_classes = 8
    # model.classifier = nn.Linear(model.classifier.in_features, num_classes)
    model.fc = nn.Linear(model.fc.in_features, num_classes)

    attack = FastGradientSignUntargeted(model, 
                                        args.epsilon, 
                                        args.alpha, 
                                        min_val=0, 
                                        max_val=1, 
                                        max_iters=args.k, 
                                        _type=args.perturbation_type)

    if torch.cuda.is_available():
        model.cuda()
        # model = nn.DataParallel(model).cuda()

    trainer = Trainer(args, logger, attack)

    if args.todo == 'train':

        transform_train = tv.transforms.Compose([
            tv.transforms.Resize(256),
            tv.transforms.ToTensor(),
            tv.transforms.Lambda(lambda x: F.pad(
                x.unsqueeze(0),
                (4 * 6, 4 * 6, 4 * 6, 4 * 6), mode='constant', value=0).squeeze()),
            tv.transforms.ToPILImage(),
            tv.transforms.RandomHorizontalFlip(),
            tv.transforms.ColorJitter(brightness=0.3, contrast=0.3,
                                      saturation=0.3, hue=0.3),
            # tv.transforms.RandomRotation(25),
            tv.transforms.RandomAffine(25, translate=(0.2, 0.2),
                                       scale=(0.8, 1.2), shear=10),
            tv.transforms.RandomCrop(256),
            tv.transforms.ToTensor(),
            AddGaussianNoise(0.5, args.epsilon)
        ])
        tr_dataset = patd.PatchDataset(path_to_images=args.data_root,
                                        fold='train', 
                                        sample=args.subsample,
                                        transform=transform_train)
        tr_loader = DataLoader(tr_dataset, batch_size=args.batch_size, shuffle=True, num_workers=24)
        # evaluation during training
        transform_test = tv.transforms.Compose([
            tv.transforms.Resize(256),
            # tv.transforms.CenterCrop(224),
            tv.transforms.ToTensor(),
            # tv.transforms.Normalize(mean, std)
        ])
        te_dataset = patd.PatchDataset(path_to_images=args.data_root,
                                        fold='valid',
                                        transform=transform_test)
        te_loader = DataLoader(te_dataset, batch_size=args.batch_size, shuffle=False, num_workers=24)
             
        trainer.train(model, tr_loader, te_loader, args.adv_train)
    
    elif args.todo == 'test':
        te_dataset = patd.PatchDataset(path_to_images=args.data_root,
                                        fold='test',
                                        transform=tv.transforms.Compose([
                                            tv.transforms.Resize(256),
                                            tv.transforms.ToTensor(),
                                            ]))
        te_loader = DataLoader(te_dataset, batch_size=1, shuffle=False, num_workers=1)
        checkpoint = torch.load(args.load_checkpoint)
        model.load_state_dict(checkpoint)
        std_acc, adv_acc = trainer.test(model, te_loader, adv_test=True, use_pseudo_label=False, if_AUC=True)
        print("std acc: %.4f, adv_acc: %.4f" % (std_acc * 100, adv_acc * 100))

    else:
        raise NotImplementedError
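`AddGaussianNoise` used in the training transform above is not a torchvision built-in. A minimal sketch of such a transform, assuming its two constructor arguments are the noise mean and standard deviation (the original implementation may differ):

import torch

class AddGaussianNoise(object):
    # Hypothetical implementation matching the AddGaussianNoise(0.5, args.epsilon)
    # call above; adds N(mean, std^2) noise to a tensor image and clamps to [0, 1].
    def __init__(self, mean=0.0, std=1.0):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        noise = torch.randn_like(tensor) * self.std + self.mean
        return (tensor + noise).clamp(0.0, 1.0)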
Example #6
def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    # Using a WideResNet model
    model = WideResNet(depth=34, num_classes=10, widen_factor=1, dropRate=0.0)
    flop, param = get_model_infos(model, (1, 3, 32, 32))
    logger.info('Model Info: FLOP = {:.2f} M, Params = {:.2f} MB'.format(
        flop, param))

    # Configuring the train attack mode
    if args.adv_train_mode == 'FGSM':
        train_attack = FastGradientSignUntargeted(model,
                                                  args.epsilon,
                                                  args.alpha,
                                                  min_val=0,
                                                  max_val=1,
                                                  max_iters=args.k,
                                                  _type=args.perturbation_type,
                                                  logger=logger)
    elif args.adv_train_mode == 'CW':
        mean = [0]
        std = [1]
        inputs_box = (min((0 - m) / s for m, s in zip(mean, std)),
                      max((1 - m) / s for m, s in zip(mean, std)))
        train_attack = carlini_wagner_L2.L2Adversary(targeted=False,
                                                     confidence=0.0,
                                                     search_steps=10,
                                                     optimizer_lr=5e-4,
                                                     logger=logger)

    # Configuring the test attack mode
    if args.adv_test_mode == 'FGSM':
        test_attack = FastGradientSignUntargeted(model,
                                                 args.epsilon,
                                                 args.alpha,
                                                 min_val=0,
                                                 max_val=1,
                                                 max_iters=args.k,
                                                 _type=args.perturbation_type,
                                                 logger=logger)
    elif args.adv_test_mode == 'CW':
        mean = [0]
        std = [1]
        inputs_box = (min((0 - m) / s for m, s in zip(mean, std)),
                      max((1 - m) / s for m, s in zip(mean, std)))
        test_attack = carlini_wagner_L2.L2Adversary(targeted=False,
                                                    confidence=0.0,
                                                    search_steps=10,
                                                    optimizer_lr=5e-4,
                                                    logger=logger)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, train_attack, test_attack)

    if args.todo == 'train':
        transform_train = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Lambda(lambda x: F.pad(
                x.unsqueeze(0),
                (4, 4, 4, 4), mode='constant', value=0).squeeze()),
            tv.transforms.ToPILImage(),
            tv.transforms.RandomCrop(32),
            tv.transforms.RandomHorizontalFlip(),
            tv.transforms.ToTensor(),
        ])
        tr_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=True,
                                         transform=transform_train,
                                         download=True)

        tr_loader = DataLoader(tr_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=4)

        # evaluation during training
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=4)

        trainer.train(model, tr_loader, te_loader, args.adv_train)
    elif args.todo == 'test':
        pass
    else:
        raise NotImplementedError
Example #7
    for k in range(args.n_train_data):
        file = os.path.join(args.data_folder, 'searchPlay-' + str(k))
        dataList = dataProcessor.retrieveData(file)
        totalDataList.extend(dataList)
    currentModel = 0
    trainWorker = NetworkTraining()
    trainWorker.train(args.trainepochs, currentModel, totalDataList)


if __name__ == '__main__':
    argument.initialize_args()
    args = argument.get_args()
    logger.initialize_logger(args.log_folder, args.todo, 'info')
    logger = logger.get_logger()
    timer.init()
    argument.print_args(args, logger)

    dataProcessor.initSimulator(Board.Board(args.size, args.numberForWin))

    args.save_folder = 'test_visual'

    if args.todo == 'selfplaytrain':
        train(args, logger, dataProcessor)
    elif args.todo == 'visualize':
        visualize(args, logger, dataProcessor)
    elif args.todo == 'experiment':
        experiment(args, logger, dataProcessor)
    elif args.todo == 'sampledata':
        sampledata(args, logger, dataProcessor)
    elif args.todo == 'supervisedtrain':
        supervisedtrain(args, logger, dataProcessor)
Example #8
        return

    elif args.todo == "class_wise":
        # Generate class wise error minimizing noise
        perturbation.class_wise(train_loader,
                                num_classes=10,
                                save=args.delta_path + args.save)
        return

    else:
        raise NotImplementedError


if __name__ == "__main__":
    args = argument.parser()
    argument.print_args(args)

    # Set seed and device
    seed = 0

    if torch.cuda.is_available() and args.gpu != 0:
        device = torch.device("cuda")
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    else:
        device = torch.device("cpu")

    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.benchmark = True
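One caveat on the seeding above: `torch.backends.cudnn.benchmark = True` lets cuDNN pick convolution algorithms non-deterministically for speed, so runs are not bit-exact even with fixed seeds. If full reproducibility were the goal, the usual settings are the opposite (a sketch, not part of the original script):

torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True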