Example 1
def get_classifier(mode, n_classes=10):
    if mode == 'resnet18':
        classifier = ResNet18(num_classes=n_classes)
    elif mode == 'resnet34':
        classifier = ResNet34(num_classes=n_classes)
    elif mode == 'resnet50':
        classifier = ResNet50(num_classes=n_classes)
    elif mode == 'resnet18_imagenet':
        classifier = resnet18(num_classes=n_classes)
    elif mode == 'resnet50_imagenet':
        classifier = resnet50(num_classes=n_classes)
    elif mode == 'live':
        classifier = FeatherNet(input_size=128, se=True, avgdown=True)
    else:
        raise NotImplementedError()

    return classifier
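A minimal usage sketch (the mode strings come from the branches above):

classifier = get_classifier('resnet18', n_classes=10)  # unknown modes raise NotImplementedError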
Example 2
def load_mod_dl(args):
    """
    :param args:
    :return:
    """
    if args.dataset == 'cifar10':
        imsize, in_channel, num_classes = 32, 3, 10
        train_loader, val_loader, test_loader = data_loaders.load_cifar10(args.batch_size, val_split=True,
                                                                          augmentation=args.data_augmentation,
                                                                          subset=[args.train_size, args.val_size,
                                                                                  args.test_size])
    elif args.dataset == 'cifar100':
        imsize, in_channel, num_classes = 32, 3, 100
        train_loader, val_loader, test_loader = data_loaders.load_cifar100(args.batch_size, val_split=True,
                                                                           augmentation=args.data_augmentation,
                                                                           subset=[args.train_size, args.val_size,
                                                                                   args.test_size])
    elif args.dataset == 'mnist':
        imsize, in_channel, num_classes = 28, 1, 10
        num_train = 50000
        train_loader, val_loader, test_loader = data_loaders.load_mnist(args.batch_size,
                                                           subset=[args.train_size, args.val_size, args.test_size],
                                                           num_train=num_train, only_split_train=False)
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    if args.model == 'resnet18':
        cnn = ResNet18(num_classes=num_classes, num_channels=in_channel)
    elif args.model == 'cbr':
        cnn = CBRStudent(in_channel, num_classes)
    else:
        raise ValueError('Unknown model: {}'.format(args.model))

    # learnable per-class-pair mixup matrix, initialised to -1
    mixup_mat = -1 * torch.ones([num_classes, num_classes]).cuda()
    mixup_mat.requires_grad = True

    checkpoint = None
    if args.load_baseline_checkpoint:
        checkpoint = torch.load(args.load_baseline_checkpoint)
        cnn.load_state_dict(checkpoint['model_state_dict'])

    model = cnn.cuda()
    model.train()
    return model, mixup_mat, train_loader, val_loader, test_loader, checkpoint
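A hypothetical invocation, assuming CUDA is available and that the attribute names match what load_mod_dl reads (the concrete values are placeholders):

from argparse import Namespace

args = Namespace(dataset='cifar10', batch_size=128, data_augmentation=True,
                 train_size=-1, val_size=5000, test_size=-1,
                 model='resnet18', load_baseline_checkpoint=None)
model, mixup_mat, train_loader, val_loader, test_loader, ckpt = load_mod_dl(args)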
Example 3
def get_model(args):

    if args.dataset == 'cifar-10':
        num_classes = 10
    elif args.dataset == 'cifar-100':
        num_classes = 100
    else:
        raise NotImplementedError

    if 'contrastive' in args.train_type or 'linear_eval' in args.train_type:
        contrastive_learning = True
    else:
        contrastive_learning = False

    if args.model == 'ResNet18':
        model = ResNet18(num_classes, contrastive_learning)
        print('ResNet18 is loading ...')
    elif args.model == 'ResNet50':
        model = ResNet50(num_classes, contrastive_learning)
        print('ResNet50 is loading ...')
    else:
        raise NotImplementedError
    return model
Example 4
def train_cifar_models():
    """
    Trains the baselines teacher/students
    :return:
    """

    # ResNet training
    net = ResNet18()
    net.cuda()
    train_cifar10_model(net,
                        learning_rates=[0.001, 0.0001],
                        iters=[50, 50],
                        output_path='models/resnet18_cifar10.model')

    # Cifar Tiny
    net = Cifar_Tiny()
    net.cuda()
    train_cifar10_model(net,
                        learning_rates=[0.001, 0.0001],
                        iters=[50, 50],
                        output_path='models/tiny_cifar10.model')
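The paired learning_rates/iters lists encode a two-stage schedule: 50 iterations at 1e-3 followed by 50 at 1e-4. A minimal sketch of such a staged loop (train_epoch is a hypothetical callable; train_cifar10_model's real internals are not shown here):

import torch.optim as optim

def train_staged(net, loader, learning_rates, iters, train_epoch):
    # train_epoch(net, loader, optimizer) is assumed to run one pass over the data
    optimizer = optim.Adam(net.parameters(), lr=learning_rates[0])
    for lr, n_iters in zip(learning_rates, iters):
        for group in optimizer.param_groups:
            group['lr'] = lr
        for _ in range(n_iters):
            train_epoch(net, loader, optimizer)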
Example 5
def get_model(model):
    model_path = '../saved'
    if model == 'LeNet-5':
        net = LeNet()
        model_name = 'lenet.pth'
    elif model == 'VGG-16':
        net = Vgg16_Net()
        model_name = 'vgg16.pth'
    elif model == 'ResNet18':
        net = ResNet18()
        model_name = 'resnet18.pth'
    elif model == 'ResNet34':
        net = ResNet34()
        model_name = 'resnet34.pth'
    elif model == 'ResNet50':
        net = ResNet50()
        model_name = 'resnet50.pth'
    else:
        net = ResNet101()
        model_name = 'resnet101.pth'
    return net, os.path.join(model_path, model_name)
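A usage sketch, assuming the checkpoint files exist under ../saved:

import torch

net, ckpt_path = get_model('ResNet18')
net.load_state_dict(torch.load(ckpt_path))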
Example 6
    def model_init(self, args):
        # Network
        if args.dataset == 'MNIST':
            print("MNIST")
            self.net = cuda(ToyNet_MNIST(y_dim=self.y_dim), self.cuda)
        elif args.dataset == 'CIFAR10':
            print("Dataset used CIFAR10")
            if args.network_choice == 'ToyNet':
                self.net = cuda(ToyNet_CIFAR10(y_dim=self.y_dim), self.cuda)
            elif args.network_choice == 'ResNet18':
                self.net = cuda(ResNet18(), self.cuda)
            elif args.network_choice == 'ResNet34':
                self.net = cuda(ResNet34(), self.cuda)
            elif args.network_choice == 'ResNet50':
                self.net = cuda(ResNet50(), self.cuda)
            else:
                raise ValueError('Unknown network: {}'.format(args.network_choice))
        else:
            raise ValueError('Unknown dataset: {}'.format(args.dataset))
        self.net.weight_init(_type='kaiming')

        # setup optimizer
        self.optim = optim.Adam([{'params': self.net.parameters(), 'lr': self.lr}],
                                betas=(0.5, 0.999))
Example 7
def main():
    global args
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    print(torch.cuda.device_count())
    if args.arch == 'vgg16':
        from models.vgg import VGG
        model = nn.DataParallel(VGG('VGG16', nclass=args.num_classes))
    elif args.arch == 'resnet18':
        from models.resnet import ResNet18
        model = nn.DataParallel(ResNet18().cuda())
    else:
        raise NotImplementedError('Invalid model')


    # NOTE: modify this checkpoint path to point at the current student model
    checkpoint = torch.load(
        './active_student_models/cifar10_vgg_student_model_1000.pth')
    model.load_state_dict(checkpoint)
    cudnn.benchmark = True
    model.cuda()

    # Data loading code
    mix_img_val_loader = torch.utils.data.DataLoader(
        mix_img_query_loader(transforms.Compose([
            transforms.ToTensor(),
        ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    ## image extraction
    logits_extract(mix_img_val_loader, model)
Example 8
def test(args):
    base_dir = args.base_dir
    num_classes = args.num_classes
    model_name = args.model.lower()

    if model_name == 'lenet_baseline':
        input_shape = (32, 32, 3)
        model = LeNet_baseline(input_shape=input_shape,
                               num_classes=num_classes)
    elif model_name == 'lenet_modified':
        input_shape = (32, 32, 3)
        model = LeNet_modified(input_shape=input_shape,
                               num_classes=num_classes)
    elif model_name == 'alexnet':
        input_shape = (227, 227, 3)
        model = AlexNet(input_shape=input_shape, num_classes=num_classes)
    elif model_name == 'vgg16':
        input_shape = (224, 224, 3)
        model = VGG16(input_shape=input_shape, num_classes=num_classes)
    elif model_name == 'vgg19':
        input_shape = (224, 224, 3)
        model = VGG19(input_shape=input_shape, num_classes=num_classes)
    elif model_name == 'resnet18':
        input_shape = (112, 112, 3)
        model = ResNet18(input_shape=input_shape, num_classes=num_classes)
    else:
        print('Please choose an implemented model!')
        sys.exit()

    # load test set
    x, y = load_test(base_dir, input_shape, num_classes)
    print('Test images loaded')

    model.load_weights(args.pretrained_model)
    pred = model.predict_classes(x)
    accuracy = accuracy_score(y, pred)
    print('Accuracy: {}'.format(accuracy))
    print(classification_report(y, pred))
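predict_classes only exists on older Keras Sequential models (it was removed in recent TensorFlow/Keras releases); an equivalent on newer versions is to argmax the predicted probabilities:

import numpy as np

pred = np.argmax(model.predict(x), axis=1)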
Example 9
def load_baseline_model(args):
    """

    :param args:
    :return:
    """
    if args.dataset == 'cifar10':
        num_classes = 10
        train_loader, val_loader, test_loader = data_loaders.load_cifar10(args.batch_size, val_split=True,
                                                                          augmentation=args.data_augmentation)
    elif args.dataset == 'cifar100':
        num_classes = 100
        train_loader, val_loader, test_loader = data_loaders.load_cifar100(args.batch_size, val_split=True,
                                                                           augmentation=args.data_augmentation)
    elif args.dataset == 'mnist':
        # NB: the sizes are hard-coded here, so the -1 ("use full train set") check below never fires
        args.datasize, args.valsize, args.testsize = 100, 100, 100
        num_train = args.datasize
        if args.datasize == -1:
            num_train = 50000

        from data_loaders import load_mnist
        train_loader, val_loader, test_loader = load_mnist(args.batch_size,
                                                           subset=[args.datasize, args.valsize, args.testsize],
                                                           num_train=num_train)

    if args.model == 'resnet18':
        cnn = ResNet18(num_classes=num_classes)
    elif args.model == 'wideresnet':
        cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10, dropRate=0.3)
    else:
        raise ValueError('Unknown model: {}'.format(args.model))

    checkpoint = None
    if args.load_baseline_checkpoint:
        checkpoint = torch.load(args.load_baseline_checkpoint)
        cnn.load_state_dict(checkpoint['model_state_dict'])

    model = cnn.cuda()
    model.train()
    return model, train_loader, val_loader, test_loader, checkpoint
Example 10
def fetch_specified_model(model_name, activation):
    """
    Inits and returns the specified model
    """

    # Specific hard-coding for CIFAR100
    in_ch, num_classes = 3, 100
    act_fact = kdm.get_activation_factory(activation)

    if model_name == "basenet":
        model = BaseNet(in_ch, num_classes, act_fact)
    elif model_name == "resnet18":
        model = ResNet18(in_ch, num_classes, act_fact)
    elif model_name == "resnet34":
        model = ResNet34(in_ch, num_classes, act_fact)
    elif model_name == "mobnet2":
        model = MobileNetV2(in_ch, num_classes, act_fact)
    elif model_name == "sqnet":
        model = SqueezeNet(in_ch, num_classes, act_fact)
    else:
        raise ValueError("Unsupported base model: {}".format(model_name))

    return model
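A usage sketch; 'relu' is a hypothetical key accepted by kdm.get_activation_factory:

model = fetch_specified_model('resnet18', 'relu')  # CIFAR-100 ResNet-18 with ReLU activations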
Example 11
    def get_classifier(self, is_multi_class):
        if self.params.model == 'resnet18':
            from models.resnet import ResNet18
            classifier = ResNet18(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet34':
            from models.resnet import ResNet34
            classifier = ResNet34(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet50':
            from models.resnet import ResNet50
            classifier = ResNet50(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet18_imagenet':
            if is_multi_class:
                from models.resnet_imagenet_multiclass_infer import resnet18
            else:
                from models.resnet_imagenet import resnet18
            classifier = resnet18(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet50_imagenet':
            from models.resnet_imagenet import resnet50
            classifier = resnet50(num_classes=self.params.n_classes)
        else:
            raise NotImplementedError()

        return classifier
Example 12
def Model(args):

    # TODO: Fix args.pretrained
    if args.model == "softmax":
        model = Softmax(args.image_size, args.no_of_classes)
    elif args.model == "twolayernn":
        model = TwoLayerNN(args.image_size, args.no_of_classes)
    elif args.model == "threelayernn":
        model = ThreeLayerNN(args.image_size, args.no_of_classes)
    elif args.model == "onelayercnn":
        model = OneLayerCNN(args.image_size, args.no_of_classes)
    elif args.model == "twolayercnn":
        model = TwoLayerCNN(args.image_size, args.no_of_classes)
    elif args.model == "vggnet":
        model = VGGNet(args.image_size, args.no_of_classes)
    elif args.model == "alexnet":
        model = AlexNet(args.image_size, args.no_of_classes)
    elif args.model == "resnet":
        model = ResNet18()
        # self.model = models.resnet18(pretrained=True)
    else:
        raise Exception("Unknown model {}".format(args.model))

    return model
Example 13
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision_qinxiao.datasets.CIFAR10(root='../Embedding_data', train=True, download=True, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)

    testset = torchvision_qinxiao.datasets.CIFAR10(root='../Embedding_data', train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # Model
    print('==> Building model..')

    net = ResNet18()
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
        if os.path.isfile('./checkpoint/NCA_ckpt.t7'):
            checkpoint = torch.load('./checkpoint/NCA_ckpt.t7')
            best_acc = checkpoint['acc']
        else:
            checkpoint = torch.load('./checkpoint/classification_ckpt.t7')
            best_acc = 0
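Both transform_train and transform_test are referenced above, but only one Compose tail survives in this truncated snippet. A conventional CIFAR-10 pair, assuming the standard normalization statistics shown:

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])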
Example 14
import argparse
import os

import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms

from models.resnet import ResNet18

cmd = 'art/dist/init_module/init_module'
os.system(cmd)

parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume',
                    '-r',
                    action='store_true',
                    help='resume from checkpoint')
args = parser.parse_args()

model = ResNet18()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=5e-4)

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
Example 15
def train(**kwargs):
    opt._parse(kwargs)
    # vis = Visualizer(opt.env, port=opt.vis_port)

    # data
    root = opt.data_root
    batchsize = opt.batch_size
    download_cifar10 = opt.download_data
    data_root = os.path.join(root, 'cifar-10-batches-py/')
    if os.path.exists(data_root) and os.listdir(data_root):
        # the CIFAR-10 directory already exists and is not empty, so skip the download
        download_cifar10 = False

    # prepare the triplet data for training and the test data to calculate the accuracy
    triple_data_loader = train_dataloader(root, batchsize, 10, opt.normalize)
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        opt.normalize
    ])

    testset_query = CIFAR10_test_query(root=root, train=False, transform=transform_test, query_num=100)
    base_index = testset_query.base_index
    testset_base = CIFAR10_test_base(root='./data', train=False, transform=transform_test, base_index=base_index)

    testloader_query = torch.utils.data.DataLoader(testset_query, batch_size=1, shuffle=False, num_workers=2)
    testloader_base = torch.utils.data.DataLoader(testset_base, batch_size=1, shuffle=False, num_workers=2)

    # init PQN
    feature_net = ResNet18()
    feature_net.load_state_dict(torch.load(opt.fearure_net_pth), strict=False)
    feature_net.to(opt.device)

    # prepare data for initial the code book
    init_data_loader = init_loader(root, opt.init_batchsize, DOWNLOAD_CIFAR10=download_cifar10)
    init_c = torch.zeros((opt.d, opt.K)).float()

    kmeans_data = torch.Tensor(opt.init_iter_num * opt.init_batchsize, opt.d)
    for ii, (init_data, _) in enumerate(init_data_loader):
        if ii == opt.init_iter_num:
            break
        input = init_data.to(opt.device)
        output = feature_net(input)
        kmeans_data[ii * opt.init_batchsize:(ii+1) * opt.init_batchsize] = output.data.cpu()

    # use the K-means method to initial the code book
    cnt = int(opt.d/opt.M)
    for i in range(opt.M-1):
        d = kmeans_data[:, i * cnt:(i+1)*cnt]
        r = Kmeans(d, opt.K, device=0, tol=1e-3)
        init_c[i * cnt:(i+1)*cnt] = r.permute(1, 0)

    d = kmeans_data[:, (opt.M-1) * cnt:]
    r = Kmeans(d, opt.K, device=0, tol=1e-3)
    init_c[(opt.M-1) * cnt:] = r.permute(1, 0)

    # init codebook opt.d*K
    init_c = codeBook_L2(init_c, opt.d, opt.M)

    # init model
    model = Total_Model(init_c, opt.alpha, opt.d, opt.M, opt.K, opt.fearure_net_pth, opt.batch_size)
    model.to(opt.device)
    feature_net = model.resnet

    init_c = init_c.to(opt.device)
    m, mRecall = mAP(feature_net, testloader_query, testloader_base, init_c)
    print('mAP', m, 'mRecall', mRecall)

    # init loss
    ALT_loss = AsymmetricTripleLoss()

    # set optimizer
    lr = opt.lr
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    # optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=opt.weight_decay)

    loss_meter = meter.AverageValueMeter()
    previous_loss = 1e100
    model.train()

    for epoch in range(opt.max_epoch):
        loss_meter.reset()
        print('epoch', epoch)

        for ii, (q_input, p_input, n_input) in enumerate(triple_data_loader):

            q_input = q_input.to(opt.device)
            p_input = p_input.to(opt.device)
            n_input = n_input.to(opt.device)

            optimizer.zero_grad()
            q_output, p_output, n_output = model(q_input, p_input, n_input)
            loss = ALT_loss(q_output, p_output, n_output)
            loss.backward()
            optimizer.step()
            loss_meter.add(loss.item())

            # L2 normalize the code book in place (state_dict tensors alias the parameters)
            with torch.no_grad():
                s = codeBook_L2(model.state_dict()['pqn.c'], opt.d, opt.M)
                model.state_dict()['pqn.c'].copy_(s)

            # print loss
            if (ii + 1) % opt.print_freq == 0:
                # vis.plot('loss', loss_meter.value()[0])
                print('iter', ii, 'loss', loss_meter.value()[0])

            if (ii + 1) % 250 == 0:
                print('calculate the mAP ... ')
                model.eval()
                feature_net = model.resnet
                code_book = s
                m, mRecall = mAP(feature_net, testloader_query, testloader_base, code_book)
                print('mAP', m, 'mRecall', mRecall)
                model.train()  # restore training mode after evaluation

        # save model
        now = int(time.time())
        timeStruct = time.localtime(now)
        strTime = time.strftime("%Y-%m-%d_%H:%M:%S", timeStruct)
        save_path = os.path.join(opt.save_model_root, 'model_parameter_{}'.format(epoch)+strTime+'.pth')
        torch.save(model.state_dict(), save_path)

        # update learning rate
        if loss_meter.value()[0] > previous_loss:
            print('adjust the learning rate')
            lr = lr * opt.lr_decay
            # adjust the lr on the existing param groups so optimizer state (e.g. momentum) is kept
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        previous_loss = loss_meter.value()[0]
Example 16
    def select(self, model, args):
        """
        Selector utility to create models from the model directory
        :param model: which model to select; choices are (cnn | resnet | preact_resnet | densenet | wresnet)
        :param args: parsed command-line arguments
        :return: neural network to be trained
        """
        if model == 'cnn':
            net = SimpleModel(in_shape=self.in_shape,
                              activation=args.activation,
                              num_classes=self.num_classes,
                              filters=args.filters,
                              strides=args.strides,
                              kernel_sizes=args.kernel_sizes,
                              linear_widths=args.linear_widths,
                              use_batch_norm=args.use_batch_norm)
        else:
            assert (args.dataset != 'MNIST' and args.dataset != 'Fashion-MNIST'), \
                "Cannot use resnet or densenet for mnist style data"
            if model == 'resnet':
                assert args.resdepth in [18, 34, 50, 101, 152], \
                    "Non-standard and unsupported resnet depth ({})".format(args.resdepth)
                if args.resdepth == 18:
                    net = ResNet18(self.num_classes)
                elif args.resdepth == 34:
                    net = ResNet34(self.num_classes)
                elif args.resdepth == 50:
                    net = ResNet50(self.num_classes)
                elif args.resdepth == 101:
                    net = ResNet101(self.num_classes)
                else:
                    net = ResNet152(self.num_classes)  # pass num_classes like the other depths
            elif model == 'densenet':
                assert args.resdepth in [121, 161, 169, 201], \
                    "Non-standard and unsupported densenet depth ({})".format(args.resdepth)
                if args.resdepth == 121:
                    net = DenseNet121(
                        growth_rate=12, num_classes=self.num_classes
                    )  # NB: the growth rate controls the cifar implementation
                elif args.resdepth == 161:
                    net = DenseNet161(growth_rate=12,
                                      num_classes=self.num_classes)
                elif args.resdepth == 169:
                    net = DenseNet169(growth_rate=12,
                                      num_classes=self.num_classes)
                else:
                    net = DenseNet201(growth_rate=12,
                                      num_classes=self.num_classes)
            elif model == 'preact_resnet':
                assert args.resdepth in [18, 34, 50, 101, 152], \
                    "Non-standard and unsupported preact resnet depth ({})".format(args.resdepth)
                if args.resdepth == 18:
                    net = PreActResNet18(self.num_classes)
                elif args.resdepth == 34:
                    net = PreActResNet34(self.num_classes)
                elif args.resdepth == 50:
                    net = PreActResNet50(self.num_classes)
                elif args.resdepth == 101:
                    net = PreActResNet101(self.num_classes)
                else:
                    net = PreActResNet152(self.num_classes)
            elif model == 'wresnet':
                assert ((args.resdepth - 4) % 6 == 0), \
                    "Wideresnet depth of {} not supported, must fulfill: (depth - 4) % 6 = 0".format(args.resdepth)
                net = WideResNet(depth=args.resdepth,
                                 num_classes=self.num_classes,
                                 widen_factor=args.widen_factor)
            else:
                raise NotImplementedError(
                    'Model {} not supported'.format(model))
        return net
Example 17
        m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# LOAD ALL FEATURE EXTRACTORS/ENCODERS
netEnc = []
curNetEncId = -1
for netEncType in opt.netEncType:
    curNetEncId += 1

    if netEncType == 'resnet18':  # pretrained resnet18
        netEnc.append(
            ResNet18(get_perceptual_feats=True,
                     num_classes=opt.numClassesInFtrExt,
                     image_size=opt.imageSize))
        state = torch.load(opt.netEnc[curNetEncId])
        net = state['net']
        netEnc[-1].load_state_dict(net.state_dict())

    elif netEncType == 'vgg19':  # CIFAR-10 pretrained vgg19
        netEnc.append(
            VGG19(get_perceptual_feats=True,
                  num_classes=opt.numClassesInFtrExt,
                  image_size=opt.imageSize,
                  classifier_depth=opt.vggClassifierDepth))
        log.info("Reading Feat Exatractor #{} from {}".format(
            curNetEncId, opt.netEnc[curNetEncId]))
        state = torch.load(opt.netEnc[curNetEncId])
        net = state['net']
Example 18
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    print_training_params(args=args, txt_file_path=txt_file_path)

    # Data
    print(f'==> Preparing dataset {args.dataset}')
    if args.dataset in ['cifar10', 'cifar100']:
        depth = 28
        widen_factor = 10
        dropout = 0.3
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor()
        ])

    elif args.dataset == 'tiny-imagenet':
        transform_train = transforms.Compose([
            transforms.ToTensor()
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor()
        ])

    elif args.dataset == 'imagenet':
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])

    elif args.dataset == 'mnist':
        transform_train = transforms.Compose([
            transforms.ToTensor()
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor()
        ])

    elif args.dataset == 'SVHN':
        depth = 16
        widen_factor = 4
        dropout = 0.4
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor()
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor()
        ])

    print(f'Running on dataset {args.dataset}')
    if args.dataset in ['cifar10', 'cifar100', 'mnist']:
        if args.dataset == 'cifar10':
            dataloader = datasets.CIFAR10
            num_classes = 10
        elif args.dataset == 'cifar100':
            dataloader = datasets.CIFAR100
            num_classes = 100
        elif args.dataset == 'mnist':
            dataloader = datasets.MNIST
            num_classes = 10

        trainset = dataloader(root='.data', train=True, download=True, transform=transform_train)
        testset = dataloader(root='.data', train=False, download=False, transform=transform_test)

    elif args.dataset == 'imagenet':
        trainset = datasets.ImageFolder('imagenet/train', transform=transform_train)
        testset = datasets.ImageFolder('imagenet/val', transform=transform_test)
        num_classes = 1000

    elif args.dataset == 'SVHN':
        trainset = datasets.SVHN('data', split='train', transform=transform_train, download=True)
        testset = datasets.SVHN('data', split='test', transform=transform_test, download=True)
        num_classes = 10
    
    trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
    testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch == 'vgg16':
        model = VGG16(
            dataset=args.dataset,
            num_classes=num_classes,
            kernels1=args.kernels1,
            kernels2=args.kernels2,
            kernels3=args.kernels3,
            orientations=args.orientations,
            learn_theta=args.learn_theta,
            finetune=args.finetune
        )

    elif args.arch == 'resnet18':
        model = ResNet18(
            dataset=args.dataset,
            num_classes=num_classes,
            kernels1=args.kernels1,
            kernels2=args.kernels2,
            kernels3=args.kernels3,
            orientations=args.orientations,
            learn_theta=args.learn_theta,
            finetune=args.finetune
        )

    elif args.arch == 'madry':
        model = MadryNet(
            kernels1=args.kernels1,
            kernels2=args.kernels2,
            kernels3=args.kernels3,
            orientations=args.orientations,
            learn_theta=args.learn_theta
        )

    elif args.arch == 'lenet':
        model = LeNet(
            kernels1=args.kernels1,
            kernels2=args.kernels2,
            kernels3=args.kernels3,
            orientations=args.orientations,
            learn_theta=args.learn_theta
        )

    elif args.arch == 'alexnet':
        model = AlexNet(
            dataset=args.dataset,
            num_classes=num_classes,
            kernels1=args.kernels1,
            kernels2=args.kernels2,
            kernels3=args.kernels3,
            orientations=args.orientations,
            learn_theta=args.learn_theta
        )

    elif args.arch == 'wide-resnet':
        model = Wide_ResNet(
            dataset=args.dataset,
            num_classes=num_classes,
            kernels1=args.kernels1,
            kernels2=args.kernels2,
            kernels3=args.kernels3,
            orientations=args.orientations,
            learn_theta=args.learn_theta,
            finetune=args.finetune,
            depth=depth,
            widen_factor=widen_factor,
            dropout_rate=dropout,
            use_7x7=args.use_7x7
        )

    print('Model:')
    print(model)
    print_to_log(text=repr(model), txt_file_path=txt_file_path)
    
    if device == 'cuda':
        model = torch.nn.DataParallel(model).to(device)
    
    # Compute number of parameters and print them
    param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
    param_txt = f'    Total trainable params: {param_num:d}'
    print_to_log(text=param_txt, txt_file_path=txt_file_path)
    print(param_txt)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Resume
    title = f'{args.dataset}-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint...')
        assert osp.isfile(args.resume), 'Error: checkpoint file not found!'
        args.checkpoint = osp.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])


    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch, device)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(
            trainloader, model, criterion, optimizer, epoch, device, train_adv=args.train_adv, args=args)
        test_loss, test_acc = test(
            testloader, model, criterion, epoch, device)

        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer' : optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)

        if args.kernels1 is not None:
            plot_kernels(model, args.checkpoint, epoch, device)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)

    print('Training finished. Running attack')
    main_attack(args)

    print('Running SVD computation')
    main_svs_computation(args)
Example 19
    def __init__(self, init_c, alpha, d, M, K, pre_model_pth, batch_size):
        super(Total_Model, self).__init__()
        self.resnet = ResNet18()
        self.resnet.load_state_dict(torch.load(pre_model_pth), strict=False)
        self.pqn = PQN(init_c, alpha, d, M, K, batch_size)
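strict=False lets the backbone load a classifier checkpoint whose head keys do not match. In recent PyTorch versions load_state_dict returns the mismatched key lists, which is useful for checking what was skipped (a sketch, with pre_model_pth pointing at the checkpoint):

state = torch.load(pre_model_pth)
result = ResNet18().load_state_dict(state, strict=False)
print('missing keys:', result.missing_keys)
print('unexpected keys:', result.unexpected_keys)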
Example 20
def get_network(name: str, num_classes: int):
    """Return the network matching name, or None if the name is unknown."""
    # name-to-constructor table; networks are built lazily, only for the requested name
    constructors = {
        'AlexNet': AlexNet,
        'DenseNet201': DenseNet201,
        'DenseNet169': DenseNet169,
        'DenseNet161': DenseNet161,
        'DenseNet121': DenseNet121,
        'DenseNet121CIFAR': DenseNet121CIFAR,
        'GoogLeNet': GoogLeNet,
        'InceptionV3': InceptionV3,
        'MNASNet_0_5': MNASNet_0_5,
        'MNASNet_0_75': MNASNet_0_75,
        'MNASNet_1': MNASNet_1,
        'MNASNet_1_3': MNASNet_1_3,
        'MobileNetV2': MobileNetV2,
        'ResNet18': ResNet18,
        'ResNet34': ResNet34,
        'ResNet34CIFAR': ResNet34CIFAR,
        'ResNet50CIFAR': ResNet50CIFAR,
        'ResNet101CIFAR': ResNet101CIFAR,
        'ResNet18CIFAR': ResNet18CIFAR,
        'ResNet50': ResNet50,
        'ResNet101': ResNet101,
        'ResNet152': ResNet152,
        'ResNext50': ResNeXt50,
        'ResNeXtCIFAR': ResNeXtCIFAR,
        'ResNext101': ResNeXt101,
        'WideResNet50': WideResNet50,
        'WideResNet101': WideResNet101,
        'ShuffleNetV2_0_5': ShuffleNetV2_0_5,
        'ShuffleNetV2_1': ShuffleNetV2_1,
        'ShuffleNetV2_1_5': ShuffleNetV2_1_5,
        'ShuffleNetV2_2': ShuffleNetV2_2,
        'SqueezeNet_1': SqueezeNet_1,
        'SqueezeNet_1_1': SqueezeNet_1_1,
        'VGG11': VGG11,
        'VGG11_BN': VGG11_BN,
        'VGG13': VGG13,
        'VGG13_BN': VGG13_BN,
        'VGG16': VGG16,
        'VGG16_BN': VGG16_BN,
        'VGG19': VGG19,
        'VGG19_BN': VGG19_BN,
        'VGG16CIFAR': lambda num_classes: VGGCIFAR('VGG16', num_classes=num_classes),
        'EfficientNetB4': EfficientNetB4,
        'EfficientNetB0CIFAR': EfficientNetB0CIFAR,
    }
    constructor = constructors.get(name)
    return constructor(num_classes=num_classes) if constructor is not None else None
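Unknown names fall through to None, so call sites should guard (a minimal sketch):

net = get_network('ResNet18CIFAR', num_classes=10)
if net is None:
    raise ValueError('Unknown network name')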
Example 21
    def select(self, model, path_fc=False, upsample='pixel'):
        if model == 'cnn':
            net = SimpleModel(
                in_shape=self.in_shape,
                activation=self.activation,
                num_classes=self.num_classes,
                filters=self.filters,
            )
        else:
            assert (self.dataset != 'MNIST' and self.dataset != 'Fashion-MNIST'
                    ), "Cannot use resnet or densenet for mnist style data"
            if model == 'resnet':
                assert self.resdepth in [
                    18, 34, 50, 101, 152
                ], "Non-standard and unsupported resnet depth ({})".format(
                    self.resdepth)
                if self.resdepth == 18:
                    net = ResNet18()
                elif self.resdepth == 34:
                    net = ResNet34()
                elif self.resdepth == 50:
                    net = ResNet50()
                elif self.resdepth == 101:
                    net = ResNet101()
                else:
                    net = ResNet152()
            elif model == 'densenet':
                assert self.resdepth in [
                    121, 161, 169, 201
                ], "Non-standard and unsupported densenet depth ({})".format(
                    self.resdepth)
                if self.resdepth == 121:
                    net = DenseNet121()
                elif self.resdepth == 161:
                    net = DenseNet161()
                elif self.resdepth == 169:
                    net = DenseNet169()
                else:
                    net = DenseNet201()
            elif model == 'preact_resnet':
                assert self.resdepth in [
                    10, 18, 34, 50, 101, 152
                ], "Non-standard and unsupported preact resnet depth ({})".format(
                    self.resdepth)
                if self.resdepth == 10:
                    net = PreActResNet10(path_fc=path_fc,
                                         num_classes=self.num_classes,
                                         upsample=upsample)
                elif self.resdepth == 18:
                    net = PreActResNet18()
                elif self.resdepth == 34:
                    net = PreActResNet34()
                elif self.resdepth == 50:
                    net = PreActResNet50()
                elif self.resdepth == 101:
                    net = PreActResNet101()
                else:
                    net = PreActResNet152()
            elif model == 'wresnet':
                assert (
                    (self.resdepth - 4) % 6 == 0
                ), "Wideresnet depth of {} not supported, must fulfill: (depth - 4) % 6 = 0".format(
                    self.resdepth)
                net = WideResNet(depth=self.resdepth,
                                 num_classes=self.num_classes,
                                 widen_factor=self.widen_factor)
            else:
                raise NotImplementedError('Model {} not supported'.format(model))

        return net
Example 22
if args.resume:
    # Load checkpoint
    print("==> Resuming from checkpoint..")
    assert os.path.isdir("checkpoint"), "Error: no checkpoint directory found!"
    checkpoint = torch.load('./checkpoint/ckpt.t7' + args.name + '_'
        + str(args.seed))

    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch'] + 1
    rng_state = checkpoint['rng_state']
    torch.set_rng_state(rng_state)
else:
    print("==> Building model..")
    net = ResNet18(n_classes=n_classes)

if not os.path.isdir("results"):
    os.mkdir("results")

logname = ('results/log_' + net.__class__.__name__ + '_' + args.name + '_'
           + str(args.seed) + '.csv')

net = net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.decay)


def train(epoch, update=True, topk=(1,)):
    print("\nEpoch: %d" % epoch)
    net.train()
    train_transform.transforms.append(
        Cutout(n_holes=args.n_holes, length=args.length))

Example 23
test_transform = transforms.Compose([transforms.ToTensor(), normalize])

if args.dataset == 'cifar10':
    num_classes = 10
    train_loader, val_loader, test_loader = data_loaders.load_cifar10(
        args.batch_size, val_split=True, augmentation=args.data_augmentation)
elif args.dataset == 'cifar100':
    num_classes = 100
    train_loader, val_loader, test_loader = data_loaders.load_cifar100(
        args.batch_size, val_split=True, augmentation=args.data_augmentation)

if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28,
                     num_classes=num_classes,
                     widen_factor=10,
                     dropRate=0.3)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()

if args.optimizer == 'sgdm':
    cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                    lr=args.lr,
                                    momentum=0.9,
                                    nesterov=True,
                                    weight_decay=args.wdecay)
Example 24
    donor_net = donor_creator()
    load_model(donor_net, donor_path)

    train_loader, test_loader, train_loader_raw = transfer_loader(
        batch_size=batch_size)
    output_path = 'models/' + net_name + '_' + donor_name + '_hint_optimized_' + transfer_name + '.model'
    results_path = 'results/' + net_name + '_' + donor_name + '_hint_optimized_' + transfer_name + '.pickle'
    perform_transfer_knowledge(net,
                               donor_net,
                               transfer_loader=train_loader,
                               transfer_method='hint_optimized',
                               output_path=output_path,
                               iters=[iters],
                               learning_rates=[0.0001])
    evaluate_model_retrieval(net=Cifar_Tiny(num_classes=10),
                             path=output_path,
                             result_path=results_path)


if __name__ == '__main__':
    evaluate_kt_methods(lambda: Cifar_Tiny(10),
                        lambda: ResNet18(num_classes=10),
                        'models/resnet18_cifar10.model',
                        cifar10_loader,
                        batch_size=128,
                        donor_name='resnet18_cifar10',
                        transfer_name='cifar10',
                        iters=20,
                        net_name='cifar_tiny',
                        init_model_path='models/tiny_cifar10.model')
Example 25
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              num_workers=4,
                              shuffle=True)
    val_dataset = Pet_Dataset(image_path,
                              csv_path,
                              phase="val",
                              transform=data_transform["val"])
    val_loader = DataLoader(val_dataset,
                            batch_size=batch_size,
                            num_workers=4,
                            shuffle=False)
    data_loader = {"train": train_loader, "val": val_loader}

    if cfg.model.type == "resnet18":
        model = ResNet18(label_number=37)
    if os.path.exists(cfg.model.weights):
        model.load_state_dict(torch.load(cfg.model.weights))
    # model = models.resnet18(num_classes=37)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=base_lr,
                           weight_decay=weight_decay)

    model, hist = train_model(model,
                              data_loader,
                              criterion=criterion,
                              optimizer=optimizer,
                              num_epochs=num_epochs)
Example 26
for trial in range(args.trial):
    ##########################################
    # set up subsampled cifar10 train loader
    ##########################################
    trainsubset = get_subsample_dataset_label_noise(trainset,
                                                    permute_index[trial],
                                                    noise_size=args.noise_size)
    trainloader = torch.utils.data.DataLoader(trainsubset,
                                              batch_size=128,
                                              shuffle=True)

    ##########################################
    # set up model and optimizer
    ##########################################
    if args.arch == 'resnet18':
        net = ResNet18(width=args.width).cuda()
    elif args.arch == 'resnet34':
        net = ResNet34(width=args.width).cuda()
    elif args.arch == 'resnet50':
        net = ResNet50(width=args.width).cuda()
    elif args.arch == 'resnext':
        net = ResNeXt29(width=args.width).cuda()
    elif args.arch == 'resnext_1d':
        net = ResNeXt29_1d(width=args.width).cuda()
    elif args.arch == 'vgg':
        net = VGG11(width=args.width).cuda()
    elif args.arch == 'resnet26_bottle':
        net = ResNet26_bottle(width=args.width).cuda()
    elif args.arch == 'resnet38_bottle':
        net = ResNet38_bottle(width=args.width).cuda()
    elif args.arch == 'resnet50_bottle':
Example 27
    def __init__(self, base_model, out_dim):
        super(ResNetSimCLR, self).__init__()
        self.resnet_dict = {"resnet18": ResNet18(num_classes=out_dim),
                            "resnet50": ResNet50(num_classes=out_dim)}

        self.backbone = self._get_basemodel(base_model)
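_get_basemodel is referenced above but not shown; a minimal sketch consistent with the resnet_dict lookup (the error message is an assumption):

    def _get_basemodel(self, model_name):
        try:
            return self.resnet_dict[model_name]
        except KeyError:
            raise ValueError('Invalid backbone architecture: {}'.format(model_name))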
Example 28
def main_attack(args):
    # Use CUDA
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    use_cuda = torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'

    # Random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.seed)

    # Data
    print(f'==> Preparing dataset {args.dataset}')
    if args.dataset in ['cifar10', 'cifar100']:
        depth = 28
        widen_factor = 10
        dropout = 0.3
        transform_test = transforms.Compose([transforms.ToTensor()])

    elif args.dataset == 'tiny-imagenet':
        transform_test = transforms.Compose([transforms.ToTensor()])

    elif args.dataset == 'imagenet':
        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])
    elif args.dataset == 'mnist':
        transform_test = transforms.Compose([transforms.ToTensor()])
    elif args.dataset == 'SVHN':
        depth = 16
        widen_factor = 4
        dropout = 0.4
        transform_train = transforms.Compose(
            [transforms.RandomCrop(32, padding=4),
             transforms.ToTensor()])
        transform_test = transforms.Compose([transforms.ToTensor()])

    print(f'Running on dataset {args.dataset}')
    if args.dataset in ['cifar10', 'cifar100', 'mnist']:
        if args.dataset == 'cifar10':
            dataloader = datasets.CIFAR10
            num_classes = 10
        elif args.dataset == 'cifar100':
            dataloader = datasets.CIFAR100
            num_classes = 100
        elif args.dataset == 'mnist':
            dataloader = datasets.MNIST
            num_classes = 10

        testset = dataloader(root='.data',
                             train=False,
                             download=False,
                             transform=transform_test)

    elif args.dataset == 'tiny-imagenet':
        testset = datasets.ImageFolder('tiny-imagenet-200/val',
                                       transform=transform_test)
        num_classes = 200

    elif args.dataset == 'imagenet':
        testset = datasets.ImageFolder('imagenet/val',
                                       transform=transform_test)
        num_classes = 1000

    elif args.dataset == 'SVHN':
        testset = datasets.SVHN('data',
                                split='test',
                                transform=transform_test,
                                download=True)
        num_classes = 10

    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)
    # Model
    if args.arch == 'vgg16':
        model = VGG16(dataset=args.dataset,
                      num_classes=num_classes,
                      kernels1=args.kernels1,
                      kernels2=args.kernels2,
                      kernels3=args.kernels3,
                      orientations=args.orientations,
                      learn_theta=args.learn_theta)
    elif args.arch == 'resnet18':
        model = ResNet18(dataset=args.dataset,
                         num_classes=num_classes,
                         kernels1=args.kernels1,
                         kernels2=args.kernels2,
                         kernels3=args.kernels3,
                         orientations=args.orientations,
                         learn_theta=args.learn_theta)
    elif args.arch == 'madry':
        model = MadryNet(kernels1=args.kernels1,
                         kernels2=args.kernels2,
                         kernels3=args.kernels3,
                         orientations=args.orientations,
                         learn_theta=args.learn_theta)
    elif args.arch == 'lenet':
        model = LeNet(kernels1=args.kernels1,
                      kernels2=args.kernels2,
                      kernels3=args.kernels3,
                      orientations=args.orientations,
                      learn_theta=args.learn_theta)
    elif args.arch == 'alexnet':
        model = AlexNet(dataset=args.dataset,
                        num_classes=num_classes,
                        kernels1=args.kernels1,
                        kernels2=args.kernels2,
                        kernels3=args.kernels3,
                        orientations=args.orientations,
                        learn_theta=args.learn_theta)
    elif args.arch == 'wide-resnet':
        model = Wide_ResNet(dataset=args.dataset,
                            num_classes=num_classes,
                            kernels1=args.kernels1,
                            kernels2=args.kernels2,
                            kernels3=args.kernels3,
                            orientations=args.orientations,
                            learn_theta=args.learn_theta,
                            finetune=False,
                            depth=depth,
                            widen_factor=widen_factor,
                            dropout_rate=dropout,
                            use_7x7=args.use_7x7)

    print('Model:')
    print(model)

    if use_cuda:
        model = torch.nn.DataParallel(model).cuda()

    # Compute number of parameters and print them
    param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
    param_txt = f'    Total trainable params: {param_num:d}'
    print(param_txt)

    criterion = nn.CrossEntropyLoss()
    # Resume
    # Load checkpoint.
    print('==> Resuming from checkpoint...')
    checkpoint_filename = osp.join(args.checkpoint, 'model_best.pth.tar')
    assert osp.isfile(
        checkpoint_filename), 'Error: checkpoint file not found!'
    checkpoint = torch.load(checkpoint_filename)
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])

    print('\nEvaluation only')
    test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                               use_cuda)
    print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
    print(f'Running {args.attack} attack!')

    if args.attack == 'cw':
        c_vals = torch.logspace(start=-2, end=2, steps=9)
        for c in c_vals:
            print(f'Running attack with c = {c:5.3f}')
            attack_cw(model, testloader, device=device, c=c)
            print('\n')
    else:
        if args.dataset == 'mnist':
            epsilons = [.1, .2, .3, .4]
        else:
            epsilons = [2 / 255, 8 / 255, 16 / 255, .1]
        print(f'Epsilons are: {epsilons}')
        minimum = 0.
        maximum = 1.
        print(f'Images maxima: {maximum} -- minima: {minimum}')
        df = {
            'epsilons': [
                0.,
            ],
            'test_set_accs': [
                test_acc,
            ],
            'flip_rates': [
                0.,
            ],
        }
        for eps in epsilons:
            print(f'Running attack with epsilon = {eps:5.3f}')
            acc_test_set, flip_rate = attack_pgd(model,
                                                 testloader,
                                                 device=device,
                                                 minimum=minimum,
                                                 maximum=maximum,
                                                 eps=eps)
            df['epsilons'].append(eps)
            df['test_set_accs'].append(acc_test_set)
            df['flip_rates'].append(flip_rate)
            print('\n')
        df = pd.DataFrame.from_dict(df)
        print('Overall results: \n', df)
        filename = osp.join(args.checkpoint, 'attack_results.csv')
        df.to_csv(filename, index=False)
Example 29
                                            transform=transform_test,
                                            train=False,
                                            download=True)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           num_workers=4,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=128,
                                          num_workers=4,
                                          shuffle=False)

if args.network == 'sqnxt':
    net = SqNxt_23_1x(10, ODEBlock)
elif args.network == 'resnet':
    net = ResNet18(ODEBlock)
else:
    raise ValueError('Unknown network: {}'.format(args.network))

net.apply(conv_init)
print(net)
if is_use_cuda:
    net.cuda()  # equivalent to net.to(device)
    net = nn.DataParallel(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=5e-4)


def train(epoch):
    net.train()
Example 30
def main():
    # --- data loading and augmentation ---
    train_set = CIFAR10Loader(root='./Data', train=True, transform=True)
    test_set = CIFAR10Loader(root='./Data', train=False, transform=False)
    # print("trainset: ", len(trainset))
    # print("testset: ", len(testset))
    train_loader = DataLoader(train_set,
                              batch_size=CFG.batch_size,
                              shuffle=True,
                              num_workers=2)
    test_loader = DataLoader(test_set,
                             batch_size=CFG.batch_size,
                             shuffle=False,
                             num_workers=2)

    # --- model definition and loading ---
    model = ResNet18()
    if CFG.resume:
        # pth_file = os.path.join(CFG.ckpt_dir, CFG.file_name)
        if os.path.exists(CFG.pth_file):
            logging.info("resume mode from {} ...".format(CFG.pth_file))
            checkpoint = torch.load(CFG.pth_file)
            model.load_state_dict(checkpoint['model'])
            CFG.best_acc = checkpoint['acc']  # update the global best accuracy
            CFG.resume_epoch = checkpoint['epoch'] + 1
            logging.info("resume from epoch {}, best acc is {} ...".format(
                CFG.resume_epoch, CFG.best_acc))

    # --- training environment setup ---
    if CFG.use_gpu:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    else:
        device = "cpu"

    if device == 'cuda' and torch.cuda.device_count() > 1:
        logging.info("train on {} gpus ...".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
        cudnn.benchmark = True
    model = model.to(device)  # to device

    # --- optimizer, loss function and LR schedule ---
    optimizer = optim.SGD(model.parameters(),
                          lr=CFG.learning_rate,
                          momentum=0.9,
                          weight_decay=5e-4)
    # optimizer = optim.Adadelta(model.parameters(), lr=CFG.learning_rate)
    # optimizer = optim.Adam(model.parameters(), lr=CFG.learning_rate)
    criterion = nn.CrossEntropyLoss()  # loss function
    # scheduler = ExponentialLR(optimizer, 0.9)  # alternative LR schedule
    scheduler = StepLR(optimizer, 100, gamma=0.1)

    #---train---
    for epoch in range(CFG.resume_epoch, CFG.resume_epoch + CFG.num_epoch):
        logging.info("epoch: {}, learning rate: {}".format(
            epoch, scheduler.get_lr()))
        # logging.info("epoch: {}".format(epoch))
        train(model,
              device,
              train_loader,
              optimizer,
              criterion,
              epoch,
              log_iter=CFG.log_iter)
        test(model, device, test_loader, criterion, epoch)
        scheduler.step()
        logging.info('best_acc: {}'.format(CFG.best_acc))

    logging.info('Finished Training !!!')
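The resume branch above expects a checkpoint dict with 'model', 'acc' and 'epoch' keys; a matching save step would look like this (a sketch, assuming it runs wherever a new best accuracy is found, with model, acc and epoch in scope):

state = {'model': model.state_dict(), 'acc': acc, 'epoch': epoch}
torch.save(state, CFG.pth_file)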