def construct_model(arch, dataset):
    # Build a classifier for the given architecture/dataset pair.
    if dataset in ["CIFAR-10", "CIFAR-100", "MNIST", "FashionMNIST"]:
        network = MetaLearnerModelBuilder.construct_cifar_model(arch, dataset)
    elif dataset == "TinyImageNet":
        network = MetaLearnerModelBuilder.construct_tiny_imagenet_model(arch, dataset)
    elif dataset == "ImageNet":
        network = MetaLearnerModelBuilder.construct_imagenet_model(arch, dataset)
    else:
        raise ValueError("Unsupported dataset: {}".format(dataset))
    return network
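# A minimal usage sketch (hypothetical arch/dataset combination; requires MetaLearnerModelBuilder to be importable):
#   network = construct_model("resnet-110", "CIFAR-10")
#   network.cuda().eval()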
def main_train_worker(args):
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    print("=> creating model '{}'".format(args.arch))
    network = construct_model(args.arch, args.dataset)
    model_path = '{}/train_pytorch_model/real_image_model/{}@{}@epoch_{}@lr_{}@batch_{}.pth.tar'.format(
       PY_ROOT, args.dataset, args.arch, args.epochs, args.lr, args.batch_size)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    print("after train, model will be saved to {}".format(model_path))
    network.cuda()
    image_classifier_loss = nn.CrossEntropyLoss().cuda()
    optimizer = RAdam(network.parameters(), args.lr, weight_decay=args.weight_decay)
    cudnn.benchmark = True
    train_loader = DataLoaderMaker.get_img_label_data_loader(args.dataset, args.batch_size, True)
    val_loader = DataLoaderMaker.get_img_label_data_loader(args.dataset, args.batch_size, False)
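    # The boolean flag presumably selects the split: True -> training loader, False -> validation loader,
    # matching how train_loader and val_loader are used in the epoch loop below.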

    for epoch in range(0, args.epochs):
        # adjust_learning_rate(optimizer, epoch, args)
        # train_simulate_grad_mode for one epoch
        train(train_loader, network, image_classifier_loss, optimizer, epoch, args)
        # evaluate_accuracy on validation set
        validate(val_loader, network, image_classifier_loss, args)
        # remember best acc@1 and save checkpoint
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': network.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, filename=model_path)
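        # Note: filename is fixed, so each epoch overwrites the same checkpoint file with the latest state.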
def load_models(dataset):
    archs = []
    model_path_list = []
    if dataset == "CIFAR-10" or dataset == "CIFAR-100":
        for arch in ["resnet-110","WRN-28-10","WRN-34-10","resnext-8x64d","resnext-16x64d"]:
            test_model_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/{}/checkpoint.pth.tar".format(
                PY_ROOT, dataset, arch)
            if os.path.exists(test_model_path):
                archs.append(arch)
                model_path_list.append(test_model_path)
            else:
                log.info(test_model_path + " does not exist!")
    elif dataset == "TinyImageNet":
        # for arch in ["vgg11_bn","resnet18","vgg16_bn","resnext64_4","densenet121"]:
        for arch in MODELS_TEST_STANDARD[dataset]:
            test_model_path = "{}/train_pytorch_model/real_image_model/{}@{}@*.pth.tar".format(
                PY_ROOT, dataset, arch)
            test_model_path = list(glob.glob(test_model_path))[0]
            if os.path.exists(test_model_path):
                archs.append(arch)
                model_path_list.append(test_model_path)
            else:
                log.info(test_model_path + "does not exist!")
    else:
        for arch in ["inceptionv3","inceptionv4", "inceptionresnetv2","resnet101", "resnet152"]:
            test_model_list_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/checkpoints/{}*.pth".format(
                PY_ROOT, dataset, arch)
            test_model_path = list(glob.glob(test_model_list_path))
            if len(test_model_path) == 0:  # this arch does not exist for this dataset
                continue
            archs.append(arch)
            model_path_list.append(test_model_path[0])
    models = []
    print("begin construct model")
    if dataset == "TinyImageNet":
        for idx, arch in enumerate(archs):
            model = MetaLearnerModelBuilder.construct_tiny_imagenet_model(arch, dataset)
            model_path = model_path_list[idx]
            model.load_state_dict(torch.load(model_path, map_location=lambda storage, location: storage)["state_dict"])
            model.cuda()
            model.eval()
            models.append(model)
    else:
        for arch in archs:
            model = StandardModel(dataset, arch, no_grad=True)
            model.cuda()
            model.eval()
            models.append(model)
    print("end construct model")
    return models
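# A hedged usage sketch (assumes CUDA is available and the pretrained checkpoints exist under PY_ROOT):
#   models = load_models("CIFAR-10")
#   with torch.no_grad():
#       ensemble_logits = sum(m(x) for m in models) / len(models)   # x: a batch of images on the GPU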
Example #4
def get_arch(self, arch):
    # Return a cached model for this architecture, constructing and loading it on first use.
    if arch in self.arch_pool:
        return self.arch_pool[arch]
    if self.dataset in ["CIFAR-10", "MNIST", "FashionMNIST"]:
        model = MetaLearnerModelBuilder.construct_cifar_model(arch, self.dataset)
    elif self.dataset == "TinyImageNet":
        model = MetaLearnerModelBuilder.construct_tiny_imagenet_model(arch, self.dataset)
    else:
        # pass the dataset as well, matching how construct_imagenet_model is called elsewhere in this file
        model = MetaLearnerModelBuilder.construct_imagenet_model(arch, self.dataset)
    model_load_path = "{}/train_pytorch_model/real_image_model/{}@{}@epoch_*@lr_*@batch_*.pth.tar".format(
        PY_ROOT, self.dataset, arch)
    model_load_path = glob.glob(model_load_path)[0]
    assert os.path.exists(model_load_path), model_load_path
    model.load_state_dict(
        torch.load(model_load_path,
                   map_location=lambda storage, location: storage)["state_dict"])
    model.eval()
    model.cuda()
    self.arch_pool[arch] = model
    return model
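# Usage sketch (hypothetical caller; 'pool' stands for an instance of the owning class):
#   model = pool.get_arch("resnet-110")
#   model = pool.get_arch("resnet-110")   # second call reuses the cached model from arch_pool instead of reloading it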
def main_train_worker(args):
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    print("=> creating model '{}'".format(args.arch))
    if args.dataset.startswith("CIFAR"):
        network = MetaLearnerModelBuilder.construct_cifar_model(args.arch, args.dataset)
    elif args.dataset == "TinyImageNet":
        network = get_tinyimagenet_model(args.arch, args.dataset)
    target_str = "untargeted" if not args.targeted else "targeted"
    model_path = '{}/train_pytorch_model/vanilla_simulator/{}@{}_norm_{}@{}@epoch_{}@lr_{}@batch_{}.pth.tar'.format(
       PY_ROOT, args.dataset, args.norm, target_str, args.arch, args.epochs, args.lr, args.batch_size)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    print("after train, model will be saved to {}".format(model_path))
    network.cuda()
    mse_loss = nn.MSELoss().cuda()
    optimizer = Adam(network.parameters(), args.lr, weight_decay=args.weight_decay)
    cudnn.benchmark = True
    train_loader = torch.utils.data.DataLoader(
        QueryLogitsDataset(args.dataset, args.norm, "cw", SPLIT_DATA_PROTOCOL.TRAIN_I_TEST_II,
                           args.targeted, without_resnet=False),
        batch_size=args.batch_size // 2, shuffle=True, num_workers=5, drop_last=True)
    for epoch in range(0, args.epochs):
        # adjust_learning_rate(optimizer, epoch, args)
        # train_simulate_grad_mode for one epoch
        train(train_loader, network, mse_loss, optimizer, epoch, args)
        # evaluate_accuracy on validation set
        # validate(val_loader, network, image_classifier_loss, args)
        # remember best acc@1 and save checkpoint
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'norm': args.norm,
            'targeted':args.targeted,
            'state_dict': network.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, filename=model_path)
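        # Hedged restore sketch (keys mirror the dict saved above; in practice run this outside the training loop):
        #   checkpoint = torch.load(model_path, map_location="cpu")
        #   network.load_state_dict(checkpoint["state_dict"])
        #   optimizer.load_state_dict(checkpoint["optimizer"])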
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    os.environ["TORCH_HOME"] = "/home1/machen/meta_perturbations_black_box_attack/train_pytorch_model/real_image_model/ImageNet-pretrained"

    pattern = re.compile("(.*?)@(.*?)@.*tar")
    set_log_file(args.dir_path + "/check_{}.log".format(args.dataset))
    log.info("using GPU {}".format(args.gpu))
    log.info(args.dir_path + "/{}*.tar".format(args.dataset))

    for abs_path in glob.glob(args.dir_path + "/{}*.tar".format(args.dataset)):
        f = os.path.basename(abs_path)
        ma = pattern.match(f)
        if ma is None:  # skip files that do not follow the <dataset>@<arch>@... naming scheme
            continue
        dataset = args.dataset
        arch = ma.group(2)
        if dataset in ["CIFAR-10", "CIFAR-100", "MNIST", "FashionMNIST"]:
            model = MetaLearnerModelBuilder.construct_cifar_model(
                arch, dataset)
        elif dataset == "TinyImageNet":
            model = MetaLearnerModelBuilder.construct_tiny_imagenet_model(
                arch, dataset)
        elif dataset == "ImageNet":
            if arch not in pretrainedmodels.__dict__:
                print("arch {} not in pretrained models".format(arch))
                continue
            model = MetaLearnerModelBuilder.construct_imagenet_model(
                arch, dataset)
        if dataset != "ImageNet":
            model.load_state_dict(
                torch.load(abs_path,
                           map_location=lambda storage, location: storage)
                ["state_dict"])
        model.cuda()