    print_args(args)
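    # Load per-dataset defaults from the JSON config, then let any explicitly
    # passed (non-None) command-line argument override them.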
    with open(args.json_config, "r") as f:
        defaults = json.load(f)[args.dataset]
    arg_vars = vars(args)
    arg_vars = {k: arg_vars[k] for k in arg_vars if arg_vars[k] is not None}
    defaults.update(arg_vars)
    args = SimpleNamespace(**defaults)
    if args.norm == "linf":
        args.epsilon = defaults["linf_epsilon"]
    args.surrogate_arch = "resnet-110" if args.dataset.startswith(
        "CIFAR") else "resnet101"
    surrogate_model = StandardModel(args.dataset, args.surrogate_arch,
                                    no_grad=False)
    trn_data_loader = DataLoaderMaker.get_img_label_data_loader(
        args.dataset, args.batch_size, is_train=True)  # this loads the training set, not the test set
    archs = []
    for arch in MODELS_TRAIN_STANDARD[args.dataset]:
        if StandardModel.check_arch(arch, args.dataset):
            archs.append(arch)
    print("It will be use {} architectures".format(",".join(archs)))
    model_to_data = partition_dataset(archs, trn_data_loader,
                                      args.total_images)
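    # partition_dataset presumably splits the training images among the
    # candidate architectures, so each target model attacks its own subset
    # (args.total_images in total).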
    for arch in archs:
        model = StandardModel(args.dataset, arch, no_grad=True)
        attacker = PriorRGFAttack(args.dataset, model, surrogate_model,
                                  args.targeted, args.target_type)
        log.info("Begin attack {}".format(arch))
        with torch.no_grad():
            attacker.attack_dataset(args, model_to_data, arch, save_dir_path)
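        # Move the finished model back to the CPU to free GPU memory before
        # loading the next target model.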
        model.cpu()
        log.info("Attack {} with surrogate model {} done!".format(
            arch, args.surrogate_arch))
def main():
    parser = argparse.ArgumentParser(
        description='Square Attack Hyperparameters.')
    parser.add_argument('--norm',
                        type=str,
                        required=True,
                        choices=['l2', 'linf'])
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument(
        '--gpu',
        type=str,
        required=True,
        help='GPU number. Multiple GPUs are possible for PT models.')
    parser.add_argument(
        '--p',
        type=float,
        default=0.05,
        help=
        'Probability of changing a coordinate. Note: check the paper for the best values. '
        'Linf standard: 0.05, L2 standard: 0.1. But robust models require higher p.'
    )
    parser.add_argument('--epsilon', type=float, help='Radius of the Lp ball.')
    parser.add_argument('--max_queries', type=int, default=1000)
    parser.add_argument(
        '--json-config',
        type=str,
        default=
        '/home1/machen/meta_perturbations_black_box_attack/configures/square_attack_conf.json',
        help='a JSON configuration file used in place of unspecified command-line arguments')
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--targeted', action="store_true")
    parser.add_argument('--target_type',
                        type=str,
                        default='random',
                        choices=['random', 'least_likely', 'increment'])
    parser.add_argument('--loss', type=str)

    args = parser.parse_args()
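    # Order GPUs as nvidia-smi does and restrict PyTorch to the requested
    # devices; this must be set before any CUDA context is created.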
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.json_config:
        # If a JSON file is given, use it as the base config and override it with any explicitly passed arguments
        with open(args.json_config, "r") as f:
            defaults = json.load(f)[args.dataset][args.norm]
        arg_vars = vars(args)
        arg_vars = {
            k: arg_vars[k]
            for k in arg_vars if arg_vars[k] is not None
        }
        defaults.update(arg_vars)
        args = SimpleNamespace(**defaults)

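    # Targeted attacks on ImageNet are considerably harder, so allow a larger query budget.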
    if args.targeted and args.dataset == "ImageNet":
        args.max_queries = 10000

    save_dir_path = "{}/data_square_attack/{}/{}".format(
        PY_ROOT, args.dataset,
        "targeted_attack" if args.targeted else "untargeted_attack")
    os.makedirs(save_dir_path, exist_ok=True)
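    # The loss is determined by the attack mode (any --loss flag is overridden):
    # CW margin loss for untargeted attacks, cross-entropy for targeted ones.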
    loss_type = "cw" if not args.targeted else "xent"
    args.loss = loss_type
    log_path = osp.join(
        save_dir_path,
        get_log_path(args.dataset, loss_type, args.norm, args.targeted,
                     args.target_type))

    set_log_file(log_path)

    log.info('Command line is: {}'.format(' '.join(sys.argv)))
    log.info("Log file is written in {}".format(log_path))
    log.info('Called with args:')
    print_args(args)
    trn_data_loader = DataLoaderMaker.get_img_label_data_loader(
        args.dataset, args.batch_size, is_train=True)
    models = []
    for arch in MODELS_TRAIN_STANDARD[args.dataset]:
        if StandardModel.check_arch(arch, args.dataset):
            model = StandardModel(args.dataset, arch, no_grad=True)
            model = model.eval()
            models.append({"arch_name": arch, "model": model})
    model_data_dict = defaultdict(list)
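    # Randomly assign each training batch to one of the target models, resizing
    # the images whenever the batch resolution differs from the model's
    # expected input size.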
    for images, labels in trn_data_loader:
        model_info = random.choice(models)
        arch = model_info["arch_name"]
        model = model_info["model"]
        if images.size(-1) != model.input_size[-1]:
            images = F.interpolate(images,
                                   size=model.input_size[-1],
                                   mode='bilinear',
                                   align_corners=True)
        model_data_dict[(arch, model)].append((images, labels))

    log.info("Assign data to multiple models over!")
    attacker = SquareAttack(args.dataset,
                            args.targeted,
                            args.target_type,
                            args.epsilon,
                            args.norm,
                            max_queries=args.max_queries)
    attacker.attack_all_images(args, model_data_dict, save_dir_path)
    log.info("All done!")
    def __init__(self, tot_num_tasks, dataset, inner_batch_size, protocol):
        """
        Args:
            num_samples_per_class: num samples to generate "per class" in one batch
            batch_size: size of meta batch size (e.g. number of functions)
        """
        self.img_size = IMAGE_SIZE[dataset]
        self.dataset = dataset

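        # Select the pool of target models according to the data-split protocol:
        # the train-split models, the test-split models, or both.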
        if protocol == SPLIT_DATA_PROTOCOL.TRAIN_I_TEST_II:
            self.model_names = MODELS_TRAIN_STANDARD[self.dataset]
        elif protocol == SPLIT_DATA_PROTOCOL.TRAIN_II_TEST_I:
            self.model_names = MODELS_TEST_STANDARD[self.dataset]
        elif protocol == SPLIT_DATA_PROTOCOL.TRAIN_ALL_TEST_ALL:
            self.model_names = MODELS_TRAIN_STANDARD[
                self.dataset] + MODELS_TEST_STANDARD[self.dataset]

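        # Instantiate every available pretrained model for this dataset.
        # ImageNet models are kept on the CPU here, presumably to be moved to
        # the GPU on demand so that GPU memory is not exhausted.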
        self.model_dict = {}
        for arch in self.model_names:
            if StandardModel.check_arch(arch, dataset):
                model = StandardModel(dataset, arch, no_grad=False).eval()
                if dataset != "ImageNet":
                    model = model.cuda()
                self.model_dict[arch] = model
        is_train = True
        preprocessor = DataLoaderMaker.get_preprocessor(
            IMAGE_SIZE[dataset], is_train)
        if dataset == "CIFAR-10":
            train_dataset = CIFAR10(IMAGE_DATA_ROOT[dataset],
                                    train=is_train,
                                    transform=preprocessor)
        elif dataset == "CIFAR-100":
            train_dataset = CIFAR100(IMAGE_DATA_ROOT[dataset],
                                     train=is_train,
                                     transform=preprocessor)
        elif dataset == "MNIST":
            train_dataset = MNIST(IMAGE_DATA_ROOT[dataset],
                                  train=is_train,
                                  transform=preprocessor)
        elif dataset == "FashionMNIST":
            train_dataset = FashionMNIST(IMAGE_DATA_ROOT[dataset],
                                         train=is_train,
                                         transform=preprocessor)
        elif dataset == "TinyImageNet":
            train_dataset = TinyImageNet(IMAGE_DATA_ROOT[dataset],
                                         preprocessor,
                                         train=is_train)
        elif dataset == "ImageNet":
            preprocessor = DataLoaderMaker.get_preprocessor(
                IMAGE_SIZE[dataset], is_train, center_crop=True)
            sub_folder = "/train" if is_train else "/validation"  # Note that ImageNet uses pretrainedmodels.utils.TransformImage to apply transformation
            train_dataset = ImageFolder(IMAGE_DATA_ROOT[dataset] + sub_folder,
                                        transform=preprocessor)
        self.train_dataset = train_dataset
        self.total_num_images = len(train_dataset)
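        # Pre-generate the meta-tasks: each task pairs inner_batch_size randomly
        # sampled image indices with one randomly chosen architecture.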
        self.all_tasks = dict()
        all_images_indexes = np.arange(self.total_num_images).tolist()
        for i in range(tot_num_tasks):
            self.all_tasks[i] = {
                "image": random.sample(all_images_indexes, inner_batch_size),
                "arch": random.choice(list(self.model_dict.keys()))
            }