Example No. 1
def main_train_worker(args):
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    print("=> creating model '{}'".format(args.arch))
    network = MetaLearnerModelBuilder.construct_cifar_model(args.arch, args.dataset)
    model_path = '{}/train_pytorch_model/real_image_model/{}@{}@epoch_{}@lr_{}@batch_{}.pth.tar'.format(
       PY_ROOT, args.dataset, args.arch, args.epochs, args.lr, args.batch_size)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    print("after train, model will be saved to {}".format(model_path))
    network.cuda()
    image_classifier_loss = nn.CrossEntropyLoss().cuda()
    optimizer = RAdam(network.parameters(), args.lr, weight_decay=args.weight_decay)
    cudnn.benchmark = True
    train_loader = DataLoaderMaker.get_img_label_data_loader(args.dataset, args.batch_size, True)
    val_loader = DataLoaderMaker.get_img_label_data_loader(args.dataset, args.batch_size, False)

    for epoch in range(0, args.epochs):
        # adjust_learning_rate(optimizer, epoch, args)
        # train_simulate_grad_mode for one epoch
        train(train_loader, network, image_classifier_loss, optimizer, epoch, args)
        # evaluate_accuracy on validation set
        validate(val_loader, network, image_classifier_loss, args)
        # remember best acc@1 and save checkpoint
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': network.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, filename=model_path)
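
A minimal sketch (not part of the original example) of how the checkpoint written by save_checkpoint above could be reloaded to resume training, assuming network and optimizer are constructed exactly as in the example and the checkpoint keys are those shown in the saved dictionary:

import torch

# Hedged sketch: restore the checkpoint saved by main_train_worker above.
def resume_from_checkpoint(network, optimizer, model_path):
    state = torch.load(model_path, map_location=lambda storage, loc: storage)
    network.load_state_dict(state['state_dict'])
    optimizer.load_state_dict(state['optimizer'])
    return state['epoch']  # epoch index to resume from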
Example No. 2
    def __init__(self, args, directions_generator):
        self.rank_transform = not args.no_rank_transform
        self.random_mask = args.random_mask

        self.image_split = args.image_split
        self.sub_num_sample = args.sub_num_sample
        self.sigma = args.sigma
        self.starting_eps = args.starting_eps
        self.eps = args.epsilon
        self.sample_per_draw = args.sample_per_draw
        self.directions_generator = directions_generator
        self.max_iter = args.max_queries
        self.delta_eps = args.delta_eps
        self.max_lr = args.max_lr
        self.min_lr = args.min_lr
        self.targeted = args.targeted
        self.norm = args.norm

        self.dataset_loader = DataLoaderMaker.get_test_attacked_data(
            args.dataset, 1)
        self.total_images = len(self.dataset_loader.dataset)
        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.not_done_all = torch.zeros_like(
            self.query_all
        )  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
        self.not_done_prob_all = torch.zeros_like(self.query_all)
        self.dataset_name = args.dataset
Example No. 3
 def __init__(self, dataset_name, model, surrogate_model, meta_model,
              targeted, target_type, meta_predict_steps, finetune_times,
              finetune_lr):
     self.dataset_name = dataset_name
     self.data_loader = DataLoaderMaker.get_test_attacked_data(
         dataset_name, 1)
     self.image_height = IMAGE_SIZE[self.dataset_name][0]
     self.image_width = IMAGE_SIZE[self.dataset_name][1]
     self.in_channels = IN_CHANNELS[self.dataset_name]
     self.model = model
     self.surrogate_model = surrogate_model
     self.model.cuda().eval()
     self.surrogate_model.cuda().eval()
     self.targeted = targeted  # only untargeted attacks are supported for now
     self.target_type = target_type
     self.clip_min = 0.0
     self.clip_max = 1.0
     self.meta_predict_steps = meta_predict_steps
     self.finetune_times = finetune_times
     self.meta_model_for_q1 = meta_model
     self.meta_model_for_q2 = copy.deepcopy(meta_model)
     self.finetune_lr = finetune_lr
     self.pretrained_meta_weights = self.meta_model_for_q1.state_dict(
     ).copy()
     self.meta_optimizer_q1 = Adam(self.meta_model_for_q1.parameters(),
                                   lr=self.finetune_lr)
     self.meta_optimizer_q2 = Adam(self.meta_model_for_q2.parameters(),
                                   lr=self.finetune_lr)
     self.mse_loss = nn.MSELoss(reduction="mean")
Example No. 4
    def __init__(self, dataset, batch_size, targeted, target_type, epsilon, norm, lower_bound=0.0, upper_bound=1.0,
                 max_queries=10000):
        """
            :param epsilon: perturbation limit according to lp-ball
            :param norm: norm for the lp-ball constraint
            :param lower_bound: minimum value data point can take in any coordinate
            :param upper_bound: maximum value data point can take in any coordinate
            :param max_queries: max number of calls to model per data point
            :param max_crit_queries: max number of calls to the early stopping criterion per data point
        """
        assert norm in ['linf', 'l2'], "{} is not supported".format(norm)
        self.epsilon = epsilon
        self.norm = norm
        self.max_queries = max_queries

        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self._proj = None
        self.is_new_batch = False
        # self.early_stop_crit_fct = lambda model, x, y: 1 - model(x).max(1)[1].eq(y)
        self.targeted = targeted
        self.target_type = target_type

        self.dataset_loader = DataLoaderMaker.get_test_attacked_data(dataset, batch_size)
        self.total_images = len(self.dataset_loader.dataset)

        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.not_done_all = torch.zeros_like(self.query_all)  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
        self.not_done_prob_all = torch.zeros_like(self.query_all)

        self.lowest_change_ratio = 0.05  # the fraction of pixels that are changed
Example No. 5
 def __init__(self,
              dataset,
              batch_size,
              targeted,
              target_type,
              epsilon,
              norm,
              lower_bound=0.0,
              upper_bound=1.0,
              max_queries=10000):
     assert norm in ['linf', 'l2'], "{} is not supported".format(norm)
     self.epsilon = epsilon
     self.norm = norm
     self.max_queries = max_queries
     self.lower_bound = lower_bound
     self.upper_bound = upper_bound
     self.targeted = targeted
     self.target_type = target_type
     self.dataset_loader = DataLoaderMaker.get_test_attacked_data(
         dataset, batch_size)
     self.total_images = len(self.dataset_loader.dataset)
     self.query_all = torch.zeros(self.total_images)
     self.correct_all = torch.zeros_like(self.query_all)  # number of images
     self.not_done_all = torch.zeros_like(
         self.query_all
     )  # always set to 0 if the original image is misclassified
     self.success_all = torch.zeros_like(self.query_all)
     self.success_query_all = torch.zeros_like(self.query_all)
     self.not_done_prob_all = torch.zeros_like(self.query_all)
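
A sketch, not taken from this snippet, of how the per-image buffers initialized above are commonly reduced to summary statistics after the attack loop; it follows the aggregation pattern that appears in a later example on this page (success = (1 - not_done_all) * correct_all, mean/median query over successful images):

import torch

# Assumption: the attack loop has filled query_all / correct_all / not_done_all in place.
def summarize_attack(correct_all, not_done_all, query_all):
    success = (1 - not_done_all) * correct_all   # attacked successfully and originally correct
    success_query = success * query_all          # queries spent on successful attacks
    idx = success.nonzero(as_tuple=False).squeeze(1)
    return {
        "mean_query": success_query[idx].float().mean().item(),
        "median_query": success_query[idx].float().median().item(),
        "avg_not_done": not_done_all[correct_all.bool()].float().mean().item(),
    }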
Example No. 6
    def __init__(self, args, dataset, targeted, target_type, epsilon, norm, lower_bound=0.0, upper_bound=1.0,
                 max_queries=10000):
        """
            :param epsilon: perturbation limit according to lp-ball
            :param norm: norm for the lp-ball constraint
            :param lower_bound: minimum value data point can take in any coordinate
            :param upper_bound: maximum value data point can take in any coordinate
            :param max_queries: max number of calls to model per data point
            :param max_crit_queries: max number of calls to the early stopping criterion per data point
        """
        assert norm in ['linf', 'l2'], "{} is not supported".format(norm)
        self.epsilon = epsilon
        self.norm = norm
        self.max_queries = max_queries

        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # self.early_stop_crit_fct = lambda model, x, y: 1 - model(x).max(1)[1].eq(y)
        self.targeted = targeted
        self.target_type = target_type

        self.data_loader = DataLoaderMaker.get_test_attacked_data(dataset, args.batch_size)
        self.total_images = len(self.data_loader.dataset)
        self.att_iter = args.max_queries
        self.correct_all = torch.zeros(self.total_images)  # number of images
        self.not_done_all = torch.zeros(self.total_images)  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros(self.total_images)
        self.not_done_prob_all = torch.zeros(self.total_images)
        self.stop_iter_all = torch.zeros(self.total_images)
        self.ord =  args.norm # linf, l1, l2
        self.clip_min = args.clip_min
        self.clip_max = args.clip_max
        self.lr = args.lr
        self.beta1 = args.beta1
        self.loss_fn = nn.CrossEntropyLoss().cuda()
Example No. 7
 def __init__(self, dataset, batch_size, targeted, target_type, epsilon, norm, lower_bound=0.0, upper_bound=1.0,
              max_queries=10000, surrogate_model_names=None):
     assert norm in ['linf', 'l2'], "{} is not supported".format(norm)
     self.epsilon = epsilon
     self.norm = norm
     self.max_queries = max_queries
     self.lower_bound = lower_bound
     self.upper_bound = upper_bound
     self.targeted = targeted
     self.target_type = target_type
     self.dataset_loader = DataLoaderMaker.get_test_attacked_data(dataset, batch_size)
     self.total_images = len(self.dataset_loader.dataset)
     self.query_all = torch.zeros(self.total_images)
     self.correct_all = torch.zeros_like(self.query_all)  # number of images
     self.not_done_all = torch.zeros_like(self.query_all)  # always set to 0 if the original image is misclassified
     self.success_all = torch.zeros_like(self.query_all)
     self.success_query_all = torch.zeros_like(self.query_all)
     self.not_done_prob_all = torch.zeros_like(self.query_all)
     # self.cos_similarity_all = torch.zeros(self.total_images, max_queries)   # N, T
     self.increase_loss_from_last_iter_with_1st_model_grad_record_all = OrderedDict()
     self.increase_loss_from_last_iter_with_2nd_model_grad_record_all = OrderedDict()
     self.increase_loss_from_last_iter_after_switch_record_all = OrderedDict()
     self.surrogate_model_record_all = OrderedDict()
     self.loss_x_pos_temp_record_all = OrderedDict()
     self.loss_x_neg_temp_record_all = OrderedDict()
     self.loss_after_switch_grad_record_all = OrderedDict()
     self.surrogate_model_names = surrogate_model_names
Example No. 8
    def __init__(self,
                 dataset,
                 targeted,
                 target_type,
                 epsilon,
                 norm,
                 batch_size,
                 lower_bound=0.0,
                 upper_bound=1.0,
                 max_queries=10000,
                 max_crit_queries=np.inf):
        """
            :param epsilon: perturbation limit according to lp-ball
            :param norm: norm for the lp-ball constraint
            :param lower_bound: minimum value data point can take in any coordinate
            :param upper_bound: maximum value data point can take in any coordinate
            :param max_queries: max number of calls to model per data point
            :param max_crit_queries: max number of calls to the early stopping criterion per data point
        """
        assert norm in ['linf', 'l2'], "{} is not supported".format(norm)
        assert not (np.isinf(max_queries) and np.isinf(max_crit_queries)
                    ), "one of the budgets has to be finite!"
        self.epsilon = epsilon
        self.norm = norm
        self.max_queries = max_queries
        self.max_crit_queries = max_crit_queries

        self.best_est_deriv = None
        self.xo_t = None
        self.sgn_t = None
        self.h = np.zeros(batch_size).astype(np.int32)
        self.i = np.zeros(batch_size).astype(np.int32)
        self.exhausted = [False for _ in range(batch_size)]

        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self._proj = None
        self.is_new_batch = False
        # self.early_stop_crit_fct = lambda model, x, y: 1 - model(x).max(1)[1].eq(y)
        self.targeted = targeted
        self.target_type = target_type

        self.data_loader = DataLoaderMaker.get_test_attacked_data(
            dataset, batch_size)
        self.total_images = len(self.data_loader.dataset)
        self.image_height = IMAGE_SIZE[dataset][0]
        self.image_width = IMAGE_SIZE[dataset][1]
        self.in_channels = IN_CHANNELS[dataset]

        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.not_done_all = torch.zeros_like(
            self.query_all
        )  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
        self.not_done_prob_all = torch.zeros_like(self.query_all)
Example No. 9
 def __init__(self, args):
     self.dataset_loader = DataLoaderMaker.get_test_attacked_data(args.dataset, args.batch_size)
     self.total_images = len(self.dataset_loader.dataset)
     self.query_all = torch.zeros(self.total_images)
     self.correct_all = torch.zeros_like(self.query_all)  # number of images
     self.not_done_all = torch.zeros_like(self.query_all)  # always set to 0 if the original image is misclassified
     self.success_all = torch.zeros_like(self.query_all)
     self.success_query_all = torch.zeros_like(self.query_all)
     self.not_done_loss_all = torch.zeros_like(self.query_all)
     self.not_done_prob_all = torch.zeros_like(self.query_all)
Example No. 10
def main_train_worker(args):
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.dataset.startswith("CIFAR"):
        compress_mode = 2
        use_tanh = False
        resize = None
        img_size = 32
    if args.dataset == "ImageNet":
        compress_mode = 3
        use_tanh = True
        resize = 128
        img_size = 299
    elif args.dataset in ["MNIST", "FashionMNIST"]:
        compress_mode = 1
        use_tanh = False
        resize = None
        img_size = 28
    network = Codec(img_size,
                    IN_CHANNELS[args.dataset],
                    compress_mode,
                    resize=resize,
                    use_tanh=use_tanh)
    model_path = '{}/train_pytorch_model/AutoZOOM/AutoEncoder_{}@compress_{}@use_tanh_{}@epoch_{}@lr_{}@batch_{}.pth.tar'.format(
        PY_ROOT, args.dataset, compress_mode, use_tanh, args.epochs, args.lr,
        args.batch_size)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    print("Model will be saved to {}".format(model_path))
    network.cuda()
    mse_loss_fn = nn.MSELoss().cuda()
    optimizer = RAdam(network.parameters(),
                      args.lr,
                      weight_decay=args.weight_decay)
    cudnn.benchmark = True
    train_loader = DataLoaderMaker.get_img_label_data_loader(
        args.dataset, args.batch_size, True, (img_size, img_size))
    # val_loader = DataLoaderMaker.get_img_label_data_loader(args.dataset, args.batch_size, False)

    for epoch in range(0, args.epochs):
        # adjust_learning_rate(optimizer, epoch, args)
        # train_simulate_grad_mode for one epoch
        train(train_loader, network, mse_loss_fn, optimizer, epoch, args,
              use_tanh)
        # evaluate_accuracy on validation set
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'encoder': network.encoder.state_dict(),
                'decoder': network.decoder.state_dict(),
                "compress_mode": compress_mode,
                "use_tanh": use_tanh,
                'optimizer': optimizer.state_dict(),
            },
            filename=model_path)
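
Not part of the original example: a hedged sketch of restoring the autoencoder from the checkpoint written above, under the assumption that Codec exposes the encoder/decoder modules whose state dicts are saved, and that model_path, img_size, resize, IN_CHANNELS and args come from the surrounding code:

import torch

state = torch.load(model_path, map_location=lambda storage, loc: storage)
codec = Codec(img_size, IN_CHANNELS[args.dataset], state["compress_mode"],
              resize=resize, use_tanh=state["use_tanh"])
codec.encoder.load_state_dict(state["encoder"])
codec.decoder.load_state_dict(state["decoder"])
codec.cuda().eval()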
Example No. 11
    def __init__(self,
                 dataset,
                 batch_size,
                 pixel_attack,
                 freq_dims,
                 stride,
                 order,
                 max_iters,
                 targeted,
                 target_type,
                 norm,
                 pixel_epsilon,
                 l2_bound,
                 linf_bound,
                 lower_bound=0.0,
                 upper_bound=1.0):
        """
            :param pixel_epsilon: perturbation limit according to lp-ball
            :param norm: norm for the lp-ball constraint
            :param lower_bound: minimum value data point can take in any coordinate
            :param upper_bound: maximum value data point can take in any coordinate
            :param max_crit_queries: max number of calls to the early stopping criterion per data point
        """
        assert norm in ['linf', 'l2'], "{} is not supported".format(norm)
        self.pixel_epsilon = pixel_epsilon
        self.dataset = dataset
        self.norm = norm
        self.pixel_attack = pixel_attack
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.freq_dims = freq_dims
        self.stride = stride
        self.order = order
        self.linf_bound = linf_bound
        self.l2_bound = l2_bound
        # self.early_stop_crit_fct = lambda model, x, y: 1 - model(x).max(1)[1].eq(y)
        self.max_iters = max_iters
        self.targeted = targeted
        self.target_type = target_type

        self.data_loader = DataLoaderMaker.get_test_attacked_data(
            dataset, batch_size)
        self.total_images = len(self.data_loader.dataset)
        self.image_height = IMAGE_SIZE[dataset][0]
        self.image_width = IMAGE_SIZE[dataset][1]
        self.in_channels = IN_CHANNELS[dataset]

        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
Example No. 12
 def __init__(self, dataset_name, targeted):
     self.dataset_name = dataset_name
     self.num_classes = CLASS_NUM[self.dataset_name]
     self.dataset_loader = DataLoaderMaker.get_test_attacked_data(
         dataset_name, 1)
     self.total_images = len(self.dataset_loader.dataset)
     self.targeted = targeted
     self.query_all = torch.zeros(self.total_images)
     self.correct_all = torch.zeros_like(self.query_all)  # number of images
     self.not_done_all = torch.zeros_like(
         self.query_all
     )  # always set to 0 if the original image is misclassified
     self.success_all = torch.zeros_like(self.query_all)
     self.success_query_all = torch.zeros_like(self.query_all)
Example No. 13
 def __init__(self, dataset_name, model, surrogate_model, targeted, target_type):
     self.dataset_name = dataset_name
     self.data_loader = DataLoaderMaker.get_test_attacked_data(dataset_name, 1)
     self.image_height = IMAGE_SIZE[self.dataset_name][0]
     self.image_width = IMAGE_SIZE[self.dataset_name][1]
     self.in_channels = IN_CHANNELS[self.dataset_name]
     self.model = model
     self.surrogate_model = surrogate_model
     self.model.cuda().eval()
     self.surrogate_model.cuda().eval()
     self.targeted = targeted  # only untargeted attacks are supported for now
     self.target_type = target_type
     self.clip_min = 0.0
     self.clip_max = 1.0
Example No. 14
    def __init__(self, function, config, device):
        self.config = config
        self.batch_size = config['batch_size']
        self.function = function
        self.model = function.model
        self.device = device
        self.epsilon = self.config['epsilon']
        self.gp = attack_bayesian_EI.Attack(
            f=self,
            dim=4,
            max_evals=1000,
            verbose=True,
            use_ard=True,
            max_cholesky_size=2000,
            n_training_steps=30,
            device=device,
            dtype="float32",
        )
        self.query_limit = self.config['query_limit']
        self.max_iters = self.config['max_iters']
        self.init_iter = self.config["init_iter"]
        self.init_batch = self.config["init_batch"]
        self.memory_size = self.config["memory_size"]
        self.channels = self.config["channels"]
        self.image_height = self.config["image_height"]
        self.image_width = self.config["image_width"]
        self.gp_emptyX = torch.zeros((1, 4), device=device)
        self.gp_emptyfX = torch.zeros((1), device=device)
        self.local_forget_threshold = self.config['local_forget_threshold']
        self.lr = self.config['lr']

        self.dataset_loader = DataLoaderMaker.get_test_attacked_data(
            args.dataset, args.batch_size)
        self.total_images = len(self.dataset_loader.dataset)
        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.not_done_all = torch.zeros_like(
            self.query_all
        )  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
        self.maximum_queries = self.config["max_queries"]
Example No. 15
    def __init__(self,
                 pop_size=5,
                 generations=1000,
                 cross_rate=0.7,
                 mutation_rate=0.001,
                 max_queries=2000,
                 epsilon=8. / 255,
                 iters=10,
                 ensemble_models=None,
                 targeted=False):
        self.loss_fn = nn.CrossEntropyLoss()
        self.dataset_loader = DataLoaderMaker.get_test_attacked_data(
            args.dataset, 1)
        self.total_images = len(self.dataset_loader.dataset)
        # parameters about evolution algorithm
        self.pop_size = pop_size
        self.generations = generations
        self.cross_rate = cross_rate
        self.mutation_rate = mutation_rate
        # parameters about attack
        self.epsilon = epsilon
        self.clip_min = 0
        self.clip_max = 1
        # ensemble MI-FGSM parameters, use ensemble MI-FGSM attack generate adv as initial population
        self.ensemble_models = ensemble_models
        self.iters = iters
        self.targeted = targeted
        self.max_queries = max_queries
        self.idx = np.random.choice(np.arange(self.pop_size),
                                    size=2,
                                    replace=False)
        self.is_change = np.zeros(self.pop_size)
        self.pop_fitness = np.zeros(self.pop_size)

        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.not_done_all = torch.zeros_like(
            self.query_all
        )  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
Example No. 16
    def __init__(self, dataset_name, targeted):
        self.dataset_name = dataset_name
        self.num_classes = CLASS_NUM[self.dataset_name]
        self.dataset_loader = DataLoaderMaker.get_test_attacked_data(
            dataset_name, 1)

        log.info("label index dict data build begin")
        # if self.dataset_name == "TinyImageNet":
        #     self.candidate_loader = DataLoaderMaker.get_candidate_attacked_data(dataset_name, 1)
        #     self.dataset = self.candidate_loader.dataset
        # else:
        self.dataset = self.dataset_loader.dataset
        self.label_data_index_dict = self.get_label_dataset(self.dataset)
        log.info("label index dict data build over!")
        self.total_images = len(self.dataset_loader.dataset)
        self.targeted = targeted
        self.query_all = torch.zeros(self.total_images)
        self.correct_all = torch.zeros_like(self.query_all)  # number of images
        self.not_done_all = torch.zeros_like(
            self.query_all
        )  # always set to 0 if the original image is misclassified
        self.success_all = torch.zeros_like(self.query_all)
        self.success_query_all = torch.zeros_like(self.query_all)
Example No. 17
def generate(datasetname, batch_size):
    save_dir_path = "{}/data_adv_defense/guided_denoiser".format(PY_ROOT)
    os.makedirs(save_dir_path, exist_ok=True)
    set_log_file(save_dir_path + "/generate_{}.log".format(datasetname))
    data_loader = DataLoaderMaker.get_img_label_data_loader(datasetname, batch_size, is_train=True)
    attackers = []
    for model_name in MODELS_TRAIN_STANDARD[datasetname] + MODELS_TEST_STANDARD[datasetname]:
        model = StandardModel(datasetname, model_name, no_grad=False)
        model = model.cuda().eval()
        linf_PGD_attack =LinfPGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.031372, nb_iter=30,
                      eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False)
        l2_PGD_attack = L2PGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"),eps=4.6,
                                    nb_iter=30,clip_min=0.0, clip_max=1.0, targeted=False)
        FGSM_attack = FGSM(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"))
        momentum_attack = MomentumIterativeAttack(model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.031372, nb_iter=30,
                      eps_iter=0.01, clip_min=0.0, clip_max=1.0, targeted=False)
        attackers.append(linf_PGD_attack)
        attackers.append(l2_PGD_attack)
        attackers.append(FGSM_attack)
        attackers.append(momentum_attack)
        log.info("Create model {} done!".format(model_name))

    generate_and_save_adv_examples(datasetname, data_loader, attackers, save_dir_path)
Example No. 18
def main():
    parser = argparse.ArgumentParser(
        description='Square Attack Hyperparameters.')
    parser.add_argument('--norm',
                        type=str,
                        required=True,
                        choices=['l2', 'linf'])
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument(
        '--gpu',
        type=str,
        required=True,
        help='GPU number. Multiple GPUs are possible for PT models.')
    parser.add_argument(
        '--p',
        type=float,
        default=0.05,
        help=
        'Probability of changing a coordinate. Note: check the paper for the best values. '
        'Linf standard: 0.05, L2 standard: 0.1. But robust models require higher p.'
    )
    parser.add_argument('--epsilon', type=float, help='Radius of the Lp ball.')
    parser.add_argument('--max_queries', type=int, default=1000)
    parser.add_argument(
        '--json-config',
        type=str,
        default=
        '/home1/machen/meta_perturbations_black_box_attack/configures/square_attack_conf.json',
        help='a configures file to be passed in instead of arguments')
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--targeted', action="store_true")
    parser.add_argument('--target_type',
                        type=str,
                        default='random',
                        choices=['random', 'least_likely', "increment"])
    parser.add_argument('--loss', type=str)

    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.json_config:
        # If a json file is given, use the JSON file as the base, and then update it with args
        defaults = json.load(open(args.json_config))[args.dataset][args.norm]
        arg_vars = vars(args)
        arg_vars = {
            k: arg_vars[k]
            for k in arg_vars if arg_vars[k] is not None
        }
        defaults.update(arg_vars)
        args = SimpleNamespace(**defaults)

    if args.targeted and args.dataset == "ImageNet":
        args.max_queries = 10000

    save_dir_path = "{}/data_square_attack/{}/{}".format(
        PY_ROOT, args.dataset,
        "targeted_attack" if args.targeted else "untargeted_attack")
    os.makedirs(save_dir_path, exist_ok=True)
    loss_type = "cw" if not args.targeted else "xent"
    args.loss = loss_type
    log_path = osp.join(
        save_dir_path,
        get_log_path(args.dataset, loss_type, args.norm, args.targeted,
                     args.target_type))

    set_log_file(log_path)

    log.info('Command line is: {}'.format(' '.join(sys.argv)))
    log.info("Log file is written in {}".format(log_path))
    log.info('Called with args:')
    print_args(args)
    trn_data_loader = DataLoaderMaker.get_img_label_data_loader(
        args.dataset, args.batch_size, is_train=True)
    models = []
    for arch in MODELS_TRAIN_STANDARD[args.dataset]:
        if StandardModel.check_arch(arch, args.dataset):
            model = StandardModel(args.dataset, arch, True)
            model = model.eval()
            models.append({"arch_name": arch, "model": model})
    model_data_dict = defaultdict(list)
    for images, labels in trn_data_loader:
        model_info = random.choice(models)
        arch = model_info["arch_name"]
        model = model_info["model"]
        if images.size(-1) != model.input_size[-1]:
            images = F.interpolate(images,
                                   size=model.input_size[-1],
                                   mode='bilinear',
                                   align_corners=True)
        model_data_dict[(arch, model)].append((images, labels))

    log.info("Assign data to multiple models over!")
    attacker = SquareAttack(args.dataset,
                            args.targeted,
                            args.target_type,
                            args.epsilon,
                            args.norm,
                            max_queries=args.max_queries)
    attacker.attack_all_images(args, model_data_dict, save_dir_path)
    log.info("All done!")
Example No. 19
def generate_attacked_dataset(dataset, num_sample, models):
    selected_images = []
    selected_images_big = []
    selected_true_labels = []
    selected_img_id = []
    total_count = 0
    data_loader = DataLoaderMaker.get_imgid_img_label_data_loader(dataset, 500, False, seed=1234)
    print("begin select")
    if dataset != "ImageNet":
        for image_id, images, labels in data_loader:
            images_gpu = images.cuda()
            pred_eq_true_label = []
            for model in models:
                with torch.no_grad():
                    logits = model(images_gpu)
                pred = logits.max(1)[1]
                correct = pred.detach().cpu().eq(labels).long()
                pred_eq_true_label.append(correct.detach().cpu().numpy())
            pred_eq_true_label = np.stack(pred_eq_true_label).astype(np.uint8) # M, B
            pred_eq_true_label = np.bitwise_and.reduce(pred_eq_true_label, axis=0)  # 1,0,1,1,1
            current_select_count = len(np.where(pred_eq_true_label)[0])
            total_count += current_select_count
            selected_image = images.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]]
            selected_images.append(selected_image)
            selected_true_labels.append(labels.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]])
            selected_img_id.append(image_id.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]])
            if total_count >= num_sample:
                break
    else:
        for image_id, images, images_big, labels in data_loader:
            images_gpu = images.cuda()
            pred_eq_true_label = []
            for model in models:
                with torch.no_grad():
                    logits = model(images_gpu)
                pred = logits.max(1)[1]
                correct = pred.detach().cpu().eq(labels).long()
                pred_eq_true_label.append(correct.detach().cpu().numpy())
            pred_eq_true_label = np.stack(pred_eq_true_label).astype(np.uint8) # M, B
            pred_eq_true_label = np.bitwise_and.reduce(pred_eq_true_label, axis=0)  # 1,0,1,1,1
            current_select_count = len(np.where(pred_eq_true_label)[0])
            total_count += current_select_count
            selected_image = images.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]]
            selected_images.append(selected_image)

            selected_image_big = images_big.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]]
            selected_images_big.append(selected_image_big)
            selected_true_labels.append(labels.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]])
            selected_img_id.append(image_id.detach().cpu().numpy()[np.where(pred_eq_true_label)[0]])
            if total_count >= num_sample:
                break
    selected_images = np.concatenate(selected_images, 0)
    if dataset=="ImageNet":
        selected_images_big = np.concatenate(selected_images_big, 0)
        selected_images_big = selected_images_big[:num_sample]
    selected_true_labels = np.concatenate(selected_true_labels, 0)
    selected_img_id = np.concatenate(selected_img_id, 0)

    selected_images = selected_images[:num_sample]
    selected_true_labels = selected_true_labels[:num_sample]
    selected_img_id = selected_img_id[:num_sample]
    return selected_images, selected_images_big, selected_true_labels, selected_img_id
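
A hypothetical usage sketch for generate_attacked_dataset above; the architecture list and sample count are placeholders, following the StandardModel / MODELS_TEST_STANDARD pattern used in other examples on this page:

# Hypothetical call; dataset name, num_sample and the model list are assumptions.
models = [StandardModel("CIFAR-10", arch, no_grad=True).cuda().eval()
          for arch in MODELS_TEST_STANDARD["CIFAR-10"]]
images, images_big, labels, img_ids = generate_attacked_dataset(
    "CIFAR-10", num_sample=1000, models=models)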
Example No. 20
def main():
    global args
    global state
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    model = FeatureDefenseModel(args.dataset, args.arch, no_grad=False)
    model = model.cuda()
    model_path = '{}/train_pytorch_model/adversarial_train/pl_loss/pcl_pgd_adv_train_{}@{}.pth.tar'.format(
        PY_ROOT, args.dataset, args.arch)

    set_log_file(
        os.path.dirname(model_path) +
        "/adv_train_{}_{}.log".format(args.dataset, args.arch))
    log.info('Command line is: {}'.format(' '.join(sys.argv)))
    log.info('Called with args:')
    print_args(args)
    log.info(
        "After trained over, the model will be saved to {}".format(model_path))
    train_loader = DataLoaderMaker.get_img_label_data_loader(
        args.dataset, args.train_batch, True)
    test_loader = DataLoaderMaker.get_img_label_data_loader(
        args.dataset, args.test_batch, False)
    use_gpu = torch.cuda.is_available()
    num_classes = CLASS_NUM[args.dataset]
    criterion_xent = nn.CrossEntropyLoss()
    criterion_prox_1024 = Proximity(num_classes=num_classes,
                                    feat_dim=1024,
                                    use_gpu=use_gpu)
    criterion_prox_256 = Proximity(num_classes=num_classes,
                                   feat_dim=256,
                                   use_gpu=use_gpu)

    criterion_conprox_1024 = Con_Proximity(num_classes=num_classes,
                                           feat_dim=1024,
                                           use_gpu=use_gpu)
    criterion_conprox_256 = Con_Proximity(num_classes=num_classes,
                                          feat_dim=256,
                                          use_gpu=use_gpu)

    optimizer_model = torch.optim.SGD(model.parameters(),
                                      lr=args.lr_model,
                                      weight_decay=1e-04,
                                      momentum=0.9)

    optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(),
                                          lr=args.lr_prox)
    optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(),
                                         lr=args.lr_prox)

    optimizer_conprox_1024 = torch.optim.SGD(
        criterion_conprox_1024.parameters(), lr=args.lr_conprox)
    optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(),
                                            lr=args.lr_conprox)

    softmax_model_path = '{}/train_pytorch_model/adversarial_train/pl_loss/benign_image_{}@{}.pth.tar'.format(
        PY_ROOT, args.dataset, args.arch)
    assert os.path.exists(softmax_model_path), "{} does not exist!".format(
        softmax_model_path)
    state_dict = torch.load(softmax_model_path,
                            map_location=lambda storage, location: storage)
    model.cnn.load_state_dict(state_dict["state_dict"])
    optimizer_model.load_state_dict(state_dict["optimizer"])
    log.info("Load softmax pretrained model from {} done".format(
        softmax_model_path))

    start_time = time.time()
    resume_epoch = 0
    if os.path.exists(model_path):
        state_dict = torch.load(model_path,
                                map_location=lambda storage, location: storage)
        resume_epoch = state_dict["epoch"]
        model.load_state_dict(state_dict["state_dict"])
        optimizer_model.load_state_dict(state_dict["optimizer_model"])
        optimizer_prox_1024.load_state_dict(state_dict["optimizer_prox_1024"])
        optimizer_prox_256.load_state_dict(state_dict["optimizer_prox_256"])
        optimizer_conprox_1024.load_state_dict(
            state_dict["optimizer_conprox_1024"])
        optimizer_conprox_256.load_state_dict(
            state_dict["optimizer_conprox_256"])
        log.info("Load model from {} (epoch:{})".format(
            model_path, resume_epoch))

    for epoch in range(resume_epoch, args.max_epoch):
        adjust_learning_rate(args, optimizer_model, epoch)
        adjust_learning_rate_prox(args, optimizer_prox_1024, epoch)
        adjust_learning_rate_prox(args, optimizer_prox_256, epoch)

        adjust_learning_rate_conprox(args, optimizer_conprox_1024, epoch)
        adjust_learning_rate_conprox(args, optimizer_conprox_256, epoch)
        train(args, model, criterion_xent, criterion_prox_1024,
              criterion_prox_256, criterion_conprox_1024,
              criterion_conprox_256, optimizer_model, optimizer_prox_1024,
              optimizer_prox_256, optimizer_conprox_1024,
              optimizer_conprox_256, train_loader, use_gpu, num_classes, epoch)
        if args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (
                epoch + 1) == args.max_epoch:
            log.info("==> Test")  # Tests after every 10 epochs
            acc, err = test(model, test_loader)
            log.info("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))

        state_ = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer_model': optimizer_model.state_dict(),
            'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
            'optimizer_prox_256': optimizer_prox_256.state_dict(),
            'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
            'optimizer_conprox_256': optimizer_conprox_256.state_dict(),
        }

        torch.save(state_, model_path)
        elapsed = round(time.time() - start_time)
        elapsed = str(datetime.timedelta(seconds=elapsed))
        log.info("Finished epoch {}. Total elapsed time (h:m:s): {}".format(
            epoch + 1, elapsed))
Example No. 21
             test_model_list_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/checkpoints/{}*.pth".format(
                 PY_ROOT, args.dataset, arch)
             test_model_list_path = list(glob.glob(test_model_list_path))
             if len(test_model_list_path) == 0:  # this arch does not exist in args.dataset
                 continue
             archs.append(arch)
 else:
     assert args.arch is not None
     archs = [args.arch]
 args.arch = ", ".join(archs)
 log.info('Command line is: {}'.format(' '.join(sys.argv)))
 log.info("Log file is written in {}".format(log_file_path))
 log.info('Called with args:')
 print_args(args)
 data_loader = DataLoaderMaker.get_test_attacked_data(args.dataset, 1)
 for arch in archs:
     if args.attack_defense:
         save_result_path = args.exp_dir + "/{}_{}_result.json".format(
             arch, args.defense_model)
     else:
         save_result_path = args.exp_dir + "/{}_result.json".format(arch)
     if os.path.exists(save_result_path):
         continue
     log.info("Begin attack {} on {}, result will be saved to {}".format(
         arch, args.dataset, save_result_path))
     if args.attack_defense:
         model = DefensiveModel(args.dataset,
                                arch,
                                no_grad=True,
                                defense_model=args.defense_model)
Example No. 22
     save_name = "train_pytorch_model/TREMBA/{}_{}_generator.pth.tar".format(
         args.dataset, targeted_str)
 else:
     save_name = "train_pytorch_model/TREMBA/{}_{}_generator.pth.tar".format(
         args.dataset, targeted_str)
 set_log_file(
     os.path.dirname(save_name) +
     "/train_{}_{}.log".format(args.dataset, targeted_str))
 with open(args.config) as config_file:
     state = json.load(config_file)["train"][targeted_str]
     state = SimpleNamespace(**state)
     state.targeted = args.targeted
     state.dataset = args.dataset
     state.batch_size = args.batch_size
 device = torch.device(args.gpu)
 train_loader = DataLoaderMaker.get_img_label_data_loader(
     args.dataset, state.batch_size, True)
 val_loader = DataLoaderMaker.get_img_label_data_loader(
     args.dataset, state.batch_size, False)
 nets = []
 log.info("Initialize pretrained models.")
 for model_name in MODELS_TRAIN_STANDARD[args.dataset]:
     pretrained_model = StandardModel(args.dataset,
                                      model_name,
                                      no_grad=False)
     # pretrained_model.cuda()
     pretrained_model.eval()
     nets.append(pretrained_model)
 log.info("Initialize over!")
 model = nn.Sequential(ImagenetEncoder(), ImagenetDecoder(args.dataset))
 model = model.cuda()
 optimizer_G = torch.optim.SGD(model.parameters(),
Example No. 23
def main():
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    parser.add_argument('--arch', type=str, required=True, help="The arch used to generate adversarial images for testing")
    parser.add_argument("--gpu",type=str,required=True)
    parser.add_argument("--dataset", type=str, required=True)
    parser.add_argument('--test_mode', default=0, type=int, choices=list(range(10)))
    # parser.add_argument('--model', default='res', type=str)
    parser.add_argument('--n_epoch', default=200, type=int)
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--test_batch_size', default=10, type=int)
    parser.add_argument('--lambd', default=0.0001, type=float)
    parser.add_argument('--noise_dev', default=20.0, type=float)
    parser.add_argument('--Linfinity', default=8/255, type=float)
    parser.add_argument('--binary_threshold', default=0.5, type=float)
    parser.add_argument('--lr_mode', default=0, type=int)
    parser.add_argument('--test_interval', default=100, type=int)
    parser.add_argument('--lr', default=1e-3, type=float)
    parser.add_argument("--use_res_net",action="store_true")
    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cudnn.deterministic = True
    model_path = '{}/train_pytorch_model/adversarial_train/com_defend/{}@{}@epoch_{}@batch_{}.pth.tar'.format(
        PY_ROOT, args.dataset, args.arch, args.n_epoch, args.batch_size)
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    set_log_file(os.path.dirname(model_path) + "/train_{}.log".format(args.dataset))
    log.info('Command line is: {}'.format(' '.join(sys.argv)))
    log.info('Called with args:')
    print_args(args)

    in_channels = IN_CHANNELS[args.dataset]
    # if args.use_res_net:
    #     if args.test_mode == 0:
    #         com_defender = ModelRes(in_channels=in_channels, com_disable=True,rec_disable=True)
    #         args.save_model = 'normal'
    #     elif args.test_mode == 1:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=1,n_rec=3,com_disable=False,rec_disable=True)
    #         args.save_model = '1_on_off'
    #     elif args.test_mode == 2:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=2,n_rec=3,com_disable=False,rec_disable=True)
    #         args.save_model = '2_on_off'
    #     elif args.test_mode == 3:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=3,n_rec=3,com_disable=False,rec_disable=True)
    #         args.save_model = '3_on_off'
    #     elif args.test_mode == 4:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=3,n_rec=1,com_disable=True,rec_disable=False)
    #         args.save_model = 'off_on_1'
    #     elif args.test_mode == 5:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=3,n_rec=2,com_disable=True,rec_disable=False)
    #         args.save_model = 'off_on_2'
    #     elif args.test_mode == 6:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=3,n_rec=3,com_disable=True,rec_disable=False)
    #         args.save_model = 'off_on_3'
    #     elif args.test_mode == 7:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=1,n_rec=1,com_disable=False,rec_disable=False)
    #         args.save_model = '1_1'
    #     elif args.test_mode == 8:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=2,n_rec=2,com_disable=False,rec_disable=False)
    #         args.save_model = '2_2'
    #     elif args.test_mode == 9:
    #         com_defender = ModelRes(in_channels=in_channels, n_com=3,n_rec=3,com_disable=False,rec_disable=False)
    #         args.save_model = '3_3'
    # else:
    com_defender = ComDefend(in_channels, args.noise_dev)
    args.save_model = "normal_network"
    log.info('test mode: {}, model name: {}'.format(args.test_mode, args.save_model))

    if args.gpu is not None:
        log.info("Use GPU: {} for training".format(args.gpu))
    log.info("=> creating model '{}'".format(args.arch))

    log.info("after train, model will be saved to {}".format(model_path))
    com_defender.cuda()
    cudnn.benchmark = True
    train_loader = DataLoaderMaker.get_imgid_img_label_data_loader(args.dataset, args.batch_size, True, seed=1234)
    test_attack_dataset_loader = DataLoaderMaker.get_test_attacked_data(args.dataset, args.batch_size)
    log.info("Begin generate the adversarial examples.")
    target_model, adv_images, adv_true_labels = test_attack(args.Linfinity, args.arch, args.dataset,
                                                      test_attack_dataset_loader)  # these images are used for verification

    log.info("Generate adversarial examples done!")
    best_acc = torch.zeros(1)
    for epoch in range(0, args.n_epoch):
        train(args, train_loader, com_defender, epoch, target_model, adv_images, adv_true_labels,best_acc, model_path)
Example No. 24
 if args.test_archs:
     if args.attack_defense:
         log_file_path = os.path.join(
             args.exp_dir, 'run_defense_{}.log'.format(args.defense_model))
     else:
         log_file_path = os.path.join(args.exp_dir, 'run.log')
 elif args.arch is not None:
     if args.attack_defense:
         log_file_path = os.path.join(
             args.exp_dir,
             'run_defense_{}_{}.log'.format(args.arch, args.defense_model))
     else:
         log_file_path = os.path.join(args.exp_dir,
                                      'run_{}.log'.format(args.arch))
 set_log_file(log_file_path)
 DataLoaderMaker.setup_seed(args.seed)
 archs = []
 dataset = args.dataset
 if args.test_archs:
     if dataset == "CIFAR-10" or dataset == "CIFAR-100":
         for arch in MODELS_TEST_STANDARD[dataset]:
             test_model_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/{}/checkpoint.pth.tar".format(
                 PY_ROOT, dataset, arch)
             if os.path.exists(test_model_path):
                 archs.append(arch)
             else:
                 log.info(test_model_path + " does not exists!")
     elif args.dataset == "TinyImageNet":
         for arch in MODELS_TEST_STANDARD[dataset]:
             test_model_list_path = "{root}/train_pytorch_model/real_image_model/{dataset}@{arch}*.pth.tar".format(
                 root=PY_ROOT, dataset=args.dataset, arch=arch)
Example No. 25
                    arch)
        if os.path.exists(save_result_path):
            continue
        log.info("Begin attack {} on {}, result will be saved to {}".format(
            arch, args.dataset, save_result_path))

        if args.attack_defense:
            model = DefensiveModel(args.dataset,
                                   arch,
                                   no_grad=True,
                                   defense_model=args.defense_model)
        else:
            model = StandardModel(args.dataset, arch, no_grad=True)
        model.cuda()
        model.eval()
        dataset_loader = DataLoaderMaker.get_test_attacked_data(
            args.dataset, args.batch_size)
        success_all = torch.zeros(len(dataset_loader.dataset)).float()
        correct_all = torch.zeros(len(dataset_loader.dataset)).float()
        query_all = torch.zeros(len(dataset_loader.dataset)).float()
        for batch_idx, data_tuple in enumerate(dataset_loader):
            if args.dataset == "ImageNet":
                if model.input_size[-1] >= 299:
                    images, true_labels = data_tuple[1], data_tuple[2]
                else:
                    images, true_labels = data_tuple[0], data_tuple[2]
            else:
                images, true_labels = data_tuple[0], data_tuple[1]
            if args.targeted:
                target_labels = torch.fmod(true_labels + 1,
                                           CLASS_NUM[args.dataset])
            else:
Example No. 26
def main():
    args = get_args()
    with open(args.config) as config_file:
        state = json.load(config_file)["attack"][args.targeted]
        state = SimpleNamespace(**state)
    if args.save_prefix is not None:
        state.save_prefix = args.save_prefix
    if args.arch is not None:
        state.arch = args.arch
    if args.test_archs is not None:
        state.test_archs = args.test_archs
    state.OSP = args.OSP
    state.targeted = args.targeted

    device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    targeted_str = "untargeted" if not state.targeted else "targeted"
    if state.targeted:
        save_name = "{}/train_pytorch_model/TREMBA/{}_{}_generator.pth.tar".format(
            PY_ROOT, args.dataset, targeted_str)
    else:
        save_name = "{}/train_pytorch_model/TREMBA/{}_{}_generator.pth.tar".format(
            PY_ROOT, args.dataset, targeted_str)
    weight = torch.load(save_name, map_location=device)["state_dict"]
    data_loader = DataLoaderMaker.get_test_attacked_data(
        args.dataset, args.batch_size)
    encoder_weight = {}
    decoder_weight = {}
    for key, val in weight.items():
        if key.startswith('0.'):
            encoder_weight[key[2:]] = val
        elif key.startswith('1.'):
            decoder_weight[key[2:]] = val
    archs = []
    if args.test_archs:
        if args.dataset == "CIFAR-10" or args.dataset == "CIFAR-100":
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/{}/checkpoint.pth.tar".format(
                    PY_ROOT, args.dataset, arch)
                if os.path.exists(test_model_path):
                    archs.append(arch)
                else:
                    log.info(test_model_path + " does not exists!")
        elif args.dataset == "TinyImageNet":
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_list_path = "{root}/train_pytorch_model/real_image_model/{dataset}@{arch}*.pth.tar".format(
                    root=PY_ROOT, dataset=args.dataset, arch=arch)
                test_model_path = list(glob.glob(test_model_list_path))
                if test_model_path and os.path.exists(test_model_path[0]):
                    archs.append(arch)
        else:
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_list_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/checkpoints/{}*.pth".format(
                    PY_ROOT, args.dataset, arch)
                test_model_list_path = list(glob.glob(test_model_list_path))
                if len(test_model_list_path) == 0:  # this arch does not exist in args.dataset
                    continue
                archs.append(arch)
        args.arch = ",".join(archs)
    else:
        archs.append(args.arch)

    args.exp_dir = get_exp_dir_name(args.dataset, args.norm, args.targeted,
                                    args.target_type, args)
    for arch in archs:
        if args.attack_defense:
            save_result_path = args.exp_dir + "/{}_{}_result.json".format(
                arch, args.defense_model)
        else:
            save_result_path = args.exp_dir + "/{}_result.json".format(arch)
        if os.path.exists(save_result_path):
            continue
        if args.OSP:
            if state.source_model_name == "Adv_Denoise_Resnet152":
                source_model = resnet152_denoise()
                loaded_state_dict = torch.load((os.path.join(
                    '{}/train_pytorch_model/TREMBA'.format(PY_ROOT),
                    state.source_model_name + ".pth.tar")))
                source_model.load_state_dict(loaded_state_dict)
                mean = np.array([0.485, 0.456, 0.406])
                std = np.array([0.229, 0.224, 0.225])
                # FIXME: this still needs to be revised
                source_model = nn.Sequential(Normalize(mean, std),
                                             source_model)
                source_model.to(device)
                source_model.eval()

        if args.attack_defense:
            model = DefensiveModel(args.dataset,
                                   arch,
                                   no_grad=True,
                                   defense_model=args.defense_model)
        else:
            model = StandardModel(args.dataset, arch, no_grad=True)

        model.eval()
        encoder = ImagenetEncoder()
        decoder = ImagenetDecoder(args.dataset)
        encoder.load_state_dict(encoder_weight)
        decoder.load_state_dict(decoder_weight)
        model.to(device)
        encoder.to(device)
        encoder.eval()
        decoder.to(device)
        decoder.eval()
        F = Function(model, state.batch_size, state.margin,
                     CLASS_NUM[args.dataset], state.targeted)
        total_success = 0
        count_total = 0
        queries = []
        not_done = []
        correct_all = []

        for i, (images, labels) in enumerate(data_loader):
            images = images.to(device)
            labels = labels.to(device)
            logits = model(images)
            correct = torch.argmax(logits, dim=1).eq(labels).item()
            correct_all.append(int(correct))
            if correct:
                if args.targeted:
                    if args.target_type == 'random':
                        target_labels = torch.randint(
                            low=0,
                            high=CLASS_NUM[args.dataset],
                            size=labels.size()).long().cuda()
                        invalid_target_index = target_labels.eq(labels)
                        while invalid_target_index.sum().item() > 0:
                            target_labels[
                                invalid_target_index] = torch.randint(
                                    low=0,
                                    high=logits.shape[1],
                                    size=target_labels[invalid_target_index].
                                    shape).long().cuda()
                            invalid_target_index = target_labels.eq(labels)
                    elif args.target_type == 'least_likely':
                        with torch.no_grad():
                            logit = model(images)
                        target_labels = logit.argmin(dim=1)
                    elif args.target_type == "increment":
                        target_labels = torch.fmod(
                            labels + 1, CLASS_NUM[args.dataset]).cuda()
                    labels = target_labels[0].item()
                else:
                    labels = labels[0].item()
                if args.OSP:
                    hinge_loss = MarginLossSingle(state.white_box_margin,
                                                  state.target)
                    images.requires_grad = True
                    latents = encoder(images)
                    for k in range(state.white_box_iters):
                        perturbations = decoder(latents) * state.epsilon
                        logits = source_model(
                            torch.clamp(images + perturbations, 0, 1))
                        loss = hinge_loss(logits, labels)
                        grad = torch.autograd.grad(loss, latents)[0]
                        latents = latents - state.white_box_lr * grad
                    with torch.no_grad():
                        success, adv, query_count = EmbedBA(
                            F, encoder, decoder, images[0], labels, state,
                            latents.view(-1))
                else:
                    with torch.no_grad():
                        success, adv, query_count = EmbedBA(
                            F, encoder, decoder, images[0], labels, state)
                not_done.append(1 - int(success))
                total_success += int(success)
                count_total += int(correct)
                if success:
                    queries.append(query_count)
                else:
                    queries.append(args.max_queries)

                log.info(
                    "image: {} eval_count: {} success: {} average_count: {} success_rate: {}"
                    .format(i, F.current_counts, success, F.get_average(),
                            float(total_success) / float(count_total)))
                F.new_counter()
            else:
                queries.append(0)
                not_done.append(1)
                log.info("The {}-th image is already classified incorrectly.".
                         format(i))
        correct_all = np.array(correct_all).astype(np.int32)
        query_all = np.array(queries).astype(np.int32)
        not_done_all = np.array(not_done).astype(np.int32)
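        # success = the attack finished within the budget AND the clean image was classified
        # correctly; success_query keeps query counts only for successful attacks (zero elsewhere).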
        success = (1 - not_done_all) * correct_all
        success_query = success * query_all
        meta_info_dict = {
            "query_all":
            query_all.tolist(),
            "not_done_all":
            not_done_all.tolist(),
            "correct_all":
            correct_all.tolist(),
            "mean_query":
            np.mean(success_query[np.nonzero(success)[0]]).item(),
            "max_query":
            np.max(success_query[np.nonzero(success)[0]]).item(),
            "median_query":
            np.median(success_query[np.nonzero(success)[0]]).item(),
            "avg_not_done":
            np.mean(not_done_all[np.nonzero(correct_all)[0]].astype(
                np.float32)).item(),
            "args":
            vars(args)
        }

        with open(save_result_path, "w") as result_file_obj:
            json.dump(meta_info_dict, result_file_obj, sort_keys=True)
        log.info("Done, write stats info to {}".format(save_result_path))
Exemplo n.º 27
0
# settings
model_dir = args.model_dir
if not os.path.exists(model_dir):
    os.makedirs(model_dir)

# setup data loader
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
])
train_loader = DataLoaderMaker.get_imgid_img_label_data_loader(args.dataset,
                                                               args.batch_size,
                                                               True,
                                                               seed=1234)
test_loader = DataLoaderMaker.get_imgid_img_label_data_loader(args.dataset,
                                                              args.batch_size,
                                                              False,
                                                              seed=1234)


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
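    # the loader yields (image_id, image, label) triples, so the first element is discarded below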
    for batch_idx, (_, data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()

        # calculate robust loss
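        # The robust-loss step below is a minimal, hedged sketch assuming a TRADES-style loss
        # (trades_loss as in the public TRADES implementation, i.e. `from trades import trades_loss`)
        # and hypothetical hyper-parameter names (args.step_size, args.num_steps, args.beta,
        # args.log_interval); the original training code may differ.
        loss = trades_loss(model=model,
                           x_natural=data,
                           y=target,
                           optimizer=optimizer,
                           step_size=args.step_size,
                           epsilon=args.epsilon,
                           perturb_steps=args.num_steps,
                           beta=args.beta)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))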
 log.info("All the data will be saved into {}".format(save_dir_path))
 log.info("Using GPU {}".format(args.gpu))
 log.info('Command line is: {}'.format(' '.join(sys.argv)))
 log.info('Called with args:')
 print_args(args)
 defaults = json.load(open(args.json_config))[args.dataset]
 arg_vars = vars(args)
 arg_vars = {k: arg_vars[k] for k in arg_vars if arg_vars[k] is not None}
 defaults.update(arg_vars)
 args = SimpleNamespace(**defaults)
 if args.norm == "linf":
     args.epsilon = defaults["linf_epsilon"]
 args.surrogate_arch = "resnet-110" if args.dataset.startswith(
     "CIFAR") else "resnet101"
 surrogate_model = StandardModel(args.dataset, args.surrogate_arch, False)
 trn_data_loader = DataLoaderMaker.get_img_label_data_loader(
     args.dataset, args.batch_size, is_train=True)  # 生成的是训练集而非测试集
 archs = []
 for arch in MODELS_TRAIN_STANDARD[args.dataset]:
     if StandardModel.check_arch(arch, args.dataset):
         archs.append(arch)
 print("It will be use {} architectures".format(",".join(archs)))
 model_to_data = partition_dataset(archs, trn_data_loader,
                                   args.total_images)
 for arch in archs:
     model = StandardModel(args.dataset, arch, True)
     attacker = PriorRGFAttack(args.dataset, model, surrogate_model,
                               args.targeted, args.target_type)
     log.info("Begin attack {}".format(arch))
     with torch.no_grad():
         attacker.attack_dataset(args, model_to_data, arch, save_dir_path)
     model.cpu()
Exemplo n.º 29
0
    args.num_classes = 10
    args.image_size = 32
if args.dataset == 'TinyImageNet':
    print('------------Tiny ImageNet---------')
    args.num_classes = CLASS_NUM[args.dataset]
    args.image_size = 64

device = 'cuda' if torch.cuda.is_available() else 'cpu'
start_epoch = 0
model_path = '{}/train_pytorch_model/adversarial_train/feature_scatter/{}@{}@epoch_{}@batch_{}.pth.tar'.format(
    PY_ROOT, args.dataset, args.arch, args.max_epoch, args.batch_size_train)
print("model will be saved to {}".format(model_path))
os.makedirs(os.path.dirname(model_path), exist_ok=True)
# Data
print('==> Preparing data..')
train_loader = DataLoaderMaker.get_img_label_data_loader(
    args.dataset, args.batch_size_train, True)

print('==> Building model..')
basic_net = StandardModel(args.dataset, args.arch,
                          no_grad=False).train().cuda()
basic_net.apply(initialize_weights)
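# re-initialize the weights so that feature-scatter adversarial training starts from scratch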


def print_para(net):
    for name, param in net.named_parameters():
        if param.requires_grad:
            print(name)
            print(param.data)
        break

# NOTE: the body of this Compose is assumed; the original train_preprocessor definition is
# not shown above, and ToTensor() is only a minimal placeholder.
train_preprocessor = transforms.Compose([
    transforms.ToTensor(),
])
# test_preprocessor = transforms.ToTensor()
dataset = args.dataset
if dataset == "CIFAR-10":
    train_dataset = CIFAR10(IMAGE_DATA_ROOT[dataset],
                            train=True,
                            transform=train_preprocessor)
    # test_dataset =  CIFAR10(IMAGE_DATA_ROOT[dataset], train=False, transform=test_preprocessor)
elif dataset == "CIFAR-100":
    train_dataset = CIFAR100(IMAGE_DATA_ROOT[dataset],
                             train=True,
                             transform=train_preprocessor)
    # test_dataset = CIFAR100(IMAGE_DATA_ROOT[dataset], train=False, transform=test_preprocessor)
elif dataset == "ImageNet":
    train_preprocessor = DataLoaderMaker.get_preprocessor(IMAGE_SIZE[dataset],
                                                          True,
                                                          center_crop=False)
    # test_preprocessor = DataLoaderMaker.get_preprocessor(IMAGE_SIZE[dataset], False, center_crop=True)
    train_dataset = ImageFolder(IMAGE_DATA_ROOT[dataset] + "/train",
                                transform=train_preprocessor)
elif dataset == "TinyImageNet":
    train_dataset = TinyImageNet(IMAGE_DATA_ROOT[dataset],
                                 train_preprocessor,
                                 train=True)

batch_size = args.batch_size
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=0)