Example 1
    # Setting up the optimizer
    total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
    opt = tf.train.MomentumOptimizer(learning_rate, momentum)
    gv = opt.compute_gradients(total_loss)
    train_step = opt.apply_gradients(gv, global_step=global_step)

num_epochs = (max_num_training_steps * batch_size) // num_train_data
print("num_epochs: {:d}".format(num_epochs))
print("max_num_training_steps", max_num_training_steps)
print("step_size_schedule", step_size_schedule)

# Set up adversary
grad = compute_grad(model)
train_attack_configs = [np.asarray(config["attacks"])[i] for i in config["train_attacks"]]
eval_attack_configs = [np.asarray(config["attacks"])[i] for i in config["eval_attacks"]]
train_attacks = [PGDAttack(model, a_config, x_min, x_max, grad) for a_config in train_attack_configs]

# Optimization that works well on MNIST: do a first epoch with a lower epsilon
start_small = config.get("start_small", False)
if start_small:
    train_attack_configs_small = [a.copy() for a in train_attack_configs]
    for attack in train_attack_configs_small:
        if 'epsilon' in attack:
            attack['epsilon'] /= 3.0
        else:
            attack['spatial_limits'] = [s/3.0 for s in attack['spatial_limits']]
    train_attacks_small = [PGDAttack(model, a_config, x_min, x_max, grad) for a_config in train_attack_configs_small] 
print('start_small', start_small)

eval_attacks = [PGDAttack(model, a_config, x_min, x_max, grad) for a_config in eval_attack_configs]
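
The `start_small` switch above only has an effect if the training loop swaps attack sets after the first epoch. The sketch below illustrates such a loop under assumptions: the session setup, `train_data.get_next_batch`, the random per-batch attack choice, the `attack.perturb(x, y, sess)` interface, and the model's `x_input` / `y_input` placeholders are all assumed here, not part of the original snippet.

    # Hypothetical training loop honoring start_small (assumptions noted above).
    steps_per_epoch = num_train_data // batch_size
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(max_num_training_steps):
            x_batch, y_batch = train_data.get_next_batch(batch_size)  # assumed data source

            # During the first epoch, attack with the weaker (epsilon / 3) configurations.
            use_small = start_small and step < steps_per_epoch
            attacks_for_step = train_attacks_small if use_small else train_attacks

            # Pick one attack at random for this batch and train on its adversarial examples.
            attack = attacks_for_step[np.random.randint(len(attacks_for_step))]
            x_adv = attack.perturb(x_batch, y_batch, sess)  # assumed PGDAttack interface
            sess.run(train_step, feed_dict={model.x_input: x_adv, model.y_input: y_batch})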
Example 2
    eval_attack_configs = [np.asarray(config["attacks"])[i] for i in config["eval_attacks"]]
    print(eval_attack_configs)

    dataset = config["data"]
    if dataset == "mnist":
        from model import Model
        model = Model(config)

        x_min, x_max = 0.0, 1.0
    else:
        from cifar10_model import Model
        model = Model(config)
        x_min, x_max = 0.0, 255.0

    grad = compute_grad(model)
    eval_attacks = [PGDAttack(model, a_config, x_min, x_max, grad) for a_config in eval_attack_configs]

    global_step = tf.contrib.framework.get_or_create_global_step()

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    eval_dir = os.path.join(model_dir, 'eval')
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)

    saver = tf.train.Saver()

    if args.epoch is not None:
        ckpts = tf.train.get_checkpoint_state(model_dir).all_model_checkpoint_paths
        ckpt = [c for c in ckpts if c.endswith('checkpoint-{}'.format(args.epoch))]
        assert len(ckpt) == 1
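
The excerpt stops right after locating the requested checkpoint. A plausible continuation, shown only as a sketch, would restore it and run every evaluation attack; the `attack.perturb(x, y, sess)` call, the `mnist` data handle, `config["num_eval_examples"]`, and `model.accuracy` are assumptions about the surrounding code.

    # Hypothetical continuation: restore the selected checkpoint and evaluate each attack.
    with tf.Session() as sess:
        saver.restore(sess, ckpt[0])
        x_eval = mnist.test.images[:config["num_eval_examples"]]   # assumed data source
        y_eval = mnist.test.labels[:config["num_eval_examples"]]
        for a_config, attack in zip(eval_attack_configs, eval_attacks):
            x_adv = attack.perturb(x_eval, y_eval, sess)           # assumed PGDAttack interface
            acc = sess.run(model.accuracy, feed_dict={model.x_input: x_adv,
                                                      model.y_input: y_eval})
            print(a_config, acc)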
Example 3
import os

import numpy as np
import tensorflow as tf

# Model, compute_grad, and PGDAttack come from the project's local modules
# (their imports are not shown in the original excerpt).

models = [
    ('path_to_model', -1),
]

attack_configs = [
    {"type": "linf", "epsilon": 0.3, "k": 100, "random_start": True, "reps": 40},
    {"type": "l1", "epsilon": 10, "k": 100, "random_start": True, "perc": 99, "a": 0.5, "reps": 40},
    {"type": "l2", "epsilon": 2, "k": 100, "random_start": True, "reps": 40},
]

model = Model({"model_type": "cnn"})
grad = compute_grad(model)
attacks = [PGDAttack(model, a_config, 0.0, 1.0, grad) for a_config in attack_configs]

saver = tf.train.Saver()
config_tf = tf.ConfigProto()
config_tf.gpu_options.allow_growth = True
config_tf.gpu_options.per_process_gpu_memory_fraction = 0.5


eval_config = {"data": "mnist", 
               "num_eval_examples": 200,
               "eval_batch_size": 200}

nat_accs = np.zeros(len(models))
adv_accs = np.zeros((len(models), len(attacks) + 5))

any_attack = np.ones((len(models), eval_config["num_eval_examples"])).astype(np.bool)
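
The accuracy arrays and the `any_attack` mask above are only filled by the evaluation loop that follows in the original script. The sketch below shows the bookkeeping they imply; the checkpoint restore and the `evaluate_natural` / `evaluate_adversarial` helpers are hypothetical stand-ins for code that is not part of this excerpt.

    # Hypothetical bookkeeping for the per-model, per-attack evaluation.
    for m_idx, (model_path, epoch) in enumerate(models):
        with tf.Session(config=config_tf) as sess:
            saver.restore(sess, model_path)                                 # assumed checkpoint path
            nat_accs[m_idx] = evaluate_natural(sess, model, eval_config)    # hypothetical helper
            for a_idx, attack in enumerate(attacks):
                # `correct` is a boolean vector with one entry per evaluation example.
                correct = evaluate_adversarial(sess, model, attack, eval_config)  # hypothetical helper
                adv_accs[m_idx, a_idx] = correct.mean()
                # An example only counts as robust if it survives every attack.
                any_attack[m_idx] &= correct
            adv_accs[m_idx, len(attacks)] = any_attack[m_idx].mean()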
Example 4
    "eval_batch_size": 100
}

eval_wide = sys.argv[1] == "wide"

if eval_wide:
    models = models_wide
    eval_config["filters"] = [16, 160, 320, 640]
else:
    models = models_slim
    eval_config["filters"] = [16, 16, 32, 64]

model = Model(eval_config)
grad = compute_grad(model)
attacks = [
    PGDAttack(model, a_config, 0.0, 255.0, grad) for a_config in attack_configs
]

saver = tf.train.Saver()
config_tf = tf.ConfigProto()
config_tf.gpu_options.allow_growth = True
config_tf.gpu_options.per_process_gpu_memory_fraction = 1.0

nat_accs = np.zeros(len(models))
adv_accs = np.zeros((len(models), len(attacks) + 2))

any_attack = np.ones(
    (len(models), eval_config["num_eval_examples"])).astype(np.bool)
any_l1 = np.ones(
    (len(models), eval_config["num_eval_examples"])).astype(np.bool)
any_linf = np.ones(
    (len(models), eval_config["num_eval_examples"])).astype(np.bool)
Example 5
    parser.add_argument('--perturb_steps', type=int, default=20,
                    help='iterations for pgd attack (default pgd20)')
    parser.add_argument('--model_name', type=str, default="")
    parser.add_argument('--model_path', type=str, default="./models/weights/model-wideres-pgdHE-wide10.pt")
    parser.add_argument('--gpu_id', type=str, default="0")
    return parser.parse_args()



if __name__=='__main__':
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id   #多卡机设置使用的gpu卡号
    gpu_num = max(len(args.gpu_id.split(',')), 1)
    device = torch.device('cuda')
    if args.model_name != "":
        # Load the model to attack according to model_name
        model = get_model_for_attack(args.model_name).to(device)
        model = nn.DataParallel(model, device_ids=list(range(gpu_num)))
    else:
        # Defense task: change to your model here
        model = WideResNet()
        model.load_state_dict(torch.load('models/weights/wideres34-10-pgdHE.pt'))
        model = nn.DataParallel(model, device_ids=list(range(gpu_num)))
    # Attack task: change to your attack function here
    # Here is an attack baseline: the PGD attack
    attack = PGDAttack(args.step_size, args.epsilon, args.perturb_steps)
    model.eval()
    test_loader = get_test_cifar(args.batch_size)
    natural_acc, robust_acc, distance = eval_model_with_attack(model, test_loader, attack, args.epsilon, device)
    print(f"Natural Acc: {natural_acc:.5f}, Robust acc: {robust_acc:.5f}, distance:{distance:.5f}")