# Example n. 1 (original scraped marker: "Esempio n. 1", score 0)
# Make sure the log directory exists, then echo every parsed flag so the
# full run configuration is captured in the console log.
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for flag_name, flag_value in vars(args).items():
    print('{}: {}'.format(flag_name, flag_value))
print("========================================")

# Reproducibility: seed the CPU RNG, and the CUDA RNG when a GPU is present.
cuda_available = torch.cuda.is_available()
args.cuda = cuda_available
torch.manual_seed(args.seed)
if cuda_available:
    torch.cuda.manual_seed(args.seed)

# Select the dataset loaders and the matching architecture for the benchmark.
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
    loader_fn, net_fn = dataset.get10, model.cifar10
else:
    loader_fn, net_fn = dataset.get100, model.cifar100
train_loader, test_loader = loader_fn(batch_size=args.batch_size, num_workers=1)
# NOTE: this rebinds the name `model` from the module above to the network.
model = net_fn(n_channel=args.channel)
# Wrap for (potential) multi-GPU execution and move to GPU when available.
model = torch.nn.DataParallel(model, device_ids=range(args.ngpu))
if args.cuda:
    model.cuda()

# Adam with weight decay; the LR-decay epochs arrive as a comma-separated
# string on the CLI (e.g. "80,120") and are parsed into ints here.
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = [int(epoch) for epoch in args.decreasing_lr.split(',')]
print('decreasing_lr: ' + str(decreasing_lr))
best_acc = 0
old_file = None
t_begin = time.time()
# NOTE(review): this `try:` is truncated by the snippet boundary — its indented
# body and matching except/finally are not visible here (the lines that follow
# were de-indented by the scrape). Left byte-identical.
try:
# Ensure both checkpoint directories exist before anything is written to them,
# then echo every parsed flag so the run configuration lands in the log.
misc.ensure_dir(args.loaddir)
misc.ensure_dir(args.savedir)
print("=================FLAGS==================")
for flag_name, flag_value in vars(args).items():
    print('{}: {}'.format(flag_name, flag_value))
print("========================================")

# Seed CPU (and GPU, when present) RNGs so runs are repeatable.
has_gpu = torch.cuda.is_available()
args.cuda = has_gpu
torch.manual_seed(args.seed)
if has_gpu:
    torch.cuda.manual_seed(args.seed)

# CIFAR-10 train/test loaders; data_root points at the (possibly cached) dataset.
train_loader, test_loader = dataset.get10(
    batch_size=args.batch_size, data_root=args.data_root, num_workers=1)

# Map CLI names to the gradient-based attack implementations.
algo = {'fgsm': fgsm_gt, 'bim': ifgsm_gt, 'pgd': pgd_gt}

# Resolve attacker/defender; either may be disabled (None) on the CLI.
attack_algo = None if args.attack_algo is None else algo[args.attack_algo]
defend_algo = None if args.defend_algo is None else algo[args.defend_algo]

# Human-readable label used in logs/filenames when no defense is selected.
defend_name = args.defend_algo if args.defend_algo is not None else "None"

# Map the CLI-selected pruning algorithm to its projection function.
# NOTE(review): this if/elif chain is cut off by the snippet boundary — the
# remaining branches are not visible here. Left byte-identical.
if args.prune_algo == "l0proj":
    prune_algo = l0proj
elif args.prune_algo is None:
    prune_algo = None
elif args.prune_algo == "baseline":
    # "baseline" reuses l0proj — presumably invoked differently downstream
    # (index-based projection); TODO confirm against the full script.
    prune_algo = l0proj
# Example n. 3 (original scraped marker: "Esempio n. 3", score 0)
    # NOTE(review): continuation of an `if args.prune_algo == ...` chain whose
    # opening branch lies outside this snippet — left byte-identical.
    # Maps the CLI-selected pruning algorithm to a projection callable, or
    # None when no projection step applies.
    elif args.prune_algo is None:
        prune_algo = None
    elif args.prune_algo == "baseline":
        # "baseline" reuses the l0 projection function.
        prune_algo = l0proj
    elif args.prune_algo == "model_size_prune":
        prune_algo = pt.prune_admm_ms
    elif args.prune_algo == "low_rank":
        # presumably handled by a separate low-rank path — TODO confirm.
        prune_algo = None
    else:
        raise NotImplementedError

    # Build the concrete pruning callable for the selected algorithm.
    if args.prune_algo == "baseline":
        # Compute the zero-index set once, then project onto those indices.
        prune_idx, Weight_shapes = prune_algo(model, args.prune_ratio, param_name=weight_name)
        prune_lambda = lambda m: idxproj(m, z_idx=prune_idx, W_shapes=Weight_shapes)
    elif args.prune_algo == "l0proj":
        # l0 projection keeps the top weights; the ratio is normalized to model size.
        prune_lambda = lambda m: prune_algo(m, args.prune_ratio, normalized=True, param_name=weight_name)
    else:
        # 'low_rank' and any unmatched/disabled option: no projection step.
        prune_lambda = None

    # BUG FIX: the original called prune_lambda(model) unconditionally, which
    # raises TypeError ("'NoneType' object is not callable") whenever no
    # pruning projection is selected ('low_rank', None). Guard the call.
    if prune_lambda is not None:
        prune_lambda(model)

    # Data: only the CIFAR-10 evaluation split is needed here.
    _, test_loader = dataset.get10(batch_size=args.batch_size, num_workers=1)

    # Evaluate the (pruned) model and write results to the experiment log.
    model_test(model=model,
               data_loader=test_loader,
               output_file_path=os.path.join(args.loaddir, args.exp_logger),
               eps=eps)