def main(args, io):
    # dataloaders for S3DIS classification (held-out area used for testing)
    train_loader = DataLoader(S3DIS_cls(partition='train', num_points=args.num_points,
                                        test_area=args.test_area),
                              num_workers=8, batch_size=args.batch_size,
                              shuffle=True, drop_last=True)
    test_loader = DataLoader(S3DIS_cls(partition='test', num_points=args.num_points,
                                       test_area=args.test_area),
                             num_workers=8, batch_size=args.batch_size,
                             shuffle=False, drop_last=False)

    # DGCNN classifier, wrapped in DataParallel for multi-GPU training
    model = DGCNN(args)
    model.cuda()
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    opt = optim.SGD(model.parameters(), lr=args.lr,
                    momentum=0.9, weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, args.epochs)

    best_test_loss = 1e10
    for epoch in range(args.epochs):
        io.cprint('Epoch [%d]' % (epoch + 1))
        train(model, train_loader, opt, io)
        scheduler.step()
        test_loss = test(model, test_loader, io)
        # keep the checkpoint with the lowest test loss
        if test_loss < best_test_loss:
            best_test_loss = test_loss
            # save in torch==1.4 readable style
            torch.save(model.state_dict(),
                       'checkpoints/%s/model_%s.t7' % (args.exp_name, args.test_area),
                       _use_new_zipfile_serialization=False)
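# --- hedged sketch (assumption): the fields main() actually reads from `args`.
# The default values below are illustrative only and are not taken from the
# repo's argparse setup.
from types import SimpleNamespace

example_args = SimpleNamespace(
    exp_name='dgcnn_s3dis',  # used in the checkpoint path
    test_area='5',           # S3DIS area held out for testing
    num_points=4096,         # points per sample
    batch_size=32,
    lr=0.1,                  # initial LR for SGD with cosine annealing
    epochs=100,
)
# main(example_args, io) would then train DGCNN and keep the best checkpoint
# under checkpoints/<exp_name>/model_<test_area>.t7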
# load model weight
state_dict = torch.load(
    BEST_WEIGHTS[args.model], map_location='cpu')
print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
try:
    model.load_state_dict(state_dict)
except RuntimeError:
    # eliminate 'module.' in keys
    state_dict = {k[7:]: v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)

# distributed mode on multiple GPUs!
# much faster than nn.DataParallel
model = DistributedDataParallel(
    model.cuda(), device_ids=[args.local_rank])

# setup attack settings
if args.adv_func == 'logits':
    adv_func = LogitsAdvLoss(kappa=args.kappa)
else:
    adv_func = CrossEntropyAdvLoss()
dist_func = L2Dist()

# hyper-parameters from their official tensorflow code
attacker = CWPerturb(model, adv_func, dist_func,
                     attack_lr=args.attack_lr,
                     init_weight=10., max_weight=80.,
                     binary_step=args.binary_step,
                     num_iter=args.num_iter)

# attack
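# --- hedged sketch (assumption): how the attack stage that follows might look.
# The CWPerturb.attack() call and its return values are assumed here for
# illustration; they are not taken from the source above.
for data, label in test_loader:
    data, label = data.float().cuda(), label.long().cuda()
    # assumed API: returns the best adversarial point clouds and a success count
    best_pc, success_num = attacker.attack(data, label)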
    # tail of the (truncated) model-selection branch
    print('Model not recognized')
    exit(-1)

# load model weight
state_dict = torch.load(BEST_WEIGHTS[args.model], map_location='cpu')
print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
try:
    model.load_state_dict(state_dict)
except RuntimeError:
    # eliminate 'module.' in keys
    state_dict = {k[7:]: v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)

# distributed mode on multiple GPUs!
# much faster than nn.DataParallel
model = DistributedDataParallel(model.cuda(), device_ids=[args.local_rank])

# setup attack settings
# budget, step_size, number of iterations
# settings adopted from CVPR'20 paper GvG-P
delta = args.budget
args.budget = args.budget * np.sqrt(args.num_points * 3)  # \delta * \sqrt(N * d)
args.num_iter = int(args.num_iter)
args.step_size = args.budget / float(args.num_iter)

# which adv_func to use?
if args.adv_func == 'logits':
    adv_func = LogitsAdvLoss(kappa=args.kappa)
else:
    adv_func = CrossEntropyAdvLoss()
clip_func = ClipPointsL2(budget=args.budget)
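# --- worked example of the budget scaling above (numbers are illustrative,
# not taken from the source): with a per-coordinate budget delta = 0.08,
# num_points = 1024 and num_iter = 200,
# the global L2 budget is 0.08 * sqrt(1024 * 3) ~= 4.43
# and the per-step size is 4.43 / 200 ~= 0.022.
import numpy as np
delta_example = 0.08
budget_example = delta_example * np.sqrt(1024 * 3)  # ~4.43
step_size_example = budget_example / 200.0          # ~0.022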
    # tail of the (truncated) model-selection branch
    print('Model not recognized')
    exit(-1)

# load model weight
state_dict = torch.load(BEST_WEIGHTS[args.model], map_location='cpu')
print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
try:
    model.load_state_dict(state_dict)
except RuntimeError:
    # eliminate 'module.' in keys
    state_dict = {k[7:]: v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)

# single-GPU mode here; the DataParallel wrapper is kept commented out
model.cuda()
# model = torch.nn.DataParallel(
#     model.cuda())

# setup attack settings
# budget, step_size, number of iterations
# settings adopted from CVPR'20 paper GvG-P
delta = args.budget
args.budget = args.budget * np.sqrt(args.num_points * 3)  # \delta * \sqrt(N * d)
args.num_iter = int(args.num_iter)
args.step_size = args.budget / float(args.num_iter)

# which adv_func to use?
if args.adv_func == 'logits':
    adv_func = LogitsAdvLoss(kappa=args.kappa)
else:
    adv_func = CrossEntropyAdvLoss()