Code example #1
    BATCH_SIZE = BATCH_SIZE[args.num_points]
    BEST_WEIGHTS = BEST_WEIGHTS[args.dataset][args.num_points]
    if args.batch_size == -1:
        args.batch_size = BATCH_SIZE[args.model]
    set_seed(1)
    print(args)

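    # distributed setup: one process per GPU, NCCL backend for GPU-to-GPU comms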
    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(args.local_rank)
    cudnn.benchmark = True

    # build model
    if args.model.lower() == 'dgcnn':
        model = DGCNN(args.emb_dims, args.k, output_channels=40)
    elif args.model.lower() == 'pointnet':
        model = PointNetCls(k=40, feature_transform=args.feature_transform)
    elif args.model.lower() == 'pointnet2':
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)

    # load model weight
    state_dict = torch.load(
        BEST_WEIGHTS[args.model], map_location='cpu')
    print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        # truncated in the original snippet; a likely completion, assuming the
        # checkpoint was saved from a DataParallel model: strip the 'module.'
        # prefix from each key and retry
        state_dict = {k.replace('module.', '', 1): v
                      for k, v in state_dict.items()}
        model.load_state_dict(state_dict)
Code example #2
        os.makedirs(save_path)
    save_name = '{}-budget_{}-iter_{}' \
                '-success_{:.4f}-rank_{}.npz'. \
        format('aoa', 0.5,
               200, success_num/total_num, 0)
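    # bundle the adversarial clouds with their true and target labels in a
    # single compressed .npz for later evaluation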
    np.savez(os.path.join(save_path, save_name),
             test_pc=attacked_data.astype(np.float32),
             test_label=real_label.astype(np.uint8),
             target_label=target_label.astype(np.uint8))
    print("total attack success rate is", success_num/total_num)
    # utils.pc_heatmap(rx.transpose(2, 1)[0], R0[0].sum(-2).unsqueeze(-1))
    # print(x)


if __name__ == '__main__':
    global BATCH_SIZE, BEST_WEIGHTS
    BATCH_SIZE = BATCH_SIZE[1024]
    BEST_WEIGHTS = BEST_WEIGHTS['mn40'][1024]
    cudnn.benchmark = True
    # attack model
    model_at = PointNetCls(k=40, feature_transform=False)
    model_at = nn.DataParallel(model_at).cuda()
    model_at.eval()
    print('Loading weight {}'.format(BEST_WEIGHTS['pointnet']))
    state_dict = torch.load(BEST_WEIGHTS['pointnet'])
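    # checkpoints saved from a DataParallel model prefix keys with 'module.';
    # if the direct load fails, load into the wrapped module instead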
    try:
        model_at.load_state_dict(state_dict)
    except RuntimeError:
        model_at.module.load_state_dict(state_dict)
    main()
    print("End!!!")
Code example #3
                                               drop_last=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=nworkers)

    #pn = PointNet(len(train_dataset.classnames), pool_size=1000, dropout_rate=0.5)
    pn = PointNetSmall(len(train_dataset.classnames),
                       pool_size=1000,
                       dropout_rate=0.1)
    #pn = PointNetDropout(len(train_dataset.classnames), pool_size=1000)

    transfer = False
    if transfer:
        pn = PointNetCls(k=len(train_dataset.classnames),
                         feature_transform=False)
        pretrained_dict = torch.load("cls_model_9.pth", map_location=device)
        model_dict = pn.state_dict()

        # 1. filter out unnecessary keys
        pretrained_dict = {k: v for k, v in pretrained_dict.items()
                           if k in model_dict
                           and model_dict[k].shape == v.shape}
        # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict)
        # 3. load the new state dict
        pn.load_state_dict(model_dict)

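        # freeze the pretrained feature extractor ('feat' layers); only the
        # new classifier head stays trainable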
        for name, param in pn.named_parameters():
            if "feat" in name:
                param.requires_grad = False
Code example #4
    data_augmentation=False)
train_dataset = ShapeNetDataset(
    root='shapenetcore_partanno_segmentation_benchmark_v0',
    split='train',
    classification=True,
    npoints=opt.num_points,
    data_augmentation=False)

test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=32,
                                              shuffle=True)
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=32,
                                               shuffle=True)

classifier = PointNetCls(k=len(test_dataset.classes))
classifier.cuda()
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

train_correct = 0
total_trainset = 0

# train data

for i, data in enumerate(train_dataloader, 0):

    points, target = data
    target = target[:, 0]
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    # the snippet breaks off here; a minimal completion that matches the
    # counters declared above:
    with torch.no_grad():
        pred, _, _ = classifier(points)
    pred_choice = pred.argmax(dim=1)
    train_correct += pred_choice.eq(target).sum().item()
    total_trainset += points.size(0)
Code example #5
    data_augmentation=False)

testdataloader = torch.utils.data.DataLoader(
    test_dataset, batch_size=opt.batchSize,
    shuffle=True)  #, num_workers=int(opt.workers))

print(len(dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes,
                         feature_transform=opt.feature_transform)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()
num_batch = len(dataset) / opt.batchSize

best_val = 0
start_time = time.time()

for epoch in range(opt.nepoch):
    scheduler.step()

    train_correct = 0
    total_trainset = 0
Code example #6
testset = ShapeNetDataset(datapath, classification=True, npoints=num_points)

dataloader = torch.utils.data.DataLoader(dataset,
                                         batchsize,
                                         shuffle=True,
                                         num_workers=workers)
testdataloader = torch.utils.data.DataLoader(testset,
                                             batchsize,
                                             shuffle=True,
                                             num_workers=workers)

num_classes = len(dataset.classes)
print(num_classes)

# classifier initialization
classifier = PointNetCls(k=num_classes)

# optimizer
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

for epoch in range(epochsize):
    scheduler.step()
    print("epoch: ", epoch)
    for i, data in enumerate(dataloader, 0):
        points, target = data
        target = target[:, 0]
        points = points.transpose(2, 1)
        points = points.cuda()
        target = target.cuda()
Code example #7
File: LRP_Attack.py  Project: code-roamer/IF-Defense
def main():
    # global BATCH_SIZE, BEST_WEIGHTS
    # BATCH_SIZE = BATCH_SIZE[1024]
    # BEST_WEIGHTS = BEST_WEIGHTS['mn40'][1024]
    # cudnn.benchmark = True

    # build model
    model = PointNetCls(k=40, feature_transform=False)
    model = nn.DataParallel(model).cuda()
    model.eval()

    # load model weight
    print('Loading weight {}'.format(BEST_WEIGHTS['pointnet']))
    state_dict = torch.load(BEST_WEIGHTS['pointnet'])
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        model.module.load_state_dict(state_dict)

    # # attack model
    # model_at = PointNet2ClsSsg(num_classes=40)
    # model_at = nn.DataParallel(model_at).cuda()
    # model_at.eval()
    # print('Loading weight {}'.format(BEST_WEIGHTS['pointnet2']))
    # state_dict = torch.load(BEST_WEIGHTS['pointnet2'])
    # try:
    #     model_at.load_state_dict(state_dict)
    # except RuntimeError:
    #     model_at.module.load_state_dict(state_dict)

    # load dataset
    test_set = ModelNet40Attack('data/attack_data.npz',
                                num_points=1024,
                                normalize=True)
    test_loader = DataLoader(test_set,
                             batch_size=4,
                             shuffle=False,
                             num_workers=4,
                             pin_memory=True,
                             drop_last=False)

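    # debug leftover: pre-advances a throwaway iterator to batch index ti;
    # the attack loop below still runs over the full test_loader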
    ti = 412
    data_iter = iter(test_loader)
    for i in range(ti):
        data = next(data_iter)
    total_num = 0
    success_num = 0
    at_success_num = 0
    i = 0
    all_adv = []
    all_real_label = []
    all_target_label = []
    for x, label, target in tqdm(test_loader):
        # x, label, target = data
        x, label, target = x.cuda(), label.long().cuda(), target.long().cuda()
        x = x.transpose(2, 1).contiguous()
        x.requires_grad = True
        rx = x.clone()

        x_pred, _, _ = model(x)
        x_pred = torch.argmax(x_pred, dim=-1)
        # if x_pred != label:
        #     # all_adv.append(x_adv.transpose(1, 2).contiguous().detach().cpu().numpy())
        #     # all_real_label.append(label.detach().cpu().numpy())
        #     # all_target_label.append(target.detach().cpu().numpy())
        #     continue

        total_num += x.shape[0]
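        # run the AoA attack, then count untargeted successes (prediction
        # flipped away from the true label) on the source model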
        x_adv = AOA_Attack(model, x, label, target)
        pred, _, _ = model(x_adv)
        pred = torch.argmax(pred, dim=-1)
        success_num += (pred != label).sum().cpu().item()
        # PointNetCls returns (logits, trans, trans_feat); unpack the logits
        logits_at, _, _ = model_at(x_adv)
        pred_at = torch.argmax(logits_at, dim=-1)
        at_success_num += (pred_at != label).sum().cpu().item()
        i += 1
        if i % 20 == 0:
            print("current attack success rate is", success_num / total_num)
            print("current pointnet++ attack success rate is",
                  at_success_num / total_num)
        all_adv.append(
            x_adv.transpose(1, 2).contiguous().detach().cpu().numpy())
        all_real_label.append(label.detach().cpu().numpy())
        all_target_label.append(target.detach().cpu().numpy())
        # if i % 20 == 0:
        #     break
        # R0 = LRP_scores(model, x_adv, label, label)
        # R1 = LRP_scores(model, x, label, label)
        # utils.pc_heatmap(x_adv.transpose(2, 1)[0], R0[0].sum(-2).unsqueeze(-1))

    attacked_data = np.concatenate(all_adv, axis=0)  # [num_data, K, 3]
    real_label = np.concatenate(all_real_label, axis=0)  # [num_data]
    target_label = np.concatenate(all_target_label, axis=0)  # [num_data]
    # save results
    save_path = 'attack/results/{}_{}/AOA/{}'. \
        format('mn40', 1024, 'pointnet')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    save_name = '{}-budget_{}-iter_{}' \
                '-success_{:.4f}-rank_{}.npz'. \
        format('aoa', 0.5,
               200, success_num/total_num, 0)
    np.savez(os.path.join(save_path, save_name),
             test_pc=attacked_data.astype(np.float32),
             test_label=real_label.astype(np.uint8),
             target_label=target_label.astype(np.uint8))
    print("total attack success rate is", success_num / total_num)
Code example #8
def main(args):
    blue = lambda x: '\033[94m' + x + '\033[0m'

    seeding(args.seed)

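    # HFTA fuses B training runs (one hyperparameter configuration each) into
    # a single model; B = 0 falls back to ordinary unfused training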
    if args.hfta:
        B = consolidate_hyperparams_and_determine_B(
            args,
            ['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size'],
        )
    else:
        B = 0
        (args.lr, args.beta1, args.beta2, args.weight_decay, args.gamma,
         args.step_size) = (args.lr[0], args.beta1[0], args.beta2[0],
                            args.weight_decay[0], args.gamma[0],
                            args.step_size[0])

    if args.device == 'cuda':
        assert torch.cuda.is_available()
        torch.backends.cudnn.benchmark = True
        print('Enable cuDNN heuristics!')
    device = (xm.xla_device()
              if args.device == 'xla' else torch.device(args.device))

    dataset, test_dataset = build_dataset(args)
    dataloader, testdataloader = build_dataloader(args, dataset, test_dataset)

    print('len(dataset)={}'.format(len(dataset)),
          'len(test_dataset)={}'.format(len(test_dataset)))
    num_classes = len(dataset.classes)
    print('classes', num_classes)

    if args.outf is not None:
        try:
            os.makedirs(args.outf)
        except OSError:
            pass

    classifier = PointNetCls(
        k=num_classes,
        feature_transform=args.feature_transform,
        B=B,
        track_running_stats=(args.device != 'xla'),
    )

    if args.model != '':
        classifier.load_state_dict(torch.load(args.model))

    optimizer = get_hfta_optim_for(optim.Adam, B=B)(
        classifier.parameters(),
        lr=args.lr,
        betas=(args.beta1, args.beta2),
        weight_decay=args.weight_decay,
    )
    scheduler = get_hfta_lr_scheduler_for(optim.lr_scheduler.StepLR, B=B)(
        optimizer,
        step_size=args.step_size,
        gamma=args.gamma,
    )

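    # GradScaler degrades to a no-op when disabled, so one loop body serves
    # CPU, XLA, and CUDA-with-AMP alike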
    scaler = amp.GradScaler(enabled=(args.device == 'cuda' and args.amp))

    classifier.to(device)

    num_batch = len(dataloader)

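    # fused loss: nll_loss averages over all B * batch_size rows, so the
    # factor B restores the sum of the B per-run mean losses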
    def loss_fn(output, label, batch_size, trans_feat):
        if B > 0:
            loss = B * F.nll_loss(output.view(B * batch_size, -1), label)
        else:
            loss = F.nll_loss(output, label)
        if args.feature_transform:
            loss += feature_transform_regularizer(trans_feat) * 0.001
        return loss

    classifier = classifier.train()
    epoch_timer = EpochTimer()

    # Training loop
    for epoch in range(args.epochs):
        num_samples_per_epoch = 0
        epoch_timer.epoch_start(epoch)
        for i, data in enumerate(dataloader, 0):
            if i > args.iters_per_epoch:
                break
            if args.warmup_data_loading:
                continue

            points, target = data
            target = target[:, 0]
            points, target = points.to(device), target.to(device)
            N = points.size(0)
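            # fused runs share the input: replicate the batch along a new
            # leading dimension, one slice per run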
            if B > 0:
                points = points.unsqueeze(0).expand(B, -1, -1, -1).contiguous()
                target = target.repeat(B)
            optimizer.zero_grad(set_to_none=True)
            if args.device == 'cuda':
                with amp.autocast(enabled=args.amp):
                    pred, trans, trans_feat = classifier(points)
                    loss = loss_fn(pred, target, N, trans_feat)
                scaler.scale(loss).backward()
                scaler.step(optimizer)
            else:
                pred, trans, trans_feat = classifier(points)
                loss = loss_fn(pred, target, N, trans_feat)
                loss.backward()
                if args.device == 'xla':
                    xm.optimizer_step(optimizer, barrier=True)
                else:
                    optimizer.step()

            print('[{}: {}/{}] train loss: {}'.format(epoch, i, num_batch,
                                                      loss.item()))
            num_samples_per_epoch += N * max(B, 1)
            scaler.update()
        scheduler.step()
        epoch_timer.epoch_stop(num_samples_per_epoch)
        print('Epoch {} took {} s!'.format(epoch,
                                           epoch_timer.epoch_latency(epoch)))

    if args.device == 'xla' and not args.eval:
        print(met.metrics_report())
    if args.outf is not None:
        epoch_timer.to_csv(args.outf)

    if args.eval:
        # Run validation loop.
        print("Running validation loop ...")
        classifier = classifier.eval()
        with torch.no_grad():
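            # one accuracy counter per fused run (a single counter when B = 0)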
            total_correct = torch.zeros(max(B, 1), device=device)
            total_testset = 0
            for data in testdataloader:
                if args.warmup_data_loading:
                    continue
                points, target = data
                target = target[:, 0]
                points, target = points.to(device), target.to(device)
                N = points.size(0)
                if B > 0:
                    points = points.unsqueeze(0).expand(B, -1, -1,
                                                        -1).contiguous()
                    target = target.repeat(B)
                pred, _, _ = classifier(points)
                pred_choice = pred.argmax(-1)

                correct = pred_choice.eq(
                    target.view(B, N) if B > 0 else target).sum(-1)

                total_correct.add_(correct)
                total_testset += N

            final_accuracy = total_correct / total_testset
            final_accuracy = final_accuracy.cpu().tolist()
            if args.outf is not None:
                pd.DataFrame({
                    'acc': final_accuracy
                }).to_csv(os.path.join(args.outf, 'eval.csv'))

            # Return test_accuracy
            return final_accuracy