Example #1
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
                            batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    model = models[args.model]().to(device)
    model = nn.DataParallel(model) 
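    # wrap in DataParallel before loading so that 'module.'-prefixed checkpoint keys match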
    
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_true = []
    test_pred = []

    # inference only: disable gradient tracking
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            logits = model(data)
            preds = logits.max(dim=1)[1]
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())

    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc, avg_per_class_acc)
    io.cprint(outstr)
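
A minimal sketch of how this test function might be driven. The argument names mirror what the snippet reads from args; the SimpleIO class is a hypothetical stand-in for the snippet's io object, not the original script's implementation:

# Hypothetical driver; argument names come from the snippet, the wiring is an assumption.
import argparse

class SimpleIO:
    # stand-in for the io object: print to stdout and append to a log file
    def __init__(self, path):
        self.f = open(path, 'a')

    def cprint(self, text):
        print(text)
        self.f.write(text + '\n')
        self.f.flush()

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='dgcnn')
parser.add_argument('--model_path', type=str, required=True)
parser.add_argument('--num_points', type=int, default=1024)
parser.add_argument('--test_batch_size', type=int, default=16)
args = parser.parse_args()
args.cuda = torch.cuda.is_available()

test(args, SimpleIO('test_run.log'))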
Example #2
def choose_dataset(args):
    if args.dataset == 'modelnet40':
        transform = transforms.Compose(
            [RotatePointCloud(), JitterPointCloud()])
        dataset = ModelNet40(args, transform)
    else:
        raise ValueError('Unknown dataset: %s' % args.dataset)

    return dataset
Example #3
def choose_dataset(args):
    if args.dataset == 'modelnet40':
        transform = transforms.Compose(
            [RotatePointCloud(), JitterPointCloud()])
        dataset = ModelNet40(args, transform)

    elif args.dataset == 'modelnet_normal_resampled':
        if args.normal:
            transform = transforms.Compose(
                [RotatePointCloud_Normal(), JitterPointCloud()])
        else:
            transform = transforms.Compose(
                [RotatePointCloud(), JitterPointCloud()])
        dataset = ModelNet_Normal_Resampled(args, transform)
    else:
        raise ValueError('Unknown dataset: %s' % args.dataset)

    return dataset
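
A minimal usage sketch for choose_dataset; every field beyond args.dataset and args.normal is an assumption about what the dataset classes read:

# Hypothetical usage of choose_dataset().
from argparse import Namespace
from torch.utils.data import DataLoader

args = Namespace(dataset='modelnet40', normal=False)  # plus any fields ModelNet40(args, ...) expects
dataset = choose_dataset(args)
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
points, label = next(iter(loader))  # one batch of augmented point clouds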
Example #4
    # load model weight
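    # BEST_WEIGHTS is assumed to map each model name to its pretrained checkpoint path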
    if args.model_path:
        model.load_state_dict(torch.load(args.model_path))
    else:
        model.load_state_dict(torch.load(BEST_WEIGHTS[args.model]))

    # prepare data
    if args.mode == 'target':
        test_set = ModelNet40Attack(args.data_root,
                                    num_points=args.num_points,
                                    normalize=args.normalize_pc)
    else:
        test_set = ModelNet40(args.data_root,
                              num_points=args.num_points,
                              normalize=args.normalize_pc,
                              partition='test',
                              augmentation=False)
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=8,
                             pin_memory=True,
                             drop_last=False)

    # test
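    # test_normal() / test_target() are assumed to be evaluation routines defined elsewhere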
    if args.mode == 'normal':
        test_normal()
    else:
        test_target()
Example #5
def train_and_eval():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Dataset
    train_dataset = ModelNet40(n=N, split='train')
    test_dataset = ModelNet40(n=N, split='test')
    train_data_loader = Data.DataLoader(train_dataset,
                                        batch_size=BATCH_SIZE,
                                        shuffle=True,
                                        num_workers=NUM_WORKERS)
    test_data_loader = Data.DataLoader(test_dataset,
                                       batch_size=BATCH_SIZE,
                                       num_workers=NUM_WORKERS)

    # Network Model
    categories = len(train_dataset.code2category)
    model = ClassificationPointNet(categories,
                                   input_transform=INPUT_TRANSFORM,
                                   feature_transform=FEATURE_TRANSFORM)
    model.to(device)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)

    train_batch_num = math.ceil(len(train_dataset) / BATCH_SIZE)
    test_batch_num = math.ceil(len(test_dataset) / BATCH_SIZE)

    max_overall_accuracy, counterpart_avg_class_accuracy = 0.0, 0.0
    counterpart_overall_accuracy, max_avg_class_accuracy = 0.0, 0.0

    epoch1, epoch2 = 0, 0  # records the epoch of best performance for overall and avg. class accuracy

    for epoch in range(1, EPOCHS + 1):
        for i, (point_clouds, labels) in enumerate(train_data_loader):
            # a batch of data
            point_clouds = point_clouds.transpose(2, 1)
            labels = labels.view(-1)
            point_clouds, labels = point_clouds.to(device), labels.to(device)

            # forward-propagation
            optimizer.zero_grad()
            model.train()
            preds, transformer1, transformer2 = model(point_clouds)
            loss = F.nll_loss(preds, labels)
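            # optionally add PointNet's orthogonality penalty on the 64x64 feature transform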
            if FEATURE_TRANSFORM:
                loss += feature_transform_regularization(transformer2) * 0.001

            # back-propagation and gradient descent
            loss.backward()
            optimizer.step()

            # metrics on train
            predictions = preds.max(1)[1]
            comparison = predictions.eq(labels).cpu()
            correct = torch.sum(comparison).item()

            if (i + 1) % 50 == 0:
                print(
                    "Training epoch {} iteration {}/{} ==> Loss: {}, Accuracy: {}"
                    .format(epoch, i + 1, train_batch_num, loss.item(),
                            correct / len(labels)))

        scheduler.step()

        test_total_correct = 0
        correct_nums_per_cat = np.zeros((categories, ))
        with torch.no_grad():
            for i, (data, target) in enumerate(test_data_loader):
                data = data.transpose(2, 1)
                target = target.view(-1)
                data, target = data.to(device), target.to(device)
                model.eval()
                preds, _, _ = model(data)
                predictions = preds.max(1)[1]
                comparison = predictions.eq(target).cpu()
                correct = torch.sum(comparison).item()
                test_total_correct += correct
                for index, cat in enumerate(target):
                    correct_nums_per_cat[cat.item()] += comparison[index].item()

        overall_accuracy = test_total_correct / len(test_dataset)
        avg_class_accuracy = np.mean(correct_nums_per_cat /
                                     np.array(test_dataset.categories_nums))
        print("Evaluation on testing dataset ==> Overall Accuracy: {}, "
              "Avg. class Accuracy: {}".format(overall_accuracy,
                                               avg_class_accuracy))

        if overall_accuracy > max_overall_accuracy:
            max_overall_accuracy = overall_accuracy
            counterpart_avg_class_accuracy = avg_class_accuracy
            torch.save(model.state_dict(),
                       "max_overall_accuracy_epoch_{}.pth".format(epoch))
            epoch1 = epoch

        if avg_class_accuracy > max_avg_class_accuracy:
            max_avg_class_accuracy = avg_class_accuracy
            counterpart_overall_accuracy = overall_accuracy
            torch.save(model.state_dict(),
                       "max_avg_accuracy_epoch_{}.pth".format(epoch))
            epoch2 = epoch

    print(
        "Max overall accuracy {} in epoch {}, and the avg class accuracy is {}"
        .format(max_overall_accuracy, epoch1, counterpart_avg_class_accuracy))
    print("Max avg accuracy {} in epoch {}, and the overall accuracy is {}".
          format(max_avg_class_accuracy, epoch2, counterpart_overall_accuracy))
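
train_and_eval relies on module-level constants that the snippet does not define. A purely illustrative way to supply them and start training; every value below is an assumption, not the original configuration:

# Hypothetical configuration; the real values are not shown in the snippet.
N = 1024                  # points sampled per cloud
BATCH_SIZE = 32
NUM_WORKERS = 4
EPOCHS = 200
INPUT_TRANSFORM = True    # learn the 3x3 input alignment (T-Net)
FEATURE_TRANSFORM = True  # learn the 64x64 feature alignment

if __name__ == '__main__':
    train_and_eval()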
Example #6
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                            batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                            batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    model = models[args.model]().to(device)
    model = nn.DataParallel(model)

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
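    # cosine-anneal the learning rate down to args.lr (so the SGD branch decays from 100x that value)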
    
    criterion = cal_loss
    best_test_acc = 0

    for epoch in range(args.epochs):
        train_loss = 0.0
        count = 0.0  # numbers of data
        model.train()
        train_pred = []
        train_true = []
        idx = 0  # iterations
        total_time = 0.0
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()

            start_time = time.time()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            end_time = time.time()
            total_time += (end_time - start_time)
            
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
            idx += 1
            
        print('train total time is', total_time)
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                 train_loss * 1.0 / count,
                                                                                 metrics.accuracy_score(train_true, train_pred),
                                                                                 metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        total_time = 0.0
        # inference only: disable gradient tracking
        with torch.no_grad():
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                start_time = time.time()
                logits = model(data)
                end_time = time.time()
                total_time += (end_time - start_time)
                loss = criterion(logits, label)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())
        print('test total time is', total_time)
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                            test_loss*1.0/count,
                                                                            test_acc,
                                                                            avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
        
        scheduler.step()
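
cal_loss is imported from elsewhere in this codebase and is not shown. In DGCNN-style repositories it is commonly cross-entropy with label smoothing; the following is a sketch under that assumption, not the verified original:

import torch
import torch.nn.functional as F

def cal_loss(pred, gold, smoothing=True):
    # cross-entropy over logits, optionally with label smoothing
    gold = gold.contiguous().view(-1)
    if smoothing:
        eps = 0.2
        n_class = pred.size(1)
        # smoothed targets: 1 - eps on the true class, eps spread over the others
        one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, dim=1)
        loss = -(one_hot * log_prb).sum(dim=1).mean()
    else:
        loss = F.cross_entropy(pred, gold, reduction='mean')
    return loss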
Example #7
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)

    model = nn.DataParallel(model).cuda()

    # use Adam optimizer, cosine lr decay
    opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, T_max=args.epochs, eta_min=1e-5)

    # prepare data
    train_set = ModelNet40(args.data_root,
                           num_points=args.num_points,
                           normalize=True,
                           partition='train')
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True,
                              drop_last=True)

    test_set = ModelNet40(args.data_root,
                          num_points=args.num_points,
                          normalize=True,
                          partition='test')
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size * 2,
                             shuffle=False,
                             num_workers=8,
                             pin_memory=True,
                             drop_last=False)