Example #1
                    help='number of input points')

opt = parser.parse_args()
print(opt)

test_dataset = PartDataset(
    root='shapenetcore_partanno_segmentation_benchmark_v0',
    train=False,
    classification=True,
    npoints=opt.num_points)

testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=32,
                                             shuffle=True)

classifier = PointNetCls(k=len(test_dataset.classes),
                         num_points=opt.num_points)
classifier.cuda()
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

for i, data in enumerate(testdataloader, 0):
    points, target = data
    points, target = Variable(points), Variable(target[:, 0])
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    pred, _ = classifier(points)
    loss = F.nll_loss(pred, target)

    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    print('i:%d  loss: %f accuracy: %f' %
          (i, loss.item(), correct.item() / float(32)))
Example #2
test_dataset = PartDataset(root=os.path.join(opt.input_path, 'test'),
                           task='classification',
                           mode='test',
                           npoints=opt.num_points,
                           min_pts=0,
                           load_in_memory=True,
                           num_seg_class=5)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=False,
                                             num_workers=opt.workers)
num_batch = len(test_dataset) // opt.batchSize

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

classifier = PointNetCls(k=len(test_dataset.classes)).to(device)

classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

total_test_correct = 0
n_log = 100

total_points = 0
for i, data in enumerate(test_dataset):
    point, target = data
    point = point.view(1, point.size(0), point.size(1))
    target = target.view(1, target.size(0))
    point, target = point.to(device, non_blocking=True), target[:, 0].to(
        device, non_blocking=True)
    point = point.transpose(2, 1)
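
The loop is cut off after the transpose. A minimal sketch of how it could continue, reusing the counters declared above; this is a completion written for illustration, not the project's code:

    with torch.no_grad():
        pred, _ = classifier(point)
    pred_choice = pred.data.max(1)[1]
    total_test_correct += pred_choice.eq(target).sum().item()
    total_points += 1
    if (i + 1) % n_log == 0:
        print('[%d/%d] running accuracy: %f' %
              (i + 1, len(test_dataset), total_test_correct / float(total_points)))

print('total test accuracy: %f' %
      (total_test_correct / float(len(test_dataset))))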
Example #3
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                           classification=True, train=False,
                           npoints=opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                             shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass


classifier = PointNetCls(k=num_classes, num_points=opt.num_points)


if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))


optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target[:,0])
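
The loop is truncated here. A sketch of a plausible inner step, following the same pattern as Example #5 (it assumes the usual imports, including torch.nn.functional as F):

        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred, _ = classifier(points)
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' %
              (epoch, i, num_batch, loss.item(),
               correct.item() / float(opt.batchSize)))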
Example #4
point, cls, seg = dataset[idx]
damaged_point, pt_idx = get_damaged_points(point.numpy())
damaged_point = torch.from_numpy(damaged_point)
damaged_seg = seg[pt_idx]

original_point = point.numpy()
damaged_point_np = damaged_point.numpy()
print(point.size(), seg.size())

print('loading segmentation network for damaged data')
seg_classifier = PointNetDenseCls(k=dataset.num_seg_classes)
seg_classifier.load_state_dict(torch.load(opt.seg_model))

print('loading classification network for damaged data')
cls_classifier = PointNetCls(k=len(dataset.classes))
cls_classifier.load_state_dict(torch.load(opt.cls_model))

print('loading multi-task network for damaged data')
mt_classifier = PointNetMultiTask(cls_k=len(dataset.classes),
                                  seg_k=dataset.num_seg_classes)
mt_classifier.load_state_dict(torch.load(opt.mt_model))

print('loading segmentation network for non-damaged data')
seg_classifier_all = PointNetDenseCls(k=dataset.num_seg_classes)
seg_classifier_all.load_state_dict(torch.load(opt.seg_all_model))

print('loading classification network for non-damaged data')
cls_classifier_all = PointNetCls(k=len(dataset.classes))
cls_classifier_all.load_state_dict(torch.load(opt.cls_all_model))
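
Only the model loading is shown. A short sketch of how the damaged and full clouds might then be classified and compared; the (1, 3, N) reshaping is an assumption based on the other examples:

cls_classifier.eval()
cls_classifier_all.eval()

with torch.no_grad():
    # Shape both clouds as (batch=1, channels=3, num_points).
    damaged_inp = damaged_point.unsqueeze(0).transpose(2, 1)
    full_inp = point.unsqueeze(0).transpose(2, 1)
    pred_damaged, _ = cls_classifier(damaged_inp)
    pred_full, _ = cls_classifier_all(full_inp)

print('true class: %d | damaged-net pred: %d | full-net pred: %d' %
      (cls.item(), pred_damaged.max(1)[1].item(), pred_full.max(1)[1].item()))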
Example #5
def train(config):
    print('Random seed: %d' % int(config.seed))
    torch.manual_seed(config.seed)
    
    torch.backends.cudnn.benchmark = True

    dset = config.dataset
    if dset == 'modelnet10' or dset == 'modelnet40':
        dataset = ClsDataset(root=config.root, npoints=config.npoints, train=True)
        test_dataset = ClsDataset(root=config.root, npoints=config.npoints, train=False)
    else:
        raise NotImplementedError('Dataset not supported.')
    
    print('Selected %s' % dset)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.batchsize, shuffle=True, 
                num_workers=config.workers)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batchsize, shuffle=True, 
        num_workers=config.workers)

    num_classes = dataset.num_classes
    print('number of classes: %d' % num_classes)
    print('train set size: %d | test set size: %d' % (len(dataset), len(test_dataset)))
    try:
        os.makedirs(config.outf)
    except OSError:
        pass

    blue = lambda x: '\033[94m' + x + '\033[0m'
    yellow = lambda x: '\033[93m' + x + '\033[0m'
    red = lambda x: '\033[91m' + x + '\033[0m'

    classifier = PointNetCls(k=num_classes)

    if config.model != '':
        classifier.load_state_dict(torch.load(config.model))

    optimizer = optim.SGD(classifier.parameters(), lr=config.lr, momentum=config.momentum)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    classifier.to(device)
    if config.mgpu:
        classifier = torch.nn.DataParallel(classifier, device_ids=config.gpuids)

    num_batch = len(dataset) // config.batchsize

    lera.log_hyperparams({
        'title': dset, 
        'batchsize': config.batchsize, 
        'epochs': config.nepochs, 
        'npoints': config.npoints, 
        'optimizer': 'SGD', 
        'lr': config.lr, 
        })

    for epoch in range(config.nepochs):
        train_acc_epoch, test_acc_epoch = [], []
        for i, data in enumerate(dataloader):
            points, labels = data
            points = points.transpose(2, 1)
            labels = labels[:, 0]
            points, labels = points.to(device), labels.to(device)
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            # print(pred.size(), labels.size())
            loss = F.nll_loss(pred, labels)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(labels.data).cpu().sum()
            train_acc = correct.item() / float(config.batchsize)
            print('epoch %d: %d/%d | train loss: %f | train acc: %f' % (epoch+1, i+1, num_batch+1, loss.item(), train_acc))
            train_acc_epoch.append(train_acc)
            lera.log({
                'train loss': loss.item(), 
                'train acc': train_acc
                })

            if (i+1) % 10 == 0:
                j, data = next(enumerate(test_dataloader, 0))
                points, labels = data
                points = points.transpose(2, 1)
                labels = labels[:, 0]
                points, labels = points.to(device), labels.to(device)
                classifier = classifier.eval()
                with torch.no_grad():
                    pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                loss = F.nll_loss(pred, labels)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(labels.data).cpu().sum()
                test_acc = correct.item() / float(config.batchsize)
                print(blue('epoch %d: %d/%d | test loss: %f | test acc: %f') % (epoch+1, i+1, num_batch+1, loss.item(), test_acc))
                test_acc_epoch.append(test_acc)
                lera.log({
                    'test loss': loss.item(), 
                    'test acc': test_acc
                    })
        print(yellow('epoch %d | mean train acc: %f') % (epoch+1, np.mean(train_acc_epoch)))
        print(red('epoch %d | mean test acc: %f') % (epoch+1, np.mean(test_acc_epoch)))
        lera.log({
            'train acc epoch': np.mean(train_acc_epoch), 
            'test acc epoch': np.mean(test_acc_epoch)})
        torch.save(classifier.state_dict(), '%s/%s_model_%d.pth' % (config.outf, config.dataset, epoch))
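
train() expects a config namespace carrying every attribute referenced above. A minimal sketch of a driver; the flag defaults below are illustrative only and do not come from the original project:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='modelnet40')
    parser.add_argument('--root', type=str, required=True)
    parser.add_argument('--npoints', type=int, default=2500)
    parser.add_argument('--batchsize', type=int, default=32)
    parser.add_argument('--nepochs', type=int, default=100)
    parser.add_argument('--workers', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--model', type=str, default='')
    parser.add_argument('--outf', type=str, default='cls')
    parser.add_argument('--mgpu', action='store_true')
    parser.add_argument('--gpuids', type=int, nargs='+', default=[0])
    train(parser.parse_args())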
Example #6
dataset = PartDataset(root=opt.dataset, class_choice=[opt.class_choice],
                      classification=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=int(opt.workers))


cudnn.benchmark = True

num_classes = len(dataset.classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass


classifier = PointNetCls(k=2, num_points=opt.num_points)
gen = PointGen(num_points=opt.num_points, latent_size=opt.noise_vec_size)


if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

print ("MODEL SUMMMARY")
print(classifier)
print(gen)

print ("TRAINING ON CLASS: " + opt.class_choice)

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
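
weights_init is cut off after the Conv branch check. Sketched below is the canonical DCGAN-style initializer this pattern almost always completes to; the exact constants in the original may differ:

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)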
Example #7
test_size = int(len(dataset) - train_size)
train_data, test_data = data.random_split(dataset, [train_size, test_size])

train_loader = data.DataLoader(
    dataset=train_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
)

test_loader = data.DataLoader(
    dataset=test_data,
    batch_size=BATCH_SIZE,
)

# define something about training...
mynet = PointNetCls()
optimizer = torch.optim.Adam(mynet.parameters(), lr=LR)

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.8)
loss_func = torch.nn.MSELoss()

# train
myepoch = tqdm(range(1, 500))
for epoch in myepoch:
    loss_list = []
    valid_loss_list = []
    for step, (features, targets) in enumerate(train_loader):
        mynet.cuda()
        mynet.train()
        features = features.transpose(2, 1)
        features, targets = features.cuda(), targets.cuda()
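
The training step is truncated after the tensors reach the GPU. A sketch of the remainder for this regression-style setup; whether mynet returns a bare tensor or a tuple is an assumption:

        optimizer.zero_grad()
        pred = mynet(features)  # assumes the network returns predictions only
        loss = loss_func(pred, targets)
        loss.backward()
        optimizer.step()
        loss_list.append(loss.item())

    scheduler.step()  # decay the LR once per epoch
    myepoch.set_description('epoch %d | train loss %.4f' %
                            (epoch, sum(loss_list) / len(loss_list)))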
Example #8
                    type=str,
                    default='feature.h5',
                    help='feature dump file')

opt = parser.parse_args()
print(opt)

dataset = 'SHREC'
if dataset == 'partnno':
    test_dataset = PartDataset(
        root='shapenetcore_partanno_segmentation_benchmark_v0',
        classification=True,
        train=False,
        npoints=2500)
    num_classes = len(test_dataset.classes)
    classifier = PointNetCls(k=num_classes, num_points=2500)
elif dataset == 'modelnet40_pcl':
    num_classes = 40
    test_dataset = Modelnet40_PCL_Dataset(data_dir='modelnet40_ply_hdf5_2048',
                                          train=False,
                                          npoints=2048)
    classifier = PointNetCls(k=num_classes, num_points=2048)
elif dataset == 'SHREC':
    num_classes = 55
    test_dataset = Modelnet40_PCL_Dataset(data_dir='shrec2017_4096',
                                          train=False,
                                          npoints=4096)
    classifier = PointNetCls(k=num_classes, num_points=4096)
else:
    raise ValueError('unknown dataset: %s' % dataset)
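
The snippet ends before the feature dump itself. Given the feature.h5 argument, a plausible extraction loop with h5py; the flag names (opt.feature, opt.model) and the assumption that the classifier's second output is the global feature are mine, not the source's:

import h5py
import numpy as np

classifier.cuda()
classifier.load_state_dict(torch.load(opt.model))  # opt.model is assumed
classifier.eval()

testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=32,
                                             shuffle=False)
feats, labels = [], []
with torch.no_grad():
    for points, target in testdataloader:
        points = points.transpose(2, 1).cuda()
        pred, feat = classifier(points)  # assumes the 2nd output is the feature
        feats.append(feat.cpu().numpy())
        labels.append(target[:, 0].numpy())  # label layout is assumed

with h5py.File(opt.feature, 'w') as f:  # opt.feature defaults to 'feature.h5'
    f.create_dataset('feature', data=np.concatenate(feats))
    f.create_dataset('label', data=np.concatenate(labels))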
Example #9
                                             batch_size=opt.batchSize,
                                             shuffle=False,
                                             num_workers=int(opt.workers))

# print(len(dataset), len(test_dataset))
print(len(test_dataset))
num_classes = len(test_dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

# Declare pointnet model.
classifier = PointNetCls(k=num_classes, num_points=opt.num_points)

# Load saved checkpoint.
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

# Copy and setup to run model on GPU.
classifier.cuda()
# Set model for eval mode.
classifier.eval()

# Stats.
correct = 0
num = 0
best_test_acc = 0.0
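
The evaluation loop these counters feed is cut off. A minimal sketch, assuming the truncated loader is named testdataloader as in the other examples:

with torch.no_grad():
    for i, data in enumerate(testdataloader, 0):
        points, target = data
        target = target[:, 0]
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        pred, _ = classifier(points)
        pred_choice = pred.data.max(1)[1]
        correct += pred_choice.eq(target).sum().item()
        num += points.size(0)

test_acc = correct / float(num)
best_test_acc = max(best_test_acc, test_acc)
print('test accuracy: %f' % test_acc)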
Example #10
                          task='classification',
                          mode='val',
                          npoints=opt.num_points,
                          min_pts=0,
                          load_in_memory=True,
                          num_seg_class=5)
valdataloader = torch.utils.data.DataLoader(val_dataset,
                                            batch_size=opt.batchSize,
                                            shuffle=True,
                                            num_workers=int(opt.workers))

print('train: {} val: {}'.format(len(dataset), len(val_dataset)))
num_classes = len(dataset.classes)
print('classes', num_classes)

classifier = PointNetCls(k=num_classes).to(device)
start_epoch = -1

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
    # TODO update start_epoch from pre-trained

optimizer = optim.SGD(params=filter(lambda p: p.requires_grad,
                                    classifier.parameters()),
                      lr=0.01,
                      momentum=0.9)
lambda_lr = lambda epoch: 1 / (1 + (opt.lr_decay_rate * epoch))
lr_scheduler = LambdaLR(optimizer, lr_lambda=lambda_lr, last_epoch=start_epoch)

num_batch = len(dataset) // opt.batchSize
num_val_batch = len(val_dataset) // opt.batchSize
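
A skeleton of the epoch loop this sets up, stepping the LambdaLR schedule once per epoch; opt.nepoch and opt.outf are assumed flags, the train loader name dataloader is assumed from the truncated head, and the step body mirrors the other examples:

for epoch in range(start_epoch + 1, opt.nepoch):
    classifier.train()
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points = points.transpose(2, 1).to(device)
        target = target[:, 0].to(device)
        optimizer.zero_grad()
        pred, _ = classifier(points)
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
    lr_scheduler.step()
    torch.save(classifier.state_dict(),
               '%s/cls_model_%d.pth' % (opt.outf, epoch))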
Example #11
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                           classification=True, train=False,
                           npoints=opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                             shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass


classifier = PointNetCls(k=num_classes, num_points=opt.num_points)


if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))


optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target[:,0])
Example #12
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))

parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--num_points', type=int, default=2500, help='number of input points')


opt = parser.parse_args()
print(opt)

test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                           train=False, classification=True,
                           npoints=opt.num_points)

testdataloader = DataLoader(test_dataset, batch_size=32, shuffle = True)

ctx = mx.gpu()
classifier = PointNetCls(k=len(test_dataset.classes), num_points=opt.num_points)
classifier.load_parameters(opt.model, ctx=ctx)
L_loss = loss.SoftmaxCrossEntropyLoss(from_logits=True)


for i, data in enumerate(testdataloader, 0):
    points, target = data
    points = points.transpose((0,2, 1))
    pred, _ = classifier(points.as_in_context(ctx))
    loss = L_loss(pred, target)

    pred_choice = pred.argmax(1)
    correct = (target[:,0] == pred_choice.as_in_context(mx.cpu())).sum()
    print('i:%d  loss: %f accuracy: %f' %
          (i, loss.mean().asscalar(), correct.asscalar() / float(32)))
Example #13
testdataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))  #,collate_fn=my_collate)

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(n=opt.base_features, k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

device = torch.cuda.current_device()
#optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=0.9)
optimizer = optim.Adam(classifier.parameters(), lr=opt.lr)

classifier.to(device)
classifier.eval()

num_batch = len(dataset) // opt.batchSize


def prepare(data):
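
prepare() is declared but its body is cut off. A plausible version, matching how batches are shaped throughout these examples (an assumption, not the project's code):

def prepare(data):
    points, target = data
    points = points.transpose(2, 1)
    return points.to(device), target[:, 0].to(device)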
Example #14
def main(args):
    in_files = args.inList
    outDir = args.outDir
    lr = args.learningRate
    loss = nn.CrossEntropyLoss()
    '''PARAM. SET'''
    # num_point = [1024]
    num_point = 1024

    num_grid = 32
    kernel_size = 3
    out_channels = 32
    batch_size = 24
    num_cls = 6
    '''MODEL LOADING'''
    model = PointNetCls(k=num_cls).cuda()
    # model = ResidualSemantic3DNet().cuda()
    weights = torch.ones(num_cls).cuda()
    weights[0] = 1
    weights[1] = 1
    criterion = get_loss().cuda()

    if args.continueModel is not None:
        try:
            checkpoint = torch.load(args.continueModel)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['model_state_dict'])
            print('Use pretrain model')
        except:
            print('No existing model, starting training from scratch...')
            start_epoch = 0
    '''SET OPTIMIZER'''
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(  #
            model.parameters(),
            lr=args.learningRate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.learningRate,
                                    momentum=0.9)

    start = time.time()

    for j in range(args.multiTrain):  # epoch loop
        for file_pattern in in_files:  # iterate over file patterns
            for file in glob.glob(file_pattern):  # process files one by one
                print("Loading file %s" % file)
                d = dataset.kNNBatchDataset(file=file,
                                            undersampling=False,
                                            shuffle=False)
                # for i in range(100):  # sequential point by point
                for i in range(d.length):  #sequential point by point

                    voxels, labels = d.getBatches_Point(batch_size=batch_size,
                                                        num_point=num_point,
                                                        num_grid=num_grid)
                    optimizer.zero_grad()
                    model = model.train()
                    pred, _, _ = model(voxels.cuda())
                    labels = labels.long().cuda()
                    labels_flt = labels.view(-1, 1)[:, 0]
                    loss = criterion(pred, labels_flt, weights)
                    loss.backward()
                    # pred_error = loss(pred, labels_flt)
                    # pred_error.backward()
                    optimizer.step()

                    if (i % 10 == 0):
                        print("Processing batch %d/%d" %
                              (d.center_idx, d.length))
                        print(labels_flt)
                        print(pred)
                        batch_label = labels.view(-1, 1)[:,
                                                         0].cpu().data.numpy()
                        pred_choice = pred.cpu().data.max(1)[1].numpy()
                        print(loss.data)
                        print(
                            precision_score(batch_label,
                                            pred_choice,
                                            average=None))
                        print(
                            recall_score(batch_label,
                                         pred_choice,
                                         average=None))
                        print(f1_score(batch_label, pred_choice, average=None))

                    # d.center_idx += batch_size

                # Save a checkpoint each time one file has been trained.
                elapsed_time = time.time() - start

                savepath = outDir + '/model.pth'
                print('Saving at %f' % elapsed_time)
                state = {
                    'epoch': j,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
Example #15
                           npoints=opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.obj_list)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes, num_points=opt.num_points)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

classifier = classifier.cuda()

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
# if torch.cuda.is_available():
#     classifier.cuda()

num_batch = len(dataset) // opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
Example #16
File: main.py Project: yuchongY/ddn
def main():
    # Download dataset for point cloud classification
    modelnet_dir = 'modelnet40_ply_hdf5_2048'
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(BASE_DIR)
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, modelnet_dir)):
        www = 'https://shapenet.cs.stanford.edu/media/' + modelnet_dir + '.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
        os.system('rm %s' % (zipfile))

    datapath = './data/' + modelnet_dir + '/'

    args = parse_args()

    robust_types = {'Q': 'quadratic', 'PH': 'pseudohuber', 'H': 'huber',
                    'W': 'welsch', 'TQ': 'truncatedquadratic'}
    type_string = robust_types.get(args.robust_type, 'max')
    outlier_string = 'outliers_' + str(args.outlier_fraction)

    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))
    else:
        ROTATION = None
    '''CREATE DIRS'''
    experiment_dir = Path('./tests/')
    if not experiment_dir.exists():
        experiment_dir.mkdir()
    type_dir = Path(str(experiment_dir) + '/' + type_string + '/')
    if not type_dir.exists():
        type_dir.mkdir()
    outlier_dir = Path(str(type_dir) + '/' + outlier_string + '/')
    if not outlier_dir.exists():
        outlier_dir.mkdir()
    checkpoints_dir = outlier_dir
    '''LOG'''
    logger = logging.getLogger("PointNet")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(checkpoints_dir) + '/' + 'train_%s_' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')) + '.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])

    ## Replace a fraction of the points with outliers drawn uniformly from the unit sphere
    def replace_with_outliers(data, fraction, split_name):
        num_outliers = int(fraction * data.shape[1])
        print('Number of %s set outliers per point cloud: %d' %
              (split_name, num_outliers))
        for i in range(data.shape[0]):  # for each point cloud in the batch
            random_indices = np.random.choice(data.shape[1],
                                              num_outliers,
                                              replace=False)
            for j in range(num_outliers):  # for each point in the outlier subset
                # Rejection-sample a point uniformly inside the unit sphere.
                random_point = 2.0 * np.random.rand(3) - 1.0
                while np.linalg.norm(random_point) > 1.0:
                    random_point = 2.0 * np.random.rand(3) - 1.0
                data[i, random_indices[j], :] = random_point

    if args.outlier_fraction > 0.0:
        replace_with_outliers(train_data, args.outlier_fraction, 'training')
        replace_with_outliers(test_data, args.outlier_fraction, 'test')

    trainDataset = ModelNetDataLoader(train_data,
                                      train_label,
                                      rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is', ROTATION)
    testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)
    '''MODEL LOADING'''
    num_class = 40
    classifier = PointNetCls(num_class, args.input_transform,
                             args.feature_transform, args.robust_type,
                             args.alpha).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.evaluate:
        acc, map, _ = test(classifier, testDataLoader, do_map=True)
        logger.info('Test Accuracy: %f', acc)
        logger.info('mAP: %f', map)
        logger.info('%f,%f' % (acc, map))
        print('Test Accuracy:\n%f' % acc)
        print('mAP:\n%f' % map)
        # print('%f,%f'%(acc, map))
        return

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, trans_feat = classifier(points)
            loss = F.nll_loss(pred, target.long())
            if args.feature_transform and args.model_name == 'pointnet':
                loss += feature_transform_regularizer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = test(classifier.eval(),
                         trainDataLoader) if args.train_metric else None
        acc, map, _ = test(classifier, testDataLoader, do_map=True)

        print('\r Loss: %f' % loss.data)
        logger.info('Loss: %f', loss.data)
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        logger.info('Test Accuracy: %f', acc)
        logger.info('Test mAP: %f', map)
        print('\r Test %s: %f' % (blue('Accuracy'), acc))
        print('\r Test %s: %f' % (blue('mAP'), map))
        if args.train_metric:
            logger.info('%f,%f,%f' % (train_acc, acc, map))
            print('\r%f,%f,%f' % (train_acc, acc, map))
        else:
            logger.info('%f,%f' % (acc, map))
            print('\r%f,%f' % (acc, map))

        if (acc >= best_tst_accuracy):
            best_tst_accuracy = acc
        # Save every 10
        if (epoch + 1) % 10 == 0:
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1,
                            train_acc if args.train_metric else 0.0,
                            acc, map, classifier, optimizer,
                            str(checkpoints_dir), args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('Save final model...')
    save_checkpoint(global_epoch, train_acc if args.train_metric else 0.0, acc,
                    map, classifier, optimizer, str(checkpoints_dir),
                    args.model_name)
    print('Saving final model....')

    logger.info('End of training...')
Example #17
from datasets import PartDataset
from pointnet import PointNetCls

model_path = 'cls/cls_model_9.pth'
num_points = 2500

test_dataset = PartDataset(
    root='shapenetcore_partanno_segmentation_benchmark_v0',
    train=False,
    classification=True,
    npoints=num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=128,
                                             shuffle=False)

classifier = PointNetCls(k=len(test_dataset.classes))
classifier.cuda()
classifier.load_state_dict(torch.load(model_path))
classifier.eval()

preds = []
labels = []

for i, data in enumerate(testdataloader, 0):
    with torch.no_grad():
        points, target = data
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        pred, _ = classifier(points)
        preds.append(pred.data.max(1)[1])
        labels.append(target.data)
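
With per-batch predictions and labels collected, overall accuracy follows by concatenation; a short sketch:

preds = torch.cat(preds)
labels = torch.cat(labels).view(-1)  # targets arrive as (batch, 1)
accuracy = preds.eq(labels).float().mean().item()
print('test accuracy: %f' % accuracy)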
Example #18
opt = parser.parse_args()
print(opt)

test_dataset = PartDataset(
    root='shapenetcore_partanno_segmentation_benchmark_v0',
    train=False,
    classification=True,
    npoints=opt.num_points)

testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=32,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

classifier = PointNetCls(k=len(test_dataset.classes))
#classifier = PointNetCls(k = len(test_dataset.classes), num_points = opt.num_points)
classifier = nn.DataParallel(classifier, device_ids=[0, 1])
classifier.cuda()
classifier.load_state_dict(
    torch.load(opt.model, map_location=lambda storage, loc: storage))
classifier = classifier.module
print('model is loaded successfully!!!')
classifier.eval()

accuracy_sum = 0
for i, data in enumerate(testdataloader, 0):
    points, target = data
    points, target = Variable(points), Variable(target[:, 0])
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
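
The loop is truncated after the tensors reach the GPU. A sketch of the rest, feeding accuracy_sum as the setup suggests:

    with torch.no_grad():
        pred, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    accuracy_sum += correct.item() / float(points.size(0))

print('mean batch accuracy: %f' % (accuracy_sum / len(testdataloader)))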
Example #19
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                           classification=True, train=False,
                           npoints=opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                             shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass


classifier = PointNetCls(k=num_classes, views=opt.n_views)


if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))


optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=opt.momentum)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target[:,0])
Example #20
                          npoints=opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes, num_points=opt.num_points)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))


def adjust_learning_rate(optimizer, epoch):
    """Multiplies the initial LR by 0.8 every 50 epochs."""
    # lr = opt.lr * (0.8 ** (epoch // 20))
    lr = opt.lr * (0.8 ** (epoch // 50))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
        print('learning rate =', param_group['lr'])


# optimizer = optim.SGD(classifier.parameters(), lr=0.001, momentum=0.9)
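
The optimizer line is commented out where the snippet ends. A sketch of how adjust_learning_rate is typically wired into the epoch loop; the optimizer construction and opt.nepoch are assumptions:

optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=0.9)
classifier.cuda()

for epoch in range(opt.nepoch):
    adjust_learning_rate(optimizer, epoch)  # applies the 0.8 ** (epoch // 50) decay
    for i, data in enumerate(dataloader, 0):
        pass  # training step as in the earlier examples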