Code Example #1
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    # Training and validation splits of the part-segmentation data
    # (train=False selects the held-out split).
    trn_ds = PartDataset(root=opt.directory, npoints=opt.num_points, classification=False, class_choice=['pipe'])
    val_ds = PartDataset(root=opt.directory, npoints=opt.num_points, classification=False, class_choice=['pipe'], train=False)
    num_classes = trn_ds.num_seg_classes

    trn_dl = DataLoader(trn_ds, batch_size=opt.batch_size, shuffle=True, num_workers=0, pin_memory=True)
    val_dl = DataLoader(val_ds, batch_size=32, shuffle=False, num_workers=0, pin_memory=True)

    # Wrap the two loaders in a fastai 0.7 ModelData object.
    data = ModelData(opt.directory, trn_dl, val_dl)

    # Per-point segmentation network: one output score per part class for every point.
    classifier = PointNetDenseCls(num_points=opt.num_points, k=num_classes)

    learn = ConvLearner.from_model_data(classifier.cuda(), data=data)
    learn.crit = nn.CrossEntropyLoss()
    learn.metrics = [accuracy]

    # Clip gradients and train with fastai's cyclical learning-rate schedule
    # (1 cycle of 20 epochs, weight decay 1e-4).
    learn.clip = 1e-1
    learn.fit(1.5, 1, wds=1e-4, cycle_len=20, use_clr_beta=(12, 15, 0.95, 0.85))

    # Test-time-augmented predictions over the validation set.
    preds, targs = learn.TTA()
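
Code Example #1 relies on the old fastai 0.7 API (ModelData, ConvLearner, accuracy) as well as an `opt` namespace and several project imports that the snippet does not show. The sketch below is a minimal, hypothetical version of that setup; the module paths for PartDataset and PointNetDenseCls and all default values are assumptions (the data root and point count are borrowed from Code Example #3).

# Hypothetical setup for Code Example #1; module paths and defaults are assumptions.
import argparse

import torch.nn as nn
from torch.utils.data import DataLoader

from fastai.conv_learner import *      # old fastai 0.7 API: ConvLearner, ModelData, accuracy

from datasets import PartDataset        # project-specific dataset module; the path may differ
from pointnet import PointNetDenseCls   # project-specific model module; the path may differ

parser = argparse.ArgumentParser()
parser.add_argument('--directory', type=str, default='DATA/ARLab/objects')
parser.add_argument('--num_points', type=int, default=2048)
parser.add_argument('--batch_size', type=int, default=32)
opt = parser.parse_args()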
Code Example #2
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# ANSI escape codes for highlighting the test output in the console.
blue = lambda x: '\033[94m' + x + '\033[0m'


classifier = PointNetDenseCls(num_points=num_points, k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize  # batches per epoch

max_acc = -1  # best accuracy seen so far (presumably used further down in the truncated script)
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        # Variable is a legacy no-op wrapper in PyTorch >= 0.4; kept for compatibility.
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N), the layout PointNet expects
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)      # flatten per-point predictions
        target = target.view(-1, 1)[:, 0] - 1  # flatten labels and shift the 1-based part ids to 0-based
        # print(pred.size(), target.size())
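
Code Example #2 breaks off right after the predictions and labels are reshaped. Judging from Code Example #3 and from the otherwise unused `max_acc` above, the batch loop presumably continues with the NLL loss, the backward pass, and some best-accuracy bookkeeping. The continuation below is a guess modelled on Code Example #3 (indented to sit inside the batch loop, and assuming the usual `import torch.nn.functional as F`); the best-model checkpoint in particular is an assumption.

        # Hypothetical continuation of the truncated batch loop, modelled on Code Example #3.
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()

        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        acc = correct.item() / float(target.shape[0])
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), acc))

        # `max_acc` above hints at a best-model checkpoint; the file name here is a guess.
        if acc > max_acc:
            max_acc = acc
            torch.save(classifier.state_dict(), '%s/seg_model_best.pth' % opt.outf)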
Code Example #3
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    num_points = 2048
    dataset = PartDataset(root='DATA/ARLab/objects',
                          npoints=num_points,
                          classification=False,
                          class_choice=['pipe'])
    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))
    dataloader = DataLoader(dataset,
                            batch_size=opt.batchSize,
                            shuffle=False,
                            num_workers=int(opt.workers))

    # Note: this "test" set is built with the same arguments as the training set,
    # so it draws from the same split; pass train=False (as in Code Example #1)
    # to evaluate on held-out data.
    test_dataset = PartDataset(root='DATA/ARLab/objects',
                               npoints=num_points,
                               classification=False,
                               class_choice=['pipe'])
    testdataloader = DataLoader(test_dataset,
                                batch_size=opt.batchSize,
                                shuffle=True,
                                num_workers=int(opt.workers))

    num_classes = dataset.num_seg_classes

    # ANSI escape codes for highlighting the test output in the console.
    blue = lambda x: '\033[94m' + x + '\033[0m'

    classifier = PointNetDenseCls(num_points=num_points, k=num_classes)

    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))

    optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    classifier.cuda()

    num_batch = len(dataset) // opt.batchSize  # batches per epoch

    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            points, target = data
            # Variable is a legacy no-op wrapper in PyTorch >= 0.4; kept for compatibility.
            points, target = Variable(points), Variable(target)
            points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N), the layout PointNet expects
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            print(points.shape)  # debug output: confirm the (B, 3, N) layout
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)      # flatten per-point predictions
            target = target.view(-1, 1)[:, 0] - 1  # flatten labels and shift the 1-based part ids to 0-based
            # print(pred.size(), target.size())
            loss = F.nll_loss(pred, target)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' %
                  (epoch, i, num_batch, loss.item(),
                   correct.item() / float(list(target.shape)[0])))

            if i % 10 == 0:
                # Grab one (shuffled) batch from the test loader for a quick check.
                j, data = next(enumerate(testdataloader, 0))
                points, target = data
                points, target = Variable(points), Variable(target)
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                target = target.view(-1, 1)[:, 0] - 1

                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                      (epoch, i, num_batch, blue('test'), loss.item(),
                       correct.item() / float(list(target.shape)[0])))

        # Checkpoint the segmentation weights after every epoch.
        torch.save(classifier.state_dict(),
                   '%s/seg_model_%d.pth' % (opt.outf, epoch))
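
Code Examples #2 through #4 read their hyper-parameters from an `opt` namespace that is never defined in the snippets. A minimal argparse definition covering exactly the attributes they use could look like this; the default values and help strings are assumptions.

import argparse

# Hypothetical command-line options for Code Examples #2-#4; defaults are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, default=4, help='number of data-loading workers')
parser.add_argument('--nepoch', type=int, default=25, help='number of training epochs')
parser.add_argument('--outf', type=str, default='seg', help='output folder for checkpoints')
parser.add_argument('--model', type=str, default='', help='optional path to pre-trained weights')
opt = parser.parse_args()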
Code Example #4
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# ANSI escape codes for highlighting the test output in the console.
blue = lambda x: '\033[94m' + x + '\033[0m'


# This variant of PointNetDenseCls takes only the number of segmentation classes
# (unlike Examples #1-#3, which also pass num_points).
classifier = PointNetDenseCls(k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize  # batches per epoch

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        # Variable is a legacy no-op wrapper in PyTorch >= 0.4; kept for compatibility.
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N), the layout PointNet expects
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()  # make sure dropout / batch-norm are in training mode
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)      # flatten per-point predictions
        target = target.view(-1, 1)[:, 0] - 1  # flatten labels and shift the 1-based part ids to 0-based
        # print(pred.size(), target.size())
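
Code Example #4 forces the network back into training mode on every iteration with `classifier = classifier.train()`. That call only has an effect if something else put the model into evaluation mode, typically a periodic test pass like the one in Code Example #3. The block below is a hypothetical version of such a pass, indented to sit inside the batch loop; the 10-batch cadence, `testdataloader`, `num_batch`, and `blue` are taken from Code Example #3, while the `eval()`/`torch.no_grad()` toggle is an addition.

        # Hypothetical evaluation pass; `testdataloader` and `blue` are as in Code Example #3.
        if i % 10 == 0:
            classifier = classifier.eval()   # dropout off, batch-norm in inference mode
            with torch.no_grad():            # no gradient bookkeeping for the test batch
                points, target = next(iter(testdataloader))
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                target = target.view(-1, 1)[:, 0] - 1
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                      (epoch, i, num_batch, blue('test'), loss.item(),
                       correct.item() / float(target.shape[0])))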