Example #1
if opt.model != '':
    # resume training from a previously saved checkpoint
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize  # number of minibatches per epoch

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)  # Variable is a no-op wrapper in PyTorch >= 0.4
        points = points.transpose(2, 1)  # move the coordinate channel first: (B, N, C) -> (B, C, N)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        target = target.view(-1, 1)[:, 0] - 1  # flatten and shift 1-indexed part labels to 0-indexed
        #print(pred.size(), target.size())
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]  # highest-scoring class per point
        correct = pred_choice.eq(target.data).cpu().sum()
        # accuracy is measured per point: opt.batchSize shapes x 2500 sampled points each
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize * 2500)))
        
        if i % 10 == 0:
            j, data = next(enumerate(testdataloader, 0))
            points, target = data
            points, target = Variable(points), Variable(target)
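The example is truncated inside the `if i % 10 == 0:` test branch. The lines below are a sketch of how such a periodic test step typically continues, mirroring the training step above; they are not part of the original example and assume the same classifier, testdataloader, num_classes and opt objects.

            # --- sketch, not from the original example: finish the periodic test step ---
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            with torch.no_grad():
                pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            target = target.view(-1, 1)[:, 0] - 1
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] test loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize * 2500)))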
Example #2

optimizer = optim.SGD(classifier.parameters(),
                      lr=opt.lr,
                      momentum=opt.momentum)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize
miou_list = []
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        target = target.view(-1, 1)[:, 0] - 1
        #print(pred.size(), target.size())
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' %
              (epoch, i, num_batch, loss.item(),
               correct.item() / float(opt.batchSize * opt.num_points)))

        if i % 100 == 0:
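Example #2 creates miou_list but the excerpt stops before the list is used. Below is a sketch of a per-batch mean-IoU helper of the kind such a list usually accumulates; the function name batch_mean_iou and its exact signature are assumptions, not part of the original code.

# Sketch (assumed helper, not from the original): mean IoU over part labels
# for one batch of point-wise segmentation predictions.
import numpy as np

def batch_mean_iou(pred_choice, target, num_parts):
    # pred_choice, target: 1-D tensors of 0-indexed part labels,
    # flattened to shape (batch_size * num_points,)
    pred_np = pred_choice.cpu().numpy()
    target_np = target.cpu().numpy()
    ious = []
    for part in range(num_parts):
        intersection = np.sum((pred_np == part) & (target_np == part))
        union = np.sum((pred_np == part) | (target_np == part))
        # a part absent from both prediction and ground truth counts as IoU = 1
        ious.append(1.0 if union == 0 else intersection / union)
    return float(np.mean(ious))

Inside the `if i % 100 == 0:` branch, something like `miou_list.append(batch_mean_iou(pred_choice, target, num_classes))` would be one way to populate the list.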
Example #3
test_loss = []


plots_dir = os.path.join(model_dir, 'plots')
os.makedirs(plots_dir, exist_ok=True)  # also creates model_dir if it does not exist yet

for epoch in range(opt.nepoch):
    train_batch_num = 0
    test_batch_num = 0

    total_train_correct = 0
    total_train_loss = 0
    classifier.train()
    for i, data in enumerate(dataloader, 0):
        optimizer.zero_grad()
        train_batch_num += 1
        points, target = data
        points, target = points.to(device, non_blocking=True), target.to(device, non_blocking=True)
        points = points.transpose(2, 1)
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        target = target.view(-1, 1)[:, 0] - 1
        loss = F.nll_loss(pred, target.long())
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.long().data).cpu().sum()
        total_train_correct += correct.item()
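Example #3 sets up per-epoch counters, a test_loss list and a plots directory, but the excerpt ends before the evaluation and plotting code. The helpers below are a minimal sketch of what that phase could look like; testdataloader, device, num_classes and the file name loss_curve.png are assumptions rather than part of the original.

# Sketch (assumed helpers, not from the original example).
import os
import torch
import torch.nn.functional as F
import matplotlib
matplotlib.use('Agg')  # render plots to files without a display
import matplotlib.pyplot as plt


def evaluate_epoch(classifier, testdataloader, device, num_classes):
    # One pass over the test set; returns (mean NLL loss, overall per-point accuracy).
    classifier.eval()
    total_loss, total_correct, total_points = 0.0, 0, 0
    with torch.no_grad():
        for points, target in testdataloader:
            points, target = points.to(device), target.to(device)
            points = points.transpose(2, 1)
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            target = target.view(-1, 1)[:, 0] - 1
            total_loss += F.nll_loss(pred, target.long()).item()
            total_correct += pred.max(1)[1].eq(target.long()).sum().item()
            total_points += target.numel()
    return total_loss / max(len(testdataloader), 1), total_correct / max(total_points, 1)


def save_loss_plot(test_loss, plots_dir):
    # Save the per-epoch test-loss curve collected in test_loss into plots_dir.
    plt.figure()
    plt.plot(test_loss)
    plt.xlabel('epoch')
    plt.ylabel('test NLL loss')
    plt.savefig(os.path.join(plots_dir, 'loss_curve.png'))
    plt.close()

At the end of each epoch, `test_loss.append(evaluate_epoch(classifier, testdataloader, device, num_classes)[0])` followed by `save_loss_plot(test_loss, plots_dir)` would be one way to use these helpers.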