# Training step for the segmentation variant that also checkpoints the best test accuracy.
optimizer.zero_grad()
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1, 1)[:, 0] - 1  # labels are 1-based on disk; shift to 0-based for nll_loss
# print(pred.size(), target.size())
loss = F.nll_loss(pred, target)
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] train loss: %f accuracy: %f' % (
    epoch, i, num_batch, loss.item(), correct.item() / float(list(target.shape)[0])))
if i % 10 == 0:
    # every 10th batch, evaluate on one (shuffled) test batch
    j, data = next(enumerate(testdataloader, 0))
    points, target = data
    points, target = Variable(points), Variable(target)
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    pred, _ = classifier(points)
    pred = pred.view(-1, num_classes)
    target = target.view(-1, 1)[:, 0] - 1
    loss = F.nll_loss(pred, target)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    print('[%d: %d/%d] %s loss: %f accuracy: %f' % (
        epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(list(target.shape)[0])))
    if correct.item() / float(list(target.shape)[0]) > max_acc:
        # keep a checkpoint whenever the single-batch test accuracy improves
        max_acc = correct.item() / float(list(target.shape)[0])
        torch.save(classifier.state_dict(), '%s/seg_model_%d_%.3f.pth' % (opt.outf, epoch, max_acc))
        # torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))
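The best-model check above compares against a running best, so max_acc must exist before the epoch loop; a minimal sketch of the initialisation this variant assumes:

# assumed initialisation, placed before `for epoch in range(opt.nepoch):`
max_acc = 0.0  # best single-batch test accuracy seen so far; first test batch always saves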
# Same loop, originally written for Python 2 / pre-0.4 PyTorch; updated to run on
# current versions: next(...) instead of .next(), loss.item() instead of loss.data[0].
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1, 1)[:, 0] - 1
loss = F.nll_loss(pred, target)
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
# 2500 is the assumed number of points per sample, so this is per-point accuracy
print('[%d: %d/%d] train loss: %f accuracy: %f' % (
    epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize * 2500)))
if i % 10 == 0:
    j, data = next(enumerate(testdataloader, 0))
    points, target = data
    points, target = Variable(points), Variable(target)
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    pred, _ = classifier(points)
    pred = pred.view(-1, num_classes)
    target = target.view(-1, 1)[:, 0] - 1
    loss = F.nll_loss(pred, target)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    print('[%d: %d/%d] %s loss: %f accuracy: %f' % (
        epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(opt.batchSize * 2500)))
torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))
# Regression variant: the per-point class labels are replaced by a 7-dimensional
# target, so the NLL classification loss is swapped for smooth L1.
# loss = F.nll_loss(pred, target)
# torch.autograd.Variable([1]).float()
loss = F.smooth_l1_loss(pred, target)
# loss = F.mse_loss(pred, target)
print('Pred {}'.format(pred[0, :]))
print('Target {}'.format(target[0, :]))
loss.backward()
optimizer.step()
# accuracy is not meaningful for regression, so the classification metrics are disabled
# pred_choice = pred.data.max(1)[1]
# correct = pred_choice.eq(target.data).cpu().sum()
# print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss, correct / float(opt.batchSize * 2500)))
print('[%d: %d/%d] train loss: %f' % (epoch, i, num_batch, loss.item()))
if i % 10 == 0:
    j, data = next(enumerate(testdataloader, 0))
    points, target = data
    points, target = Variable(points), Variable(target)
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    pred, _ = classifier(points)
    pred = pred.view(-1, num_classes)  # assumes num_classes == 7 so pred and target shapes match
    # target = target.view(-1, 1)[:, 0] - 1
    target = target.view(-1, 7)  # 7-dimensional regression target per point
    # F.nll_loss would require a log-softmax final layer; disabled for regression
    # loss = F.nll_loss(pred, target)
    loss = F.smooth_l1_loss(pred, target)
    # pred_choice = pred.data.max(1)[1]
    # correct = pred_choice.eq(target.data).cpu().sum()
    # print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss, correct / float(opt.batchSize * 2500)))
torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))
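A note on the loss swap above: F.nll_loss expects log-probabilities (i.e. a log-softmax final layer) plus integer class labels, while F.smooth_l1_loss compares raw outputs against float targets of the same shape, so the two need different network heads. A minimal, self-contained sketch of the two loss paths, using hypothetical tensors with the shapes this snippet implies:

import torch
import torch.nn.functional as F

n = 4                       # illustrative number of points
num_classes, reg_dim = 5, 7  # illustrative class count; 7-dim target as in the snippet

# segmentation path: log-softmax outputs + 0-based integer labels
log_probs = F.log_softmax(torch.randn(n, num_classes), dim=1)
labels = torch.randint(0, num_classes, (n,))
seg_loss = F.nll_loss(log_probs, labels)

# regression path: raw (un-normalised) outputs + float targets, same shape
raw_out = torch.randn(n, reg_dim)
targets = torch.randn(n, reg_dim)
reg_loss = F.smooth_l1_loss(raw_out, targets)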
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader

# module paths assumed from a pointnet.pytorch-style project layout
from datasets import PartDataset
from pointnet import PointNetDenseCls


def main(argv=None):
    print('Hello! This is XXXXXX Program')
    num_points = 2048
    dataset = PartDataset(root='DATA/ARLab/objects', npoints=num_points, classification=False, class_choice=['pipe'])
    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))
    dataloader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))
    # note: the test set is drawn from the same data as the training set here
    test_dataset = PartDataset(root='DATA/ARLab/objects', npoints=num_points, classification=False, class_choice=['pipe'])
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))

    num_classes = dataset.num_seg_classes
    blue = lambda x: '\033[94m' + x + '\033[0m'  # colour the test-set lines in the log

    classifier = PointNetDenseCls(num_points=num_points, k=num_classes)
    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))
    optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    classifier.cuda()

    num_batch = len(dataset) // opt.batchSize  # integer division for the progress counter

    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            points, target = data
            points, target = Variable(points), Variable(target)
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            print(points.shape)
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            target = target.view(-1, 1)[:, 0] - 1
            # print(pred.size(), target.size())
            loss = F.nll_loss(pred, target)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' % (
                epoch, i, num_batch, loss.item(), correct.item() / float(list(target.shape)[0])))
            if i % 10 == 0:
                j, data = next(enumerate(testdataloader, 0))
                points, target = data
                points, target = Variable(points), Variable(target)
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                target = target.view(-1, 1)[:, 0] - 1
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' % (
                    epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(list(target.shape)[0])))
        torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))
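All of these snippets read hyperparameters from an opt namespace that is never defined. A minimal sketch of the argparse setup they appear to assume; the option names are taken from the opt.* attributes used above, while the defaults are purely illustrative:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, default=4, help='number of data-loading workers')
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg', help='output folder for checkpoints')
parser.add_argument('--model', type=str, default='', help='optional path to a pre-trained model')
opt = parser.parse_args()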
# Variant that toggles train/eval mode around the test batch, so dropout and
# batch-norm statistics behave correctly during evaluation.
optimizer.zero_grad()
classifier.train()  # train() returns the module itself; no need to reassign
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1, 1)[:, 0] - 1
# print(pred.size(), target.size())
loss = F.nll_loss(pred, target)
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] train loss: %f accuracy: %f' % (
    epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize * 2500)))
if i % 10 == 0:
    j, data = next(enumerate(testdataloader, 0))
    points, target = data
    points, target = Variable(points), Variable(target)
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier.eval()
    pred, _ = classifier(points)
    pred = pred.view(-1, num_classes)
    target = target.view(-1, 1)[:, 0] - 1
    loss = F.nll_loss(pred, target)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    print('[%d: %d/%d] %s loss: %f accuracy: %f' % (
        epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(opt.batchSize * 2500)))
torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))
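In PyTorch 0.4 and later, Variable is a no-op and the evaluation branch above would normally also be wrapped in torch.no_grad() to skip gradient bookkeeping. A sketch of the same test step in that style, under the assumption that classifier, testdataloader, and num_classes are defined as above:

classifier.eval()
with torch.no_grad():
    points, target = next(iter(testdataloader))
    points = points.transpose(2, 1).cuda()
    target = target.cuda()
    pred, _ = classifier(points)
    pred = pred.view(-1, num_classes)
    target = target.view(-1, 1)[:, 0] - 1
    loss = F.nll_loss(pred, target)
    pred_choice = pred.max(1)[1]
    correct = pred_choice.eq(target).cpu().sum()
classifier.train()  # switch back before the next training batch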
# Fragment starting just after the forward pass; Chinese comments translated,
# and the pre-0.4 idioms (loss.data[0], .next()) updated as in the earlier variants.
target = target.view(-1, 1)[:, 0] - 1
# print(pred.size(), target.size())
loss = F.nll_loss(pred, target)
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]  # pick the most likely class per point (maximum likelihood)
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] train loss: %f accuracy: %f' % (
    epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize * 2500)))
if i % 10 == 0:
    j, data = next(enumerate(testdataloader, 0))
    points, target = data
    points, target = Variable(points), Variable(target)
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    pred, _ = classifier(points)
    pred = pred.view(-1, num_classes)
    target = target.view(-1, 1)[:, 0] - 1
    loss = F.nll_loss(pred, target)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    print('[%d: %d/%d] %s loss: %f accuracy: %f' % (
        epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(opt.batchSize * 2500)))
# save the model once per epoch: torch.save(the_model, PATH)
torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))
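On the trailing comment: torch.save(the_model, PATH) pickles the entire module, which ties the checkpoint to the exact class definition and file layout at save time; saving the state_dict, as these snippets do, is the more portable idiom. A short sketch of both, including how each is loaded back; the file names are illustrative:

# recommended: save/load only the parameters
torch.save(classifier.state_dict(), 'seg_model.pth')
classifier = PointNetDenseCls(num_points=num_points, k=num_classes)  # rebuild, then load weights
classifier.load_state_dict(torch.load('seg_model.pth'))

# alternative from the comment: pickle the whole module (less portable)
torch.save(classifier, 'seg_model_full.pth')
classifier = torch.load('seg_model_full.pth')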