# Fragment: dense-segmentation training setup and the start of the epoch loop.
# (Truncated: the loss/backward part of the inner loop lies beyond this chunk.)
num_classes = 10
print('classes', num_classes)

# Best-effort creation of the output directory; ignore "already exists".
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# ANSI-blue wrapper used when printing test-set log lines.
blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(num_points=num_points, k=num_classes)
if opt.model != '':
    # Resume from a pre-trained checkpoint when one is given.
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize
max_acc = -1  # best accuracy seen so far (updated past the truncation point)

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        # (B, N, 3) -> (B, 3, N): Conv1d expects channels-first input.
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        # Labels appear to be stored 1-based; shift to 0-based — TODO confirm.
        target = target.view(-1, 1)[:, 0] - 1
# Fragment: multi-GPU (DataParallel) segmentation training setup and the start
# of the epoch loop. (Truncated: loss/backward lie beyond this chunk.)

# Best-effort creation of the output directory; ignore "already exists".
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# ANSI-blue wrapper used when printing test-set log lines.
blue = lambda x: '\033[94m' + x + '\033[0m'

# BUG FIX: in Python 3, map() returns a lazy one-shot iterator — print() would
# show "<map object ...>" and nn.DataParallel needs an indexable sequence of
# device ids, so materialize it as a list.
opt.devices = list(map(int, opt.devices))
print(opt.devices)

classifier = PointNetDenseCls(k=num_classes)
if opt.model != '':
    print("Finish Loading")
    # Resume from a pre-trained checkpoint when one is given.
    classifier.load_state_dict(torch.load(opt.model))
# Replicate the model across the requested GPUs.
classifier = nn.DataParallel(classifier, device_ids=opt.devices)

optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=opt.momentum)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize
miou_list = list()  # per-evaluation mean-IoU history (filled past truncation)

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        # (B, N, 3) -> (B, 3, N): Conv1d expects channels-first input.
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()  # ensure dropout/BN are in train mode
        pred, _ = classifier(points)
# Fragment: segmentation training setup (debug variant) and the start of the
# epoch loop. (Truncated: loss/backward lie beyond this chunk.)
if not os.path.exists('./debug1'):
    os.mkdir('./debug1')

# ANSI-blue wrapper used when printing test-set log lines.
blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes)
if opt.model != '':
    # Resume from a pre-trained checkpoint when one is given.
    classifier.load_state_dict(torch.load(opt.model))
classifier.cuda()

num_batch = len(dataset) / opt.batchSize
learning_rate = 0.01

for epoch in range(opt.nepoch):
    # NOTE(review): the optimizer is re-created each epoch, which discards the
    # SGD momentum buffers; presumably this was meant to allow a per-epoch
    # learning-rate decay, but learning_rate never changes — confirm intent.
    optimizer = optim.SGD(classifier.parameters(), lr=learning_rate, momentum=0.9)
    for i, data in enumerate(dataloader, 0):
        # points: (batch_size, point_num, 3); target: (batch_size, point_num)
        points, target, names = data
        points, target = Variable(points), Variable(target)
        # (B, N, 3) -> (B, 3, N): Conv1d expects channels-first input.
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        pred, _ = classifier(points)
        # pred: (batch_size*point_num, num_classes)
        pred = pred.view(-1, num_classes)
        # Labels appear to be stored 1-based; shift to 0-based — TODO confirm.
        # target: (batch_size*point_num,)
        target = target.view(-1, 1)[:, 0] - 1
def main(argv=None):
    """Train PointNetDenseCls for per-point (part) segmentation on 'pipe' objects.

    Builds the dataset and loaders, runs `opt.nepoch` epochs of SGD training,
    evaluates one test batch every 10 training iterations, and checkpoints the
    model to `opt.outf` once per epoch.

    NOTE(review): relies on module-level `opt`, `PartDataset`,
    `PointNetDenseCls`, `DataLoader`, and an available CUDA device.
    """
    print('Hello! This is XXXXXX Program')

    num_points = 2048
    dataset = PartDataset(root='DATA/ARLab/objects', npoints=num_points,
                          classification=False, class_choice=['pipe'])
    dataloader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=False,
                            num_workers=int(opt.workers))

    # NOTE(review): the "test" set points at the same directory and class as
    # the training set — there is no held-out split; confirm this is intended.
    test_dataset = PartDataset(root='DATA/ARLab/objects', npoints=num_points,
                               classification=False, class_choice=['pipe'])
    testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=opt.batchSize,
                                                 shuffle=True,
                                                 num_workers=int(opt.workers))

    num_classes = dataset.num_seg_classes
    # ANSI-blue wrapper used when printing test-set log lines.
    blue = lambda x: '\033[94m' + x + '\033[0m'

    classifier = PointNetDenseCls(num_points=num_points, k=num_classes)
    if opt.model != '':
        # Resume from a pre-trained checkpoint when one is given.
        classifier.load_state_dict(torch.load(opt.model))
    optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    classifier.cuda()

    num_batch = len(dataset) / opt.batchSize

    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            # BUG FIX: restore train mode each iteration — the in-loop test
            # evaluation below switches the model to eval mode.
            classifier.train()
            points, target = data
            points, target = Variable(points), Variable(target)
            # (B, N, 3) -> (B, 3, N): Conv1d expects channels-first input.
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            print(points.shape)
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            # Labels appear to be stored 1-based; shift to 0-based for
            # nll_loss — TODO confirm against the dataset.
            target = target.view(-1, 1)[:, 0] - 1
            loss = F.nll_loss(pred, target)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f'
                  % (epoch, i, num_batch, loss.item(),
                     correct.item() / float(list(target.shape)[0])))

            if i % 10 == 0:
                # NOTE(review): next(enumerate(...)) restarts the loader and
                # always evaluates its first batch (shuffle=True randomizes
                # which batch that is) — confirm this sampling is intended.
                j, data = next(enumerate(testdataloader, 0))
                points, target = data
                points, target = Variable(points), Variable(target)
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                # BUG FIX: evaluation must not run with dropout/BatchNorm in
                # train mode; the original never called eval().
                classifier.eval()
                pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                target = target.view(-1, 1)[:, 0] - 1
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f'
                      % (epoch, i, num_batch, blue('test'), loss.item(),
                         correct.item() / float(list(target.shape)[0])))

        # Checkpoint once per epoch.
        torch.save(classifier.state_dict(),
                   '%s/seg_model_%d.pth' % (opt.outf, epoch))
# Fragment: segmentation training setup and the start of the epoch loop.
# (Truncated: the loss/backward part of the inner loop lies beyond this chunk.)
num_classes = dataset.num_seg_classes
print('classes', num_classes)

# Best-effort creation of the output directory; ignore "already exists".
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# ANSI-blue wrapper used when printing test-set log lines.
blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes)
if opt.model != '':
    # Resume from a pre-trained checkpoint when one is given.
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        # (B, N, 3) -> (B, 3, N): Conv1d expects channels-first input.
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()  # ensure dropout/BN are in train mode
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        # Labels appear to be stored 1-based; shift to 0-based — TODO confirm.
        target = target.view(-1, 1)[:, 0] - 1
# Fragment: training setup with inverse-time learning-rate decay and metric
# accumulators. (Truncated: the training loop lies beyond this chunk.)
print('classes', num_classes)

# ANSI-blue wrapper used when printing test-set log lines.
blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes).to(device)

start_epoch = -1
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
    # TODO update start_epoch from pre-trained

# Optimize only parameters that require gradients (frozen layers are skipped).
optimizer = optim.SGD(
    params=filter(lambda p: p.requires_grad, classifier.parameters()),
    lr=0.01,
    momentum=0.9,
)
# Inverse-time decay: lr_t = lr0 / (1 + lr_decay_rate * epoch).
lambda_lr = lambda epoch: 1 / (1 + (opt.lr_decay_rate * epoch))
lr_scheduler = LambdaLR(optimizer, lr_lambda=lambda_lr, last_epoch=start_epoch)

num_batch = len(dataset) / opt.batchSize
num_test_batch = len(val_dataset) / opt.batchSize
n_log = 100  # presumably a logging interval in iterations — verify usage

# Per-epoch history buffers for plotting/logging (filled past truncation).
epochs = []
train_acc = []
train_loss = []
test_acc = []
test_loss = []