    split='test',
    data_augmentation=False)

idx = opt.idx
print("model %d/%d" % (idx, len(d)))
point, seg = d[idx]
print(point.size(), seg.size())
point_np = point.numpy()

# Build a 10-color HSV palette; ground-truth colors index it by (1-based) part label.
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]

state_dict = torch.load(opt.model, map_location=torch.device('cpu'))
# Recover the number of segmentation classes from the last conv layer's weights.
classifier = PointNetDenseCls(k=state_dict['conv4.weight'].size()[0])
classifier.load_state_dict(state_dict)
classifier.eval()

# Reshape to (1, 3, N): PointNet expects a batch of channel-first point clouds.
point = point.transpose(1, 0).contiguous()
point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _, _ = classifier(point)
pred_choice = pred.data.max(2)[1]
print(pred_choice)
#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]
#print(pred_color.shape)
showpoints(point_np, gt, pred_color)
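# `showpoints` here is the repo's point-cloud viewer (utils/show3d_balls), which
# relies on a compiled OpenCV-based renderer. A minimal matplotlib fallback
# sketch (my assumption, not part of the original script):
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(point_np[:, 0], point_np[:, 1], point_np[:, 2], c=pred_color, s=2)
# ax.set_title('predicted part labels')
# plt.show()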
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))  # number of samples in the train and test sets
num_classes = dataset.num_seg_classes  # number of parts, read from misc/num_seg_classes.txt
print('classes', num_classes)  # e.g. 'chair' has 4 parts

try:
    os.makedirs(opt.outf)  # recursively create the output directory
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'  # render the word 'test' in blue in the training log

classifier = PointNetDenseCls(
    k=num_classes,
    feature_transform=opt.feature_transform)  # per-point (dense) classifier from model.py

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))  # resume from pretrained weights if given

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))  # optimizer
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize  # number of batches per epoch

for epoch in range(opt.nepoch):  # loop over epochs
    scheduler.step()  # pre-1.1 PyTorch idiom: scheduler stepped at the start of the epoch
    for i, data in enumerate(dataloader, 0):  # iterate over batches
testdataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes,
                              feature_transform=opt.feature_transform)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    scheduler.step()
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points = points.transpose(2, 1)
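        # The fragment ends here; in the reference pointnet.pytorch
        # train_segmentation.py the step continues roughly as below (an
        # assumption based on the upstream script, not recovered from this file):
        #
        # points, target = points.cuda(), target.cuda()
        # optimizer.zero_grad()
        # classifier = classifier.train()
        # pred, trans, trans_feat = classifier(points)
        # pred = pred.view(-1, num_classes)
        # target = target.view(-1, 1)[:, 0] - 1   # part labels are 1-based
        # loss = F.nll_loss(pred, target)
        # if opt.feature_transform:
        #     loss += feature_transform_regularizer(trans_feat) * 0.001
        # loss.backward()
        # optimizer.step()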
testdataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=opt.batchsize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))

try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

device = opt.device
# Generator (completion network) plus a local and a global discriminator.
netG = PointNetDenseCls(device=device, feature_transform=opt.feature_transform)
localD = LocalDiscriminator(k=2, device=device)
globalD = GlobalDiscriminator(k=2, device=device)
if opt.model != '':
    netG.load_state_dict(torch.load(opt.model))

optimizerG = optim.Adam(netG.parameters(), lr=0.001, betas=(0.9, 0.999))
# Both discriminators share one optimizer, at half the generator's learning rate.
optimizerD = optim.Adam(list(globalD.parameters()) + list(localD.parameters()),
                        lr=0.0005, betas=(0.9, 0.999))
schedulerG = optim.lr_scheduler.StepLR(optimizerG, step_size=20, gamma=0.5)
schedulerD = optim.lr_scheduler.StepLR(optimizerD, step_size=20, gamma=0.5)
netG.to(device)
def train(lr=0.001):
    # Hyper-parameters are hard-coded onto an (empty) argparse namespace.
    parser = argparse.ArgumentParser()
    opt = parser.parse_args()
    opt.nepoch = 1
    opt.batchsize = 18
    opt.workers = 0
    opt.outf = 'completion'
    opt.dataset = '/home/cdi0/data/shape_net_core_uniform_samples_2048_split/'
    opt.feature_transform = False
    opt.model = ''
    opt.device = 'cuda:1'
    opt.lr = lr
    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset = ShapeNetDataset(dir=opt.dataset)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batchsize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
    test_dataset = ShapeNetDataset(dir=opt.dataset, train='test')
    testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=opt.batchsize,
                                                 shuffle=True,
                                                 num_workers=int(opt.workers))
    print(len(dataset), len(test_dataset))

    # try:
    #     os.makedirs(opt.outf)
    # except OSError:
    #     pass

    blue = lambda x: '\033[94m' + x + '\033[0m'
    device = opt.device

    netG = PointNetDenseCls(device=device, feature_transform=opt.feature_transform)
    localD = LocalDiscriminator(k=2, device=device)
    globalD = GlobalDiscriminator(k=2, device=device)
    if opt.model != '':
        netG.load_state_dict(torch.load(opt.model))

    optimizerG = optim.Adam(netG.parameters(), lr=0.001, betas=(0.9, 0.999))
    optimizerD = optim.Adam(list(globalD.parameters()) + list(localD.parameters()),
                            lr=0.001, betas=(0.9, 0.999))
    schedulerG = optim.lr_scheduler.StepLR(optimizerG, step_size=20, gamma=0.5)
    schedulerD = optim.lr_scheduler.StepLR(optimizerD, step_size=20, gamma=0.5)
    netG.to(device)
    localD.to(device)
    globalD.to(device)

    criterion = distChamfer      # reconstruction loss
    Dcriterion = nn.BCELoss()    # adversarial loss
    # Dcriterion = F.nll_loss
    real_label = 1
    fake_label = 0
    num_batch = len(dataset) / opt.batchsize
    writer = SummaryWriter()

    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            # k = 614
            points, target, mask = data  # Nx4 or Nx3
            points = points.transpose(2, 1)  # 4xN
            points, target = (points.to(device, dtype=torch.float),
                              target.to(device, dtype=torch.float))
            b_size = points.shape[0]

            # mask_ selects the observed points; mask__ is its complement (the hole).
            mask_ = mask.unsqueeze(2).repeat(1, 1, 3)
            # print(mask_.any(dim=2).sum(dim=1))
            mask__ = ~mask_
            # print(mask__.any(dim=2).sum(dim=1))
            mask__ = mask__.to(device, dtype=torch.float32)
            mask_ = mask_.to(device, dtype=torch.float32)

            optimizerD.zero_grad()
            localD = localD.train()
            globalD = globalD.train()

            ###### train D ######
            # label_real = torch.stack((torch.zeros(b_size), torch.ones(b_size)), dim=1).to(device, dtype=torch.long)
            # label_fake = torch.stack((torch.ones(b_size), torch.zeros(b_size)), dim=1).to(device, dtype=torch.long)
            label = torch.full((b_size, ), real_label, device=device)
            # print(mask__)
            # print(mask__[mask__.sum(dim=2) != 0].shape)

            # Real pass: the full target goes to globalD, the hole region to localD.
            target_mask = mask__ * target
            target_mask = target_mask[torch.abs(target_mask).sum(dim=2) != 0].view(b_size, -1, 3)
            target, target_mask = (target.transpose(2, 1).contiguous(),
                                   target_mask.transpose(2, 1).contiguous())
            output_g = globalD(target)
            output_l = localD(target_mask)
            # print(output_g.shape)
            # print(output_l.shape)
            # print(label.shape)
            errD_real_g = Dcriterion(output_g, label)
            errD_real_l = Dcriterion(output_l, label)
            errD_real = errD_real_g + errD_real_l
            errD_real.backward()
            target = target.transpose(2, 1).contiguous()

            # Fake pass: keep the observed points and paste G's output into the hole.
            pred = netG(points)
            # print(pred.shape)
            # print(target.shape)
            # print(mask_.shape)
            # print(mask__.shape)
            pred = (pred * mask__) + (target * mask_)
            pred_mask = pred * mask__
            pred_mask = pred_mask[torch.abs(pred_mask).sum(dim=2) != 0].view(b_size, -1, 3)
            pred, pred_mask = (pred.transpose(2, 1).contiguous(),
                               pred_mask.transpose(2, 1).contiguous())
            output_g = globalD(pred.detach())
            output_l = localD(pred_mask.detach())
            label.fill_(fake_label)
            errD_fake_g = Dcriterion(output_g, label)
            errD_fake_l = Dcriterion(output_l, label)
            errD_fake = errD_fake_g + errD_fake_l
            errD_fake.backward()
            errD = errD_real + errD_fake
            # Only update D while its loss is above a floor, so it does not overpower G.
            if errD.item() > 0.1:
                optimizerD.step()

            ###### train G ######
            optimizerG.zero_grad()
            optimizerD.zero_grad()
            netG = netG.train()
            output_g = globalD(pred)
            output_l = localD(pred_mask)
            label.fill_(real_label)
            errG_g = Dcriterion(output_g, label)
            errG_l = Dcriterion(output_l, label)
            errG = errG_g + errG_l
            pred = pred.transpose(2, 1).contiguous()
            # print(pred.shape)
            # print(target.shape)
            dist1, dist2 = criterion(pred, target)
            chamferloss = torch.mean(dist1) + torch.mean(dist2)
            loss = chamferloss + errG
            if opt.feature_transform:
                # Moved before backward() (originally after it, where it had no effect);
                # note that netG(points) as called above does not return trans_feat.
                loss += feature_transform_regularizer(trans_feat) * 0.001
            loss.backward()
            optimizerG.step()

            print('[%d: %d/%d] D_loss: %f, G_loss: %f, Chamfer_loss: %f ' %
                  (epoch, i, num_batch, errD.item(), errG.item(), chamferloss.item()))

            if i % 10 == 0:
                j, data = next(enumerate(testdataloader, 0))
                points, target, mask = data
                points = points.transpose(2, 1)
                points, target = (points.to(device, dtype=torch.float),
                                  target.to(device, dtype=torch.float))
                b_size = points.shape[0]
                localD = localD.eval()
                globalD = globalD.eval()

                ###### eval D ######
                label = torch.full((b_size, ), real_label, device=device)
                # label_real = torch.stack((torch.zeros(b_size), torch.ones(b_size)), dim=1).to(device)
                # label_fake = torch.stack((torch.ones(b_size), torch.zeros(b_size)), dim=1).to(device)
                mask_ = mask.unsqueeze(2).repeat(1, 1, 3)
                mask__ = ~mask_
                mask__ = mask__.to(device, dtype=torch.float32)
                mask_ = mask_.to(device, dtype=torch.float32)
                target_mask = mask__ * target
                target_mask = target_mask[torch.abs(target_mask).sum(dim=2) != 0].view(b_size, -1, 3)
                target, target_mask = (target.transpose(2, 1).contiguous(),
                                       target_mask.transpose(2, 1).contiguous())
                output_g = globalD(target)
                output_l = localD(target_mask)
                errD_real_g_eval = Dcriterion(output_g, label)
                errD_real_l_eval = Dcriterion(output_l, label)
                errD_real_eval = errD_real_g_eval + errD_real_l_eval
                target = target.transpose(2, 1).contiguous()

                pred = netG(points)
                pred = (pred * mask__) + (target * mask_)
                pred_mask = pred * mask__
                pred_mask = pred_mask[torch.abs(pred_mask).sum(dim=2) != 0].view(b_size, -1, 3)
                pred, pred_mask = (pred.transpose(2, 1).contiguous(),
                                   pred_mask.transpose(2, 1).contiguous())
                output_g_eval = globalD(pred.detach())
                output_l_eval = localD(pred_mask.detach())
                label.fill_(fake_label)
                # Score the fake-pass outputs (the original scored the stale real-pass ones).
                errD_fake_g_eval = Dcriterion(output_g_eval, label)
                errD_fake_l_eval = Dcriterion(output_l_eval, label)
                errD_fake_eval = errD_fake_g_eval + errD_fake_l_eval
                errD_eval = errD_real_eval + errD_fake_eval

                ###### eval G ######
                netG = netG.eval()
                output_g = globalD(pred)
                output_l = localD(pred_mask)
                label.fill_(real_label)
                errG_g_eval = Dcriterion(output_g, label)
                errG_l_eval = Dcriterion(output_l, label)
                errG_eval = errG_g_eval + errG_l_eval
                pred = pred.transpose(2, 1).contiguous()
                dist1, dist2 = criterion(pred, target)
                chamferloss_eval = torch.mean(dist1) + torch.mean(dist2)
                loss_eval = chamferloss_eval + errG_eval
                # Report the validation loss (the original printed the training loss here).
                print('[%d: %d/%d] %s D_loss: %f, G_loss: %f ' %
                      (epoch, i, num_batch, blue('test'), errD_eval.item(), loss_eval.item()))

            if i % 100 == 0:
                n = int(i / 100)  # n indexes 100-batch chunks; the 27 stride below presumably matches chunks per epoch
                writer.add_scalar('errD_real', errD_real.item(), 27 * epoch + n)
                writer.add_scalar('errD_fake', errD_fake.item(), 27 * epoch + n)
                writer.add_scalar('errD_loss', errD.item(), 27 * epoch + n)
                writer.add_scalar('validation errD_real', errD_real_eval.item(), 27 * epoch + n)
                writer.add_scalar('validation errD_fake', errD_fake_eval.item(), 27 * epoch + n)
                writer.add_scalar('validation errD_loss', errD_eval.item(), 27 * epoch + n)
                writer.add_scalar('errG_global', errG_g.item(), 27 * epoch + n)
                writer.add_scalar('errG_local', errG_l.item(), 27 * epoch + n)
                writer.add_scalar('chamfer_loss', chamferloss.item(), 27 * epoch + n)
                writer.add_scalar('errG_loss', loss.item(), 27 * epoch + n)
                writer.add_scalar('validation errG_global', errG_g_eval.item(), 27 * epoch + n)
                writer.add_scalar('validation errG_local', errG_l_eval.item(), 27 * epoch + n)
                writer.add_scalar('validation chamfer_loss', chamferloss_eval.item(), 27 * epoch + n)
                writer.add_scalar('validation errG_loss', loss_eval.item(), 27 * epoch + n)
                # Log weight histograms for all three networks.
                for name, param in globalD.named_parameters():
                    writer.add_histogram(name, param.clone().cpu().data.numpy(), 27 * epoch + n)
                for name, param in localD.named_parameters():
                    writer.add_histogram(name, param.clone().cpu().data.numpy(), 27 * epoch + n)
                for name, param in netG.named_parameters():
                    writer.add_histogram(name, param.clone().cpu().data.numpy(), 27 * epoch + n)

        schedulerG.step()
        schedulerD.step()
        #torch.save(netG.state_dict(), '%s/com_model_G_%f_%d.pth' % (opt.outf, loss.item(), epoch))
        #torch.save(localD.state_dict(), '%s/com_model_localD_%f_%d.pth' % (opt.outf, errD.item(), epoch))
        #torch.save(globalD.state_dict(), '%s/com_model_globalD_%f_%d.pth' % (opt.outf, errD.item(), epoch))

    return errD.item(), errG, chamferloss
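# Hypothetical usage (not in the original file): run one training pass and
# inspect the returned losses; errG and chamferloss come back as tensors.
#
# if __name__ == '__main__':
#     d_loss, g_loss, chamfer = train(lr=0.001)
#     print(d_loss, g_loss.item(), chamfer.item())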
print(opt)

d = ShapeNetDataset(
    dir=opt.dataset,
    train='test',
)
device = opt.device
idx = opt.idx
print(d.lst[idx])
print("model %d/%d" % (idx, len(d)))
point, target, mask = d[idx]
print(point.shape, target.shape)
#point_np = point.numpy()

state_dict = torch.load(opt.model, map_location='cpu')
classifier = PointNetDenseCls(device=device)
classifier.load_state_dict(state_dict)
classifier.to(device)
classifier.eval()

input_cloud = PyntCloud(
    pd.DataFrame(
        # same arguments that you are passing to visualize_pcl
        data=point[:, :3],
        columns=["x", "y", "z"]))
input_cloud.to_file("input.ply")

target_cloud = PyntCloud(
    pd.DataFrame(
        # same arguments that you are passing to visualize_pcl
        data=target,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers),
    drop_last=True)

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes,
                              feature_transform=opt.feature_transform)
if opt.gpu != -1:
    # NOTE: DataParallel prefixes parameter names with 'module.', so a checkpoint
    # saved without the wrapper cannot be loaded directly below.
    classifier = torch.nn.DataParallel(classifier).to(device)
else:
    classifier.to(device)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)

num_batch = len(dataset) / opt.batchSize
    data_augmentation=False)

testdataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetDenseCls(k=num_classes,
                              feature_transform=opt.feature_transform)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize


def get_iou(pred_np, target_np):
    shape_ious = []
    for shape_idx in range(target_np.shape[0]):
        parts = range(num_classes)  # np.unique(target_np[shape_idx])
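        # get_iou is cut off here. A sketch of the standard per-shape part-IoU
        # computation that typically follows (an assumption based on the usual
        # PointNet evaluation code, not recovered from this file):
        #
        # part_ious = []
        # for part in parts:
        #     I = np.sum(np.logical_and(pred_np[shape_idx] == part,
        #                               target_np[shape_idx] == part))
        #     U = np.sum(np.logical_or(pred_np[shape_idx] == part,
        #                              target_np[shape_idx] == part))
        #     part_ious.append(1.0 if U == 0 else I / float(U))
        # shape_ious.append(np.mean(part_ious))
    # return np.mean(shape_ious)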
dataset = ShapeNetDataset(root=dataset_path,
                          classification=False,
                          class_choice=[class_choice])
test_dataset = ShapeNetDataset(root=dataset_path,
                               classification=False,
                               class_choice=[class_choice],
                               split='test',
                               data_augmentation=False)
# print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('segmentation classes', num_classes)

classifier = PointNetDenseCls(k=num_classes, feature_transform=False)
model_path = 'trained/seg/seg_model_' + 'Chair_' + str(model_num) + '.pth'
classifier.load_state_dict(torch.load(model_path))
classifier.cuda()

#%%
points, target = dataset[data_num]
points.unsqueeze_(0)   # add a batch dimension: (N, 3) -> (1, N, 3)
target.unsqueeze_(0)
points = points.transpose(2, 1)  # (1, N, 3) -> (1, 3, N)
points, target = points.cuda(), target.cuda()
classifier = classifier.eval()
pred, _, _ = classifier(points)
pred_choice = pred.data.max(2)[1]
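# A quick follow-up sketch (my addition, not in the original cell): per-point
# accuracy for this single shape. ShapeNetDataset part labels are 1-based,
# matching the `seg.numpy() - 1` shift used in the visualization scripts.
#
# correct = pred_choice.eq(target.data - 1).cpu().sum()
# print('accuracy: %f' % (correct.item() / float(points.size(2))))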
    train=False,
    test_area='Area_6')

idx = 100
print("model %d/%d" % (idx, len(data)))
point, seg = data[idx]
print(point.size(), seg.size())
point_np = point.numpy()

# forward
num = 13  # S3DIS has 13 semantic classes
print('num={}'.format(num))
classifier = PointNetDenseCls(k=num)
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

point = point.transpose(1, 0).contiguous()
point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _ = classifier(point)
print(pred.size())
pred_choice = pred.data.max(2)[1]
print(pred_choice.size())
print(point.size())
# print(point[0, :, 0])
point = point.squeeze()
# print(point[:, 0])
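# The fragment stops here. A hedged sketch of coloring the semantic predictions
# for display, mirroring the show_seg snippet above (assumed, not recovered
# from this file; `showpoints` keyword names follow the repo's show3d_balls helper):
#
# cmap = plt.cm.get_cmap("hsv", num)
# cmap = np.array([cmap(i) for i in range(num)])[:, :3]
# pred_color = cmap[pred_choice.numpy()[0], :]
# showpoints(point_np, c_gt=None, c_pred=pred_color)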