def forward(self, old_weights, pos, batch, normals, edge_idx_l, dense_l, stddev):
    # Re-weighting
    weights = self.stepWeights(pos, old_weights, normals, edge_idx_l, dense_l, stddev)  # , f=f)
    # Weighted Least-Squares
    cov = compute_weighted_cov_matrices_dense(pos, weights, dense_l, edge_idx_l[0])
    eig_val, eig_vec = Sym3Eig.apply(cov)
    _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
    eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
    normals = eig_vec[:, :, 0]
    # Not necessary for PCPNetDataset but might be for other datasets with
    # underdefined neighborhoods:
    # mask = torch.isnan(normals)
    # normals[mask] = 0.0
    return normals, weights
def forward(self, old_weights, pos, batch, normals, edge_idx_l, dense_l, stddev):
    # Re-weighting
    weights = self.stepWeights(pos, old_weights, normals, edge_idx_l, dense_l, stddev)
    # Weighted Least-Squares
    cov = compute_weighted_cov_matrices_dense(pos, weights, dense_l, edge_idx_l[0])
    # Add a tiny random jitter to the diagonal so the eigendecomposition does
    # not produce NaNs for degenerate matrices. The jitter must be a length-3
    # vector so that torch.diag() builds a 3x3 diagonal matrix that broadcasts
    # over all covariance matrices (and preserves their symmetry).
    noise = (torch.rand(3) - 0.5) * 1e-8
    cov = cov + torch.diag(noise).cuda()
    eig_val, eig_vec = Sym3Eig.apply(cov)
    _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
    eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
    normals = eig_vec[:, :, 0]
    # For underdefined neighborhoods
    mask = torch.isnan(normals)
    normals[mask] = 0.0
    return normals, weights
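# --- Example: diagonal-jitter regularization in isolation ---------------------
# A minimal, self-contained sketch of the regularization used above, written
# against torch.linalg.eigh as a stand-in for the Sym3Eig extension so it runs
# with plain PyTorch. The name `regularized_eig` is illustrative, not part of
# this repository.
import torch

def regularized_eig(cov, eps=1e-8):
    # cov: (N, 3, 3) batch of symmetric covariance matrices. A tiny diagonal
    # jitter keeps near-degenerate matrices away from exactly repeated
    # eigenvalues, where eigenvector gradients become unstable.
    jitter = torch.diag((torch.rand(3, device=cov.device) - 0.5) * eps)
    return torch.linalg.eigh(cov + jitter)  # eigenvalues in ascending order

# Usage: with ascending eigenvalues, column 0 of the eigenvector matrix is the
# direction of smallest variance, i.e. the surface normal.
# x = torch.randn(8, 16, 3)
# cov = x.transpose(-1, -2) @ x          # (8, 3, 3), symmetric PSD
# eig_val, eig_vec = regularized_eig(cov)
# normals = eig_vec[:, :, 0]             # (8, 3)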
def forward(self, x):
    shape = x.shape[:-2]
    x = x.reshape(-1, 3, 3)
    # Add small Gaussian noise (scaled by self.eps) to avoid degenerate inputs
    # to the eigendecomposition.
    if x.is_cuda:
        noise = torch.cuda.FloatTensor(x.shape)
    else:
        noise = torch.FloatTensor(x.shape)
    torch.randn(x.shape, out=noise)
    noise *= self.eps
    x = x + noise
    # Re-symmetrize after adding noise.
    x = (x + torch.transpose(x, -1, -2)) / 2.0
    eigenvals, eigenvectors = Sym3Eig.apply(x)
    # Gradient hooks for debugging (disabled):
    # if x.requires_grad:
    #     eigenvals.register_hook(self.hook)
    #     eigenvectors.register_hook(self.hook)
    eigenvals = eigenvals.reshape(*shape, 3)
    eigenvectors = eigenvectors.reshape(*shape, 3, 3)
    return eigenvals, eigenvectors
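# --- Example: sanity check for the noisy eigendecomposition wrapper ----------
# A quick reconstruction test, assuming the torch_sym3eig package is installed
# and the module above is available as `NoisySym3Eig` (an illustrative name;
# the eps constructor argument is likewise assumed). Because the wrapper
# re-symmetrizes after adding noise, V diag(w) V^T should reproduce the input
# up to eps-sized error.
# mats = torch.randn(32, 3, 3)
# mats = (mats + mats.transpose(-1, -2)) / 2.0   # symmetric inputs
# eig_val, eig_vec = NoisySym3Eig(eps=1e-8)(mats)
# recon = eig_vec @ torch.diag_embed(eig_val) @ eig_vec.transpose(-1, -2)
# print((recon - mats).abs().max())              # should be on the order of eps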
def forward(self, points):
    # points: (1, 240, 320, 3)
    pos = points.view(-1, 3)  # (240*320, 3)

    # Compute KNN-graph indices for GNN
    edge_idx_l, dense_l = radius_graph(pos, 0.5, batch=None,
                                       max_num_neighbors=self.k_size)
    # edge_idx_l: [2, 76800*13], dense_l: [76800, 13]

    # Iteration 0 (PCA)
    cov = compute_cov_matrices_dense(pos, dense_l, edge_idx_l[0]).cuda()  # (76800, 3, 3)
    # Add a tiny diagonal jitter to avoid NaNs in the eigendecomposition. The
    # jitter must be a length-3 vector so torch.diag() yields a 3x3 diagonal
    # matrix that broadcasts over all covariance matrices.
    noise = (torch.rand(3) - 0.5) * 1e-8
    cov = cov + torch.diag(noise).cuda()
    eig_val, eig_vec = Sym3Eig.apply(cov)  # (76800, 3), (76800, 3, 3)
    # _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
    _, argsort = eig_val.sort(dim=-1, descending=False)
    eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
    # mask = torch.isnan(eig_vec)
    # eig_vec[mask] = 0.0
    normals = eig_vec[:, :, 0].cuda()  # (76800, 3)
    pca_normals = torch.reshape(normals, (points.shape[0], points.shape[1],
                                          points.shape[2], points.shape[3]))  # (1, 240, 320, 3)
    pca_normals = pca_normals.permute(0, 3, 1, 2)  # (1, 3, 240, 320)

    N = pos.size(0)         # 76800
    K = dense_l.size(1)     # 13
    E = edge_idx_l.size(1)  # 76800*13
    pos = pos.detach().cuda()
    edge_idx_l = edge_idx_l.cuda()
    rows, cols = edge_idx_l
    cart = pos[cols] - pos[rows]  # (998400, 3), p_j - p_i
    ppf = compute_prf(pos, normals, edge_idx_l)  # (998400, 4)
    x = torch.cat([cart, ppf], dim=-1)  # (998400, 7)

    x = self.layer1(x)  # (998400, 16), h
    x = x.view(N, K, -1)  # (76800, 13, 16)
    global_x = x.mean(1)  # (76800, 16)
    global_x = torch.cat([global_x, normals.view(-1, 3)], dim=-1)  # (76800, 19)
    x_g = self.layerg(global_x)  # (76800, 8), gamma

    x = torch.cat([x.view(E, -1), x_g[rows]], dim=1)  # (998400, 16+8=24)
    x = self.layer2(x)  # (998400, 16), h
    x = x.view(N, K, -1)  # (76800, 13, 16)
    global_x = x.mean(1)  # (76800, 16)
    x_g = self.layerg2(global_x)  # (76800, 8), gamma

    x = torch.cat([x.view(E, -1), x_g[rows]], dim=1)  # (998400, 16+8=24)
    x = self.layer3(x)  # (998400, 16), h
    x = x.view(N, K, -1)  # (76800, 13, 16)
    global_x = x.mean(1)  # (76800, 16)
    x_g = self.layerg3(global_x)  # (76800, 12), gamma

    # The first four channels parameterize a rotation as a unit quaternion.
    quat = x_g[:, :4]  # (76800, 4)
    quat = quat / (quat.norm(p=2, dim=-1) + 1e-8).view(-1, 1)  # (76800, 4)
    mat = QuatToMat.apply(quat).view(-1, 3, 3)  # (76800, 3, 3)

    # Kernel application
    x_g = x_g[:, 4:]  # (76800, 8)
    rot_cart = torch.matmul(mat.view(-1, 3, 3)[rows],
                            cart.view(-1, 3, 1)).view(-1, 3)  # (998400, 3)
    x = torch.cat([x.view(E, -1), x_g[rows], rot_cart], dim=1)  # (998400, 16+8+3=27)
    x = self.layer4(x)  # (998400, 1), phi
    x = x.view(N, K)  # (76800, 13)

    # point_fea = self.layer5(x)  # (76800, 3)
    # Softmax over each neighborhood yields the per-neighbor weights.
    point_fea = F.softmax(x, 1)  # (76800, 13)
    point_fea = point_fea.view(points.shape[0], points.shape[1],
                               points.shape[2], -1)  # (1, 240, 320, 13)
    point_fea = point_fea.permute(0, 3, 1, 2)  # (1, 13, 240, 320)
    return pca_normals, point_fea
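# --- Example: iteration-0 PCA normals without the custom ops ------------------
# A compact sketch of the PCA initialization above, using a brute-force k-NN
# and torch.linalg.eigh instead of radius_graph / compute_cov_matrices_dense /
# Sym3Eig. All names are illustrative; the brute-force distance matrix is only
# practical for small N (the 76800-point depth map above would need a proper
# neighbor search).
import torch

def pca_normals(pos, k=13):
    # pos: (N, 3) point positions.
    dist = torch.cdist(pos, pos)                      # (N, N), O(N^2) memory
    knn = dist.topk(k, largest=False).indices         # (N, k), includes self
    nbrs = pos[knn]                                   # (N, k, 3)
    centered = nbrs - nbrs.mean(dim=1, keepdim=True)  # center each neighborhood
    cov = centered.transpose(-1, -2) @ centered / k   # (N, 3, 3)
    _, eig_vec = torch.linalg.eigh(cov)               # ascending eigenvalues
    return eig_vec[:, :, 0]                           # (N, 3) normals

# points = torch.randn(1, 24, 32, 3)
# normals = pca_normals(points.view(-1, 3)).view(1, 24, 32, 3)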
def test(loader, string, epoch, size):
    model.eval()
    num = 0
    error_wo_amb = [0.0 for _ in range(FLAGS.iterations + 1)]  # only used by the disabled all-points error below
    error_wo_amb5000 = [0.0 for _ in range(FLAGS.iterations + 1)]
    for i, data in enumerate(loader):
        pos, batch = data.pos, data.batch

        # Compute statistics for normalization
        edge_idx_16, _ = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=16)
        row16, col16 = edge_idx_16
        cart16 = pos[col16].cuda() - pos[row16].cuda()
        stddev = torch.sqrt((cart16**2).mean()).detach()

        # Compute KNN-graph indices for GNN
        edge_idx_l, dense_l = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=size)

        # Iteration 0 (PCA)
        cov = compute_cov_matrices_dense(pos, dense_l, edge_idx_l[0]).cuda()
        eig_val, eig_vec = Sym3Eig.apply(cov)
        _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
        eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
        # mask = torch.isnan(eig_vec)
        # eig_vec[mask] = 0.0
        normals = eig_vec[:, :, 0]
        edge_idx_c = edge_idx_l.cuda()
        pos, batch = pos.detach().cuda(), batch.detach().cuda()
        old_weights = torch.ones_like(edge_idx_c[0]).float() / float(size)

        # Compute error for iteration 0 (PCA). Indices of the 5000-point
        # subset are stored in data.y (benchmark subset from the PCPNet
        # dataset/paper).
        normal_gt = data.x[:, 0:3]
        abs_dot5000 = torch.abs((normals[data.y].cpu() * normal_gt[data.y]).sum(-1))
        abs_dot5000 = torch.clamp(abs_dot5000, min=0.0, max=1.0)
        error_new_amb5000 = torch.sqrt(
            (torch.acos(abs_dot5000)**2).mean()).detach().item() * 180 / np.pi
        error_wo_amb5000[0] += error_new_amb5000
        abs_dot5000 = 0

        # Loop of Algorithm 1 in the paper
        for j in range(FLAGS.iterations):
            normals, old_weights = model(old_weights.detach(), pos, batch,
                                         normals.detach(), edge_idx_c,
                                         edge_idx_c[1].view(pos.size(0), -1),
                                         stddev)
            # Test error over all points (disabled):
            # abs_dot = torch.abs((normals.cpu() * normal_gt).sum(-1))
            # abs_dot = torch.clamp(abs_dot, min=0.0, max=1.0)
            # error_new_amb = torch.sqrt((torch.acos(abs_dot) ** 2).mean()).detach().item() * 180 / np.pi
            # error_wo_amb[j] += error_new_amb
            # abs_dot = 0

            # Error of iteration j on the 5000-point benchmark subset (data.y).
            abs_dot5000 = torch.abs((normals[data.y].cpu() * normal_gt[data.y]).sum(-1))
            abs_dot5000 = torch.clamp(abs_dot5000, min=0.0, max=1.0)
            error_new_amb5000 = torch.sqrt(
                (torch.acos(abs_dot5000)**2).mean()).detach().item() * 180 / np.pi
            error_wo_amb5000[j + 1] += error_new_amb5000
            abs_dot5000 = 0
            normals = normals.detach()
            old_weights = old_weights.detach()
        num += 1

    error_wo_amb5000 = [x / num for x in error_wo_amb5000]
    log_str = 'Epoch: {:02d}, Unoriented Test E 5000: '.format(epoch)
    for i, error in enumerate(error_wo_amb5000):
        log_str += '{}: {:.4f}, '.format(i, error)
    print(log_str)
    return np.array(error_wo_amb5000)
def train(epoch, size):
    model.train()
    # Learning-rate decay after epoch 150.
    if epoch == 151:
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.0005
    loss_sum = [0.0 for _ in range(FLAGS.iterations)]
    loss_count = 0
    for i, data in enumerate(train_loader):
        pos, batch = data.pos, data.batch

        # Compute global statistics for normalization
        edge_idx_16, _ = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=16)
        row16, col16 = edge_idx_16
        cart16 = pos[col16].cuda() - pos[row16].cuda()
        stddev = torch.sqrt((cart16**2).mean()).detach()

        # Compute KNN-graph indices for GNN
        edge_idx_l, dense_l = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=size)

        # Iteration 0 (PCA)
        cov = compute_cov_matrices_dense(pos, dense_l, edge_idx_l[0]).cuda()
        eig_val, eig_vec = Sym3Eig.apply(cov)
        _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
        eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
        # mask = torch.isnan(eig_vec)
        # eig_vec[mask] = 0.0
        normals = eig_vec[:, :, 0].cuda()
        pos, batch = pos.detach().cuda(), batch.detach().cuda()
        edge_idx_c = edge_idx_l.cuda()
        old_weights = torch.ones_like(edge_idx_l[0]).float() / float(size)
        old_weights = old_weights.cuda()
        normal_gt = data.x[:, 0:3].cuda()

        # Loop of Algorithm 1 in the paper
        for j in range(FLAGS.iterations):
            optimizer.zero_grad()
            normals, old_weights = model(old_weights.detach(), pos, batch,
                                         normals.detach(), edge_idx_c,
                                         edge_idx_c[1].view(pos.size(0), -1),
                                         stddev)
            # Loss of iteration j: sign-invariant distance to the ground-truth
            # normal (min over both orientations), then optimize.
            loss_orientation = torch.min(
                torch.sqrt(((normal_gt - normals)**2).sum(-1)),
                torch.sqrt(((normal_gt + normals)**2).sum(-1))).mean()
            loss_orientation.backward()
            loss_sum[j] += loss_orientation.detach().item()

            # Zero out NaN gradients before the optimizer step.
            num_nan = 0
            for p in model.parameters():
                if p.grad is None:
                    continue
                num_nan += torch.isnan(p.grad).sum().item()
                p.grad[torch.isnan(p.grad)] = 0.0
            if num_nan > 0:
                print('NUM_NAN:', num_nan)
            optimizer.step()
        loss_count += 1

    log_str = 'Epoch {}, Losses: '.format(epoch)
    for loss in loss_sum:
        log_str += '{:.7f}, '.format(loss / loss_count)
    print(log_str)
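# --- Example: the sign-invariant normal loss on its own -----------------------
# The loss used in train() treats n and -n as the same unoriented normal, so a
# perfectly flipped prediction incurs zero loss. A minimal sketch (the function
# name is illustrative):
# def unoriented_loss(n_pred, n_gt):
#     d_pos = ((n_gt - n_pred)**2).sum(-1).sqrt()   # distance to +n_gt
#     d_neg = ((n_gt + n_pred)**2).sum(-1).sqrt()   # distance to -n_gt
#     return torch.min(d_pos, d_neg).mean()
# # unoriented_loss(-n_gt, n_gt) == 0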
def test(loader, string, test_set, size):
    model.eval()
    print('Starting eval: {}, k_test = {}, Iterations: {}'.format(
        string, size, FLAGS.iterations))
    num = 0
    error_wo_amb5000 = [0.0 for _ in range(FLAGS.iterations + 1)]
    with torch.no_grad():
        for i, data in enumerate(loader):
            pos, batch = data.pos, data.batch

            # Compute statistics for normalization
            edge_idx_16, _ = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=16)
            row16, col16 = edge_idx_16
            cart16 = pos[col16].cuda() - pos[row16].cuda()
            stddev = torch.sqrt((cart16**2).mean()).detach().item()

            # Compute KNN-graph indices for GNN
            edge_idx_l, dense_l = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=size)

            # Iteration 0 (PCA)
            cov = compute_cov_matrices_dense(pos, dense_l, edge_idx_l[0]).cuda()
            eig_val, eig_vec = Sym3Eig.apply(cov)
            _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
            eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
            # mask = torch.isnan(eig_vec)
            # eig_vec[mask] = 0.0
            normals = eig_vec[:, :, 0]
            edge_idx_c = edge_idx_l.cuda()
            pos, batch = pos.detach().cuda(), batch.detach().cuda()
            old_weights = torch.ones_like(edge_idx_c[0]).float() / float(size)

            # Compute error for iteration 0 (PCA). Indices of the 5000-point
            # subset are stored in data.y (benchmark subset from the PCPNet
            # dataset/paper).
            normal_gt = data.x[:, 0:3]
            abs_dot5000 = torch.abs((normals[data.y].cpu() * normal_gt[data.y]).sum(-1))
            abs_dot5000 = torch.clamp(abs_dot5000, min=0.0, max=1.0)
            error_new_amb5000 = torch.sqrt(
                (torch.acos(abs_dot5000)**2).mean()).detach().item() * 180 / np.pi
            error_wo_amb5000[0] += error_new_amb5000
            abs_dot5000 = 0

            # Loop of Algorithm 1 in the paper
            for j in range(FLAGS.iterations):
                normals, old_weights = model(old_weights.detach(), pos, batch,
                                             normals.detach(), edge_idx_c,
                                             edge_idx_c[1].view(pos.size(0), -1),
                                             stddev)
                # Error of iteration j on the 5000-point benchmark subset (data.y).
                abs_dot5000 = torch.abs((normals[data.y].cpu() * normal_gt[data.y]).sum(-1))
                abs_dot5000 = torch.clamp(abs_dot5000, min=0.0, max=1.0)
                error_new_amb5000 = torch.sqrt(
                    (torch.acos(abs_dot5000)**2).mean()).detach().item() * 180 / np.pi
                error_wo_amb5000[j + 1] += error_new_amb5000
                abs_dot5000 = 0
                normals = normals.detach()
                old_weights = old_weights.detach()
            num += 1
            if (i + 1) % 5 == 0:
                print('{}/{} point clouds done'.format(i + 1, len(loader)))
            if FLAGS.results_path is not None:
                save_normals(normals, test_set, i)

    error_wo_amb5000 = [x / num for x in error_wo_amb5000]
    print('{} Unoriented Normal Angle RMSE: PCA (0 Iterations): {:.4f}, '
          '{} Iterations: {:.4f}'.format(string, error_wo_amb5000[0],
                                         FLAGS.iterations, error_wo_amb5000[-1]))
    return np.array(error_wo_amb5000)
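# --- Example: unoriented angle RMSE as a standalone metric --------------------
# The per-iteration error computed above, factored into a function (the name is
# illustrative). The absolute dot product removes the orientation ambiguity;
# clamping guards acos against values slightly above 1 from floating-point
# error.
import numpy as np
import torch

def unoriented_angle_rmse_deg(n_pred, n_gt):
    # n_pred, n_gt: (N, 3) unit normals. Returns the RMSE of the unoriented
    # angle between them, in degrees.
    abs_dot = torch.abs((n_pred * n_gt).sum(-1)).clamp(0.0, 1.0)
    return torch.sqrt((torch.acos(abs_dot)**2).mean()).item() * 180.0 / np.pi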
import sys
import time

import numpy as np
import torch
from torch_sym3eig import Sym3Eig

num_matrices = 100000
if len(sys.argv) > 1:
    num_matrices = int(sys.argv[1])

# Build a batch of random symmetric 3x3 matrices.
matrices = np.random.rand(num_matrices, 3, 3)
matrices = matrices + matrices.swapaxes(1, 2)
matrices = torch.from_numpy(matrices)
matrices.requires_grad_()

print('Computing Forward and Backward for {} matrices'.format(num_matrices))

starttime = time.process_time()
eig_val, eig_vec = Sym3Eig.apply(matrices)
runtime = (time.process_time() - starttime) * 1000.0
print('Forward CPU: {} ms'.format(runtime))

starttime = time.process_time()
torch.cat([eig_val.view(-1, 3, 1), eig_vec], dim=2).sum().backward()
runtime = (time.process_time() - starttime) * 1000.0
print('Backward CPU: {} ms'.format(runtime))

if torch.cuda.is_available():
    matrices = np.random.rand(num_matrices, 3, 3)
    matrices = matrices + matrices.swapaxes(1, 2)
    matrices = torch.from_numpy(matrices).cuda()
    matrices.requires_grad_()
    starttime = time.process_time()
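# --- Usage --------------------------------------------------------------------
# The benchmark takes the number of matrices as an optional CLI argument, e.g.:
#     python bench.py 500000
# ("bench.py" is a placeholder for this script's actual filename.)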
def test(loader, size):
    global R, t
    model.eval()
    with torch.no_grad():
        for i, data in enumerate(loader):
            original_pos = data.pos
            data = transform(data)
            pos, batch = data.pos, data.batch
            edge_idx_16, _ = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=16)
            row16, col16 = edge_idx_16
            cart16 = pos[col16].cuda() - pos[row16].cuda()
            stddev = torch.sqrt((cart16**2).mean()).detach()

            # Split the 480x640 depth image into four overlapping tiles
            # (20-pixel overlap); the masks mark each tile's 240x320 core.
            pos1 = pos.view(480, 640, 3)[:260, :340].float()
            mask1 = torch.zeros_like(pos1[:, :, 0])
            mask1[:240, :320] = 1.0
            pos2 = pos.view(480, 640, 3)[:260, 300:].float()
            mask2 = torch.zeros_like(pos2[:, :, 0])
            mask2[:240, 20:] = 1.0
            pos3 = pos.view(480, 640, 3)[220:, :340].float()
            mask3 = torch.zeros_like(pos3[:, :, 0])
            mask3[20:, :320] = 1.0
            pos4 = pos.view(480, 640, 3)[220:, 300:].float()
            mask4 = torch.zeros_like(pos4[:, :, 0])
            mask4[20:, 20:] = 1.0
            batch = torch.zeros_like(pos1[:, :, 0])
            examples = [(pos1.contiguous().view(-1, 3), mask1, batch.view(-1)),
                        (pos2.contiguous().view(-1, 3), mask2, batch.view(-1)),
                        (pos3.contiguous().view(-1, 3), mask3, batch.view(-1)),
                        (pos4.contiguous().view(-1, 3), mask4, batch.view(-1))]
            normals_list = []
            for (pos, mask_part, batch) in examples:
                edge_idx_l, dense_l = radius_graph(pos, 0.5, batch=batch, max_num_neighbors=size)
                cov = compute_cov_matrices_dense(pos, dense_l, edge_idx_l[0]).cuda()
                eig_val, eig_vec = Sym3Eig.apply(cov)
                _, argsort = torch.abs(eig_val).sort(dim=-1, descending=False)
                eig_vec = eig_vec.gather(2, argsort.view(-1, 1, 3).expand_as(eig_vec))
                mask = torch.isnan(eig_vec)
                eig_vec[mask] = 0.0  # For underdefined neighborhoods
                normals = eig_vec[:, :, 0]
                edge_idx_c = edge_idx_l.cuda()
                pos, batch = pos.detach().cuda(), batch.detach().cuda()
                old_weights = torch.ones_like(edge_idx_c[0]).float() / float(size)

                # Loop of Algorithm 1 in the paper
                for j in range(FLAGS.iterations):
                    normals, old_weights = model(old_weights.detach(), pos, batch,
                                                 normals.detach(), edge_idx_c,
                                                 edge_idx_c[1].view(pos.size(0), -1),
                                                 stddev)
                    normals = normals.detach()
                    old_weights = old_weights.detach()

                # Keep only each tile's 240x320 core region.
                mask_part = (mask_part == 1.0).view(-1)
                normals = normals[mask_part]
                normals = normals.view(240, 320, 3)
                normals_list.append(normals)

            # Stitch the four tiles back into a full 480x640 normal map.
            row1 = torch.cat([normals_list[0], normals_list[1]], dim=1)
            row2 = torch.cat([normals_list[2], normals_list[3]], dim=1)
            result = torch.cat([row1, row2], dim=0).contiguous()

            pos = original_pos.float()
            # Coordinate transform into the camera frame.
            rot = torch.matmul(R.view(1, 3, 3), pos.view(-1, 3, 1)).view(-1, 3)
            pos = rot + t.view(1, 3)

            # Flip normals to face the camera.
            pos_dir = (torch.zeros_like(pos) - pos).cuda()
            sign = torch.sign((result * pos_dir.view(480, 640, 3)).sum(-1))
            sign[sign == 0.0] = 1.0
            result = result * sign.view(480, 640, 1)

            # Smooth the orientation: flip normals whose mean dot product with
            # their neighbors' normals is negative.
            row, col = edge_idx_16
            sign_dist_to_neighbors = (
                result.view(-1, 3)[col].view(-1, 17, 3)[:, 1:, :] *
                result.view(-1, 1, 3)).sum(-1).mean(-1)
            # flip_mask = torch.stack([(sign_dist_to_neighbors < -0.6),
            #                          (torch.abs((result.view(-1, 3) * pos_dir).sum(-1)) < 0.3)], dim=1)
            # flip_mask = -flip_mask.all(-1).float()
            flip_mask = -(sign_dist_to_neighbors < 0.0).float()
            flip_mask[flip_mask == 0] = 1.0
            result = result * flip_mask.view(480, 640, 1)
            # result[:, 2] = -result[:, 2]

            # Point-cloud visualization with Open3D (disabled):
            # pcd = open3d.PointCloud()
            # pos = pos.view(-1, 3).cpu().numpy()
            # result = result.view(-1, 3).cpu().numpy()
            # pcd.points = open3d.Vector3dVector(pos)
            # # pcd.normals = open3d.Vector3dVector(result)
            # result = normals_to_rgb(result)
            # pcd.colors = open3d.Vector3dVector(result)
            # mesh_frame = open3d.create_mesh_coordinate_frame(size=0.6, origin=[0, 0, 0])
            # open3d.draw_geometries([pcd, mesh_frame])

            # Store the normal map as an image: map [-1, 1] to [0, 255].
            result = (result + 1.0) * 0.5
            result = result.cpu().numpy()
            result = np.clip(result, a_min=0.0, a_max=1.0)
            image = Image.fromarray(np.uint8(result * 255))
            image.save(FLAGS.results_path + '/ex{}.png'.format(i))
            print('Image stored')
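# --- Example: view-direction sign disambiguation on its own -------------------
# The camera-facing flip used above, isolated. With the camera at the origin, a
# normal should have a non-negative dot product with the view direction -p. A
# minimal sketch (the function name is illustrative):
# def orient_to_camera(normals, pos):
#     # normals, pos: (N, 3); camera assumed at the origin.
#     sign = torch.sign((normals * -pos).sum(-1))
#     sign[sign == 0.0] = 1.0   # leave exactly perpendicular normals unflipped
#     return normals * sign.unsqueeze(-1)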