def forward(self, a, b):
    # a, b: (B, 6, N) point clouds; channels 0:3 are xyz, channels 3:6 are normals.
    pcsize = a.size()[-1]
    if pcsize != self.n_samples:
        # Randomly subsample both clouds to n_samples points.
        indices = np.arange(pcsize)
        np.random.shuffle(indices)
        indices = indices[:self.n_samples]
        a = a[:, :, indices]
        b = b[:, :, indices]

    # Squared pairwise distances between xyz coordinates: (B, N, N).
    a_points = torch.transpose(a, 1, 2)[:, :, 0:3]
    b_points = torch.transpose(b, 1, 2)[:, :, 0:3]
    d = Ops.batch_pairwise_dist(a_points, b_points)

    # Pairwise normal disagreement, 1 - <n_a, n_b>, for every point pair.
    a_normals = torch.transpose(a, 1, 2)[:, :, 3:6]
    b_normals = torch.transpose(b, 1, 2)[:, :, 3:6]
    mma = torch.stack([a_normals] * self.n_samples, dim=1)
    mmb = torch.stack([b_normals] * self.n_samples, dim=1).transpose(1, 2)
    d_norm = 1 - torch.sum(mma * mmb, 3).squeeze()
    d += self.normal_weight * d_norm

    # Log the mean normal error at the nearest-neighbor assignments.
    normal_min_mean = torch.min(d_norm, dim=2)[0].mean()
    self.nlogger.update(normal_min_mean)

    # Symmetric Chamfer distance, averaged over the batch.
    chamfer_sym = torch.min(d, dim=2)[0].sum() + torch.min(d, dim=1)[0].sum()
    chamfer_sym /= a.size()[0]
    return chamfer_sym
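# --- Illustrative sketch, not part of the original module ---
# `Ops.batch_pairwise_dist` is called above but not defined in this file. A
# minimal implementation consistent with how it is used here (inputs of shape
# (B, N, 3) and (B, M, 3), output of squared distances of shape (B, N, M))
# might look like the following; the name is hypothetical.
def _batch_pairwise_dist_sketch(x, y):
    # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>
    xx = (x * x).sum(dim=2, keepdim=True)   # (B, N, 1)
    yy = (y * y).sum(dim=2).unsqueeze(1)    # (B, 1, M)
    xy = torch.bmm(x, y.transpose(1, 2))    # (B, N, M)
    return xx + yy - 2 * xy                 # broadcasts to (B, N, M)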
def chamfer_batch(self, a, b):
    # a, b: (B, 3, N) point clouds.
    pcsize = a.size()[-1]
    if pcsize != self.n_samples:
        # Randomly subsample both clouds to n_samples points.
        indices = np.arange(pcsize).astype(int)
        np.random.shuffle(indices)
        indices = torch.from_numpy(indices[:self.n_samples]).cuda()
        a = a[:, :, indices]
        b = b[:, :, indices]

    a = torch.transpose(a, 1, 2).contiguous()
    b = torch.transpose(b, 1, 2).contiguous()

    if self.cuda_opt:
        # Optimized CUDA kernel: returns per-point nearest-neighbor distances
        # in both directions.
        d1, d2 = self.dist(a, b)
        return torch.sum(d1) + torch.sum(d2)
    else:
        # Brute-force fallback over the full (B, N, N) distance matrix.
        d = Ops.batch_pairwise_dist(a, b)
        return torch.min(d, dim=2)[0].sum() + torch.min(d, dim=1)[0].sum()
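# --- Illustrative sanity check, not part of the original module ---
# The two branches of `chamfer_batch` should agree up to floating-point error.
# This sketch assumes a loss module exposing `chamfer_batch`, `cuda_opt`, and
# `n_samples` as above, and that a and b already contain exactly n_samples
# points (otherwise each call would subsample differently at random).
def _check_chamfer_branches(loss_module, a, b):
    loss_module.cuda_opt = True
    fast = loss_module.chamfer_batch(a, b)   # CUDA kernel path
    loss_module.cuda_opt = False
    slow = loss_module.chamfer_batch(a, b)   # brute-force path
    assert torch.allclose(fast, slow, rtol=1e-4), "Chamfer branches disagree"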
def evaluate(self):
    # Per-class Chamfer error accumulators: column 0 is pred->gt,
    # column 1 is gt->pred; 13 is the number of evaluation classes.
    total_error_class = np.zeros((13, 2))
    total_count_class = np.zeros(13)
    n_iter = 0.0
    total_d = 0.0
    self.model.eval()

    for i, data in enumerate(self.val_loader, 0):
        # Skip incomplete batches.
        if data[1].size()[0] != self.model.batch_size:
            continue
        in_data = Variable(data[0].cuda())
        target = Variable(data[1]).cuda()
        class_id = data[2][0]
        out_data = self.model(in_data)

        # Euclidean (not squared) pairwise distances between prediction and target.
        pd = Ops.batch_pairwise_dist(out_data.transpose(1, 2), target.transpose(1, 2))
        pd = torch.sqrt(pd)
        err_pred = torch.min(pd, dim=2)[0].data.cpu().numpy().mean()
        err_gt = torch.min(pd, dim=1)[0].data.cpu().numpy().mean()
        total_error_class[class_id, 0] += err_pred
        total_error_class[class_id, 1] += err_gt
        total_count_class[class_id] += 1.0
        # Accumulate the batch sum; the original never updated total_d,
        # so the final print always reported zero.
        total_d += (err_pred + err_gt) * self.model.batch_size

        # Running per-class averages for TensorBoard.
        scalar_group = {}
        for c in range(13):
            if total_count_class[c] > 0.0:
                scalar_group['class{}_error_pred'.format(c)] = total_error_class[c, 0] / total_count_class[c]
                scalar_group['class{}_error_gt'.format(c)] = total_error_class[c, 1] / total_count_class[c]
        self.writer.add_scalars('class_errors', scalar_group, i)

        # Checkpoint the accumulators every iteration.
        np.save('total_error_class.npy', total_error_class)
        np.save('total_count_class.npy', total_count_class)
        n_iter += self.model.batch_size

        # Save some point clouds for visualization.
        if i < 50:
            results_dir = os.path.join("eval", self.model.name)
            if not os.path.exists(results_dir):
                os.makedirs(results_dir)
            write_image_pc(os.path.join(results_dir, "out_{}".format(str(2 * i).zfill(4))),
                           (data[3][0, :, :, :], out_data[0, :, :].data.cpu()))
            save_torch_pc(os.path.join(results_dir, "out_{}.obj".format(str(2 * i + 1).zfill(4))),
                          target)
            print("Test PC saved.")

    # Final save and mean symmetric error per sample.
    np.save('total_error_class.npy', total_error_class)
    np.save('total_count_class.npy', total_count_class)
    print(total_d / n_iter)
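# --- Illustrative post-processing sketch, not part of the original module ---
# The arrays checkpointed by `evaluate` can be summarized offline. The file
# names match the np.save calls above; the class indexing is whatever the
# dataset's 13-class labeling uses (not specified in this file).
import numpy as np

def summarize_class_errors(err_path='total_error_class.npy',
                           cnt_path='total_count_class.npy'):
    err = np.load(err_path)   # (13, 2): summed pred->gt and gt->pred errors
    cnt = np.load(cnt_path)   # (13,): number of batches seen per class
    for c in range(13):
        if cnt[c] > 0:
            print('class {}: pred->gt {:.4f}, gt->pred {:.4f}'.format(
                c, err[c, 0] / cnt[c], err[c, 1] / cnt[c]))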