def emd_mixup(data1, data2, use_cuda=True):
    """Mix two point clouds pairwise according to the EMD assignment.

    Each point of ``data1`` is matched to a point of ``data2`` by the EMD
    solver, then the two clouds are blended 50/50 along those matches.

    :param data1: point clouds, size [B, N, D]
    :param data2: point clouds, size [B, N, D]
    :param use_cuda: kept for interface compatibility; not used here
    :return: mixed point clouds, size [B, N, D]
    """
    lam = 0.5  # fixed 50/50 blend
    # Local import: the EMD CUDA extension is only needed when mixing.
    from emd_module import emdModule
    emd = emdModule()
    # assignment[b, i] = index of the point in data2 matched to data1[b, i]
    _, assignment = emd(data1, data2, 0.005, 3000)
    assignment = assignment.long()
    # Gather data2 into the order that lines up with data1 (vectorized).
    assignment = assignment.unsqueeze(-1).expand(-1, -1, data2.size(2))
    data2_aligned = torch.gather(data2, 1, assignment)
    return (1 - lam) * data1 + lam * data2_aligned
def emd_mixup_2obj(data, label, use_cuda=True):
    """Mix each cloud in a batch with another randomly chosen cloud from
    the same batch, pairing points by the EMD assignment.

    :param data: point clouds, size [B, N, D]
    :param label: per-cloud labels, size [B]
    :param use_cuda: place the permutation index on the GPU when True
    :return: (mixed clouds, cloud 1, EMD-aligned cloud 2, label1, label2)
    """
    data = data.cuda()
    batch_size = data.size(0)
    lam = 0.5  # fixed 50/50 blend
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)
    # Pair each cloud with a random partner from the same batch.
    s1, s2 = data, data[index]
    label1, label2 = label, label[index]
    # Local import, matching emd_mixup, so the module works even when
    # emdModule is not imported at file level.
    from emd_module import emdModule
    emd = emdModule()
    _, assignment = emd(s1, s2, 0.005, 3000)
    assignment = assignment.long()
    # Reorder s2 so its points line up with their EMD matches in s1.
    assignment = assignment.unsqueeze(-1).expand(-1, -1, s2.size(2))
    s2 = torch.gather(s2, 1, assignment)
    mixup_data = (1 - lam) * s1 + lam * s2
    return mixup_data, s1, s2, label1, label2
def emd_loss(p1, p2):
    """Average EMD between predicted and ground-truth point clouds.

    :param p1: predicted point clouds, size [B, N, D]
    :param p2: ground-truth point clouds, size [B, M, D]
    :return: scalar tensor — mean EMD over all batches
    """
    from emd_module import emdModule
    emd = emdModule()
    # The point assignment is not needed for the loss value.
    dist, _ = emd(p1, p2, 0.005, 3000)
    return dist.mean()
def __init__(self, model):
    """Wrap *model* and attach EMD and Chamfer distance metric modules."""
    super(FullModel, self).__init__()
    self.model = model
    # Metric modules used to score the model's output point clouds.
    self.EMD = emd.emdModule()
    self.CD = cd.chamferDist()
def calc_emd(output, gt, eps=0.005, iterations=50):
    """Per-sample EMD between *output* and ground-truth *gt* clouds.

    Runs the EMD solver with the given tolerance and iteration budget,
    then returns sqrt(distance) averaged over each cloud's points.
    """
    solver = emd.emdModule()
    dist, _ = solver(output, gt, eps, iterations)
    return torch.sqrt(dist).mean(1)
partial_dir = "/home/gpt/data/shapenet_data/val/" gt_dir = "/home/gpt/data/shapenet_data/complete/" vis = visdom.Visdom(port=8097, env=opt.env) # set your port def resample_pcd(pcd, n): """Drop or duplicate points so that pcd has exactly n points""" idx = np.random.permutation(pcd.shape[0]) if idx.shape[0] < n: idx = np.concatenate( [idx, np.random.randint(pcd.shape[0], size=n - pcd.shape[0])]) return pcd[idx[:n]] EMD = emd.emdModule() labels_generated_points = torch.Tensor( range(1, (opt.n_primitives + 1) * (opt.num_points // opt.n_primitives) + 1)).view(opt.num_points // opt.n_primitives, (opt.n_primitives + 1)).transpose(0, 1) labels_generated_points = (labels_generated_points) % (opt.n_primitives + 1) labels_generated_points = labels_generated_points.contiguous().view(-1) with torch.no_grad(): for i, model in enumerate(model_list): print(model) partial = torch.zeros((50, 5000, 3), device='cuda') gt = torch.zeros((50, opt.num_points, 3), device='cuda') for j in range(50): pcd = o3d.io.read_point_cloud(
def __init__(self, model):
    """Wrap *model* and attach an EMD metric module."""
    super(FullModel, self).__init__()
    self.model = model
    # Metric module used to score the model's output point clouds.
    self.EMD = emd.emdModule()
def __init__(self, model1, model2, model3):
    """Wrap three MSN sub-networks and attach an EMD metric module."""
    super(FullModel, self).__init__()
    self.model_1 = model1  # MSN network model
    self.model_2 = model2  # MSN network model
    self.model_3 = model3  # MSN network model
    # Metric module used to score the models' output point clouds.
    self.EMD = emd.emdModule()
B = points.size()[0] rand_index = torch.randperm(B).cuda() target_a = target target_b = target[rand_index] point_a = torch.zeros(B, 1024, 3) point_b = torch.zeros(B, 1024, 3) point_c = torch.zeros(B, 1024, 3) point_a = points point_b = points[rand_index] point_c = points[rand_index] point_a, point_b, point_c = point_a.to(device), point_b.to( device), point_c.to(device) remd = emd.emdModule() remd = remd.cuda() dis, ind = remd(point_a, point_b, 0.005, 300) for ass in range(B): point_c[ass, :, :] = point_c[ass, ind[ass].long(), :] int_lam = int(args.num_points * lam) int_lam = max(1, int_lam) random_point = torch.from_numpy( np.random.choice(1024, B, replace=False, p=None)) # kNN ind1 = torch.tensor(range(B)) query = point_a[ind1, random_point].view(B, 1, 3) dist = torch.sqrt( torch.sum(