Example #1
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)
        if points1 is not None:
            # gated fusion
            concated_fea = tf.concat(axis=2, values=[interpolated_points, points1]) # B, ndataset1, nchannel1 + nchannel2
            concated_fea = tf.nn.relu(concated_fea)
            concated_fea = tf.expand_dims(concated_fea, 2)
            concated_fea = tf_util.conv2d(concated_fea, 1, [1, 1],
                                         padding='VALID', stride=[1, 1],
                                         bn=bn, is_training=is_training,
                                         scope='concat_conv', bn_decay=bn_decay)
            concated_fea = tf.squeeze(concated_fea, 2)
            fusion_weights = tf.sigmoid(concated_fea)
            interp_fusion_weights = tf.ones_like(fusion_weights) - fusion_weights
            new_points1 = tf.concat(axis=2, values=[tf.multiply(interpolated_points, interp_fusion_weights), tf.multiply(points1, fusion_weights)])
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=bn, is_training=is_training,
                                         scope='conv_%d'%(i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
        return new_points1
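Every example on this page leans on the custom CUDA ops three_nn and three_interpolate from the PointNet++ codebase. For readers without the compiled ops, here is a minimal NumPy sketch of what they compute, together with the inverse-distance weighting used above (the *_np helper names are hypothetical; the original CUDA op reports squared distances, which changes only the weight profile, not the normalization):

import numpy as np

def three_nn_np(xyz_query, xyz_support):
    # xyz_query: (B, N, 3), xyz_support: (B, M, 3)
    diff = xyz_query[:, :, None, :] - xyz_support[:, None, :, :]  # (B, N, M, 3)
    d2 = np.sum(diff ** 2, axis=-1)                               # (B, N, M)
    idx = np.argsort(d2, axis=-1)[:, :, :3]                       # (B, N, 3)
    dist = np.take_along_axis(d2, idx, axis=-1)                   # squared distances
    return dist, idx

def three_interpolate_np(features, idx, weight):
    # features: (B, M, C); idx, weight: (B, N, 3) -> (B, N, C)
    gathered = np.stack([f[i] for f, i in zip(features, idx)])    # (B, N, 3, C)
    return np.sum(gathered * weight[..., None], axis=2)

xyz1 = np.random.rand(2, 16, 3).astype('float32')   # dense query points
xyz2 = np.random.rand(2, 4, 3).astype('float32')    # sparse support points
feats2 = np.random.rand(2, 4, 8).astype('float32')  # features on the sparse set
dist, idx = three_nn_np(xyz1, xyz2)
dist = np.maximum(dist, 1e-10)
weight = (1.0 / dist) / np.sum(1.0 / dist, axis=2, keepdims=True)
interp = three_interpolate_np(feats2, idx, weight)   # (2, 16, 8)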
Example #2
def three_nearest_interpolation(xyz_query,
                                xyz_support,
                                features_support,
                                k_interpolation=3):
    """Interpolate support features onto query points; needs the custom
    CUDA ops three_nn() and three_interpolate().

    Parameters
    ----------
    xyz_query : Tensor
        (B, N, 3) tensor of the xyz positions of the unknown points
    xyz_support : Tensor
        (B, M, 3) tensor of the xyz positions of the known points (i.e. B point-cloud examples, each of shape M x 3)
    features_support : Tensor
        (B, M, C) tensor of features to be propagated (i.e. B point-cloud examples, each of shape M x C)
    k_interpolation:
        the number of neighbors used for interpolation

    Returns
    -------
    new_features : Tensor
        (B, N, C) tensor of interpolated features for the N query points
    """
    if xyz_support is not None:
        dist, idx = three_nn(xyz_query, xyz_support)  # (B,N,3), (B,N,3)
        dist_recip = 1.0 / (dist + 1e-8)  # (B,N,3)
        norm = tf.reduce_sum(dist_recip, axis=2, keepdims=True)  # (B,N,1)
        weight = dist_recip / norm  # (B,N,3)
        interpolated_feats = three_interpolate(features_support, idx,
                                               weight)  # (B,N,C)
    else:
        raise ValueError('make sure the known parameters are valid')
    return interpolated_feats  # (B,N,C)
Example #3
def deconv(pts, fts, qrs, qrs_fts, C, tag, is_training):
    """
    pts: points of previous layer, e.g.,(B, N/2, 3)
    fts: point features of previous layer, e.g.,(B, N/2, C)
    qrs: selected representative points of this layer, e.g.,(B, N, 3)
    qrs_fts: e.g., (B, N, C)
    """
    dist, idx = three_nn(qrs, pts)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
    norm = tf.tile(norm, [1, 1, 3])
    weight = (1.0 / dist) / norm
    interpolated_fts = three_interpolate(fts, idx, weight)  # (B, N, C)
    if qrs_fts is not None:
        interpolated_fts = tf.concat([interpolated_fts, qrs_fts], axis=2)

    interpolated_fts = tf.expand_dims(interpolated_fts, 2)
    new_features = pf.conv2d(interpolated_fts,
                             C,
                             tag,
                             is_training=is_training,
                             kernel_size=[1, 1])
    new_features = tf.squeeze(new_features, [2])

    return new_features
Example #4
def texture_geodesic_tconv(xyz1,
                           xyz2,
                           points1,
                           points2,
                           mlp,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True):
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #5
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=bn, is_training=is_training,
                                         scope='conv_%d'%(i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
        return new_points1
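To make the weighting concrete: a point whose three nearest neighbors lie at distances 1, 2 and 4 gets weights proportional to 1, 1/2 and 1/4, normalized to sum to one. A quick check in plain Python (no TF needed):

dists = [1.0, 2.0, 4.0]
recip = [1.0 / d for d in dists]        # [1.0, 0.5, 0.25]
total = sum(recip)                      # 1.75
weights = [r / total for r in recip]    # [0.571..., 0.285..., 0.142...]
assert abs(sum(weights) - 1.0) < 1e-12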
Example #6
    def call(self, xyz1, xyz2, points1, points2):
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        # sum the inverse distances over the three neighbors (keeping dims)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        # derive each neighbor's weight from its distance
        weight = (1.0 / dist) / norm

        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)

        for i, num_out_channel in enumerate(self.mlp):
            new_points1 = self.conv2d(new_points1,
                                      num_out_channel, [1, 1],
                                      i,
                                      padding='VALID',
                                      stride=[1, 1])
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #7
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, scope, bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = Conv2D("conv_%d" % i, new_points1, num_out_channel, [1, 1], padding='VALID',
                                 activation=BNReLU if bn else tf.nn.relu)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #8
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)  # interpolate points2 up to the point count of points1

        if points1 is not None:
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):  # recompute features after the interpolation
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=bn, is_training=is_training,
                                         scope='conv_%d'%(i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
        return new_points1
Example #9
def interpolate_points(dense_xyz, sparse_xyz, sparse_point):
    dist, idx = three_nn(dense_xyz, sparse_xyz)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
    norm = tf.tile(norm, [1, 1, 3])
    weight = (1.0 / dist) / norm
    interpolated_points = three_interpolate(sparse_point, idx, weight)
    return interpolated_points
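A hedged usage sketch for the helper above (TF 1.x graph mode; shapes are illustrative, and the compiled tf_interpolate ops must be importable):

dense_xyz = tf.placeholder(tf.float32, (8, 1024, 3))    # target positions
sparse_xyz = tf.placeholder(tf.float32, (8, 256, 3))    # source positions
sparse_feat = tf.placeholder(tf.float32, (8, 256, 64))  # source features
upsampled = interpolate_points(dense_xyz, sparse_xyz, sparse_feat)  # (8, 1024, 64)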
Example #10
def PointASNLDecodingLayer(xyz1, xyz2, points1, points2, nsample, mlp, is_training, bn_decay, weight_decay, scope, bn=True, use_xyz=True, use_knn=True, radius=None, dilate_rate=1, mode='concat', NL=False):
    ''' Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        batch_size, num_points, num_channel = points2.get_shape()
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keepdims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm

        '''Point NonLocal Cell'''
        if NL:
            new_nonlocal_point = PointNonLocalCell(points1, tf.expand_dims(points2, axis=1), [max(32,num_channel),num_channel],
                                                       is_training, bn_decay, weight_decay, scope, bn, mode=mode)
            new_nonlocal_point = tf.squeeze(new_nonlocal_point, [1])  # (batch_size, npoints, mlp2[-1])
            points2 = tf.add(points2, new_nonlocal_point)

        interpolated_points = three_interpolate(points2, idx, weight)

        '''Point Local Cell'''
        grouped_xyz, grouped_feature, idx = grouping(interpolated_points, nsample, xyz1, xyz1, use_xyz=use_xyz,use_knn=use_knn, radius=radius)
        grouped_xyz -= tf.tile(tf.expand_dims(xyz1, 2), [1, 1, nsample, 1])  # translation normalization

        weight = weight_net_hidden(grouped_xyz, [32], scope = 'decode_weight_net', is_training=is_training, bn_decay = bn_decay, weight_decay = weight_decay)

        new_points = grouped_feature
        new_points = tf.transpose(new_points, [0, 1, 3, 2])

        new_points = tf.matmul(new_points, weight)

        new_points = tf_util.conv2d(new_points, mlp[0], [1,new_points.get_shape()[2].value],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='decode_after_conv', bn_decay=bn_decay, weight_decay = weight_decay)

        if points1 is not None:
            new_points1 = tf.concat(axis=-1, values=[new_points, tf.expand_dims(points1, axis = 2)]) # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points

        for i, num_out_channel in enumerate(mlp):
            if i != 0:
                new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                            padding='VALID', stride=[1,1],
                                            bn=bn, is_training=is_training,
                                            scope='conv_%d'%(i), bn_decay=bn_decay, weight_decay = weight_decay)
        new_points = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]

        return new_points
Example #11
def three_nn_upsampling(target_points, source_points):

    dist, idx = three_nn(target_points, source_points)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
    norm = tf.tile(norm, [1, 1, 3])
    weight = (1.0 / dist) / norm

    return idx, weight
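Unlike the other helpers, this one stops short of the gather: the caller is expected to feed the returned pair into three_interpolate, along the lines of (names illustrative):

idx, weight = three_nn_upsampling(target_xyz, source_xyz)
upsampled_feats = three_interpolate(source_feats, idx, weight)  # (B, num_tpoints, C)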
Example #12
def point_upsmaple(xyz1, xyz2, points2, scope):
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)
        return interpolated_points 
Example #13
def test_grad(self):
  with self.test_session():
    points = tf.constant(np.random.random((1,8,16)).astype('float32'))
    xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32'))
    xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32'))
    dist, idx = three_nn(xyz1, xyz2)
    weight = tf.ones_like(dist)/3.0
    interpolated_points = three_interpolate(points, idx, weight)
    err = tf.test.compute_gradient_error(points, (1,8,16), interpolated_points, (1,128,16))
    self.assertLess(err, 1e-4)
Example #14
def three_nearest_interpolation(xyz_query,
                                xyz_support,
                                features_support,
                                batch_inds,
                                k_interpolation=3):
    """need custom CUDA ops to support (three_nn(), three_interpolate())
	----------
	weakly_points : torch.Tensor
		(n, 3) tensor of the xyz positions of the unknown points
	xyz_support : torch.Tensor
		(B, m, 3) tensor of the xyz positions of the known points (i.e. B PC examples, each is mx3 shape)
	features_support : torch.Tensor
		(B, m, C) tensor of features to be propagated (i.e. B PC examples, each is mx3 shape)
	batch_inds: torch.Tensor
		(n,) tensor of the batch indices to denote which batch for the weakly_points, values are 0 to B-1
	k_interpolation:
		the number of neighbors used for interpolation

	Returns
	-------
	new_features : torch.Tensor
		(n, C2, 1) tensor of the features of the weakly points' features(i.e., n weakly points' new features)
	"""

    # HACK: to query features for each weak point, treat the unknown (n, 3) tensor as n
    # "batches" of a single point each, so the batched MaskedUpSampled code can be reused.
    xyz_query = tf.reshape(xyz_query,
                           (tf.shape(xyz_query)[0], 1, -1))  # (n,1,3)

    # points_current = points_current[batch_inds,...]  # BUG: CUDA error: an illegal memory access was encountered when use a tensor
    xyz_support = tf.gather(
        xyz_support, batch_inds, axis=0
    )  # (B,m,3) --> (n,m,3) as each weak point might come from different batch

    # features_current = features_current[batch_inds,...]
    features_support = tf.gather(
        features_support, batch_inds,
        axis=0)  # (B,m,C) --> (n,m,C), e.g., (n, 10240,32)

    if xyz_support is not None:
        # query nearest 3 neighbors for each weak point
        dist, idx = three_nn(xyz_query, xyz_support)  # (n,1,3), (n,1,3)
        dist_recip = 1.0 / (dist + 1e-8)  # (n,1,3)
        norm = tf.reduce_sum(dist_recip, axis=2, keepdims=True)  # (n,1,1)
        weight = dist_recip / norm  # (n,1,3)

        interpolated_feats = three_interpolate(features_support, idx,
                                               weight)  # (n,1,C)
    else:
        raise ValueError('make sure the known parameters are valid')

    return interpolated_feats  # (n,1,C)
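The gather-by-batch trick above replicates each query's support set so the batched CUDA op can treat every weak point as its own batch of one. A minimal NumPy illustration of the index bookkeeping (sizes hypothetical):

import numpy as np

B, m, n = 2, 5, 3
xyz_query = np.random.rand(n, 3)       # n weak points drawn from mixed examples
batch_inds = np.array([0, 1, 1])       # which example each weak point came from
xyz_support = np.random.rand(B, m, 3)

q = xyz_query.reshape(n, 1, 3)         # (n, 1, 3): n "batches" of a single query
s = xyz_support[batch_inds]            # (n, m, 3): matching support set per query
print(q.shape, s.shape)                # (3, 1, 3) (3, 5, 3)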
Example #15
def fun(xyz1, xyz2, pts2):
    with tf.device("/cpu:0"):
        points = tf.constant(np.expand_dims(pts2, 0))
        xyz1 = tf.constant(np.expand_dims(xyz1, 0))
        xyz2 = tf.constant(np.expand_dims(xyz2, 0))
        dist, idx = three_nn(xyz1, xyz2)
        # weight = tf.ones_like(dist)/3.0
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        print(norm)
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session("") as sess:
        tmp, pts1, d, w = sess.run([xyz1, interpolated_points, dist, weight])
        pts1 = pts1.squeeze()
    return pts1
Example #16
def three_nn_upsampling(target_points, source_points):
    '''
	Input:
		target_points: (batch_size, num_tpoints, 3)
		source_points: (batch_size, num_spoints, 3)
	Returns:
		idx: (batch_size, num_tpoints, 3)
		weight: (batch_size, num_tpoints, 3)
	'''

    dist, idx = three_nn(target_points, source_points)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
    norm = tf.tile(norm, [1, 1, 3])
    weight = (1.0 / dist) / norm

    return idx, weight
Example #17
def test(self):
    np.random.seed(100)
    pts = np.random.random((32, 128, 64)).astype("float32")
    tmp1 = np.random.random((32, 512, 3)).astype("float32")
    tmp2 = np.random.random((32, 128, 3)).astype("float32")
    with tf.device("/cpu:0"):
        points = tf.constant(pts)
        xyz1 = tf.constant(tmp1)
        xyz2 = tf.constant(tmp2)
        dist, idx = three_nn(xyz1, xyz2)
        weight = tf.ones_like(dist) / 3.0
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.compat.v1.Session("") as sess:
        now = time.time()
        for _ in range(100):
            ret = sess.run(interpolated_points)
        print(time.time() - now)
        print(ret.shape, ret.dtype)
Example #18
def test_grad(self):
    with self.test_session():
        features = tf.constant(
            np.random.random(
                (1, 8, 16)).astype('float32'))  # features, (1,8,16)
        print(features)
        xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
        xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
        dist, idx = three_nn(xyz1, xyz2)  # (1,128,3), (1,128,3)
        weight = tf.ones_like(dist) / 3.0  # (1,128,3)
        interpolated_features = three_interpolate(features, idx,
                                                  weight)  # (1,128,16)
        print(interpolated_features)
        err = tf.test.compute_gradient_error(features, (1, 8, 16),
                                             interpolated_features,
                                             (1, 128, 16))
        print(err)
        self.assertLess(err, 1e-4)
Example #19
def fun(xyz1,xyz2,pts2):
    with tf.device('/cpu:0'):
        points = tf.constant(np.expand_dims(pts2,0))
        xyz1 = tf.constant(np.expand_dims(xyz1,0))
        xyz2 = tf.constant(np.expand_dims(xyz2,0))
        dist, idx = three_nn(xyz1, xyz2)
        #weight = tf.ones_like(dist)/3.0
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm, [1,1,3])
        print(norm)
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session('') as sess:
        tmp,pts1,d,w = sess.run([xyz1, interpolated_points, dist, weight])
        #print w
        pts1 = pts1.squeeze()
    return pts1
Example #20
def pointnet_upsample(xyz1, xyz2, points2, scope):
    """ PointNet Feature Propogation (FP) Module
            Input:
                xyz1: (batch_size, ndataset1, 3) TF tensor
                xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
                points2: (batch_size, ndataset2, nchannel2) TF tensor
            Return:
                new_points: (batch_size, ndataset1, nchannel2) TF tensor
    """
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(
            points2, idx, weight)  # B x ndataset1 x nchannel2

        return interpolated_points
Example #21
def pointnet_fp_module(xyz1,
                       xyz2,
                       points1,
                       points2,
                       mlp,
                       last_mlp_activation=True,
                       scope='fp'):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1:       (batch_size, ndataset1, 3) TF tensor
            xyz2:       (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1:    (batch_size, ndataset1, nchannel1) TF tensor
            points2:    (batch_size, ndataset2, nchannel2) TF tensor
            mlp:        list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            if i == len(mlp) - 1 and not last_mlp_activation:
                activation_fn = None
            else:
                activation_fn = tf.nn.relu
            # assumption: this repo's conv2d wrapper accepts an activation kwarg
            new_points1 = conv2d(inputs=new_points1,
                                 filters=num_out_channel,
                                 activation=activation_fn,
                                 name='mlp_%d' % (i + 1))

        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #22
def pc_upsampling(xyz_upsample, xyz, feat, scope='upsampling'):
    """ Fully connected layer with non-linear operation.
  
  Args:
    xyz_upsample: 3-D tensor B x N2 x 3
    xyz: 3-D tensor B x N x 3
    feat: 3-D tensor B x N x C
  
  Returns:
    feat_upsample: 3-D tensor B x N2 x C
  """
    with tf.variable_scope(scope) as sc:
        dist, idx_de = three_nn(xyz_upsample, xyz)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        feat_upsample = three_interpolate(feat, idx_de, weight)

        return feat_upsample
Example #23
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Part segmentation network: input is BxNx6 (xyz plus 3 extra channels), output is BxNx50 per-point scores """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')

    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])

    # Calculate the relative coordinates generated in the first set abstraction layer
    # These relative coordinates will be concatenated in the last feature propagation layer
    dist, idx = three_nn(l0_xyz, l1_xyz) 
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
    norm = tf.tile(norm,[1,1,3])
    weight = (1.0/dist) / norm
    l0_virtual_centers = three_interpolate(l1_xyz, idx, weight)
    l0_xyz_rel = l0_xyz - l0_virtual_centers

    #l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz_rel, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #24
def call(self, x):
    '''
    Input:
        1: unknown (dense set), 2: known (sparse set)
        xyz1: (batch_size, ndataset1, 3) TF tensor
        xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
        points1: (batch_size, ndataset1, nchannel1) TF tensor
        points2: (batch_size, ndataset2, nchannel2) TF tensor
    '''
    xyz1, xyz2, points1, points2 = x
    dist, idx = three_nn(xyz1, xyz2)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum(1.0/dist, axis=2, keep_dims=True)
    norm = tf.tile(norm, [1,1,3])
    weight = (1.0/dist)/norm
    interpolated_points = three_interpolate(points2, idx, weight)
    # assume points1 is not None
    new_points1 = tf.concat(axis=2, values=[interpolated_points, points1])  # B, n1, c1+c2
    new_points1 = tf.expand_dims(new_points1, 2)  # B, n1, 1, c1+c2
    return new_points1
Example #25
def fun(xyz1, xyz2, pts2):
    with tf.device('/cpu:0'):
        points = tf.constant(np.expand_dims(pts2, 0))
        xyz1 = tf.constant(np.expand_dims(xyz1, 0))
        xyz2 = tf.constant(np.expand_dims(xyz2, 0))
        # distance to, and index of, the three nearest points in xyz2 for each point in xyz1
        dist, idx = three_nn(xyz1, xyz2)
        # weight = tf.ones_like(dist)/3.0
        # print(weight)
        # keep the distances strictly positive
        dist = tf.maximum(dist, 1e-10)
        # sum of inverse distances to the three nearest points
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        # weight assigned to each of the three points
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session('') as sess:
        tmp, pts1, d, w = sess.run([xyz1, interpolated_points, dist, weight])
        #print w
        pts1 = pts1.squeeze()
    return pts1
Example #26
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training,
                       bn_decay, scope):
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)

        kernel = [None] * len(mlp)
        bias = [None] * len(mlp)
        for i, num_out_channel in enumerate(mlp):
            kernel[i] = tf.get_variable(
                'kernel' + str(i),
                [1, 1,
                 new_points1.get_shape()[-1].value, num_out_channel],
                initializer=tf.contrib.layers.xavier_initializer(),
                dtype=tf.float32)
            bias[i] = tf.get_variable('bias' + str(i), [num_out_channel],
                                      initializer=tf.constant_initializer(0.0),
                                      dtype=tf.float32)
            new_points1 = tf.nn.conv2d(new_points1,
                                       kernel[i], [1, 1, 1, 1],
                                       padding='VALID')
            new_points1 = tf.nn.bias_add(new_points1, bias[i])
            new_points1 = tf.nn.relu(new_points1)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
    return new_points1
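The hand-rolled tf.get_variable/tf.nn.conv2d stack above predates tf.keras; the same per-point shared MLP can be written much more compactly in modern TF. A minimal sketch, assuming eager TF 2.x and omitting batch norm:

import tensorflow as tf

class SharedMLP(tf.keras.layers.Layer):
    # A stack of 1x1 convs applied to (B, N, 1, C) point tensors.
    def __init__(self, channels):
        super().__init__()
        self.convs = [tf.keras.layers.Conv2D(c, (1, 1), activation='relu')
                      for c in channels]

    def call(self, x):          # x: (B, N, 1, C_in)
        for conv in self.convs:
            x = conv(x)
        return x                # (B, N, 1, channels[-1])

mlp = SharedMLP([128, 128])
out = mlp(tf.zeros((4, 1024, 1, 64)))  # -> (4, 1024, 1, 128)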
Example #27
def pointconv_decoding_depthwise(xyz1,
                                 xyz2,
                                 points1,
                                 points2,
                                 radius,
                                 sigma,
                                 K,
                                 mlp,
                                 is_training,
                                 bn_decay,
                                 weight_decay,
                                 scope,
                                 bn=True,
                                 use_xyz=True,
                                 is_dist=False):
    """ Input:
            depthwise version of pointconv
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    """
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        # setup for deConv
        grouped_xyz, grouped_feature, idx = pointconv_grouping(
            interpolated_points, K, xyz1, xyz1, use_xyz=use_xyz)

        weight = weight_net(grouped_xyz,
                            [32, grouped_feature.get_shape()[3].value],
                            scope='decode_weight_net',
                            is_training=is_training,
                            bn_decay=bn_decay,
                            weight_decay=weight_decay,
                            is_dist=is_dist)

        new_points = tf.multiply(grouped_feature, weight)

        new_points = tf_util.reduce_sum2d_conv(new_points,
                                               axis=2,
                                               scope='fp_sumpool',
                                               bn=True,
                                               is_dist=is_dist,
                                               bn_decay=bn_decay,
                                               is_training=is_training,
                                               keepdims=False)

        if points1 is not None:
            # [B x ndataset1 x nchannel1+nchannel2]
            new_points1 = tf.concat(axis=-1, values=[new_points, points1])
        else:
            new_points1 = new_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         is_dist=is_dist,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay,
                                         weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # [B x ndataset1 x mlp[-1]]
        return new_points1
Example #28
import numpy as np
import tensorflow as tf
import time
from tf_interpolate import three_nn, three_interpolate

if __name__ == "__main__":
    np.random.seed(100)

    target_points = np.random.random((64, 8192, 3)).astype("float32")
    reference_points = np.random.random((64, 1024, 3)).astype("float32")

    with tf.device("/cpu:0"):
        xyz1 = tf.constant(target_points)
        xyz2 = tf.constant(reference_points)
        dist, idx = three_nn(xyz1, xyz2)

    with tf.Session("") as sess:
        # Warm up
        dist, idx = sess.run(three_nn(xyz1, xyz2))

        # Run
        s = time.time()
        dist, idx = sess.run(three_nn(xyz1, xyz2))
        print("Time: {}".format(time.time() - s))
        print(idx.shape, idx.dtype)
        print(dist.shape, dist.dtype)
        print(dist[:3, :3, :1].flatten())
        print(idx[:3, :3, :1].flatten())

        # Expected output
        # (64, 8192, 3) int32
Example #29
def triplenet_fp_module(xyz1,
                        xyz2,
                        points1,
                        points2,
                        mlp1,
                        mlp2,
                        is_training,
                        bn_decay,
                        scope,
                        bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp1, mlp2: lists of int32 -- output sizes for the two MLPs applied to each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    batch_size = xyz1.get_shape()[0].value
    ndataset1 = xyz1.get_shape()[1].value
    with tf.variable_scope(scope) as sc:
        # retrieve three nearest neighbors
        _, knn_idx = three_nn(xyz1, xyz2)  #shape=(B,ndataset1,3)
        B_idx = np.arange(batch_size)
        B_idx = np.reshape(B_idx, (batch_size, 1))
        B_idx = np.tile(B_idx, 3 * ndataset1)
        B_idx = np.reshape(B_idx, (batch_size, ndataset1, 3))
        knn_idx = tf.stack((B_idx, knn_idx), -1)
        knn_points = tf.gather_nd(points2,
                                  knn_idx)  #shape=(B,ndataset1,3,nchannel2)

        # Generate all possible permutations of the 3 nearest neighbors
        knn_points_tr = tf.transpose(
            knn_points,
            [2, 0, 1, 3])  #bringing the neighbors to the first axis
        point_idxs = np.array(
            [0, 1, 2, 0, 2, 1, 1, 0, 2, 1, 2, 0, 2, 0, 1, 2, 1, 0])
        knn_points_tr = tf.gather(knn_points_tr, point_idxs)
        knn_points = tf.transpose(knn_points_tr, [1, 2, 0, 3])

        new_points1 = tf_util.conv2d(knn_points,
                                     mlp1[0], [1, 3],
                                     padding='VALID',
                                     stride=[1, 3],
                                     bn=bn,
                                     is_training=is_training,
                                     scope='interpol_%d' % (0),
                                     bn_decay=bn_decay)

        mlp1.pop(0)  # NOTE: mutates the caller's mlp1 list; the first entry was consumed above
        for i, num_out_channel in enumerate(mlp1, start=1):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='interpol_%d' % (i),
                                         bn_decay=bn_decay)

        new_points1 = tf.reduce_max(new_points1, axis=[2], name='maxpool')

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[new_points1,
                                points1])  # B,ndataset1,nchannel1+nchannel2

        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp2):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay)

        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
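The point_idxs table above is exactly the 3! = 6 permutations of the three neighbor slots, flattened, so the [1, 3] convolution sees every ordering of the neighbors before the max-pool. A quick check in plain Python:

from itertools import permutations

point_idxs = [0, 1, 2, 0, 2, 1, 1, 0, 2, 1, 2, 0, 2, 0, 1, 2, 1, 0]
flat_perms = [i for perm in permutations(range(3)) for i in perm]
assert point_idxs == flat_perms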
Example #30
def deconv_new(pts,
               fts,
               qrs,
               tag,
               N,
               K,
               radius,
               P,
               C,
               C_pts_fts,
               is_training,
               with_X_transformation,
               depth_multiplier,
               D=1,
               sorting_method=None,
               with_global=False,
               knn=False):
    """
    pts: points of previous layer, e.g.,(B, N/2, 3)
    fts: point features of previous layer, e.g.,(B, N/2, C)
    qrs: selected representative points of this layer, e.g.,(B, N, 3)
    N: batch_size,
    K: neighbor_size,
    D: dilation parameter,
    P: the number of selected representative points,
    C: output feature number per point,
    C_pts_fts: feature number for each local point
    radius: float32, the radius of query ball search
    knn: True: knn; False: query ball
    """
    dist, idx = three_nn(qrs, pts)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
    norm = tf.tile(norm, [1, 1, 3])
    weight = (1.0 / dist) / norm
    interpolated_fts = three_interpolate(fts, idx, weight)  # (B, N, C)

    if knn:
        _, indices_dilated = pf.knn_indices_general(qrs, qrs, K * D, True)
        indices = indices_dilated[:, :, ::D, :]
        nn_pts = tf.gather_nd(qrs, indices,
                              name=tag + 'nn_pts')  # (B, N, K, 3)
        nn_fts_from_prev = tf.gather_nd(interpolated_fts,
                                        indices,
                                        name=tag + 'nn_fts')
    else:
        indices, pts_cnt = query_ball_point(radius, K, qrs, qrs)
        nn_pts = group_point(qrs, indices)
        nn_fts_from_prev = group_point(interpolated_fts, indices)

    nn_pts_center = tf.expand_dims(qrs, axis=2,
                                   name=tag + 'nn_pts_center')  # (B, N, 1, 3)
    nn_pts_local = tf.subtract(nn_pts,
                               nn_pts_center,
                               name=tag + 'nn_pts_local')  # (B, N, K, 3)

    # Prepare features to be transformed
    nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts,
                                 tag + 'nn_fts_from_pts_0', is_training)
    nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts,
                               tag + 'nn_fts_from_pts', is_training)

    nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev],
                             axis=-1,
                             name=tag + 'nn_fts_input')

    if with_X_transformation:
        ######################## X-transformation #########################
        X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training,
                        (1, K))  # following paper
        X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
        X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training,
                                  (1, K))  # following paper
        X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
        X_2 = pf.depthwise_conv2d(X_1_KK,
                                  K,
                                  tag + 'X_2',
                                  is_training, (1, K),
                                  activation=None)  # following paper
        X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
        fts_X = tf.matmul(X_2_KK, nn_fts_input, name=tag + 'fts_X')
        ###################################################################
    else:
        fts_X = nn_fts_input

    fts_conv = pf.separable_conv2d(fts_X,
                                   C,
                                   tag + 'fts_conv',
                                   is_training, (1, K),
                                   depth_multiplier=depth_multiplier)
    fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')

    if with_global:
        fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
        fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global_',
                              is_training)
        return tf.concat([fts_global, fts_conv_3d],
                         axis=-1,
                         name=tag + 'fts_conv_3d_with_global')
    else:
        return fts_conv_3d
Example #31
def feature_decoding_layer(xyz1,
                           xyz2,
                           points1,
                           points2,
                           radius,
                           sigma,
                           K,
                           mlp,
                           is_training,
                           bn_decay,
                           weight_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           boundary_label=None):
    ''' Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        #setup for deConv
        if boundary_label is None:
            pass
        else:
            tmp_boundary_label = tf.tile(tf.expand_dims(boundary_label, [-1]),
                                         [1, 1, interpolated_points.shape[2]])
            interpolated_points = interpolated_points * tmp_boundary_label
        grouped_xyz, grouped_feature, idx, grouped_boundary_label = pointconv_util.grouping(
            interpolated_points,
            K,
            xyz1,
            xyz1,
            use_xyz=use_xyz,
            boundary_label=boundary_label)

        weight = weight_net_hidden(grouped_xyz, [32],
                                   scope='decode_weight_net',
                                   is_training=is_training,
                                   bn_decay=bn_decay,
                                   weight_decay=weight_decay)

        new_points = tf.transpose(grouped_feature, [0, 1, 3, 2])
        new_points = tf.matmul(new_points, weight)
        new_points = tf_util.conv2d(new_points,
                                    mlp[0],
                                    [1, new_points.get_shape()[2].value],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='decode_after_conv',
                                    bn_decay=bn_decay,
                                    weight_decay=weight_decay)

        if points1 is not None:
            new_points1 = tf.concat(axis=-1,
                                    values=[
                                        new_points,
                                        tf.expand_dims(points1, axis=2)
                                    ])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points

        for i, num_out_channel in enumerate(mlp):
            if i != 0:
                new_points1 = tf_util.conv2d(new_points1,
                                             num_out_channel, [1, 1],
                                             padding='VALID',
                                             stride=[1, 1],
                                             bn=bn,
                                             is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay,
                                             weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
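The transpose-then-matmul pair in this decoder is PointConv's efficient aggregation: per-point features (B, N, K, C) are contracted against learned spatial weights (B, N, K, C_mid) over the K neighbors. A shape-only NumPy check (sizes illustrative):

import numpy as np

B, N, K, C, C_mid = 2, 128, 16, 64, 32
grouped_feature = np.random.rand(B, N, K, C)
spatial_weight = np.random.rand(B, N, K, C_mid)   # stands in for weight_net_hidden's output

agg = np.matmul(grouped_feature.transpose(0, 1, 3, 2), spatial_weight)
print(agg.shape)   # (2, 128, 64, 32), then collapsed by the following [1, C] conv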
Example #32
def feature_decoding_layer_depthwise(xyz1,
                                     xyz2,
                                     points1,
                                     points2,
                                     radius,
                                     sigma,
                                     K,
                                     mlp,
                                     is_training,
                                     bn_decay,
                                     weight_decay,
                                     scope,
                                     bn=True,
                                     use_xyz=True):
    ''' Input:                                      
            depthwise version of pointconv                                                                
            xyz1: (batch_size, ndataset1, 3) TF tensor                                                              
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1                                           
            points1: (batch_size, ndataset1, nchannel1) TF tensor                                                   
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point                                                 
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        #setup for deConv
        grouped_xyz, grouped_feature, idx = pointconv_util.grouping(
            interpolated_points, K, xyz1, xyz1, use_xyz=use_xyz)

        density = pointconv_util.kernel_density_estimation_ball(
            xyz1, radius, sigma)
        inverse_density = tf.div(1.0, density)
        grouped_density = tf.gather_nd(inverse_density,
                                       idx)  # (batch_size, npoint, nsample, 1)
        #grouped_density = tf_grouping.group_point(inverse_density, idx)
        inverse_max_density = tf.reduce_max(grouped_density,
                                            axis=2,
                                            keep_dims=True)
        density_scale = tf.div(grouped_density, inverse_max_density)

        #density_scale = tf_grouping.group_point(density, idx)

        weight = weight_net(grouped_xyz,
                            [32, grouped_feature.get_shape()[3].value],
                            scope='decode_weight_net',
                            is_training=is_training,
                            bn_decay=bn_decay,
                            weight_decay=weight_decay)

        density_scale = nonlinear_transform(density_scale, [16, 1],
                                            scope='decode_density_net',
                                            is_training=is_training,
                                            bn_decay=bn_decay,
                                            weight_decay=weight_decay)

        new_points = tf.multiply(grouped_feature, density_scale)

        new_points = tf.multiply(new_points, weight)  # apply the spatial weights on top of the density scaling

        new_points = tf_util.reduce_sum2d_conv(new_points,
                                               axis=2,
                                               scope='fp_sumpool',
                                               bn=True,
                                               bn_decay=bn_decay,
                                               is_training=is_training,
                                               keep_dims=False)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=-1, values=[new_points,
                                 points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay,
                                         weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1