Example #1
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=bn, is_training=is_training,
                                         scope='conv_%d'%(i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
        return new_points1
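The dist/norm/weight lines above are plain inverse-distance weighting over the three nearest neighbors returned by three_nn. A minimal NumPy sketch (shapes made up for illustration) confirming that the three weights per point sum to one:

import numpy as np

# stand-in for the (batch, ndataset1, 3) distances returned by three_nn
np.random.seed(0)
dist = np.random.rand(1, 4, 3) + 0.1
dist = np.maximum(dist, 1e-10)                    # same guard as above
norm = np.sum(1.0 / dist, axis=2, keepdims=True)  # sum of inverse distances
weight = (1.0 / dist) / norm                      # normalized IDW weights
print(weight.sum(axis=2))                         # -> [[1. 1. 1. 1.]]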
Example #2
def pointnet_fp_module(xyz1,
                       xyz2,
                       points1,
                       points2,
                       mlp,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       is_dist=False):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay,
                                         is_dist=is_dist)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
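A hypothetical call of this module inside a TF1 graph (all shapes and placeholder names are made up): propagate 256-channel features from 128 sparse points onto 1024 dense points through a [256, 128] per-point MLP.

import tensorflow as tf

xyz1 = tf.placeholder(tf.float32, (8, 1024, 3))     # dense coordinates
xyz2 = tf.placeholder(tf.float32, (8, 128, 3))      # sparse coordinates
feats1 = tf.placeholder(tf.float32, (8, 1024, 64))  # features on xyz1
feats2 = tf.placeholder(tf.float32, (8, 128, 256))  # features on xyz2
training = tf.placeholder(tf.bool, ())
out = pointnet_fp_module(xyz1, xyz2, feats1, feats2, mlp=[256, 128],
                         is_training=training, bn_decay=None, scope='fp1')
print(out)  # shape (8, 1024, 128)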
Example #3
def fun(xyz1, xyz2, pts2):
    with tf.device("/cpu:0"):
        points = tf.constant(np.expand_dims(pts2, 0))
        xyz1 = tf.constant(np.expand_dims(xyz1, 0))
        xyz2 = tf.constant(np.expand_dims(xyz2, 0))
        dist, idx = three_nn(xyz1, xyz2)
        # weight = tf.ones_like(dist)/3.0
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        print(norm)
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session("") as sess:
        tmp, pts1, d, w = sess.run([xyz1, interpolated_points, dist, weight])
        pts1 = pts1.squeeze()
    return pts1
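fun() runs entirely on CPU, which makes it handy for eyeballing the op outside a model. A hypothetical call (array sizes made up), assuming the compiled three_nn / three_interpolate ops are on the path:

import numpy as np

xyz1 = np.random.rand(64, 3).astype('float32')  # 64 dense query points
xyz2 = np.random.rand(8, 3).astype('float32')   # 8 sparse source points
pts2 = np.random.rand(8, 16).astype('float32')  # 16-channel features on xyz2
pts1 = fun(xyz1, xyz2, pts2)
print(pts1.shape)  # expected: (64, 16)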
Example #4
    def test(self):
        np.random.seed(100)
        pts = np.random.random((32, 128, 64)).astype("float32")
        tmp1 = np.random.random((32, 512, 3)).astype("float32")
        tmp2 = np.random.random((32, 128, 3)).astype("float32")
        with tf.device("/cpu:0"):
            points = tf.constant(pts)
            xyz1 = tf.constant(tmp1)
            xyz2 = tf.constant(tmp2)
            dist, idx = three_nn(xyz1, xyz2)
            weight = tf.ones_like(dist) / 3.0
            interpolated_points = three_interpolate(points, idx, weight)
        with tf.compat.v1.Session("") as sess:
            now = time.time()
            for _ in range(100):
                ret = sess.run(interpolated_points)
            print(time.time() - now)
            print(ret.shape, ret.dtype)
Example #5
    def test_grad(self):
        with self.test_session():
            points = tf.constant(
                np.random.random((1, 8, 16)).astype('float32'))

            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            dist, idx = three_nn(xyz1, xyz2)
            weight = tf.ones_like(dist) / 3.0
            interpolated_points = three_interpolate(points, idx, weight)
            print(interpolated_points)
            print('------------')
            err = tf.test.compute_gradient_error(points, (1, 8, 16),
                                                 interpolated_points,
                                                 (1, 128, 16))
            print('------------')
            print(err)
            self.assertLess(err, 1e-4)
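Both methods above presumably sit on a tf.test.TestCase subclass; a minimal harness to run them might look like this (the class name is an assumption):

import tensorflow as tf

class ThreeInterpolateTest(tf.test.TestCase):
    pass  # paste test() and test_grad() from the examples above here

if __name__ == '__main__':
    tf.test.main()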
Example #6
def fun(xyz1, xyz2, pts2):
    with tf.device('/cpu:0'):
        points = tf.constant(np.expand_dims(pts2, 0))
        xyz1 = tf.constant(np.expand_dims(xyz1, 0))
        xyz2 = tf.constant(np.expand_dims(xyz2, 0))
        dist, idx = three_nn(xyz1, xyz2)
        # weight = tf.ones_like(dist)/3.0
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        print(norm)
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session('') as sess:
        tmp, pts1, d, w = sess.run([xyz1, interpolated_points, dist, weight])
        # print(w)
        pts1 = pts1.squeeze()
    return pts1
Example #7
    def test_grad(self):
        with self.test_session():
            features = tf.constant(
                np.random.random(
                    (1, 8, 16)).astype('float32'))  # features, (1,8,16)
            print(features)
            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            dist, idx = three_nn(xyz1, xyz2)  # (1,128,3), (1,128,3)
            weight = tf.ones_like(dist) / 3.0  # (1,128,3)
            interpolated_features = three_interpolate(features, idx,
                                                      weight)  # (1,128,16)
            print(interpolated_features)
            err = tf.test.compute_gradient_error(features, (1, 8, 16),
                                                 interpolated_features,
                                                 (1, 128, 16))
            print(err)
            self.assertLess(err, 1e-4)
Example #8
def pointnet_fp_module(xyz1,
                       xyz2,
                       points1,
                       points2,
                       mlp,
                       last_mlp_activation=True,
                       scope='fp'):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1:       (batch_size, ndataset1, 3) TF tensor
            xyz2:       (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1:    (batch_size, ndataset1, nchannel1) TF tensor
            points2:    (batch_size, ndataset2, nchannel2) TF tensor
            mlp:        list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            if i == len(mlp) - 1 and not last_mlp_activation:
                activation_fn = None
            else:
                activation_fn = tf.nn.relu
            # pass activation_fn to the conv; the original call dropped it
            # (assumes this conv2d wrapper follows the tf.layers.conv2d signature)
            new_points1 = conv2d(inputs=new_points1,
                                 filters=num_out_channel,
                                 kernel_size=[1, 1],
                                 activation=activation_fn,
                                 name='mlp_%d' % (i + 1))

        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #9
def pointnet_upsample(xyz1, xyz2, points2, scope):
    """ PointNet Feature Propogation (FP) Module
            Input:
                xyz1: (batch_size, ndataset1, 3) TF tensor
                xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
                points2: (batch_size, ndataset2, nchannel2) TF tensor
            Return:
                new_points: (batch_size, ndataset1, nchannel2) TF tensor
    """
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(
            points2, idx, weight)  # B x ndataset1 x nchannel2

        return interpolated_points
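pointnet_upsample has no trainable parameters, so it can be dropped anywhere in a graph. A hypothetical call (shapes made up):

import tensorflow as tf

xyz_dense = tf.placeholder(tf.float32, (4, 1024, 3))
xyz_sparse = tf.placeholder(tf.float32, (4, 128, 3))
feat_sparse = tf.placeholder(tf.float32, (4, 128, 256))
feat_dense = pointnet_upsample(xyz_dense, xyz_sparse, feat_sparse, scope='up1')
print(feat_dense)  # shape (4, 1024, 256)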
Example #10
def pc_upsampling(xyz_upsample, xyz, feat, scope='upsampling'):
    """ Fully connected layer with non-linear operation.
  
  Args:
    xyz_upsample: 3-D tensor B x N2 x 3
    xyz: 3-D tensor B x N x 3
    feat: 3-D tensor B x N x C
  
  Returns:
    feat_upsample: 3-D tensor B x N2 x C
  """
    with tf.variable_scope(scope) as sc:
        dist, idx_de = three_nn(xyz_upsample, xyz)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        feat_upsample = three_interpolate(feat, idx_de, weight)

        return feat_upsample
Example #11
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Part segmentation PointNet++; input is BxNx6 (xyz + normals), output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')

    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])

    # Calculate the relative coordinates generated in the first set abstraction layer.
    # The commented-out feature propagation call below concatenates these relative
    # coordinates; the active call does not use them.
    dist, idx = three_nn(l0_xyz, l1_xyz) 
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
    norm = tf.tile(norm,[1,1,3])
    weight = (1.0/dist) / norm
    l0_virtual_centers = three_interpolate(l1_xyz, idx, weight)
    l0_xyz_rel = l0_xyz - l0_virtual_centers

    #l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz_rel, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
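A hypothetical driver for get_model (NUM_CATEGORIES must already be defined at module scope, e.g. 16 for ShapeNet part segmentation; all sizes here are made up):

import tensorflow as tf

pc = tf.placeholder(tf.float32, (16, 2048, 6))  # xyz + normals
cls = tf.placeholder(tf.int32, (16,))           # per-shape category id
training = tf.placeholder(tf.bool, ())
logits, end_points = get_model(pc, cls, training)
print(logits)  # (16, 2048, 50) per-point part logits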
Example #12
    def call(self, x):
        '''
        Input:
            1: unknown
            2: known
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        '''
        xyz1, xyz2, points1, points2 = x
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum(1.0/dist, axis=2, keep_dims=True)
        norm = tf.tile(norm, [1,1,3])
        weight = (1.0/dist)/norm
        interpolated_points = three_interpolate(points2, idx, weight)
        # suppose points1 is not None
        new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B, n1, c1+c2
        new_points1 = tf.expand_dims(new_points1, 2) # B, n1, 1, c1+c2
        return new_points1
Example #13
def fun(xyz1, xyz2, pts2):
    with tf.device('/cpu:0'):
        points = tf.constant(np.expand_dims(pts2, 0))
        xyz1 = tf.constant(np.expand_dims(xyz1, 0))
        xyz2 = tf.constant(np.expand_dims(xyz2, 0))
        # distances to, and indices of, the three points in xyz2
        # nearest to each point in xyz1
        dist, idx = three_nn(xyz1, xyz2)
        # weight = tf.ones_like(dist)/3.0
        # print(weight)
        # keep the distances strictly positive
        dist = tf.maximum(dist, 1e-10)
        # sum of inverse distances to the three nearest points
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        # normalized inverse-distance weight for each neighbor
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session('') as sess:
        tmp, pts1, d, w = sess.run([xyz1, interpolated_points, dist, weight])
        # print(w)
        pts1 = pts1.squeeze()
    return pts1
Example #14
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training,
                       bn_decay, scope):
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)

        kernel = [None] * len(mlp)
        bias = [None] * len(mlp)
        for i, num_out_channel in enumerate(mlp):
            kernel[i] = tf.get_variable(
                'kernel' + str(i),
                [1, 1,
                 new_points1.get_shape()[-1].value, num_out_channel],
                initializer=tf.contrib.layers.xavier_initializer(),
                dtype=tf.float32)
            bias[i] = tf.get_variable('bias' + str(i), [num_out_channel],
                                      initializer=tf.constant_initializer(0.0),
                                      dtype=tf.float32)
            new_points1 = tf.nn.conv2d(new_points1,
                                       kernel[i], [1, 1, 1, 1],
                                       padding='VALID')
            new_points1 = tf.nn.bias_add(new_points1, bias[i])
            new_points1 = tf.nn.relu(new_points1)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
    return new_points1
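Because every kernel above has spatial size 1x1, each tf.nn.conv2d in the loop is just a per-point matmul. A quick NumPy check of that equivalence:

import numpy as np

B, N, Cin, Cout = 2, 5, 4, 3
x = np.random.rand(B, N, 1, Cin).astype('float32')
k = np.random.rand(Cin, Cout).astype('float32')    # the [1,1,Cin,Cout] kernel
conv = np.einsum('bnic,co->bnio', x, k)            # what the 1x1 conv computes
mm = x.reshape(B * N, Cin) @ k                     # plain per-point matmul
print(np.allclose(conv.reshape(B * N, Cout), mm))  # True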
Example #15
def pointconv_decoding_depthwise(xyz1,
                                 xyz2,
                                 points1,
                                 points2,
                                 radius,
                                 sigma,
                                 K,
                                 mlp,
                                 is_training,
                                 bn_decay,
                                 weight_decay,
                                 scope,
                                 bn=True,
                                 use_xyz=True,
                                 is_dist=False):
    """ Input:
            depthwise version of pointconv
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    """
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        # setup for deConv
        grouped_xyz, grouped_feature, idx = pointconv_grouping(
            interpolated_points, K, xyz1, xyz1, use_xyz=use_xyz)

        weight = weight_net(grouped_xyz,
                            [32, grouped_feature.get_shape()[3].value],
                            scope='decode_weight_net',
                            is_training=is_training,
                            bn_decay=bn_decay,
                            weight_decay=weight_decay,
                            is_dist=is_dist)

        new_points = tf.multiply(grouped_feature, weight)

        new_points = tf_util.reduce_sum2d_conv(new_points,
                                               axis=2,
                                               scope='fp_sumpool',
                                               bn=True,
                                               is_dist=is_dist,
                                               bn_decay=bn_decay,
                                               is_training=is_training,
                                               keepdims=False)

        if points1 is not None:
            # [B x ndataset1 x nchannel1+nchannel2]
            new_points1 = tf.concat(axis=-1, values=[new_points, points1])
        else:
            new_points1 = new_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         is_dist=is_dist,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay,
                                         weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # [B x ndataset1 x mlp[-1]]
        return new_points1
Example #16
def feature_decoding_layer(xyz1,
                           xyz2,
                           points1,
                           points2,
                           radius,
                           sigma,
                           K,
                           mlp,
                           is_training,
                           bn_decay,
                           weight_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           boundary_label=None):
    ''' Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        #setup for deConv
        if boundary_label is None:
            pass
        else:
            tmp_boundary_label = tf.tile(tf.expand_dims(boundary_label, [-1]),
                                         [1, 1, interpolated_points.shape[2]])
            interpolated_points = interpolated_points * tmp_boundary_label
        grouped_xyz, grouped_feature, idx, grouped_boundary_label = pointconv_util.grouping(
            interpolated_points,
            K,
            xyz1,
            xyz1,
            use_xyz=use_xyz,
            boundary_label=boundary_label)

        weight = weight_net_hidden(grouped_xyz, [32],
                                   scope='decode_weight_net',
                                   is_training=is_training,
                                   bn_decay=bn_decay,
                                   weight_decay=weight_decay)

        new_points = tf.transpose(grouped_feature, [0, 1, 3, 2])
        new_points = tf.matmul(new_points, weight)
        new_points = tf_util.conv2d(new_points,
                                    mlp[0],
                                    [1, new_points.get_shape()[2].value],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='decode_after_conv',
                                    bn_decay=bn_decay,
                                    weight_decay=weight_decay)

        if points1 is not None:
            new_points1 = tf.concat(axis=-1,
                                    values=[
                                        new_points,
                                        tf.expand_dims(points1, axis=2)
                                    ])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points

        for i, num_out_channel in enumerate(mlp):
            if i != 0:
                new_points1 = tf_util.conv2d(new_points1,
                                             num_out_channel, [1, 1],
                                             padding='VALID',
                                             stride=[1, 1],
                                             bn=bn,
                                             is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay,
                                             weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #17
def feature_decoding_layer_depthwise(xyz1,
                                     xyz2,
                                     points1,
                                     points2,
                                     radius,
                                     sigma,
                                     K,
                                     mlp,
                                     is_training,
                                     bn_decay,
                                     weight_decay,
                                     scope,
                                     bn=True,
                                     use_xyz=True):
    ''' Input:
            depthwise version of pointconv
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        #setup for deConv
        grouped_xyz, grouped_feature, idx = pointconv_util.grouping(
            interpolated_points, K, xyz1, xyz1, use_xyz=use_xyz)

        density = pointconv_util.kernel_density_estimation_ball(
            xyz1, radius, sigma)
        inverse_density = tf.div(1.0, density)
        grouped_density = tf.gather_nd(inverse_density,
                                       idx)  # (batch_size, npoint, nsample, 1)
        #grouped_density = tf_grouping.group_point(inverse_density, idx)
        inverse_max_density = tf.reduce_max(grouped_density,
                                            axis=2,
                                            keep_dims=True)
        density_scale = tf.div(grouped_density, inverse_max_density)

        #density_scale = tf_grouping.group_point(density, idx)

        weight = weight_net(grouped_xyz,
                            [32, grouped_feature.get_shape()[3].value],
                            scope='decode_weight_net',
                            is_training=is_training,
                            bn_decay=bn_decay,
                            weight_decay=weight_decay)

        density_scale = nonlinear_transform(density_scale, [16, 1],
                                            scope='decode_density_net',
                                            is_training=is_training,
                                            bn_decay=bn_decay,
                                            weight_decay=weight_decay)

        # scale by inverse density first, then by the learned weights; the
        # second multiply originally overwrote the first, discarding density_scale
        new_points = tf.multiply(grouped_feature, density_scale)
        new_points = tf.multiply(new_points, weight)

        new_points = tf_util.reduce_sum2d_conv(new_points,
                                               axis=2,
                                               scope='fp_sumpool',
                                               bn=True,
                                               bn_decay=bn_decay,
                                               is_training=is_training,
                                               keep_dims=False)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=-1, values=[new_points,
                                 points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay,
                                         weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example #18
def get_model(point_input, labels_pl, is_training, bn_decay=None, coarse_flag=0, 
	mmd_flag=0, pfs_flag=False, fully_concate=False):
	""" Seg, input is BxNx9, output BxNx13 """
	batch_size = point_input.get_shape()[0].value
	num_point1 = point_input.get_shape()[1].value
	num_point2 = int(np.floor(num_point1 / 4.0))
	num_point3 = int(np.floor(num_point2 / 4.0))
	# num_point4 = int(np.floor(num_point3 / 4.0))

	end_points = {}

	k = 10
	pk = 10

	de1_1 = 1
	de1_2 = 2
	de2_1 = 1
	de2_2 = 2
	de3_1 = 1
	de3_2 = 2

	r1_11 = 0
	r1_12 = 0.1
	r1_21 = 0
	r1_22 = 0.2

	r2_11 = 0
	r2_12 = 0.4
	r2_21 = 0
	r2_22 = 0.8

	r3_11 = 0
	r3_12 = 1.6
	r3_21 = 0
	r3_22 = 3.2

	p1_1 = 0
	p1_2 = 0.4

	p2_1 = 0
	p2_2 = 1.6

	# activation_fn = tf.math.softplus 
	activation_fn = tf.nn.relu

##################################################################################################
	# Hierarchy 1

	point_cloud1 = tf.slice(point_input, [0, 0, 0], [-1, -1, 3])

	hie_matrix1 = tf.math.maximum(tf.sqrt(tf_util.pairwise_distance(point_cloud1)), 1e-20)
	# dist_matrix1_1 = hie_matrix1
	# dist_matrix1_2 = hie_matrix1
	dist_matrix1_1 = None
	dist_matrix1_2 = None


	adj_matrix = tf_util.pairwise_distance(point_cloud1)
	net1_1 = pan_util.point_atrous_conv(point_input, adj_matrix, dist_matrix1_1, k, de1_1, r1_11, r1_12, 64, 
		scope='page1_1', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)


	adj_matrix = tf_util.pairwise_distance(net1_1)
	net1_2 = pan_util.point_atrous_conv(net1_1, adj_matrix, dist_matrix1_2, k, de1_2, r1_21, r1_22, 64,
		scope='page1_2', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)
	

	net = tf.squeeze(net1_2)


##################################################################################################
	# Hierarchy 2

	dist_threshold1 = False
	net, p1_idx, _, point_cloud2 = pan_util.edge_preserve_sampling(net, point_cloud1, num_point2, hie_matrix1, 
		dist_threshold1, pk, 1, p1_1, p1_2, pfs_flag, atrous_flag=False)
		
	# point_cloud2 = gather_point(point_cloud1, p1_idx)
	hie_matrix2 = tf.math.maximum(tf.sqrt(tf_util.pairwise_distance(point_cloud2)), 1e-20)
	# dist_matrix2_1 = hie_matrix2
	# dist_matrix2_2 = hie_matrix2
	dist_matrix2_1 = None
	dist_matrix2_2 = None


	adj_matrix = tf_util.pairwise_distance(net)
	net2_1 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix2_1, k, de2_1, r2_11, r2_12, 128,
		scope='page2_1', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)


	adj_matrix = tf_util.pairwise_distance(net2_1)
	net2_2 = pan_util.point_atrous_conv(net2_1, adj_matrix, dist_matrix2_2, k, de2_2, r2_21, r2_22, 128,
		scope='page2_2', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)


	net = tf.squeeze(net2_2)


##################################################################################################
	# Hierarchy 3
	
	dist_threshold2 = False
	net, p2_idx, _, point_cloud3 = pan_util.edge_preserve_sampling(net, point_cloud2, num_point3, hie_matrix2, 
		dist_threshold2, pk, 1, p2_1, p2_2, pfs_flag, atrous_flag=False)
		
	# point_cloud3 = gather_point(point_cloud2, p2_idx)
	hie_matrix3 = tf.math.maximum(tf.sqrt(tf_util.pairwise_distance(point_cloud3)), 1e-20)	
	# dist_matrix3_1 = hie_matrix3
	# dist_matrix3_2 = hie_matrix3
	dist_matrix3_1 = None
	dist_matrix3_2 = None


	adj_matrix = tf_util.pairwise_distance(net)
	net3_1 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix3_1, k, de3_1, r3_11, r3_12, 256,
		scope='page3_1', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)


	adj_matrix = tf_util.pairwise_distance(net3_1)
	net3_2 = pan_util.point_atrous_conv(net3_1, adj_matrix, dist_matrix3_2, k, de3_2, r3_21, r3_22, 256,
		scope='page3_2', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)


##################################################################################################	
	# Embedded Features

	net = tf_util.conv2d(net3_2, 1024, [1,1],
					   padding='VALID', stride=[1,1], activation_fn=activation_fn,
					   bn=True, is_training=is_training,
					   scope='encoder', bn_decay=bn_decay)

	net = tf.reduce_max(net, axis=1, keepdims=True)
	net = tf.reshape(net, [batch_size, -1])
	net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, activation_fn=None,
								scope='rg1', bn_decay=bn_decay)
	
	if mmd_flag > 0:
		end_points['embedding'] = net
	else:
		end_points['embedding'] = None

	net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='rgdp1')
	
	global_feature_size = 1024
	net = tf_util.fully_connected(net, global_feature_size, bn=True, is_training=is_training, activation_fn=activation_fn,
								scope='rg2', bn_decay=bn_decay)
	net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='rgdp2')
	net = tf.reshape(net, [batch_size, 1, 1, global_feature_size])

	net = tf.tile(net, [1, num_point3, 1, 1])

	net = tf.concat([net, net3_2], axis=-1)

	net = tf_util.conv2d(net, 512, [1,1],
					   padding='VALID', stride=[1,1], activation_fn=activation_fn,
					   bn=True, is_training=is_training,
					   scope='decoder', bn_decay=bn_decay)

	if coarse_flag > 0:
		coarse_net = tf.squeeze(net)
		coarse_net = tf_util.conv1d(coarse_net, 128, 1, padding='VALID', bn=True, activation_fn=activation_fn,
			is_training=is_training, scope='coarse_fc1', bn_decay=bn_decay)
		coarse_net = tf_util.dropout(coarse_net, keep_prob=0.5, is_training=is_training, scope='cdp1')
		coarse_net = tf_util.conv1d(coarse_net, 50, 1, padding='VALID', activation_fn=None, scope='coarse_fc2')

		coarse_labels_pl = tf_util.gather_labels(labels_pl, p1_idx)
		coarse_labels_pl = tf_util.gather_labels(coarse_labels_pl, p2_idx)

		end_points['coarse_pred'] = coarse_net
		end_points['coarse_label'] = coarse_labels_pl

	else:
		end_points['coarse_pred'] = None
		end_points['coarse_label'] = None


##################################################################################################
	# Hierarchy 3 (decoding)

	adj_matrix = tf_util.pairwise_distance(net)
	net3_2 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix3_2, k, de3_2, r3_21, r3_22, 256,
		scope='pagd3_2', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)

	net = tf.concat([net3_2, net3_1], axis=-1)
	if fully_concate:
		net3_2 = tf.squeeze(net3_2)

	adj_matrix = tf_util.pairwise_distance(net)
	net3_1 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix3_1, k, de3_1, r3_11, r3_12, 256,
		scope='pagd3_1', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)

	net3_1 = tf.squeeze(net3_1)


##################################################################################################
	# Hierarchy 2 (decoding)

	idx, weight = pan_util.three_nn_upsampling(point_cloud2, point_cloud3)
	net3_1 = three_interpolate(net3_1, idx, weight)
	if fully_concate:
		net3_2 = three_interpolate(net3_2, idx, weight)	

	net = tf.concat([tf.expand_dims(net3_1, 2), net2_2], axis=-1)
	
	adj_matrix = tf_util.pairwise_distance(net)
	net2_2 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix2_2, k, de2_2, r2_21, r2_22, 128,
		scope='pagd2_2', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)

	net = tf.concat([net2_2, net2_1], axis=-1)
	if fully_concate:
		net2_2 = tf.squeeze(net2_2)

	adj_matrix = tf_util.pairwise_distance(net)
	net2_1 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix2_1, k, de2_1, r2_11, r2_12, 128,
		scope='pagd2_1', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)

	net2_1 = tf.squeeze(net2_1)


##################################################################################################
	# Hierarchy 1 (decoding)
	
	idx, weight = pan_util.three_nn_upsampling(point_cloud1, point_cloud2)
	net2_1 = three_interpolate(net2_1, idx, weight)
	net3_1 = three_interpolate(net3_1, idx, weight)
	if fully_concate:
		net2_2 = three_interpolate(net2_2, idx, weight)
		net3_2 = three_interpolate(net3_2, idx, weight)

	net = tf.concat([tf.expand_dims(net2_1, 2), net1_2], axis=-1)

	adj_matrix = tf_util.pairwise_distance(net)
	net1_2 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix1_2, k, de1_2, r1_21, r1_22, 64,
		scope='pagd1_2', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)

	
	net = tf.concat([net1_2, net1_1], axis=-1)

	adj_matrix = tf_util.pairwise_distance(net)
	net1_1 = pan_util.point_atrous_conv(net, adj_matrix, dist_matrix1_1, k, de1_1, r1_11, r1_12, 64,
		scope='pagd1_1', bn=True, bn_decay=bn_decay, is_training=is_training, activation_fn=activation_fn)


##################################################################################################
	# Final Prediction

	if fully_concate:
		net = tf.concat([net1_1, net1_2, tf.expand_dims(net2_1, 2), tf.expand_dims(net2_2, 2), tf.expand_dims(net3_1, 2), tf.expand_dims(net3_2, 2)], axis=-1)
	else:
		net = tf.concat([net1_1, tf.expand_dims(net2_1, 2), tf.expand_dims(net3_1, 2)], axis=-1)

	net = tf.squeeze(net)

	net = tf_util.conv1d(net, 128, 1, padding='VALID', bn=True, activation_fn=activation_fn,
		is_training=is_training, scope='fc1', bn_decay=bn_decay)
	
	net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
	net = tf_util.conv1d(net, 13, 1, padding='VALID', activation_fn=None, scope='fc2')
	
	return net, end_points
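Example #18 above (and Example #21 below) delegate the interpolation weights to pan_util.three_nn_upsampling / tf_util.three_nn_upsampling. Given the pattern used throughout this page, that helper most likely reduces to the following sketch (an assumption, not the repos' actual code):

def three_nn_upsampling(target_xyz, source_xyz):
    # hypothetical reimplementation of the helper used above
    dist, idx = three_nn(target_xyz, source_xyz)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.tile(tf.reduce_sum(1.0 / dist, axis=2, keep_dims=True), [1, 1, 3])
    weight = (1.0 / dist) / norm
    return idx, weight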
Example #19
def deconv_new(pts,
               fts,
               qrs,
               tag,
               N,
               K,
               radius,
               P,
               C,
               C_pts_fts,
               is_training,
               with_X_transformation,
               depth_multiplier,
               D=1,
               sorting_method=None,
               with_global=False,
               knn=False):
    """
    pts: points of previous layer, e.g.,(B, N/2, 3)
    fts: point features of previous layer, e.g.,(B, N/2, C)
    qrs: selected representative points of this layer, e.g.,(B, N, 3)
    N: batch_size,
    K: neighbor_size,
    D: dilation parameter,
    P: the number of selected representative points,
    C: output feature number per point,
    C_pts_fts: feature number for each local point
    radius: float32, the radius of query ball search
    knn: True: knn; False: query ball
    """
    dist, idx = three_nn(qrs, pts)
    dist = tf.maximum(dist, 1e-10)
    norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
    norm = tf.tile(norm, [1, 1, 3])
    weight = (1.0 / dist) / norm
    interpolated_fts = three_interpolate(fts, idx, weight)  # (B, N, C)

    if knn:
        _, indices_dilated = pf.knn_indices_general(qrs, qrs, K * D, True)
        indices = indices_dilated[:, :, ::D, :]
        nn_pts = tf.gather_nd(qrs, indices,
                              name=tag + 'nn_pts')  # (B, N, K, 3)
        nn_fts_from_prev = tf.gather_nd(interpolated_fts,
                                        indices,
                                        name=tag + 'nn_fts')
    else:
        indices, pts_cnt = query_ball_point(radius, K, qrs, qrs)
        nn_pts = group_point(qrs, indices)
        nn_fts_from_prev = group_point(interpolated_fts, indices)

    nn_pts_center = tf.expand_dims(qrs, axis=2,
                                   name=tag + 'nn_pts_center')  # (B, N, 1, 3)
    nn_pts_local = tf.subtract(nn_pts,
                               nn_pts_center,
                               name=tag + 'nn_pts_local')  # (B, N, K, 3)

    # Prepare features to be transformed
    nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts,
                                 tag + 'nn_fts_from_pts_0', is_training)
    nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts,
                               tag + 'nn_fts_from_pts', is_training)

    nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev],
                             axis=-1,
                             name=tag + 'nn_fts_input')

    if with_X_transformation:
        ######################## X-transformation #########################
        X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training,
                        (1, K))  # following paper
        X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
        X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training,
                                  (1, K))  # following paper
        X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
        X_2 = pf.depthwise_conv2d(X_1_KK,
                                  K,
                                  tag + 'X_2',
                                  is_training, (1, K),
                                  activation=None)  # following paper
        X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
        fts_X = tf.matmul(X_2_KK, nn_fts_input, name=tag + 'fts_X')
        ###################################################################
    else:
        fts_X = nn_fts_input

    fts_conv = pf.separable_conv2d(fts_X,
                                   C,
                                   tag + 'fts_conv',
                                   is_training, (1, K),
                                   depth_multiplier=depth_multiplier)
    fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')

    if with_global:
        fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
        fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global_',
                              is_training)
        return tf.concat([fts_global, fts_conv_3d],
                         axis=-1,
                         name=tag + 'fts_conv_3d_with_global')
    else:
        return fts_conv_3d
Example #20
def PointASNLDecodingLayer(xyz1,
                           xyz2,
                           points1,
                           points2,
                           nsample,
                           mlp,
                           is_training,
                           bn_decay,
                           weight_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_knn=True,
                           radius=None,
                           dilate_rate=1,
                           mode='concat',
                           NL=False):
    ''' Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        batch_size, num_points, num_channel = points2.get_shape()
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        '''Point NonLocal Cell'''
        if NL:
            new_nonlocal_point = PointNonLocalCell(
                points1,
                tf.expand_dims(points2,
                               axis=1), [max(32, num_channel), num_channel],
                is_training,
                bn_decay,
                weight_decay,
                scope,
                bn,
                mode=mode)
            new_nonlocal_point = tf.squeeze(
                new_nonlocal_point, [1])  # (batch_size, npoints, mlp2[-1])
            points2 = tf.add(points2, new_nonlocal_point)

        interpolated_points = three_interpolate(points2, idx, weight)
        '''Point Local Cell'''
        grouped_xyz, grouped_feature, idx = grouping(interpolated_points,
                                                     nsample,
                                                     xyz1,
                                                     xyz1,
                                                     use_xyz=use_xyz,
                                                     use_knn=use_knn,
                                                     radius=radius)
        grouped_xyz -= tf.tile(tf.expand_dims(xyz1, 2),
                               [1, 1, nsample, 1])  # translation normalization

        weight = weight_net_hidden(grouped_xyz, [32],
                                   scope='decode_weight_net',
                                   is_training=is_training,
                                   bn_decay=bn_decay,
                                   weight_decay=weight_decay)

        new_points = grouped_feature
        new_points = tf.transpose(new_points, [0, 1, 3, 2])

        new_points = tf.matmul(new_points, weight)

        new_points = tf_util.conv2d(new_points,
                                    mlp[0],
                                    [1, new_points.get_shape()[2].value],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='decode_after_conv',
                                    bn_decay=bn_decay,
                                    weight_decay=weight_decay)

        if points1 is not None:
            new_points1 = tf.concat(axis=-1,
                                    values=[
                                        new_points,
                                        tf.expand_dims(points1, axis=2)
                                    ])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points

        for i, num_out_channel in enumerate(mlp):
            if i != 0:
                new_points1 = tf_util.conv2d(new_points1,
                                             num_out_channel, [1, 1],
                                             padding='VALID',
                                             stride=[1, 1],
                                             bn=bn,
                                             is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay,
                                             weight_decay=weight_decay)
        new_points = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]

        return new_points
Example #21
def get_model(point_input, labels_pl, is_training, bn_decay=None, coarse_flag=0,
				mmd_flag=0, pfs_flag=False, fully_concate=False):
	batch_size = point_input.get_shape()[0].value
	num_point1 = point_input.get_shape()[1].value
	num_point2 = int(np.floor(num_point1 / 4.0))
	num_point3 = int(np.floor(num_point2 / 4.0))

	end_points = {}

	activation_fn = tf.nn.relu


	point_cloud1 = tf.slice(point_input, [0, 0, 0], [-1, -1, 3])



	k = 16
	nn_idx = pointSIFT_select_two(point_cloud1, 0.2)
	net1_1 = tf_util.attention_conv(point_cloud1, point_input, 64, nn_idx, k, scope='conv_1_1', bn=False,
									 bn_decay=bn_decay, is_training=is_training)
	net1_2 = tf_util.attention_conv(point_cloud1, net1_1, 64, nn_idx, k, scope='conv_1_2', bn=False, bn_decay=bn_decay,
									 is_training=is_training)

	k = 20
	net, p1_idx, pn_idx, point_cloud2 = tf_util.attention_pooling(net1_2, point_cloud1, num_point2, k, scope='12',
																   bn_decay=bn_decay, is_training=is_training)

	k = 8
	nn_idx = pointSIFT_select(point_cloud2, 0.4)
	net2_1 = tf_util.attention_conv(point_cloud2, net, 64, nn_idx, k, scope='conv_2_1', bn=False, bn_decay=bn_decay,
									 is_training=is_training)
	net2_2 = tf_util.attention_conv(point_cloud2, net2_1, 64, nn_idx, k, scope='conv_2_2', bn=False, bn_decay=bn_decay,
									 is_training=is_training)

	k = 20
	net, p2_idx, pn_idx, point_cloud3 = tf_util.attention_pooling(net2_2, point_cloud2, num_point3, k, scope='13',
																   bn_decay=bn_decay, is_training=is_training)

	k = 8
	nn_idx = pointSIFT_select(point_cloud3, 0.7)
	net3_1 = tf_util.attention_conv(point_cloud3, net, 128, nn_idx, k, scope='conv_3_1', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)
	net3_2 = tf_util.attention_conv(point_cloud3, net3_1, 128, nn_idx, k, scope='conv_3_2', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)
	net3_2 = tf.expand_dims(net3_2, 2)


	net = tf_util.conv2d(net3_2, 1024, [1, 1],
						 padding='VALID', stride=[1, 1], activation_fn=activation_fn,
						 bn=False, is_training=is_training,
						 scope='encoder', bn_decay=bn_decay)

	net = tf.reduce_max(net, axis=1, keepdims=True)
	net = tf.reshape(net, [batch_size, -1])
	net = tf_util.fully_connected(net, 512, bn=False, is_training=is_training, activation_fn=None,
								  scope='rg1', bn_decay=bn_decay)

	if mmd_flag > 0:
		end_points['embedding'] = net
	else:
		end_points['embedding'] = None

	net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='rgdp1')

	global_feature_size = 1024
	net = tf_util.fully_connected(net, global_feature_size, bn=False, is_training=is_training,
								  activation_fn=activation_fn,
								  scope='rg2', bn_decay=bn_decay)
	net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='rgdp2')
	net = tf.reshape(net, [batch_size, 1, 1, global_feature_size])

	net = tf.tile(net, [1, num_point3, 1, 1])

	net = tf.concat([net, net3_2], axis=-1)

	net = tf_util.conv2d(net, 512, [1, 1],
						 padding='VALID', stride=[1, 1], activation_fn=activation_fn,
						 bn=False, is_training=is_training,
						 scope='decoder', bn_decay=bn_decay)

	if coarse_flag > 0:
		coarse_net = tf.squeeze(net, [2])
		coarse_net = tf_util.conv1d(coarse_net, 128, 1, padding='VALID', bn=False, activation_fn=activation_fn,
									is_training=is_training, scope='coarse_fc1', bn_decay=bn_decay)
		coarse_net = tf_util.dropout(coarse_net, keep_prob=0.5, is_training=is_training, scope='cdp1')
		coarse_net = tf_util.conv1d(coarse_net, 50, 1, padding='VALID', activation_fn=None, scope='coarse_fc2')

		coarse_labels_pl = tf_util.gather_labels(labels_pl, p1_idx)
		coarse_labels_pl = tf_util.gather_labels(coarse_labels_pl, p2_idx)

		end_points['coarse_pred'] = coarse_net
		end_points['coarse_label'] = coarse_labels_pl

	else:
		end_points['coarse_pred'] = None
		end_points['coarse_label'] = None

	k = 8
	nn_idx = pointSIFT_select(point_cloud3, 0.7)
	net3_2 = tf_util.attention_conv(point_cloud3, tf.squeeze(net,[2]), 128, nn_idx, k, scope='conv_4_1', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)

	net = tf.concat([net3_2, net3_1], axis=-1)


	net3_1 = tf_util.attention_conv(point_cloud3, net, 128, nn_idx, k, scope='conv_4_2', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)



	idx, weight = tf_util.three_nn_upsampling(point_cloud2, point_cloud3)
	net3_1 = three_interpolate(net3_1, idx, weight)
	if fully_concate:
		net3_2 = three_interpolate(net3_2, idx, weight)

	net = tf.concat([net3_1, net2_2], axis=-1)
	k = 8
	nn_idx = pointSIFT_select(point_cloud2, 0.4)
	net2_2 = tf_util.attention_conv(point_cloud2, net, 64, nn_idx, k, scope='conv_5_1', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)

	net = tf.concat([net2_2, net2_1], axis=-1)


	net2_1 = tf_util.attention_conv(point_cloud2, net, 64, nn_idx, k, scope='conv_5_2', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)

	idx, weight = tf_util.three_nn_upsampling(point_cloud1, point_cloud2)
	net2_1 = three_interpolate(net2_1, idx, weight)
	net3_1 = three_interpolate(net3_1, idx, weight)
	if fully_concate:
		net2_2 = three_interpolate(net2_2, idx, weight)
		net3_2 = three_interpolate(net3_2, idx, weight)

	net = tf.concat([net2_1, net1_2], axis=-1)
	k = 16
	nn_idx = pointSIFT_select_two(point_cloud1, 0.2)
	net1_2 = tf_util.attention_conv(point_cloud1, net, 64, nn_idx, k, scope='conv_6_1', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)

	net = tf.concat([net1_2, net1_1], axis=-1)

	net1_1 = tf_util.attention_conv(point_cloud1, net, 64, nn_idx, k, scope='conv_6_2', bn=False,
									 bn_decay=bn_decay,
									 is_training=is_training)



	if fully_concate:
		net = tf.concat(
			[net1_1, net1_2, net2_1, net2_2, net3_1,
			 net3_2], axis=-1)
	else:
		net = tf.concat([net1_1, tf.expand_dims(net2_1, 2), tf.expand_dims(net3_1, 2)], axis=-1)

	net = tf_util.conv1d(net, 128, 1, padding='VALID', bn=False, activation_fn=activation_fn,
						 is_training=is_training, scope='fc1', bn_decay=bn_decay)
	net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
	net = tf_util.conv1d(net, 13, 1, padding='VALID', activation_fn=None, scope='fc2')

	return net, end_points