Example 1
    def get_layer(self, functions_pl, with_bn, bn_decay, interpolation):

        # get_convlution_operator is defined elsewhere on the class; its
        # (mis-spelled) name is kept as-is.
        convolution_operation = self.get_convlution_operator(
            functions_pl, interpolation)

        with tf.variable_scope(self.scope):
            biases = tf_util._variable_on_cpu('biases', [self.out_channels],
                                              tf.constant_initializer(0.0))
            outputs = tf.nn.bias_add(convolution_operation, biases)

            if with_bn:
                outputs = tf_util.batch_norm_template(outputs,
                                                      self.is_training, 'bn',
                                                      [0, 1], bn_decay)
            return tf.nn.relu(outputs)
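
For context, a minimal standalone sketch of the same bias-add / batch-norm / ReLU tail in plain TF1. The helper name and the use of tf.layers.batch_normalization are illustrative assumptions; the snippet above relies on the repo's tf_util helpers instead.

import tensorflow as tf

def finish_conv(conv_out, out_channels, is_training, with_bn=True, scope='layer'):
    # Hypothetical standalone tail: bias -> (optional) batch norm -> ReLU.
    with tf.variable_scope(scope):
        biases = tf.get_variable('biases', [out_channels],
                                 initializer=tf.constant_initializer(0.0))
        out = tf.nn.bias_add(conv_out, biases)
        if with_bn:
            out = tf.layers.batch_normalization(out, training=is_training,
                                                name='bn')
        return tf.nn.relu(out)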
Example 2
def edge_unit_with_ec(point_cloud,
                      mask,
                      pooling,
                      neighbornum,
                      outchannel,
                      scope,
                      bn=False,
                      activation_fn=tf.nn.relu,
                      bn_decay=None,
                      is_training=None):
    """
    :param point_cloud: B N C*K
    :param mask: tensor
    :param pooling: String
    :return: Variable tensor
    """
    batch_size = point_cloud.get_shape()[0].value
    point_num = point_cloud.get_shape()[1].value

    coordinate_length = neighbornum  # number of adjacent points (K)
    mask = tf.expand_dims(mask, -1)
    ec = econ.create_ec(point_cloud, mask)
    ec_length = ec.get_shape()[3].value
    ec = tf.reshape(ec, [batch_size, point_num, -1])
    ec = tf.expand_dims(ec, axis=3)

    kernel_1 = tf_util._variable_with_weight_decay(
        name='weights_1',
        shape=[1, ec_length, 1, outchannel],
        use_xavier=True,
        stddev=0.001,
        wd=0.1)  # kernel_h, kernel_w, num_in_channels, output

    biases_1 = tf_util._variable_on_cpu('biases_1', [outchannel],
                                        tf.constant_initializer(0.0))

    outputs = tf.nn.conv2d(
        ec,
        kernel_1,
        [1, 1, ec_length, 1],  # [1, stride_h, stride_w, 1]
        padding='VALID')  # collapse each ec_length-wide edge code to one column
    outputs = tf.nn.bias_add(outputs, biases_1)
    if bn:
        outputs = tf_util.batch_norm_for_conv2d(outputs,
                                                is_training,
                                                bn_decay=bn_decay,
                                                scope='bn')
    if activation_fn is not None:
        outputs = activation_fn(outputs)

    outputs = outputs * mask
    # Note: tf.squeeze(outputs, -1) requires outchannel == 1 here, and
    # max_index is never used or returned.
    max_index = tf.squeeze(tf.argmax(tf.squeeze(outputs, -1)), -1)
    if pooling == 'max':
        outputs = tf.nn.max_pool(outputs,
                                 ksize=[1, 1, coordinate_length, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID')
    elif pooling == 'avg':
        outputs = tf.nn.avg_pool(outputs,
                                 ksize=[1, 1, coordinate_length, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID')
    return outputs, kernel_1
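
A hypothetical call site for edge_unit_with_ec; the shapes (B=32 clouds, N=512 points, K=16 neighbors) are assumptions, and econ.create_ec / tf_util come from the surrounding repository.

import tensorflow as tf

point_cloud = tf.placeholder(tf.float32, [32, 512, 16 * 4])  # B x N x (K*C)
mask = tf.placeholder(tf.float32, [32, 512, 16])             # B x N x K
is_training = tf.placeholder(tf.bool, [])
feats, kernel = edge_unit_with_ec(point_cloud, mask, pooling='max',
                                  neighbornum=16, outchannel=32,
                                  scope='edge_unit_ec', bn=True,
                                  bn_decay=0.9, is_training=is_training)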
Example 3
def edge_unit_without_pooling(data,
                              mask,
                              pooling,
                              neighbornum,
                              outchannel,
                              scope,
                              bn=False,
                              activation_fn=tf.nn.relu,
                              bn_decay=None,
                              is_training=None):
    """
    :param point_cloud: B N C*K
    :param mask: tensor
    :param pooling: String
    :return: Variable tensor
    """
    batch_size = data.get_shape()[0].value  # needed as int for range() below
    point_num = data.get_shape()[1].value

    mask = tf.expand_dims(mask, -1)

    ww = data.get_shape()[2].value // neighbornum  # per-neighbor feature width
    data = tf.reshape(data, [batch_size, point_num, -1])
    data = tf.expand_dims(data, -1)

    kernel_1 = tf_util._variable_with_weight_decay(
        name='weights_1',
        shape=[1, ww, 1, outchannel],
        use_xavier=True,
        stddev=0.001,
        wd=0.1)  # kernel_h, kernel_w, num_in_channels, output

    biases_1 = tf_util._variable_on_cpu('biases_1', [outchannel],
                                        tf.constant_initializer(0.0))

    outputs = tf.nn.conv2d(
        data,
        kernel_1,
        [1, 1, ww, 1],  # [1, stride_h, stride_w, 1]
        padding='VALID')  # collapse each ww-wide block to one output column
    outputs = tf.nn.bias_add(outputs, biases_1)
    if bn:
        outputs = tf_util.batch_norm_for_conv2d(
            outputs, is_training, bn_decay=bn_decay,
            scope='bn')  # is_training must not be None when bn=True
    if activation_fn is not None:
        outputs = activation_fn(outputs)

    outputs = outputs * mask
    # Apply a small per-point MLP; tmp's zero row only seeds the concat and
    # is dropped afterwards. Note this creates batch_size * point_num
    # separate variable scopes.
    tmp = tf.zeros([1, neighbornum * 7])
    for i in range(batch_size):
        for j in range(point_num):
            edges = outputs[i, j, :]
            edges = tf.reshape(edges, [1, -1])
            with tf.variable_scope('ec_weights_%d_%d' % (i, j)):
                edges = tf_util.fully_connected(edges,
                                                neighbornum * 32,
                                                bn=True,
                                                is_training=is_training,
                                                scope='tfc1',
                                                bn_decay=bn_decay)

                rst = tf_util.fully_connected(edges,
                                              neighbornum * 7,
                                              bn=True,
                                              is_training=is_training,
                                              scope='tfc2',
                                              bn_decay=bn_decay)
                tmp = tf.concat([tmp, rst], axis=0)
    outputs = tmp[1:, :]  # drop the seed row
    outputs = tf.reshape(outputs, [batch_size, point_num, neighbornum, -1])

    return outputs
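
The growing tf.concat inside the double loop above rebuilds the accumulator every iteration; a behavior-equivalent variation (illustrative only, same per-point weights and scopes) collects the rows in a Python list and concatenates once:

results = []
for i in range(batch_size):
    for j in range(point_num):
        edges = tf.reshape(outputs[i, j, :], [1, -1])
        with tf.variable_scope('ec_weights_%d_%d' % (i, j)):
            edges = tf_util.fully_connected(edges, neighbornum * 32, bn=True,
                                            is_training=is_training,
                                            scope='tfc1', bn_decay=bn_decay)
            results.append(tf_util.fully_connected(
                edges, neighbornum * 7, bn=True, is_training=is_training,
                scope='tfc2', bn_decay=bn_decay))
outputs = tf.reshape(tf.concat(results, axis=0),
                     [batch_size, point_num, neighbornum, -1])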
Example 4
def edge_unit(point_cloud,
              mask,
              pooling,
              neighbornum,
              outchannel,
              scope,
              bn=False,
              activation_fn=tf.nn.relu,
              bn_decay=None,
              is_training=None):
    """
    :param point_cloud: tensor
    :param mask: tensor
    :param pooling: String
    :return: Variable tensor
    """

    coordinate_length = neighbornum  # number of adjacent points (K)
    input_image = tf.expand_dims(point_cloud, -1)  # B x N x (C*K) x 1
    mask = tf.expand_dims(mask, 0)  # 1 x B x N x K
    mask = tf.tile(mask, [outchannel, 1, 1, 1])  # outchannel x B x N x K
    ww = point_cloud.get_shape()[2].value // coordinate_length  # per-neighbor width

    kernel_1 = tf_util._variable_with_weight_decay(
        name='weights_1',
        shape=[1, ww, 1, outchannel],
        use_xavier=True,
        stddev=0.001,
        wd=0.1)  # kernel_h, kernel_w, num_in_channels, output

    biases_1 = tf_util._variable_on_cpu('biases_1', [outchannel],
                                        tf.constant_initializer(0.0))

    outputs = tf.nn.conv2d(
        input_image,
        kernel_1,
        [1, 1, ww, 1],  # [1, stride_h, stride_w, 1]
        padding='VALID')  # collapse each ww-wide block to one output column
    outputs = tf.nn.bias_add(outputs, biases_1)
    if bn:
        outputs = tf_util.batch_norm_for_conv2d(outputs,
                                                is_training,
                                                bn_decay=bn_decay,
                                                scope='bn')
    if activation_fn is not None:
        outputs = activation_fn(outputs)
    # Move channels first so the tiled mask lines up, multiply, then move
    # channels back to the last axis.
    outputs = tf.transpose(outputs, [3, 0, 1, 2])  # outchannel x B x N x K
    outputs = tf.multiply(outputs, mask)
    outputs = tf.transpose(outputs, [1, 2, 3, 0])  # B x N x K x outchannel

    masked_result = outputs
    max_index_local = tf.squeeze(tf.argmax(outputs, 2))  # argmax over neighbors
    if pooling == 'max':
        outputs = tf.nn.max_pool(outputs,
                                 ksize=[1, 1, coordinate_length, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID')

    elif pooling == 'avg':
        outputs = tf.nn.avg_pool(outputs,
                                 ksize=[1, 1, coordinate_length, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID')
    return outputs, kernel_1, max_index_local, masked_result
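
Since the tile/transpose round-trip in edge_unit only serves to line the mask up with the channel axis, the same masking can be written with broadcasting; a minimal sketch (mask_channels is a hypothetical helper, not the repo's code):

import tensorflow as tf

def mask_channels(conv_out, mask):
    # conv_out: B x N x K x outchannel; mask: B x N x K.
    # Expanding the mask to B x N x K x 1 broadcasts it across the channel
    # axis, matching the tile/transpose sequence above.
    return conv_out * tf.expand_dims(mask, -1)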
Example 5
def spiderConv(grouped_points,
               feat,
               mlp,
               taylor_channel,
               bn=False,
               is_training=None,
               bn_decay=None,
               gn=False,
               is_multi_GPU=False,
               activation_fn=tf.nn.relu,
               scope='taylor'):
    """ 2D convolution with non-linear operation.

  Args:
    feat: 3-D tensor variable BxNxC
    idx: 3-D tensor variable BxNxk
    delta: 4-D tensor variable BxNxkx3
    num_conv: int
    taylor_channel: int    
    bn: bool, whether to use batch norm
    is_training: bool Tensor variable
    bn_decay: float or float tensor variable in [0,1]
    gn: bool, whether to use group norm
    G: int
    is_multi_GPU: bool, whether to use multi GPU
    activation_fn: function
    scope: string
    

  Returns:
    feat: 3-D tensor variable BxNxC
  """
    with tf.variable_scope(scope):

        batch_size = feat.get_shape()[0].value
        num_point = feat.get_shape()[1].value
        in_channels = grouped_points.get_shape()[2].value
        shape = [1, 1, taylor_channel]

        X = feat[:, :, 0]
        Y = feat[:, :, 1]
        Z = feat[:, :, 2]

        X = tf.expand_dims(X, -1)  # B x N x 1
        Y = tf.expand_dims(Y, -1)
        Z = tf.expand_dims(Z, -1)

        #initialize
        initializer = tf.contrib.layers.xavier_initializer()

        def tiled_var(name, init=initializer):
            # Create a [1, 1, taylor_channel] variable and tile it across
            # the batch and point axes: B x N x taylor_channel.
            return tf.tile(tf_util._variable_on_cpu(name, shape, init),
                           [batch_size, num_point, 1])

        zeros = tf.constant_initializer(0.0)

        w_x, w_y, w_z = (tiled_var('weight_x'), tiled_var('weight_y'),
                         tiled_var('weight_z'))
        w_xyz = tiled_var('weight_xyz')
        w_xy, w_yz, w_xz = (tiled_var('weight_xy'), tiled_var('weight_yz'),
                            tiled_var('weight_xz'))
        biases = tiled_var('biases', zeros)
        w_xx, w_yy, w_zz = (tiled_var('weight_xx'), tiled_var('weight_yy'),
                            tiled_var('weight_zz'))
        w_xxy, w_xyy, w_xxz = (tiled_var('weight_xxy'), tiled_var('weight_xyy'),
                               tiled_var('weight_xxz'))
        w_xzz, w_yyz, w_yzz = (tiled_var('weight_xzz'), tiled_var('weight_yyz'),
                               tiled_var('weight_yzz'))
        w_xxx, w_yyy, w_zzz = (tiled_var('weight_xxx'), tiled_var('weight_yyy'),
                               tiled_var('weight_zzz'))

        # Only alpha0, biases1 and biases4 are used below; the others are
        # created (as in the original) but left unused.
        alpha0, alpha1, alpha2, alpha3 = (tiled_var('alpha%d' % i)
                                          for i in range(4))
        biases1, biases2, biases3, biases4, biases5 = (
            tiled_var('biases%d' % i, zeros) for i in range(1, 6))

        g1 = w_x * X + w_y * Y + w_z * Z + w_xyz * X * Y * Z
        g2 = w_xy * X * Y + w_yz * Y * Z + w_xz * X * Z + biases
        g3 = w_xx * X * X + w_yy * Y * Y + w_zz * Z * Z
        g4 = w_xxy * X * X * Y + w_xyy * X * Y * Y + w_xxz * X * X * Z
        g5 = w_xzz * X * Z * Z + w_yyz * Y * Y * Z + w_yzz * Y * Z * Z
        g6 = w_xxx * X * X * X + w_yyy * Y * Y * Y + w_zzz * Z * Z * Z
        # Higher-order terms g4-g6 are computed but not used here:
        g_d = g1 + g2 + g3
        # Scaled Gaussian of the polynomial response (uses `math.pi`).
        g_d = 100 * (
            (0.5 * alpha0 / math.pi) * tf.exp(-(g_d - biases1) *
                                              (g_d - biases1)) + biases4)
        #g_d = alpha0*tf.exp(g_d+biases1)+biases2
        grouped_points = tf.expand_dims(grouped_points, -1)  # B x N x C x 1
        g_d = tf.expand_dims(g_d, 2)  # B x N x 1 x taylor_channel
        g_d = tf.tile(g_d, [1, 1, in_channels, 1])  # B x N x C x taylor_channel
        grouped_points = grouped_points * g_d
        grouped_points = tf.reshape(
            grouped_points,
            [batch_size, num_point, in_channels * taylor_channel])

        for i, num_out_channel in enumerate(mlp):
            grouped_points = tf_util.conv1d(grouped_points,
                                            num_out_channel,
                                            1,
                                            padding='VALID',
                                            bn=True,
                                            is_training=is_training,
                                            scope='convf%d' % (i),
                                            bn_decay=bn_decay)

        return grouped_points
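
A scalar NumPy reference of the response computed in spiderConv, for a single point and taylor_channel == 1; the dictionary w and the scalars alpha0, b1, b4 stand in for the tiled variables, and only the g1-g3 terms enter, matching the code:

import math
import numpy as np

def taylor_gauss_weight(x, y, z, w, alpha0, b1, b4):
    # g_d = g1 + g2 + g3, then the scaled Gaussian from the snippet above.
    g = (w['x'] * x + w['y'] * y + w['z'] * z + w['xyz'] * x * y * z     # g1
         + w['xy'] * x * y + w['yz'] * y * z + w['xz'] * x * z + w['b']  # g2
         + w['xx'] * x * x + w['yy'] * y * y + w['zz'] * z * z)          # g3
    return 100.0 * ((0.5 * alpha0 / math.pi) * np.exp(-(g - b1) ** 2) + b4)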
Example 6
def pointnet_sa_module_spider(xyz,
                              points,
                              npoint,
                              radius,
                              nsample,
                              mlp,
                              mlp2,
                              group_all,
                              is_training,
                              bn_decay,
                              scope,
                              pooling,
                              bn=True,
                              knn=False,
                              use_xyz=True,
                              use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one point cloud if true,
                overriding the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NHWC'
    with tf.variable_scope(scope):
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, knn, use_xyz)

        batch_size = grouped_xyz.get_shape()[0].value
        num_point = grouped_xyz.get_shape()[1].value
        K_knn = grouped_xyz.get_shape()[2].value
        in_channels = new_points.get_shape()[3].value
        shape = [1, 1, 1, 3]
        num_gau = 10  # number of Gaussian components

        X = grouped_xyz[:, :, :, 0]
        Y = grouped_xyz[:, :, :, 1]
        Z = grouped_xyz[:, :, :, 2]

        X = tf.expand_dims(X, -1)  # B x N x K_knn x 1
        Y = tf.expand_dims(Y, -1)
        Z = tf.expand_dims(Z, -1)

        initializer = tf.contrib.layers.xavier_initializer()

        def tiled_var(name, init=initializer):
            # Create a [1, 1, 1, 3] variable and tile it across the batch,
            # point and neighbor axes: B x N x K_knn x 3.
            return tf.tile(tf_util._variable_on_cpu(name, shape, init),
                           [batch_size, num_point, K_knn, 1])

        zeros = tf.constant_initializer(0.0)

        w_x, w_y, w_z = (tiled_var('weight_x'), tiled_var('weight_y'),
                         tiled_var('weight_z'))
        w_xyz = tiled_var('weight_xyz')
        w_xy, w_yz, w_xz = (tiled_var('weight_xy'), tiled_var('weight_yz'),
                            tiled_var('weight_xz'))
        biases = tiled_var('biases', zeros)
        w_xx, w_yy, w_zz = (tiled_var('weight_xx'), tiled_var('weight_yy'),
                            tiled_var('weight_zz'))
        w_xxy, w_xyy, w_xxz = (tiled_var('weight_xxy'), tiled_var('weight_xyy'),
                               tiled_var('weight_xxz'))
        w_xzz, w_yyz, w_yzz = (tiled_var('weight_xzz'), tiled_var('weight_yyz'),
                               tiled_var('weight_yzz'))
        w_xxx, w_yyy, w_zzz = (tiled_var('weight_xxx'), tiled_var('weight_yyy'),
                               tiled_var('weight_zzz'))

        # Means of the num_gau Gaussians ('biases1'-'biases10'): the first
        # nine start at zero, the last is Xavier-initialized (as in the
        # original variable definitions).
        means = ([tiled_var('biases%d' % i, zeros) for i in range(1, num_gau)]
                 + [tiled_var('biases%d' % num_gau)])
        # Standard deviations ('biases11'-'biases20'), Xavier-initialized.
        sigmas = [tiled_var('biases%d' % i)
                  for i in range(num_gau + 1, 2 * num_gau + 1)]

        g1 = w_x * X + w_y * Y + w_z * Z + w_xyz * X * Y * Z + biases
        g2 = w_xy * X * Y + w_yz * Y * Z + w_xz * X * Z
        g3 = w_xx * X * X + w_yy * Y * Y + w_zz * Z * Z
        g4 = w_xxy * X * X * Y + w_xyy * X * Y * Y + w_xxz * X * X * Z
        g5 = w_xzz * X * Z * Z + w_yyz * Y * Y * Z + w_yzz * Y * Z * Z
        g6 = w_xxx * X * X * X + w_yyy * Y * Y * Y + w_zzz * Z * Z * Z
        g_d = g1 + g2 + g3 + g4 + g5 + g6

        # For the Paris-Lille dataset, only the linear term was used:
        #g_d = g1

        # Responses of the num_gau Gaussians. (The original also kept a
        # commented-out variant without sigmas: g_di = tf.exp(g_d - mean_i).)
        g_ds = [
            tf.exp(-0.5 * (g_d - mean) * (g_d - mean) / (sigma * sigma))
            for mean, sigma in zip(means, sigmas)
        ]

        # Expand each response to B x N x K_knn x 1 x 3 and tile it across
        # the channel axis: B x N x K_knn x in_channels x 3.
        g_ds = [
            tf.tile(tf.expand_dims(g, 3), [1, 1, 1, in_channels, 1])
            for g in g_ds
        ]
        new_points = tf.expand_dims(new_points, -1)  # B x N x K_knn x C x 1
        new_points = tf.add_n([new_points * g for g in g_ds])
        new_points = tf.reshape(
            new_points, [batch_size, num_point, K_knn, in_channels * 3])

        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d' % (i),
                                        bn_decay=bn_decay,
                                        data_format=data_format)
        if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        # Pooling in Local Regions
        if pooling == 'max':
            new_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
        elif pooling == 'avg':
            new_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                exp_dists = tf_util.conv2d(tf.transpose(
                    exp_dists, [0, 1, 3, 2]),
                                           K_knn, [1, 1],
                                           padding='VALID',
                                           bn=True,
                                           is_training=is_training,
                                           scope='weighted',
                                           bn_decay=bn_decay)
                exp_dists = tf.transpose(exp_dists, [0, 1, 3, 2])
                weights = exp_dists / (
                    tf.reduce_sum(exp_dists, axis=2, keep_dims=True) + 1e-8
                )  # (batch_size, npoint, nsample, 1)
                new_points1 = new_points
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
                avg_points_max = tf.reduce_max(new_points1,
                                               axis=[2],
                                               keep_dims=True,
                                               name='avgpool')
                new_points = tf.concat([new_points, avg_points_max], axis=-1)
        elif pooling == 'max_and_avg':
            max_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
            avg_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        new_points = tf.squeeze(new_points, [2])  # (batch_size, npoint, channels)

        return new_xyz, new_points, idx
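
For clarity, a scalar NumPy sketch of the ten-Gaussian weighting applied to new_points above; mus stands for the means (biases1-10) and sigmas for the standard deviations (biases11-20), reduced to scalars:

import numpy as np

def gaussian_weight(g_d, mus, sigmas):
    # new_points is multiplied by the sum of the Gaussian responses:
    # sum_i exp(-0.5 * (g_d - mu_i)**2 / sigma_i**2)
    return sum(np.exp(-0.5 * (g_d - mu) ** 2 / sigma ** 2)
               for mu, sigma in zip(mus, sigmas))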