Example #1
    def _create_network(self):
        """Sets up the graph for the neural network

        :returns: (op) train_step - training operation to run, (tensor) output - raw output of the network, (op) init_ops - variable initialization operation
        """

        #Layer 1 convolution + 2x2 maxpool
        layer1_conv_W = weight_variable([5,5,1,64], name="layer1_W")
        layer1_conv_b = bias_variable([64], name="layer1_b")
        layer1_conv   = tf.nn.relu(conv2d(self.x, layer1_conv_W, name='layer1_conv') + layer1_conv_b, name='layer1_relu')
        tf.histogram_summary('layer1_relu', layer1_conv)
        layer1_pool   = max_pool_2x2(layer1_conv, name='layer1_pool')
        # layer1_drop   = tf.nn.dropout(layer1_pool, self.keep_prob, name='layer1_drop')

        #Layer 2 convolution + 2x2 maxpool
        layer2_conv_W = weight_variable([5,5,64,128], name='layer2_W')
        layer2_conv_b = bias_variable([128], name='layer2_b')
        layer2_conv   = tf.nn.relu(conv2d(layer1_pool, layer2_conv_W, name='layer2_conv') + layer2_conv_b, name='layer2_relu')
        tf.histogram_summary('layer2_relu', layer2_conv)
        layer2_pool   = max_pool_2x2(layer2_conv, name='layer2_pool')

        #Layer 3 convolution + 2x2 maxpool
        image_size_8  = self.image_size // 8  #integer division: each 2x2 pool halves the image, so three pools divide each side by 8
        layer3_conv_W = weight_variable([5,5,128,256], name='layer3_W')
        layer3_conv_b = bias_variable([256], name='layer3_b')
        layer3_conv   = tf.nn.relu(conv2d(layer2_pool, layer3_conv_W, name='layer3_conv') + layer3_conv_b, name='layer3_relu')
        tf.histogram_summary('layer3_relu', layer3_conv)
        layer3_pool   = max_pool_2x2(layer3_conv, name='layer3_pool')

        # layer3_drop   = tf.nn.dropout(layer3_pool, self.keep_prob, name='layer3_drop')

        #Flatten output to 1D tensor
        layer3_flat = tf.reshape(layer3_pool, [-1, image_size_8 * image_size_8 * 256], name='layer3_flat')

        #Fully connected layer (image_size_8^2 * 256 -> 300 neurons)
        layer4_full_W = weight_variable(shape=[image_size_8 * image_size_8 * 256, 300], name='layer4_W')
        layer4_full_b = bias_variable([300], name='layer4_b')
        layer4_full   = tf.nn.relu(tf.matmul(layer3_flat, layer4_full_W, name='layer4_matmul') + layer4_full_b, name='layer4_full')
        tf.histogram_summary('layer4_relu', layer4_full)
        layer4_drop   = tf.nn.dropout(layer4_full, self.keep_prob, name='layer4_drop')

        #Fully connected layer (300 -> len(codes) neurons)
        layer5_relu_W = weight_variable(shape=[300, len(self.codes)], name='layer5_W')
        layer5_relu_b = bias_variable([len(self.codes)], name='layer5_b')
        layer5_relu   = tf.nn.relu(tf.matmul(layer4_drop, layer5_relu_W) + layer5_relu_b, name='layer5_relu')
        tf.histogram_summary('layer5_relu', layer5_relu)

        #Cost function and reduction (note: the ReLU above clips negative logits)
        sigmoids = tf.nn.sigmoid_cross_entropy_with_logits(logits=layer5_relu, labels=self.y_)
        cost = tf.reduce_mean(sigmoids)

        #Setup training
        tf.scalar_summary('cost/summary', cost)
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)

        #initialize the variables of the graph
        init_ops = tf.initialize_all_variables()

        return train_step, layer5_relu, init_ops
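These examples lean on helper functions (weight_variable, bias_variable, conv2d, max_pool_2x2) whose definitions are not shown. A minimal sketch of what they plausibly look like, assuming TF 1.x-era APIs, truncated-normal weight initialization, and 'SAME' padding (which the image_size // 8 arithmetic requires); note that Example #5 calls conv2d with an explicit stride, so its helper differs:

import tensorflow as tf

def weight_variable(shape, name=None):
    #Truncated-normal initialization, the usual choice in tutorials of this era
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)

def bias_variable(shape, name=None):
    #Small positive bias to keep ReLU units active at the start
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)

def conv2d(x, W, name=None):
    #Stride-1 'SAME' convolution preserves the spatial dimensions
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name)

def max_pool_2x2(x, name=None):
    #2x2 max-pool halves each spatial dimension
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name=name)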
Example #2
    def _create_network(self):
        logging.info("Begin creating Ghosh graph")

        #First "mega" layer
        conv_W_1 = weight_variable([KERNEL_SIZE, KERNEL_SIZE, 1, 70],
                                   name='conv_W_1')
        conv_b_1 = bias_variable([70], name="conv_b_1")
        conv_1 = conv2d(self.x, conv_W_1, 'conv_layer_1')
        relu_1 = tf.nn.relu(conv_1 + conv_b_1, 'relu_1')
        drop_1 = tf.nn.dropout(relu_1, self.keep_prob, name='drop_1')
        max_pool_1 = max_pool_2x2(drop_1, name='pool_1')

        #Second "mega" layer
        conv_W_2 = weight_variable([KERNEL_SIZE, KERNEL_SIZE, 70, 10],
                                   name='conv_W_2')
        conv_b_2 = bias_variable([10], name="conv_b_2")
        conv_2 = conv2d(max_pool_1, conv_W_2, 'conv_layer_2')
        relu_2 = tf.nn.relu(conv_2 + conv_b_2, 'relu_2')
        drop_2 = tf.nn.dropout(relu_2, self.keep_prob, name='drop_2')
        max_pool_2 = max_pool_2x2(drop_2, name='pool_2')

        #Fully connected layers

        flattened = tf.reshape(max_pool_2, [-1, 24 * 24 * 10],
                               name='flattened')

        #1st fully connected
        flat_W_1 = weight_variable(shape=[24 * 24 * 10, FULL_SIZE_1],
                                   name='flat_W_1')
        flat_b_1 = bias_variable(shape=[FULL_SIZE_1], name='flat_b_1')
        flat_1 = tf.nn.relu(tf.matmul(flattened, flat_W_1) + flat_b_1,
                            name='flat_1')

        #2nd
        flat_W_2 = weight_variable(shape=[FULL_SIZE_1, FULL_SIZE_2],
                                   name='flat_W_2')
        flat_b_2 = bias_variable(shape=[FULL_SIZE_2], name='flat_b_2')
        flat_2 = tf.nn.relu(tf.matmul(flat_1, flat_W_2) + flat_b_2,
                            name='flat_2')

        #3rd
        flat_W_3 = weight_variable(shape=[FULL_SIZE_2, FULL_SIZE_3],
                                   name='flat_W_3')
        flat_b_3 = bias_variable(shape=[FULL_SIZE_3], name='flat_b_3')
        flat_3 = tf.nn.relu(tf.matmul(flat_2, flat_W_3) + flat_b_3,
                            name='flat_3')

        cost = self._multilabel_error(self.y_, flat_3)

        tf.scalar_summary('cost/summary', cost)
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)

        #initialize the variables of the graph
        init_ops = tf.initialize_all_variables()

        return train_step, flat_3, init_ops
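The hard-coded 24 * 24 * 10 flatten size only works for one input resolution. A quick sanity check of the arithmetic, assuming 'SAME' convolutions (which preserve height and width) so that only the two 2x2 pools shrink the image:

input_size = 96                       #assumed input resolution (hypothetical)
after_pool_1 = input_size // 2        #48 after the first 2x2 max-pool
after_pool_2 = after_pool_1 // 2      #24 after the second
assert after_pool_2 ** 2 * 10 == 24 * 24 * 10  #matches the reshape above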
Example #3
    def _create_network(self):
        """Sets up graph of network

        :returns: (op) train_step - training operation to run, (tensor) output - raw output of the network, (op) init_ops - variable initialization operation
        """

        #1st convolution layer w/ 64 5x5 kernels and 2x2 maxpool
        layer1_conv_W = weight_variable([5,5,1,64], name="layer1_W")
        layer1_conv_b = bias_variable([64], name="layer1_b")
        layer1_conv   = tf.nn.relu(conv2d(self.x, layer1_conv_W, name='layer1_conv') + layer1_conv_b, name='layer1_relu')
        tf.histogram_summary('layer1_relu', layer1_conv)
        layer1_pool   = max_pool_2x2(layer1_conv, name='layer1_pool')
        # layer1_drop   = tf.nn.dropout(layer1_pool, self.keep_prob, name='layer1_drop')

        #2nd conv layer w/ 128 5x5 kernels and 2x2 maxpool
        layer2_conv_W = weight_variable([5,5,64,128], name='layer2_W')
        layer2_conv_b = bias_variable([128], name='layer2_b')
        layer2_conv   = tf.nn.relu(conv2d(layer1_pool, layer2_conv_W) + layer2_conv_b, name='layer2_relu')
        tf.histogram_summary('layer2_relu', layer2_conv)
        layer2_pool   = max_pool_2x2(layer2_conv, name='layer2_pool')

        #3rd conv layer w/ 256 kernels and 2x2 maxpool
        image_size_8  = self.image_size // 8  #integer division: three 2x2 pools divide each side by 8
        layer3_conv_W = weight_variable([5,5,128,256], name='layer3_W')
        layer3_conv_b = bias_variable([256], name='layer3_b')
        layer3_conv   = tf.nn.relu(conv2d(layer2_pool, layer3_conv_W) + layer3_conv_b, name='layer3_relu')
        tf.histogram_summary('layer3_relu', layer3_conv)
        layer3_pool   = max_pool_2x2(layer3_conv, name='layer3_pool')
        # layer3_drop   = tf.nn.dropout(layer3_pool, self.keep_prob, name='layer3_drop')

        #Flatten output into 1D tensor for input into fully connected layer
        layer3_flat = tf.reshape(layer3_pool, [-1, image_size_8 * image_size_8 * 256], name='layer3_pool_flat')

        #Fully connected layer; output is 300 neurons
        layer4_full_W = weight_variable(shape=[image_size_8 * image_size_8 * 256, 300], name='layer4_W')
        layer4_full_b = bias_variable([300], name='layer4_b')
        layer4_full   = tf.nn.relu(tf.matmul(layer3_flat, layer4_full_W, name='layer4_matmul') + layer4_full_b, name='layer4_full')
        layer4_drop   = tf.nn.dropout(layer4_full, self.keep_prob, name='layer4_drop')

        #Output layer of OUTPUT_SIZE neurons; softmax (not ReLU) so the log cross-entropy below is well defined
        layer5_soft_W = weight_variable(shape=[300, OUTPUT_SIZE], name='layer5_W')
        layer5_soft_b = bias_variable([OUTPUT_SIZE], name='layer5_b')
        layer5_soft   = tf.nn.softmax(tf.matmul(layer4_drop, layer5_soft_W) + layer5_soft_b, name='layer5_soft')

        #Cost and training calculations; the small epsilon guards against log(0)
        cross_entropy = -tf.reduce_sum(self.y_*tf.log(layer5_soft + 1e-10))
        tf.scalar_summary('cost', cross_entropy)
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)
        init_ops = tf.initialize_all_variables()

        return train_step, layer5_soft, init_ops
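A sketch of how a graph like this is typically driven in TF 1.x; the net object, its placeholder attributes (x, y_, keep_prob), and the next_batch helper are hypothetical names for illustration, not part of the original:

train_step, output, init_ops = net._create_network()
with tf.Session() as sess:
    sess.run(init_ops)
    for step in range(1000):
        batch_x, batch_y = next_batch(50)  #hypothetical data-loading helper
        sess.run(train_step, feed_dict={net.x: batch_x,
                                        net.y_: batch_y,
                                        net.keep_prob: 0.5})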
Example #4
    def _create_network(self):
        logging.info("Begin creating Ghosh graph")

        #First "mega" layer 
        conv_W_1 = weight_variable([KERNEL_SIZE, KERNEL_SIZE, 1, 70], name='conv_W_1')
        conv_b_1 = bias_variable([70], name="conv_b_1")
        conv_1 = conv2d(self.x, conv_W_1, 'conv_layer_1')
        relu_1 = tf.nn.relu(conv_1 + conv_b_1, 'relu_1')
        drop_1 = tf.nn.dropout(relu_1, self.keep_prob, name='drop_1')
        max_pool_1 = max_pool_2x2(drop_1, name='pool_1')

        #Second "mega" layer 
        conv_W_2 = weight_variable([KERNEL_SIZE, KERNEL_SIZE, 70, 10], name='conv_W_2')
        conv_b_2 = bias_variable([10], name="conv_b_2")
        conv_2 = conv2d(max_pool_1, conv_W_2, 'conv_layer_2')
        relu_2 = tf.nn.relu(conv_2 + conv_b_2, 'relu_2')
        drop_2 = tf.nn.dropout(relu_2, self.keep_prob, name='drop_2')
        max_pool_2 = max_pool_2x2(drop_2, name='pool_2')

        #Fully connected layers
        
        flattened = tf.reshape(max_pool_2, [-1, 24*24*10], name='flattened')

        #1st fully connected
        flat_W_1 = weight_variable(shape=[24*24*10, FULL_SIZE_1], name='flat_W_1')
        flat_b_1 = bias_variable(shape=[FULL_SIZE_1], name='flat_b_1')
        flat_1 = tf.nn.relu(tf.matmul(flattened, flat_W_1) + flat_b_1, name='flat_1')

        #2nd
        flat_W_2 = weight_variable(shape=[FULL_SIZE_1, FULL_SIZE_2], name='flat_W_2')
        flat_b_2 = bias_variable(shape=[FULL_SIZE_2], name='flat_b_2')
        flat_2 = tf.nn.relu(tf.matmul(flat_1, flat_W_2) + flat_b_2, name='flat_2')

        #3rd
        flat_W_3 = weight_variable(shape=[FULL_SIZE_2, FULL_SIZE_3], name='flat_W_3')
        flat_b_3 = bias_variable(shape=[FULL_SIZE_3], name='flat_b_3')
        flat_3 = tf.nn.relu(tf.matmul(flat_2, flat_W_3) + flat_b_3, name='flat_3')

        cost = self._multilabel_error(self.y_, flat_3)

        tf.scalar_summary('cost/summary', cost)
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)

        #initialize the variables of the graph
        init_ops = tf.initialize_all_variables()

        return train_step, flat_3, init_ops
Example #5
def createNetwork():
    # network weights
    W_conv1 = weight_variable([8, 8, 4, 32])
    b_conv1 = bias_variable([32])

    W_conv2 = weight_variable([4, 4, 32, 64])
    b_conv2 = bias_variable([64])

    W_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])

    W_fc1 = weight_variable([1600, 512])
    b_fc1 = bias_variable([512])

    W_fc2 = weight_variable([512, ACTIONS])
    b_fc2 = bias_variable([ACTIONS])

    # input layer
    s = tf.placeholder("float", [None, 80, 80, 4])

    # hidden layers
    h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
    #h_pool2 = max_pool_2x2(h_conv2)

    h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
    #h_pool3 = max_pool_2x2(h_conv3)

    #h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
    h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])

    h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)

    # readout layer
    readout = tf.matmul(h_fc1, W_fc2) + b_fc2

    return s, readout, h_fc1
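A short usage sketch for the network above; the greedy-action step is an assumption about how the readout is consumed (the pattern matches DQN-style code, but the surrounding training loop is not shown):

import numpy as np

s, readout, h_fc1 = createNetwork()
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    #Hypothetical state: a stack of four 80x80 grayscale frames
    state = np.zeros((1, 80, 80, 4), dtype=np.float32)
    q_values = sess.run(readout, feed_dict={s: state})
    action = np.argmax(q_values[0])  #greedy action over the ACTIONS outputs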
Example #6
def pointnet_fp_module(xyz1,
                       xyz2,
                       points1,
                       points2,
                       mlp,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True):
    """ PointNet Feature Propogation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    """
    with tf.compat.v1.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(
                new_points1,
                num_out_channel,
                [1, 1],
                padding="VALID",
                stride=[1, 1],
                bn=bn,
                is_training=is_training,
                scope="conv_%d" % (i),
                bn_decay=bn_decay,
            )
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
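A shape-level usage sketch, assuming the custom ops (three_nn, three_interpolate) are compiled and available; the concrete sizes are illustrative only:

#Hypothetical shapes: propagate features from 64 sparse points back to 1024
xyz1 = tf.compat.v1.placeholder(tf.float32, [8, 1024, 3])    #dense coordinates
xyz2 = tf.compat.v1.placeholder(tf.float32, [8, 64, 3])      #sparse coordinates
points1 = tf.compat.v1.placeholder(tf.float32, [8, 1024, 64])
points2 = tf.compat.v1.placeholder(tf.float32, [8, 64, 256])
is_training = tf.compat.v1.placeholder(tf.bool, [])

new_points = pointnet_fp_module(xyz1, xyz2, points1, points2,
                                mlp=[256, 128], is_training=is_training,
                                bn_decay=None, scope='fp1')
#new_points: (8, 1024, 128) -- one mlp[-1]-dim feature per dense point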
Example #7
    def _create_network(self):
        """Sets up graph of network

        :returns: (op) train_step - training operation to run, (tensor) output - raw output of the network, (op) init_ops - variable initialization operation
        """

        #1st convolution layer w/ 64 5x5 kernels and 2x2 maxpool
        layer1_conv_W = weight_variable([5, 5, 1, 64], name="layer1_W")
        layer1_conv_b = bias_variable([64], name="layer1_b")
        layer1_conv = tf.nn.relu(
            conv2d(self.x, layer1_conv_W, name='layer1_conv') + layer1_conv_b,
            name='layer1_relu')
        tf.histogram_summary('layer1_relu', layer1_conv)
        layer1_pool = max_pool_2x2(layer1_conv, name='layer1_pool')
        # layer1_drop   = tf.nn.dropout(layer1_pool, self.keep_prob, name='layer1_drop')

        #2nd conv layer w/ 128 5x5 kernels and 2x2 maxpool
        layer2_conv_W = weight_variable([5, 5, 64, 128], name='layer2_W')
        layer2_conv_b = bias_variable([128], name='layer2_b')
        layer2_conv = tf.nn.relu(conv2d(layer1_pool, layer2_conv_W) +
                                 layer2_conv_b,
                                 name='layer2_relu')
        tf.histogram_summary('layer2_relu', layer2_conv)
        layer2_pool = max_pool_2x2(layer2_conv, name='layer2_pool')

        #3rd conv layer w/ 256 kernels and 2x2 maxpool
        image_size_8 = self.image_size // 8  #integer division: three 2x2 pools divide each side by 8
        layer3_conv_W = weight_variable([5, 5, 128, 256], name='layer3_W')
        layer3_conv_b = bias_variable([256], name='layer3_b')
        layer3_conv = tf.nn.relu(conv2d(layer2_pool, layer3_conv_W) +
                                 layer3_conv_b,
                                 name='layer3_relu')
        tf.histogram_summary('layer3_relu', layer3_conv)
        layer3_pool = max_pool_2x2(layer3_conv, name='layer3_pool')
        # layer3_drop   = tf.nn.dropout(layer3_pool, self.keep_prob, name='layer3_drop')

        #Flatten output into 1D tensor for input into fully connected layer
        layer3_flat = tf.reshape(layer3_pool,
                                 [-1, image_size_8 * image_size_8 * 256],
                                 name='layer3_pool_flat')

        #Fully connected layer; output is 300 neurons
        layer4_full_W = weight_variable(
            shape=[image_size_8 * image_size_8 * 256, 300], name='layer4_W')
        layer4_full_b = bias_variable([300], name='layer4_b')
        layer4_full = tf.nn.relu(
            tf.matmul(layer3_flat, layer4_full_W, name='layer4_matmul') +
            layer4_full_b,
            name='layer4_full')
        layer4_drop = tf.nn.dropout(layer4_full,
                                    self.keep_prob,
                                    name='layer4_drop')

        #Output layer of OUTPUT_SIZE neurons; softmax (not ReLU) so the log cross-entropy below is well defined
        layer5_soft_W = weight_variable(shape=[300, OUTPUT_SIZE],
                                        name='layer5_W')
        layer5_soft_b = bias_variable([OUTPUT_SIZE], name='layer5_b')
        layer5_soft = tf.nn.softmax(
            tf.matmul(layer4_drop, layer5_soft_W) + layer5_soft_b,
            name='layer5_soft')

        #Cost and training calculations; the small epsilon guards against log(0)
        cross_entropy = -tf.reduce_sum(self.y_ * tf.log(layer5_soft + 1e-10))
        tf.scalar_summary('cost', cross_entropy)
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(
            cross_entropy)
        init_ops = tf.initialize_all_variables()

        return train_step, layer5_soft, init_ops
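Even with the epsilon fix, a hand-rolled log-softmax cross-entropy is numerically fragile. A sketch of the more stable TF 1.x route, which keeps the last layer linear and lets the fused op handle the log-sum-exp (a substitution, not what the original code does):

logits = tf.matmul(layer4_drop, layer5_soft_W) + layer5_soft_b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=logits))
train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)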
Example #8
def pointnet_sa_module(xyz,
                       points,
                       npoint,
                       radius,
                       nsample,
                       mlp,
                       mlp2,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       knn=False,
                       use_xyz=True,
                       use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one point cloud if set
                true, overriding the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, knn, use_xyz)

        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for i, num_out_channel in enumerate(mlp):
            new_points = conv2d(new_points,
                                num_out_channel, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=bn,
                                is_training=is_training,
                                scope='conv%d' % (i),
                                bn_decay=bn_decay,
                                data_format=data_format)
        if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        # Pooling in Local Regions
        if pooling == 'max':
            new_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keepdims=True,
                                       name='maxpool')
        elif pooling == 'avg':
            new_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keepdims=True,
                                        name='avgpool')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keepdims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2,
                    keepdims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keepdims=True)
        elif pooling == 'max_and_avg':
            max_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keepdims=True,
                                       name='maxpool')
            avg_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keepdims=True,
                                        name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        # [Optional] Further Processing
        if mlp2 is not None:
            if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
            for i, num_out_channel in enumerate(mlp2):
                new_points = conv2d(new_points,
                                    num_out_channel, [1, 1],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='conv_post_%d' % (i),
                                    bn_decay=bn_decay,
                                    data_format=data_format)
            if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        new_points = tf.squeeze(new_points,
                                [2])  # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points, idx
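A hedged usage sketch for a single-scale set-abstraction layer; the sizes follow common PointNet++ configurations but are assumptions here:

#Sample 512 centroids, group up to 32 neighbors within radius 0.2, and embed
#each group with a [64, 64, 128] per-point MLP
new_xyz, new_points, idx = pointnet_sa_module(
    xyz, points, npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training, bn_decay=bn_decay, scope='sa1')
#new_xyz: (B, 512, 3), new_points: (B, 512, 128), idx: (B, 512, 32)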
Example #9
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii for the local regions
            nsample_list: list of int32 -- how many points in each local region, per radius
            mlp_list: list of list of int32 -- output sizes for the MLP on each point, per radius
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, sum_k(mlp_list[k][-1])) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = conv2d(grouped_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d_%d' % (i, j),
                                        bn_decay=bn_decay)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
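A matching usage sketch for the multi-scale variant; again the radii, sample counts, and MLP widths are illustrative assumptions:

#Three grouping scales around each of 512 centroids, each with its own MLP;
#the per-scale features are concatenated along the channel axis
new_xyz, new_points = pointnet_sa_module_msg(
    xyz, points, npoint=512,
    radius_list=[0.1, 0.2, 0.4],
    nsample_list=[16, 32, 128],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training=is_training, bn_decay=bn_decay, scope='sa1_msg')
#new_points: (B, 512, 64 + 128 + 128) -- concatenated across the three scales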