Example 1
def pointnet_fp_module(xyz1,
                       xyz2,
                       points1,
                       points2,
                       mlp,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor (or None)
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        # For each dense point in xyz1, find its 3 nearest neighbors in the
        # sparse set xyz2, then interpolate their features with
        # inverse-distance weights.
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)  # guard against division by zero
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        # (B, N, C) -> (B, N, 1, C) so the 1x1 convs below act as a shared per-point MLP
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            ####################################
            new_points1 = Ops.xxlu(Ops.conv2d(new_points1,
                                              k=(1, 1),
                                              out_c=num_out_channel,
                                              str=1,
                                              pad='VALID',
                                              name='llll' + str(i)),
                                   label='lrelu')
            #new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],padding='VALID', stride=[1,1],
            #    bn=bn, is_training=is_training,scope='conv_%d'%(i), bn_decay=bn_decay)

        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
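
The custom ops three_nn and three_interpolate come from the compiled PointNet++ CUDA extensions. As a rough reference only, the inverse-distance interpolation they implement can be sketched in NumPy; this is a toy single-batch version under my own assumptions (e.g. squared distances), not the real op:

import numpy as np

def three_interpolate_np(xyz1, xyz2, points2):
    """Toy sketch for one batch element. xyz1: (n1, 3) dense points,
    xyz2: (n2, 3) sparse points, points2: (n2, c) sparse-point features."""
    # pairwise squared distances (n1, n2), then the 3 nearest sparse neighbors
    d = np.sum((xyz1[:, None, :] - xyz2[None, :, :]) ** 2, axis=-1)
    idx = np.argsort(d, axis=1)[:, :3]                            # (n1, 3)
    dist = np.maximum(np.take_along_axis(d, idx, axis=1), 1e-10)  # clamp like above
    w = (1.0 / dist) / np.sum(1.0 / dist, axis=1, keepdims=True)  # (n1, 3), sums to 1
    return np.sum(points2[idx] * w[:, :, None], axis=1)           # (n1, c)

xyz1 = np.random.rand(8, 3); xyz2 = np.random.rand(4, 3)
points2 = np.random.rand(4, 16)
print(three_interpolate_np(xyz1, xyz2, points2).shape)            # (8, 16)
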
Example 2
	def bbox_net(self, global_features):
		b1 = Ops.xxlu(Ops.fc(global_features, out_d=512, name='b1'), label='lrelu')
		b2 = Ops.xxlu(Ops.fc(b1, out_d=256, name='b2'), label='lrelu')

		#### sub branch 1: box vertices
		b3 = Ops.xxlu(Ops.fc(b2, out_d=256, name='b3'), label='lrelu')
		bbvert = Ops.fc(b3, out_d=self.bb_num * 2 * 3, name='bbvert')
		bbvert = tf.reshape(bbvert, [-1, self.bb_num, 2, 3])
		# canonicalize: first vertex = element-wise min corner, second = max corner,
		# so every predicted box is a valid axis-aligned bounding box
		points_min = tf.reduce_min(bbvert, axis=-2)[:, :, None, :]
		points_max = tf.reduce_max(bbvert, axis=-2)[:, :, None, :]
		y_bbvert_pred = tf.concat([points_min, points_max], axis=-2, name='y_bbvert_pred')

		#### sub branch 2: per-box confidence scores in (0, 1)
		b4 = Ops.xxlu(Ops.fc(b2, out_d=256, name='b4'), label='lrelu')
		y_bbscore_pred = tf.sigmoid(Ops.fc(b4, out_d=self.bb_num * 1, name='y_bbscore_pred'))

		return y_bbvert_pred, y_bbscore_pred
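
The min/max step canonicalizes each predicted box: whatever two vertices the fc layer emits, the output is always (min corner, max corner), so downstream IoU and loss computations always see a valid axis-aligned box. A quick NumPy illustration of that reordering, with hypothetical values for a single box:

import numpy as np

# two raw predicted vertices of one box, in arbitrary order per coordinate
bbvert = np.array([[[2.0, -1.0, 5.0],
                    [0.5,  3.0, 4.0]]])          # (1 box, 2 vertices, 3)
points_min = bbvert.min(axis=-2, keepdims=True)  # element-wise min corner
points_max = bbvert.max(axis=-2, keepdims=True)  # element-wise max corner
print(np.concatenate([points_min, points_max], axis=-2))
# [[[ 0.5 -1.   4. ]
#   [ 2.   3.   5. ]]]
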
Example 3
	def backbone_pointnet2(self, X_pc, is_train=None):
		import helper_pointnet2 as pnet2
		points_num = tf.shape(X_pc)[1]
		l0_xyz = X_pc[:,:,0:3]
		l0_points = X_pc[:,:,3:9]

		# Set abstraction layers: progressively downsample the cloud
		# (1024 -> 256 -> 64 points, then one global vector via group_all)
		l1_xyz, l1_points, l1_indices = pnet2.pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32,
			mlp=[32, 32, 64], mlp2=None, group_all=False, is_training=None, bn_decay=None, scope='layer1')
		l2_xyz, l2_points, l2_indices = pnet2.pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=64,
			mlp=[64, 64, 128], mlp2=None, group_all=False, is_training=None, bn_decay=None, scope='layer2')
		l3_xyz, l3_points, l3_indices = pnet2.pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=128,
			mlp=[128, 128, 256], mlp2=None, group_all=False, is_training=None, bn_decay=None, scope='layer3')
		l4_xyz, l4_points, l4_indices = pnet2.pointnet_sa_module(l3_xyz, l3_points, npoint=None, radius=None, nsample=None,
			mlp=[256, 256, 512], mlp2=None, group_all=True, is_training=None, bn_decay=None, scope='layer4')

		# Feature propagation layers: interpolate features back up the hierarchy
		l3_points = pnet2.pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256, 256], is_training=None, bn_decay=None, scope='fa_layer1')
		l2_points = pnet2.pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training=None, bn_decay=None, scope='fa_layer2')
		l1_points = pnet2.pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training=None, bn_decay=None, scope='fa_layer3')
		l0_points = pnet2.pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz, l0_points], axis=-1),
			l1_points, [128, 128, 128, 128], is_training=None, bn_decay=None, scope='fa_layer4')
		global_features = tf.reshape(l4_points, [-1, 512])
		point_features = l0_points

		# sem head: (B, N, C) -> (B, N, 1, C) so the 1x1 convs act as a shared per-point MLP
		l0_points = l0_points[:,:,None,:]
		sem1 = Ops.xxlu(Ops.conv2d(l0_points, k=(1, 1), out_c=128, str=1, pad='VALID', name='sem1'), label='lrelu')
		sem2 = Ops.xxlu(Ops.conv2d(sem1, k=(1, 1), out_c=64, str=1, pad='VALID', name='sem2'), label='lrelu')
		sem2 = Ops.dropout(sem2, keep_prob=0.5, is_train=is_train, name='sem2_dropout')
		sem3 = Ops.conv2d(sem2, k=(1, 1), out_c=self.sem_num, str=1, pad='VALID', name='sem3')
		sem3 = tf.reshape(sem3, [-1, points_num, self.sem_num])
		self.y_psem_logits = sem3
		y_sem_pred = tf.nn.softmax(self.y_psem_logits, name='y_sem_pred')

		return point_features, global_features, y_sem_pred
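
The expand-dims followed by 1x1 conv2d in the semantic head is the usual trick for applying one shared fully connected layer to every point independently. A NumPy sketch of that equivalence, with hypothetical weights:

import numpy as np

B, N, C_in, C_out = 2, 5, 6, 4
x = np.random.rand(B, N, C_in)
W = np.random.rand(C_in, C_out); b = np.random.rand(C_out)

# a 1x1 conv over (B, N, 1, C_in) is just x @ W + b applied to each point
per_point_fc = x @ W + b                        # (B, N, C_out)
as_1x1_conv = (x[:, :, None, :] @ W + b)[:, :, 0, :]
print(np.allclose(per_point_fc, as_1x1_conv))   # True
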
Example 4
    def backbone_pointnet(self, X_pc, is_train):
        [_, _, points_cc] = X_pc.get_shape()
        points_num = tf.shape(X_pc)[1]
        # (B, N, C) -> (B, N, C, 1) so the first conv's (1, C) kernel fuses all
        # input channels of each point
        X_pc = tf.reshape(X_pc, [-1, points_num, int(points_cc), 1])

        l1 = Ops.xxlu(Ops.conv2d(X_pc,
                                 k=(1, points_cc),
                                 out_c=64,
                                 str=1,
                                 pad='VALID',
                                 name='l1'),
                      label='lrelu')
        l2 = Ops.xxlu(Ops.conv2d(l1,
                                 k=(1, 1),
                                 out_c=64,
                                 str=1,
                                 pad='VALID',
                                 name='l2'),
                      label='lrelu')
        l3 = Ops.xxlu(Ops.conv2d(l2,
                                 k=(1, 1),
                                 out_c=64,
                                 str=1,
                                 pad='VALID',
                                 name='l3'),
                      label='lrelu')
        l4 = Ops.xxlu(Ops.conv2d(l3,
                                 k=(1, 1),
                                 out_c=128,
                                 str=1,
                                 pad='VALID',
                                 name='l4'),
                      label='lrelu')
        l5 = Ops.xxlu(Ops.conv2d(l4,
                                 k=(1, 1),
                                 out_c=1024,
                                 str=1,
                                 pad='VALID',
                                 name='l5'),
                      label='lrelu')
        # symmetric max-pool over the point axis -> order-invariant global feature
        global_features = tf.reduce_max(l5, axis=1, name='maxpool')
        global_features = tf.reshape(global_features, [-1, int(l5.shape[-1])])
        point_features = tf.reshape(l5, [-1, points_num, int(l5.shape[-1])])

        ####  sem: per-point semantic head
        g1 = Ops.xxlu(Ops.fc(global_features, out_d=256, name='semg1'),
                      label='lrelu')
        g2 = Ops.xxlu(Ops.fc(g1, out_d=128, name='semg2'), label='lrelu')
        # broadcast the compressed global feature to every point, then concat
        # with the per-point features
        sem1 = tf.tile(g2[:, None, None, :], [1, points_num, 1, 1])
        sem1 = tf.concat([l5, sem1], axis=-1)
        sem1 = Ops.xxlu(Ops.conv2d(sem1,
                                   k=(1, 1),
                                   out_c=512,
                                   str=1,
                                   pad='VALID',
                                   name='sem1'),
                        label='lrelu')
        sem2 = Ops.xxlu(Ops.conv2d(sem1,
                                   k=(1, 1),
                                   out_c=256,
                                   str=1,
                                   pad='VALID',
                                   name='sem2'),
                        label='lrelu')
        sem3 = Ops.xxlu(Ops.conv2d(sem2,
                                   k=(1, 1),
                                   out_c=128,
                                   str=1,
                                   pad='VALID',
                                   name='sem3'),
                        label='lrelu')
        sem3 = Ops.dropout(sem3,
                           keep_prob=0.5,
                           is_train=is_train,
                           name='sem3_dropout')
        sem4 = Ops.conv2d(sem3,
                          k=(1, 1),
                          out_c=self.sem_num,
                          str=1,
                          pad='VALID',
                          name='sem4')
        sem4 = tf.reshape(sem4, [-1, points_num, self.sem_num])
        self.y_psem_logits = sem4
        y_sem_pred = tf.nn.softmax(self.y_psem_logits, name='y_sem_pred')

        return point_features, global_features, y_sem_pred
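
The reduce_max over the point axis is what makes the global feature order-invariant: shuffling the input points leaves it unchanged. A minimal NumPy check of that property:

import numpy as np

feats = np.random.rand(4096, 1024)           # per-point features (N, C)
perm = np.random.permutation(feats.shape[0])
g1 = feats.max(axis=0)                       # global feature, original order
g2 = feats[perm].max(axis=0)                 # global feature after shuffling
print(np.array_equal(g1, g2))                # True: max is a symmetric function
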
Example 5
    def build_graph(self, GPU='0'):
        #######   1. define inputs
        self.X_pc = tf.placeholder(shape=[None, None, self.points_cc],
                                   dtype=tf.float32,
                                   name='X_pc')
        self.Y_bbvert = tf.placeholder(shape=[None, self.bb_num, 2, 3],
                                       dtype=tf.float32,
                                       name='Y_bbvert')
        self.Y_pmask = tf.placeholder(shape=[None, self.bb_num, None],
                                      dtype=tf.float32,
                                      name='Y_pmask')
        self.Y_psem = tf.placeholder(shape=[None, None, self.sem_num],
                                     dtype=tf.float32,
                                     name='Y_psem')
        self.is_train = tf.placeholder(dtype=tf.bool, name='is_train')
        self.lr = tf.placeholder(dtype=tf.float32, name='lr')

        #######  2. define networks, losses
        with tf.variable_scope('backbone'):
            #self.point_features, self.global_features, self.y_psem_pred = self.backbone_pointnet(self.X_pc, self.is_train)
            self.point_features, self.global_features, self.y_psem_pred = self.backbone_pointnet2(
                self.X_pc, self.is_train)

            ### loss
            self.psemce_loss = Ops.get_loss_psem_ce(self.y_psem_logits,
                                                    self.Y_psem)
            self.sum_psemce_loss = tf.summary.scalar('psemce_loss',
                                                     self.psemce_loss)

        with tf.variable_scope('bbox'):
            self.y_bbvert_pred_raw, self.y_bbscore_pred_raw = self.bbox_net(
                self.global_features)
            #### association, only used for training
            bbox_criteria = 'use_all_ce_l2_iou'
            self.y_bbvert_pred, self.pred_bborder = Ops.bbvert_association(
                self.X_pc,
                self.y_bbvert_pred_raw,
                self.Y_bbvert,
                label=bbox_criteria)
            self.y_bbscore_pred = Ops.bbscore_association(
                self.y_bbscore_pred_raw, self.pred_bborder)

            ### loss
            self.bbvert_loss, self.bbvert_loss_l2, self.bbvert_loss_ce, self.bbvert_loss_iou = \
             Ops.get_loss_bbvert(self.X_pc, self.y_bbvert_pred, self.Y_bbvert, label=bbox_criteria)
            self.bbscore_loss = Ops.get_loss_bbscore(self.y_bbscore_pred,
                                                     self.Y_bbvert)
            self.sum_bbox_vert_loss = tf.summary.scalar(
                'bbvert_loss', self.bbvert_loss)
            self.sum_bbox_vert_loss_l2 = tf.summary.scalar(
                'bbvert_loss_l2', self.bbvert_loss_l2)
            self.sum_bbox_vert_loss_ce = tf.summary.scalar(
                'bbvert_loss_ce', self.bbvert_loss_ce)
            self.sum_bbox_vert_loss_iou = tf.summary.scalar(
                'bbvert_loss_iou', self.bbvert_loss_iou)
            self.sum_bbox_score_loss = tf.summary.scalar(
                'bbscore_loss', self.bbscore_loss)

        with tf.variable_scope('pmask'):
            self.y_pmask_pred = self.pmask_net(self.point_features,
                                               self.global_features,
                                               self.y_bbvert_pred,
                                               self.y_bbscore_pred)

            ### loss
            self.pmask_loss = Ops.get_loss_pmask(self.X_pc, self.y_pmask_pred,
                                                 self.Y_pmask)
            self.sum_pmask_loss = tf.summary.scalar('pmask_loss',
                                                    self.pmask_loss)

        with tf.variable_scope('pmask', reuse=True):
            #### during testing, no need to associate, use unordered predictions
            self.y_pmask_pred_raw = self.pmask_net(self.point_features,
                                                   self.global_features,
                                                   self.y_bbvert_pred_raw,
                                                   self.y_bbscore_pred_raw)

        ######   3. define optimizers
        var_backbone = [
            var for var in tf.trainable_variables()
            if var.name.startswith('backbone')
            and not var.name.startswith('backbone/sem')
        ]
        var_sem = [
            var for var in tf.trainable_variables()
            if var.name.startswith('backbone/sem')
        ]
        var_bbox = [
            var for var in tf.trainable_variables()
            if var.name.startswith('bbox')
        ]
        var_pmask = [
            var for var in tf.trainable_variables()
            if var.name.startswith('pmask')
        ]

        end_2_end_loss = self.bbvert_loss + self.bbscore_loss + self.pmask_loss + self.psemce_loss
        self.optim = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(
            end_2_end_loss,
            var_list=var_bbox + var_pmask + var_backbone + var_sem)

        ######   4. others
        print(Ops.variable_count())
        self.saver = tf.train.Saver(max_to_keep=20)
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.visible_device_list = GPU
        self.sess = tf.Session(config=config)
        self.sum_writer_train = tf.summary.FileWriter(self.train_sum_dir,
                                                      self.sess.graph)
        self.sum_write_test = tf.summary.FileWriter(self.test_sum_dir)
        self.sum_merged = tf.summary.merge_all()

        path = self.train_mod_dir
        if os.path.isfile(path + 'model.cptk.data-00000-of-00001'):
            print("restoring saved model")
            self.saver.restore(self.sess, path + 'model.cptk')
        else:
            print("model not found, all weights are initialized")
            self.sess.run(tf.global_variables_initializer())
        return 0
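
The optimizer's variable lists are carved out purely by name prefix, which is why the semantic head's layers are all named 'sem...' inside the 'backbone' scope: that lets var_backbone exclude them while var_sem collects them. The prefix logic is plain string matching and can be checked without building a graph; dummy names below are for illustration only:

# dummy variable names mimicking the scopes created above
names = ['backbone/layer1/w', 'backbone/sem1/w', 'backbone/sem2/w',
         'bbox/b1/w', 'pmask/pmask1/w']
var_backbone = [n for n in names
                if n.startswith('backbone') and not n.startswith('backbone/sem')]
var_sem = [n for n in names if n.startswith('backbone/sem')]
print(var_backbone)  # ['backbone/layer1/w']
print(var_sem)       # ['backbone/sem1/w', 'backbone/sem2/w']
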
Example 6
    def pmask_net(self, point_features, global_features, bbox, bboxscore):
        p_f_num = int(point_features.shape[-1])
        p_num = tf.shape(point_features)[1]
        bb_num = int(bbox.shape[1])

        global_features = tf.tile(
            Ops.xxlu(Ops.fc(global_features, out_d=256, name='down_g1'),
                     label='lrelu')[:, None, None, :], [1, p_num, 1, 1])
        # (B, N, C) -> (B, N, C, 1); the (1, C) kernel fuses all channels into a
        # 256-d feature per point
        point_features = Ops.xxlu(Ops.conv2d(point_features[:, :, :, None],
                                             k=(1, p_f_num),
                                             out_c=256,
                                             str=1,
                                             name='down_p1',
                                             pad='VALID'),
                                  label='lrelu')
        point_features = tf.concat([point_features, global_features], axis=-1)
        point_features = Ops.xxlu(Ops.conv2d(point_features,
                                             k=(1,
                                                int(point_features.shape[-2])),
                                             out_c=128,
                                             str=1,
                                             pad='VALID',
                                             name='down_p2'),
                                  label='lrelu')
        point_features = Ops.xxlu(Ops.conv2d(point_features,
                                             k=(1,
                                                int(point_features.shape[-2])),
                                             out_c=128,
                                             str=1,
                                             pad='VALID',
                                             name='down_p3'),
                                  label='lrelu')
        point_features = tf.squeeze(point_features, axis=-2)

        # pair every box with every point: tile the 7 box numbers (6 vertex
        # coordinates + 1 score) across points, and point features across boxes
        bbox_info = tf.tile(
            tf.concat(
                [tf.reshape(bbox, [-1, bb_num, 6]), bboxscore[:, :, None]],
                axis=-1)[:, :, None, :], [1, 1, p_num, 1])
        pmask0 = tf.tile(point_features[:, None, :, :], [1, bb_num, 1, 1])
        pmask0 = tf.concat([pmask0, bbox_info], axis=-1)
        pmask0 = tf.reshape(pmask0, [-1, p_num, int(pmask0.shape[-1]), 1])

        pmask1 = Ops.xxlu(Ops.conv2d(pmask0,
                                     k=(1, int(pmask0.shape[-2])),
                                     out_c=64,
                                     str=1,
                                     pad='VALID',
                                     name='pmask1'),
                          label='lrelu')
        pmask2 = Ops.xxlu(Ops.conv2d(pmask1,
                                     k=(1, 1),
                                     out_c=32,
                                     str=1,
                                     pad='VALID',
                                     name='pmask2'),
                          label='lrelu')
        pmask3 = Ops.conv2d(pmask2,
                            k=(1, 1),
                            out_c=1,
                            str=1,
                            pad='VALID',
                            name='pmask3')
        pmask3 = tf.reshape(pmask3, [-1, bb_num, p_num])

        y_pmask_logits = pmask3
        y_pmask_pred = tf.nn.sigmoid(y_pmask_logits, name='y_pmask_pred')

        return y_pmask_pred
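
The heart of pmask_net is that box-point pairing: point features are tiled across boxes, the 7 box numbers are tiled across points, and the two are concatenated so each (box, point) pair gets its own feature row. A NumPy shape sketch of the pairing with toy sizes:

import numpy as np

bb_num, p_num, c = 3, 5, 128
point_features = np.random.rand(p_num, c)
box_info = np.random.rand(bb_num, 7)           # 6 vertex coords + 1 score per box

pmask0 = np.broadcast_to(point_features[None, :, :], (bb_num, p_num, c))
bbox_info = np.broadcast_to(box_info[:, None, :], (bb_num, p_num, 7))
paired = np.concatenate([pmask0, bbox_info], axis=-1)
print(paired.shape)                            # (3, 5, 135): one row per (box, point)
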
Example 7
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of lists of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                ####################################
                # layer names must be unique per (scale i, layer j); reusing
                # 'lll' + str(i) across j would collide within this scope
                grouped_points = Ops.xxlu(Ops.conv2d(grouped_points,
                                                     k=(1, 1),
                                                     out_c=num_out_channel,
                                                     str=1,
                                                     pad='VALID',
                                                     name='lll%d_%d' % (i, j)),
                                          label='lrelu')
                #grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                #padding='VALID', stride=[1,1], bn=bn, is_training=is_training,scope='conv%d_%d'%(i,j), bn_decay=bn_decay)

            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
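
query_ball_point is another compiled op: for each sampled center it returns up to nsample neighbor indices within radius, padding short neighbor lists by repetition. A toy single-cloud NumPy version conveying the idea (my own sketch, not the real op, which also returns per-region counts):

import numpy as np

def query_ball_np(radius, nsample, xyz, centers):
    """Toy ball query for one point cloud. xyz: (n, 3), centers: (m, 3)."""
    idx = np.zeros((len(centers), nsample), dtype=np.int64)
    for i, c in enumerate(centers):
        hits = np.where(np.linalg.norm(xyz - c, axis=1) < radius)[0][:nsample]
        idx[i, :len(hits)] = hits
        idx[i, len(hits):] = hits[0] if len(hits) else 0  # pad by repeating
    return idx

xyz = np.random.rand(100, 3)
print(query_ball_np(0.2, 16, xyz, xyz[:4]).shape)         # (4, 16)
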
Example 8
def pointnet_sa_module(xyz,
                       points,
                       npoint,
                       radius,
                       nsample,
                       mlp,
                       mlp2,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       knn=False,
                       use_xyz=True,
                       use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one PC if set true, overriding
                the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, knn, use_xyz)

        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for i, num_out_channel in enumerate(mlp):
            ####################################
            new_points = Ops.xxlu(Ops.conv2d(new_points,
                                             k=(1, 1),
                                             out_c=num_out_channel,
                                             str=1,
                                             pad='VALID',
                                             name='l' + str(i)),
                                  label='lrelu')
            #new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],padding='VALID', stride=[1,1],
            #        bn=bn, is_training=is_training, scope='conv%d'%(i), bn_decay=bn_decay, data_format=data_format)

        if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        # Pooling in Local Regions
        if pooling == 'max':
            new_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
        elif pooling == 'avg':
            new_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2,
                    keep_dims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max_and_avg':
            max_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
            avg_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        # [Optional] Further Processing
        if mlp2 is not None:
            if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
            for i, num_out_channel in enumerate(mlp2):
                ####################################
                new_points = Ops.xxlu(Ops.conv2d(new_points,
                                                 k=(1, 1),
                                                 out_c=num_out_channel,
                                                 str=1,
                                                 pad='VALID',
                                                 name='ll' + str(i)),
                                      label='lrelu')
                #new_points = tf_util.conv2d(new_points, num_out_channel, [1,1], padding='VALID', stride=[1,1],
                #bn=bn, is_training=is_training, scope='conv_post_%d'%(i), bn_decay=bn_decay,data_format=data_format)

            if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        new_points = tf.squeeze(new_points,
                                [2])  # (batch_size, npoints, mlp[-1] or mlp2[-1])
        return new_xyz, new_points, idx
        return new_xyz, new_points, idx
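
Of the pooling variants, 'weighted_avg' is the least obvious: neighbor features are averaged with normalized exp(-5 * dist) weights, so closer points dominate the region feature. A NumPy sketch of the weighting for one local region, with hypothetical distances:

import numpy as np

dists = np.array([0.05, 0.10, 0.30])[:, None]   # (nsample, 1) distances to center
feats = np.random.rand(3, 64)                   # (nsample, mlp[-1]) neighbor features

w = np.exp(-dists * 5)
w = w / w.sum(axis=0, keepdims=True)            # normalized inverse-distance weights
pooled = (feats * w).sum(axis=0)                # (64,) weighted average
print(w.ravel())                                # the closest neighbor gets the largest weight
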