def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet++, input is BxNx3, output BxNxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32,
                                                       mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32,
                                                       mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32,
                                                       mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32,
                                                       mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256, 256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 128, 128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
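# --- Usage sketch (illustrative, not part of the original file): builds the
# semantic-segmentation graph above with TF1 placeholders and runs one forward
# pass. The batch/point/class sizes and the random input are assumptions for
# demonstration only; pointnet_sa_module, pointnet_fp_module and tf_util must
# already be importable as in the original repo.
import numpy as np
import tensorflow as tf

def demo_semantic_seg(batch_size=8, num_point=4096, num_class=21):
    pc_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = get_model(pc_pl, is_training_pl, num_class)
    # logits: (batch_size, num_point, num_class) per-point class scores
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(logits, feed_dict={
            pc_pl: np.random.rand(batch_size, num_point, 3).astype(np.float32),
            is_training_pl: False})
    return out.shape  # (8, 4096, 21)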
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet++, input is BxNx6 (XYZ NormalX NormalY NormalZ), output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64,
                                                       mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64,
                                                       mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None,
                                                       mlp=[256, 512, 1024], mlp2=None, group_all=True,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz, l0_points], axis=-1), l1_points,
                                   [128, 128, 128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Part segmentation PointNet++ (MSG) with one-hot class conditioning,
        input is BxNx6 (XYZ plus three extra point features), output BxNx50 """
    batch_size = tf.shape(point_cloud)[0]
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1, 0.2, 0.4], [32, 64, 128],
                                               [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
                                               is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4, 0.8], [64, 128],
                                               [[128, 128, 256], [128, 196, 256]],
                                               is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None,
                                                       mlp=[256, 512, 1024], mlp2=None, group_all=True,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer2')

    # Tile the per-cloud one-hot category label so it can be concatenated with
    # per-point features. NUM_CATEGORIES is a module-level constant (e.g. 16 for
    # ShapeNet part segmentation).
    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1, num_point, 1])
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
                                   tf.concat([cls_label_one_hot, l0_xyz, l0_points], axis=-1),
                                   l1_points, [128, 128], is_training, bn_decay, scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
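# --- Shape sketch (illustrative, not from the original repo): the one-hot
# class-label conditioning used above, in isolation. A per-cloud category id of
# shape (B,) becomes a (B, N, C) tensor that can be concatenated with per-point
# features; the default num_categories=16 is an assumption matching ShapeNet part.
import tensorflow as tf

def tile_class_label(cls_label, num_point, num_categories=16):
    one_hot = tf.one_hot(cls_label, depth=num_categories)  # (B, C)
    one_hot = tf.expand_dims(one_hot, axis=1)               # (B, 1, C)
    return tf.tile(one_hot, [1, num_point, 1])              # (B, N, C)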
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_rv=False, use_bn=False,
                  use_ibn=False, use_normal=False, bn_decay=None, up_ratio=4):
    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:, :, 0:3]
        if use_normal:
            l0_points = point_cloud[:, :, 3:]
        else:
            l0_points = None

        # Set Abstraction (MSG) layers; npoint must be an int, hence //
        l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, npoint=num_point,
                                                   radius_list=[0.05, 0.1, 0.15], nsample_list=[32, 32, 32],
                                                   mlp_list=[[32, 32, 64], [32, 32, 64], [32, 32, 64]],
                                                   is_training=is_training, bn_decay=bn_decay, scope='layer1',
                                                   bn=use_bn, ibn=use_ibn, use_xyz=True)
        l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, npoint=num_point // 2,
                                                   radius_list=[0.1, 0.2, 0.3], nsample_list=[32, 32, 32],
                                                   mlp_list=[[64, 64, 128], [64, 64, 128], [64, 64, 128]],
                                                   is_training=is_training, bn_decay=bn_decay, scope='layer2',
                                                   bn=use_bn, ibn=use_ibn, use_xyz=True)
        l3_xyz, l3_points = pointnet_sa_module_msg(l2_xyz, l2_points, npoint=num_point // 4,
                                                   radius_list=[0.2, 0.3, 0.4], nsample_list=[32, 32, 32],
                                                   mlp_list=[[128, 128, 256], [128, 128, 256], [128, 128, 256]],
                                                   is_training=is_training, bn_decay=bn_decay, scope='layer3',
                                                   bn=use_bn, ibn=use_ibn, use_xyz=True)
        l4_xyz, l4_points = pointnet_sa_module_msg(l3_xyz, l3_points, npoint=num_point // 8,
                                                   radius_list=[0.3, 0.4, 0.5], nsample_list=[32, 32, 32],
                                                   mlp_list=[[256, 256, 512], [256, 256, 512], [256, 256, 512]],
                                                   is_training=is_training, bn_decay=bn_decay, scope='layer4',
                                                   bn=use_bn, ibn=use_ibn, use_xyz=True)

        # # combine random variables into the network
        # if use_rv:
        #     rv = tf.tile(tf.random_normal([batch_size, 1, 128], mean=0.0, stddev=1.0), [1, 16, 1])
        #     l4_points = tf.concat((l4_points, rv), axis=-1)

        # Feature Propagation layers
        l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256, 256], is_training, bn_decay,
                                       scope='fa_layer1', bn=use_bn, ibn=use_ibn)
        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay,
                                       scope='fa_layer2', bn=use_bn, ibn=use_ibn)
        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay,
                                       scope='fa_layer3', bn=use_bn, ibn=use_ibn)
        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_xyz, l1_points, [128, 128, 128], is_training, bn_decay,
                                       scope='fa_layer4', bn=use_bn, ibn=use_ibn)

        # FC layers: widen the channels by up_ratio, then reshape the extra
        # channels into up_ratio*N points (the original hard-coded the factor 4)
        l0_points = tf.expand_dims(l0_points, axis=2)
        net = tf_util2.conv2d(l0_points, 128 * up_ratio, 1, padding='VALID', bn=use_bn,
                              is_training=is_training, scope='fc1', bn_decay=bn_decay)
        net = tf.reshape(net, [batch_size, up_ratio * num_point, 1, -1])
        coord = tf_util2.conv2d(net, 64, 1, padding='VALID', bn=use_bn, is_training=is_training,
                                scope='fc2', bn_decay=bn_decay)
        coord = tf_util2.conv2d(coord, 3, 1, padding='VALID', bn=use_bn, is_training=is_training,
                                scope='fc3', bn_decay=bn_decay, activation_fn=None)
        coord = tf.squeeze(coord, [2])  # B x (up_ratio*N) x 3

        # normal prediction branch (computed but not returned below)
        normal = tf_util2.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                 is_training=is_training, scope='norm_fc_layer1', bn_decay=bn_decay)
        normal = tf_util2.conv2d(normal, 3, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                 is_training=is_training, scope='norm_fc_layer2', bn_decay=bn_decay,
                                 activation_fn=None, weight_decay=0.0)  # B x (up_ratio*N) x 1 x 3
        normal = tf.squeeze(normal, [2])  # B x (up_ratio*N) x 3

    return coord, None, None
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_bn=False, use_ibn=False,
                  use_normal=False, bn_decay=None, up_ratio=4, num_addpoint=600, idx=None, is_crop=False):
    print("Crop flag is", is_crop)
    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:, :, 0:3]
        if use_normal:
            l0_points = point_cloud[:, :, 3:]
        else:
            l0_points = None

        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point,
                                                           radius=bradius * 0.1, bn=use_bn, ibn=use_ibn,
                                                           nsample=12, mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer1')
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point // 2,
                                                           radius=bradius * 0.2, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point // 4,
                                                           radius=bradius * 0.4, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer3')
        l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point // 8,
                                                           radius=bradius * 0.6, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer4')

        # At test time (and when training on cropped patches) keep only the first
        # half of the points, selected by the externally supplied gather indices.
        if (not is_training) or is_crop:
            l0_xyz = tf.gather_nd(l0_xyz, idx[:, :num_point // 2, :])
            l1_points = tf.gather_nd(l1_points, idx[:, :num_point // 2, :])

        # Feature Propagation layers: propagate every level directly back to l0
        up_l4_points = pointnet_fp_module(l0_xyz, l4_xyz, None, l4_points, [64], is_training, bn_decay,
                                          scope='fa_layer1', bn=use_bn, ibn=use_ibn)
        up_l3_points = pointnet_fp_module(l0_xyz, l3_xyz, None, l3_points, [64], is_training, bn_decay,
                                          scope='fa_layer2', bn=use_bn, ibn=use_ibn)
        up_l2_points = pointnet_fp_module(l0_xyz, l2_xyz, None, l2_points, [64], is_training, bn_decay,
                                          scope='fa_layer3', bn=use_bn, ibn=use_ibn)

        feat = tf.concat([up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz], axis=-1)
        feat = tf.expand_dims(feat, axis=2)

        # branch 1: feature expansion for the newly generated points
        with tf.variable_scope('up_layer', reuse=reuse):
            up_feat_list = []
            for i in range(up_ratio):
                up_feat = tf_util2.conv2d(feat, 256, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                          is_training=is_training, scope='conv1_%d' % (i), bn_decay=bn_decay)
                up_feat = tf_util2.conv2d(up_feat, 128, [1, 1], padding='VALID', stride=[1, 1], bn=use_bn,
                                          is_training=is_training, scope='conv2_%d' % (i), bn_decay=bn_decay)
                up_feat_list.append(up_feat)
            up_feat = tf.concat(up_feat_list, axis=1)

        # branch 2: per-point distance to the edge
        dist_feat = tf_util2.conv2d(up_feat, 64, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                    is_training=is_training, scope='dist_fc1', bn_decay=bn_decay, weight_decay=0.0)
        dist = tf_util2.conv2d(dist_feat, 1, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                               is_training=is_training, scope='dist_fc2', bn_decay=bn_decay,
                               activation_fn=None, weight_decay=0.0)
        dist = tf.squeeze(dist, axis=[2, 3])

        # coordinate regression, conditioned on the edge-distance features
        combined_feat = tf.concat((up_feat, dist_feat), axis=-1)
        coord_feat = tf_util2.conv2d(combined_feat, 64, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                     is_training=is_training, scope='coord_fc1', bn_decay=bn_decay, weight_decay=0.0)
        r_coord = tf_util2.conv2d(coord_feat, 3, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                  is_training=is_training, scope='coord_fc2', bn_decay=bn_decay,
                                  activation_fn=None, weight_decay=0.0)
        coord = tf.squeeze(r_coord, [2])

        # Prune the generated points by predicted edge distance (how to better
        # prune them, e.g. as a guidance signal, is an open question). A
        # randomized pick from a slightly larger top-k pool was also tried:
        # poolsize = int(num_addpoint * 1.2)
        # val, idx1 = tf.nn.top_k(-dist, poolsize)
        # tmp_idx0 = tf.tile(tf.reshape(tf.range(batch_size), (batch_size, 1)), (1, num_addpoint))
        # tmp_idx1 = tf.random_uniform((batch_size, num_addpoint), 0, poolsize, tf.int32)
        # idx1 = tf.gather_nd(idx1, tf.stack([tmp_idx0, tmp_idx1], axis=-1))
        edge_dist, idx1 = tf.nn.top_k(-dist, num_addpoint)
        idx0 = tf.tile(tf.reshape(tf.range(batch_size), (batch_size, 1)), (1, num_addpoint))
        idx = tf.stack([idx0, idx1], axis=-1)

    return dist, coord, idx, None
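# --- Sketch (illustrative, not from the original repo): how the (B, num_addpoint, 2)
# index tensor returned above can be used downstream to pick the predicted
# edge points out of `coord` with tf.gather_nd. The function name is hypothetical.
import tensorflow as tf

def gather_edge_points(coord, idx):
    # coord: (B, M, 3) predicted coordinates; idx: (B, K, 2) of [batch, point] pairs
    return tf.gather_nd(coord, idx)  # (B, K, 3) points with the smallest edge distance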
def get_model(point_cloud, is_training, num_class, hyperparams, bn_decay=None):
    """ Semantic segmentation PointNet++, input is BxNx3, output BxNxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    if hyperparams['use_color'] or hyperparams['use_z_feature']:
        feature_size = 3 * int(hyperparams['use_color']) + int(hyperparams['use_z_feature'])
        l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
        l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, feature_size])
    else:
        l0_xyz = point_cloud
        l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(
        l0_xyz, l0_points, npoint=hyperparams['l1_npoint'], radius=hyperparams['l1_radius'],
        nsample=hyperparams['l1_nsample'], mlp=[32, 32, 64], mlp2=None, group_all=False,
        is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(
        l1_xyz, l1_points, npoint=hyperparams['l2_npoint'], radius=hyperparams['l2_radius'],
        nsample=hyperparams['l2_nsample'], mlp=[64, 64, 128], mlp2=None, group_all=False,
        is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(
        l2_xyz, l2_points, npoint=hyperparams['l3_npoint'], radius=hyperparams['l3_radius'],
        nsample=hyperparams['l3_nsample'], mlp=[128, 128, 256], mlp2=None, group_all=False,
        is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(
        l3_xyz, l3_points, npoint=hyperparams['l4_npoint'], radius=hyperparams['l4_radius'],
        nsample=hyperparams['l4_nsample'], mlp=[256, 256, 512], mlp2=None, group_all=False,
        is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256, 256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 128, 128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
def completion(self, inputs, is_training):
    num_point = inputs.get_shape()[1].value
    l0_xyz = inputs[:, :, 0:3]
    l0_points = None
    bradius = 1.0
    use_bn = False
    use_ibn = False
    bn_decay = 0.95
    up_ratio = 8
    self.grid_size = 2
    self.num_coarse = int(num_point * up_ratio / 4)

    with tf.variable_scope('encoder_0', reuse=tf.AUTO_REUSE):
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point,
                                                           radius=bradius * 0.05, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer1')
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point // 2,
                                                           radius=bradius * 0.1, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point // 4,
                                                           radius=bradius * 0.2, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer3')
        l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point // 8,
                                                           radius=bradius * 0.3, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer4')
        l5_xyz, l5_points, l5_indices = pointnet_sa_module(l4_xyz, l4_points, npoint=num_point // 16,
                                                           radius=bradius * 0.4, bn=use_bn, ibn=use_ibn,
                                                           nsample=32, mlp=[512, 512, 1024], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer5')
        gl_xyz, gl_points, gl_indices = pointnet_sa_module(l5_xyz, l5_points, npoint=1, radius=bradius * 0.3,
                                                           bn=use_bn, ibn=use_ibn, nsample=32,
                                                           mlp=[512, 512, 1024], mlp2=None, group_all=True,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer6')
        gl_feature = tf.reduce_max(gl_points, axis=1)  # global feature vector

        # Feature Propagation layers: propagate every level directly back to l0
        up_gl_points = pointnet_fp_module(l0_xyz, gl_xyz, None, gl_points, [64], is_training, bn_decay,
                                          scope='fa_layer0', bn=use_bn, ibn=use_ibn)
        up_l5_points = pointnet_fp_module(l0_xyz, l5_xyz, None, l5_points, [64], is_training, bn_decay,
                                          scope='fa_layer1', bn=use_bn, ibn=use_ibn)
        up_l4_points = pointnet_fp_module(l0_xyz, l4_xyz, None, l4_points, [64], is_training, bn_decay,
                                          scope='fa_layer2', bn=use_bn, ibn=use_ibn)
        up_l3_points = pointnet_fp_module(l0_xyz, l3_xyz, None, l3_points, [64], is_training, bn_decay,
                                          scope='fa_layer3', bn=use_bn, ibn=use_ibn)
        up_l2_points = pointnet_fp_module(l0_xyz, l2_xyz, None, l2_points, [64], is_training, bn_decay,
                                          scope='fa_layer4', bn=use_bn, ibn=use_ibn)

    # concat features and expand up_ratio times via independent branches
    with tf.variable_scope('up_layer', reuse=tf.AUTO_REUSE):
        new_points_list = []
        for i in range(up_ratio):
            if i > 3:
                # later branches see residual (global minus local) features plus a
                # learned rigid transform of the input coordinates
                transform = input_transform_net(l0_xyz, is_training, bn_decay, K=3)
                xyz_transformed = tf.matmul(l0_xyz, transform)
                concat_feat = tf.concat([up_gl_points, up_gl_points - up_l5_points,
                                         up_gl_points - up_l4_points, up_gl_points - up_l3_points,
                                         up_gl_points - up_l2_points, up_gl_points - l1_points,
                                         xyz_transformed], axis=-1)
            else:
                concat_feat = tf.concat([up_gl_points, up_l5_points, up_l4_points, up_l3_points,
                                         up_l2_points, l1_points, l0_xyz], axis=-1)
            concat_feat = tf.expand_dims(concat_feat, axis=2)
            concat_feat = tf_util2.conv2d(concat_feat, 256, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                          is_training=is_training, scope='fc_layer0_%d' % (i), bn_decay=bn_decay)
            new_points = tf_util2.conv2d(concat_feat, 128, [1, 1], padding='VALID', stride=[1, 1], bn=use_bn,
                                         is_training=is_training, scope='conv_%d' % (i), bn_decay=bn_decay)
            new_points_list.append(new_points)
        net = tf.concat(new_points_list, axis=1)

        # coarse coordinate regression
        coord_feature = tf_util2.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                        is_training=is_training, scope='fc_layer1', bn_decay=bn_decay)
        coord = tf_util2.conv2d(coord_feature, 3, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                is_training=is_training, scope='fc_layer2', bn_decay=bn_decay,
                                activation_fn=None, weight_decay=0.0)  # B x (up_ratio*N) x 1 x 3
        coarse_highres = tf.squeeze(coord, [2])  # B x (up_ratio*N) x 3
        coord_feature = tf.squeeze(coord_feature, [2])

        # subsample with FPS, then keep the num_coarse highest-scoring points
        fps_idx = farthest_point_sample(self.num_fine // 2, coarse_highres)
        coord_feature = gather_point(coord_feature, fps_idx)
        coarse_fps = gather_point(coarse_highres, fps_idx)
        coord_feature = tf.expand_dims(coord_feature, 2)

        score = tf_util2.conv2d(coord_feature, 16, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                is_training=is_training, scope='fc_layer3', bn_decay=bn_decay)
        score = tf_util2.conv2d(score, 8, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                is_training=is_training, scope='fc_layer4', bn_decay=bn_decay)
        score = tf_util2.conv2d(score, 1, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                is_training=is_training, scope='fc_layer5', bn_decay=bn_decay)
        score = tf.nn.softplus(score)
        score = tf.squeeze(score, [2, 3])
        _, idx = tf.math.top_k(score, self.num_coarse)
        coarse = gather_point(coarse_fps, idx)
        coord_feature = tf.squeeze(coord_feature, [2])
        coord_feature = gather_point(coord_feature, idx)

    # Folding decoder: every coarse seed is expanded into grid_size**2 fine
    # points, so self.num_fine == self.grid_size ** 2 * self.num_coarse.
    with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
        grid = tf.meshgrid(tf.linspace(-0.05, 0.05, self.grid_size),
                           tf.linspace(-0.05, 0.05, self.grid_size))
        grid = tf.expand_dims(tf.reshape(tf.stack(grid, axis=2), [-1, 2]), 0)  # 1 x grid_size**2 x 2
        grid_feat = tf.tile(grid, [coarse.shape[0], self.num_coarse, 1])       # B x num_fine x 2

        point_feat = tf.tile(tf.expand_dims(tf.concat([coarse, coord_feature], axis=-1), 2),
                             [1, 1, self.grid_size ** 2, 1])
        point_feat = tf.reshape(point_feat, [coarse.shape[0], self.num_fine, -1])

        global_feat = tf.tile(tf.expand_dims(gl_feature, 1), [1, self.num_fine, 1])
        feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)

        center = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
        center = tf.reshape(center, [-1, self.num_fine, 3])

        fine = mlp_conv(feat, [512, 512, 3]) + center

    return coarse_highres, coarse, fine
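# --- Shape sketch (illustrative, not from the original repo): the folding-grid
# construction used by the decoder above. Each of num_coarse seeds gets a tiled
# grid_size x grid_size patch of 2-D offsets, giving num_fine = grid_size**2 *
# num_coarse rows. Function name and the extent default are assumptions.
import tensorflow as tf

def make_folding_grid(batch_size, num_coarse, grid_size=2, extent=0.05):
    g = tf.meshgrid(tf.linspace(-extent, extent, grid_size),
                    tf.linspace(-extent, extent, grid_size))
    grid = tf.reshape(tf.stack(g, axis=2), [1, -1, 2])   # (1, grid_size**2, 2)
    return tf.tile(grid, [batch_size, num_coarse, 1])    # (B, grid_size**2 * num_coarse, 2)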
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_rv=False, use_bn=False,
                  use_ibn=False, use_normal=False, bn_decay=None, up_ratio=4):
    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:, :, 0:3]
        if use_normal:
            l0_points = point_cloud[:, :, 3:]
        else:
            l0_points = None

        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point, radius=0.05,
                                                           bn=use_bn, ibn=use_ibn, nsample=32, mlp=[32, 32, 64],
                                                           mlp2=None, group_all=False, is_training=is_training,
                                                           bn_decay=bn_decay, scope='layer1')
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point // 2, radius=0.1,
                                                           bn=use_bn, ibn=use_ibn, nsample=32, mlp=[64, 64, 128],
                                                           mlp2=None, group_all=False, is_training=is_training,
                                                           bn_decay=bn_decay, scope='layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point // 4, radius=0.2,
                                                           bn=use_bn, ibn=use_ibn, nsample=32, mlp=[128, 128, 256],
                                                           mlp2=None, group_all=False, is_training=is_training,
                                                           bn_decay=bn_decay, scope='layer3')
        l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point // 8, radius=0.3,
                                                           bn=use_bn, ibn=use_ibn, nsample=32, mlp=[256, 256, 512],
                                                           mlp2=None, group_all=False, is_training=is_training,
                                                           bn_decay=bn_decay, scope='layer4')

        # # combine random variables into the network
        # if use_rv:
        #     rv = tf.tile(tf.random_normal([batch_size, 1, 128], mean=0.0, stddev=1.0), [1, 16, 1])
        #     l4_points = tf.concat((l4_points, rv), axis=-1)

        # Feature Propagation layers
        l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256, 256], is_training, bn_decay,
                                       scope='fa_layer1', bn=use_bn, ibn=use_ibn)
        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay,
                                       scope='fa_layer2', bn=use_bn, ibn=use_ibn)
        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay,
                                       scope='fa_layer3', bn=use_bn, ibn=use_ibn)
        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_xyz, l1_points, [128, 128, 128], is_training, bn_decay,
                                       scope='fa_layer4', bn=use_bn, ibn=use_ibn)

        # FC layers: widen the channels by up_ratio, then fold the extra channels
        # into up_ratio*N points via a reshape (the original hard-coded factor 4)
        feat_num = l0_points.get_shape()[2].value
        l0_points = tf.expand_dims(l0_points, axis=2)
        net = tf_util2.conv2d(l0_points, feat_num * up_ratio, 1, padding='VALID', bn=use_bn,
                              is_training=is_training, scope='fc1', bn_decay=bn_decay)
        net = tf.reshape(net, [batch_size, up_ratio * num_point, 1, -1])
        net = tf_util2.conv2d(net, 64, 1, padding='VALID', bn=use_bn, is_training=is_training,
                              scope='fc2', bn_decay=bn_decay)
        net = tf_util2.conv2d(net, 3, 1, padding='VALID', bn=use_bn, is_training=is_training,
                              scope='fc3', bn_decay=bn_decay, activation_fn=None)
        net = tf.squeeze(net, [2])  # B x (up_ratio*N) x 3

    return net, None, None
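# --- Shape sketch (illustrative, not from the original repo): the reshape-based
# upsampling used above, in isolation. A conv that widens the channels by a
# factor of up_ratio, followed by a reshape along the point axis, turns
# (B, N, 1, C*r) into (B, r*N, 1, C); each point's C*r channels become r
# consecutive points of C channels. The function name is hypothetical.
import tensorflow as tf

def reshape_upsample(features, up_ratio):
    # features: (B, N, 1, C * up_ratio)
    b = tf.shape(features)[0]
    n = features.get_shape()[1].value
    return tf.reshape(features, [b, up_ratio * n, 1, -1])  # (B, up_ratio*N, 1, C)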
def get_model(point_cloud, cls_label, normals, axis_x, axis_y, kernel, scale, interp, fit, is_training,
              classes=50, bn_decay=None, d=1, knn=1, nsample=16, use_xyz_feature=True):
    """ Part segmentation A-CNN, input is points BxNx3 and normals BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_normals = normals  # the normals are used as an additional input
    l0_axis_x = axis_x
    l0_axis_y = axis_y
    l0_points = None
    xyz_feature = None

    # Set Abstraction (lfnet) layers
    l1_xyz, l1_points, l1_normals, l1_axis_x, l1_axis_y, kernel_out, weight, kernel_fit, xyz_feature1 = lfnet_module(
        kernel, scale, interp, fit, l0_xyz, l0_points, l0_normals, l0_axis_x, l0_axis_y, xyz_feature,
        1024, [0.1], nsample[0], [[16, 16, 32], [64, 96, 128]], is_training, bn_decay,
        mlp=[16, 16, 32], first_layer=True, scope='layer1', d=d, knn=knn, use_xyz_feature=use_xyz_feature)
    l2_xyz, l2_points, l2_normals, l2_axis_x, l2_axis_y, _, _, _, xyz_feature2 = lfnet_module(
        kernel, scale, interp, fit, l1_xyz, l1_points, l1_normals, l1_axis_x, l1_axis_y, xyz_feature1,
        256, [0.2], nsample[1], [[32, 32, 64], [128, 128, 256]], is_training, bn_decay,
        mlp=[32, 32, 64], scope='layer2', d=d, knn=knn, use_xyz_feature=use_xyz_feature)
    l3_xyz, l3_points, l3_normals, l3_axis_x, l3_axis_y, _, _, _, xyz_feature3 = lfnet_module(
        kernel, scale, interp, fit, l2_xyz, l2_points, l2_normals, l2_axis_x, l2_axis_y, xyz_feature2,
        64, [0.4], nsample[1], [[64, 64, 128], [128, 128, 256]], is_training, bn_decay,
        mlp=[64, 64, 64], scope='layer3', d=d, knn=knn, use_xyz_feature=use_xyz_feature)
    l4_xyz, l4_points, l4_normals, l4_axis_x, l4_axis_y, _, _, _, xyz_feature4 = lfnet_module(
        kernel, scale, interp, fit, l3_xyz, l3_points, l3_normals, l3_axis_x, l3_axis_y, xyz_feature3,
        16, [0.8], nsample[1], [[128, 128, 256], [128, 128, 256]], is_training, bn_decay,
        mlp=[64, 64, 64], scope='layer4', d=d, knn=knn, use_xyz_feature=use_xyz_feature)

    l4_points = tf_util.conv1d(l4_points, 128, 1, padding='VALID', bn=True, is_training=is_training,
                               scope='fc1', bn_decay=bn_decay)

    # Feature Propagation layers (this repo's pointnet_fp_module also takes the
    # per-level xyz_feature tensor)
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, xyz_feature4, [256, 128],
                                   is_training, bn_decay, scope='fa_layer1_up')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, xyz_feature3, [128, 128],
                                   is_training, bn_decay, scope='fa_layer2_up')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, xyz_feature2, [128, 128],
                                   is_training, bn_decay, scope='fa_layer3_up')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, xyz_feature1, [128, 128],
                                   is_training, bn_decay, scope='fa_layer4')

    # global feature, tiled to every point
    l4_feature = tf.tile(tf.reduce_max(l4_points, axis=1, keep_dims=True), [1, num_point, 1])
    concat = tf.concat([l0_points, l4_feature], axis=-1)

    # FC layers
    net = tf_util.conv1d(concat, 256, 1, padding='VALID', bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.6, is_training=is_training, scope='dp2')
    net = tf_util.conv1d(net, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc3', bn_decay=bn_decay)
    net = tf_util.conv1d(net, classes, 1, padding='VALID', activation_fn=None, scope='fc4')
    return net, end_points, kernel_out, weight, kernel_fit
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_rv=False, use_bn=False,
                  use_ibn=False, use_normal=False, bn_decay=None, up_ratio=4, idx=None):
    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:, :, 0:3]
        if use_normal:
            l0_points = point_cloud[:, :, 3:]
        else:
            l0_points = None

        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(
            l0_xyz, l0_points, npoint=num_point, radius=bradius * 0.05, bn=use_bn, ibn=use_ibn,
            nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False,
            is_training=is_training, bn_decay=bn_decay, scope='layer1')
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz, l1_points, npoint=num_point // 2, radius=bradius * 0.1, bn=use_bn, ibn=use_ibn,
            nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False,
            is_training=is_training, bn_decay=bn_decay, scope='layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz, l2_points, npoint=num_point // 4, radius=bradius * 0.2, bn=use_bn, ibn=use_ibn,
            nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False,
            is_training=is_training, bn_decay=bn_decay, scope='layer3')
        l4_xyz, l4_points, l4_indices = pointnet_sa_module(
            l3_xyz, l3_points, npoint=num_point // 8, radius=bradius * 0.3, bn=use_bn, ibn=use_ibn,
            nsample=32, mlp=[256, 256, 512], mlp2=None, group_all=False,
            is_training=is_training, bn_decay=bn_decay, scope='layer4')

        # Feature Propagation layers: propagate every level directly back to l0
        up_l4_points = pointnet_fp_module(l0_xyz, l4_xyz, None, l4_points, [64], is_training, bn_decay,
                                          scope='fa_layer1', bn=use_bn, ibn=use_ibn)
        up_l3_points = pointnet_fp_module(l0_xyz, l3_xyz, None, l3_points, [64], is_training, bn_decay,
                                          scope='fa_layer2', bn=use_bn, ibn=use_ibn)
        up_l2_points = pointnet_fp_module(l0_xyz, l2_xyz, None, l2_points, [64], is_training, bn_decay,
                                          scope='fa_layer3', bn=use_bn, ibn=use_ibn)

        # concat features and expand up_ratio times via independent branches
        with tf.variable_scope('up_layer', reuse=reuse):
            new_points_list = []
            for i in range(up_ratio):
                concat_feat = tf.concat([up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz], axis=-1)
                concat_feat = tf.expand_dims(concat_feat, axis=2)
                concat_feat = tf_util2.conv2d(concat_feat, 256, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                              is_training=is_training, scope='fc_layer0_%d' % (i), bn_decay=bn_decay)
                new_points = tf_util2.conv2d(concat_feat, 128, [1, 1], padding='VALID', stride=[1, 1], bn=use_bn,
                                             is_training=is_training, scope='conv_%d' % (i), bn_decay=bn_decay)
                new_points_list.append(new_points)
            net = tf.concat(new_points_list, axis=1)

        # get the xyz
        coord = tf_util2.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                is_training=is_training, scope='fc_layer1', bn_decay=bn_decay)
        coord = tf_util2.conv2d(coord, 3, [1, 1], padding='VALID', stride=[1, 1], bn=False,
                                is_training=is_training, scope='fc_layer2', bn_decay=bn_decay,
                                activation_fn=None, weight_decay=0.0)  # B x (up_ratio*N) x 1 x 3
        coord = tf.squeeze(coord, [2])  # B x (up_ratio*N) x 3

    return coord, None
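# --- Sketch (illustrative, not from the original repo): the branch-based
# upsampling used above, contrasted with the reshape trick earlier. Each of
# up_ratio independent 1x1-conv branches maps the same (B, N, 1, F) feature map
# to (B, N, 1, C); concatenating along the point axis yields (B, up_ratio*N, 1, C),
# one learned "copy" of the cloud per branch. Names here are hypothetical.
import tensorflow as tf

def branch_upsample(feat, up_ratio, out_channels=128):
    branches = []
    for i in range(up_ratio):
        w = tf.get_variable('branch_w_%d' % i,
                            shape=(1, 1, feat.get_shape()[-1].value, out_channels))
        branches.append(tf.nn.conv2d(feat, w, strides=[1, 1, 1, 1], padding='VALID'))
    return tf.concat(branches, axis=1)  # (B, up_ratio*N, 1, out_channels)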
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet++, input is BxNxF, output BxNxnum_class """
    end_points = {}
    # COG
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    # Features
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, -1])
    end_points['l0_xyz'] = l0_xyz

    # Set Abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, npoint=4096,
                                               radius_list=[0.5, 1, 2], nsample_list=[8, 16, 32],
                                               mlp_list=[[32, 32, 64], [64, 64, 128], [128, 128, 256]],
                                               is_training=is_training, pooling='max_and_avg',
                                               bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, npoint=256,
                                               radius_list=[2, 4, 8], nsample_list=[32, 64, 128],
                                               mlp_list=[[128, 128, 256], [256, 256, 512], [256, 256, 512]],
                                               is_training=is_training, pooling='max_and_avg',
                                               bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points = pointnet_sa_module_msg(l2_xyz, l2_points, npoint=32,
                                               radius_list=[4, 8, 16], nsample_list=[64, 64, 128],
                                               mlp_list=[[256, 256, 512], [512, 512, 1024], [512, 1024, 1024]],
                                               is_training=is_training, pooling='max_and_avg',
                                               bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [512, 512], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 128, 128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points