Example #1
def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util2.conv2d(input_image,
                          64, [1, 3],
                          padding='VALID',
                          stride=[1, 1],
                          bn=False,
                          is_training=is_training,
                          scope='tconv1',
                          bn_decay=bn_decay)
    net = tf_util2.conv2d(net,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=False,
                          is_training=is_training,
                          scope='tconv2',
                          bn_decay=bn_decay)
    net = tf_util2.conv2d(net,
                          512, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=False,
                          is_training=is_training,
                          scope='tconv3',
                          bn_decay=bn_decay)
    net = tf.layers.max_pooling2d(net, [num_point, 1], (1, 1),
                                  padding='VALID',
                                  name='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    # net = tf_util2.fully_connected(net, 512, bn=False, is_training=is_training,
    #                               scope='tfc1', bn_decay=bn_decay)
    net = tf_util2.fully_connected(net,
                                   256,
                                   bn=False,
                                   is_training=is_training,
                                   scope='tfc2',
                                   bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        assert (K == 3)
        weights = tf.get_variable('weights', [256, 3 * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [3 * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
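Usage sketch (illustrative shapes, TF 1.x graph mode; a static batch size is required because the .value shape lookups above need fully defined shapes):

import tensorflow as tf

points = tf.placeholder(tf.float32, shape=(32, 1024, 3))      # B x N x 3
is_training_pl = tf.placeholder(tf.bool, shape=())

transform = input_transform_net(points, is_training_pl, K=3)  # B x 3 x 3
aligned = tf.matmul(points, transform)                        # B x N x 3, canonicalized input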
Example #2
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training,
                       bn_decay, scope, bn=True, ibn=False, pooling='max', tnet_spec=None, knn=False, use_xyz=True):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- if True, group all points into one region,
                overriding the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    with tf.variable_scope(scope) as sc:
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec, knn, use_xyz)
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util2.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, ibn=ibn, is_training=is_training,
                                        scope='conv%d'%(i), bn_decay=bn_decay) 
        if pooling=='avg':
            new_points = tf.layers.average_pooling2d(new_points, [1,nsample], [1,1], padding='VALID', name='avgpool1')
        elif pooling=='weighted_avg':
            with tf.variable_scope('weighted_avg1'):
                dists = tf.norm(grouped_xyz,axis=-1,ord=2,keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists/tf.reduce_sum(exp_dists,axis=2,keep_dims=True) # (batch_size, npoint, nsample, 1)
                new_points *= weights # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling=='max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
        elif pooling=='min':
            new_points = tf.layers.max_pooling2d(-1 * new_points, [1, nsample], [1, 1], padding='VALID', name='minpool1')  # max-pool of -x, i.e. the negated minimum
        elif pooling=='max_and_avg':
            max_points = tf.layers.max_pooling2d(new_points, [1,nsample], [1,1], padding='VALID', name='maxpool1')
            avg_points = tf.layers.average_pooling2d(new_points, [1,nsample],[1,1], padding='VALID', name='avgpool1')
            new_points = tf.concat([max_points, avg_points], axis=-1)
            
        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            new_points = tf_util2.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, ibn=ibn,is_training=is_training,
                                        scope='conv_post_%d'%(i), bn_decay=bn_decay) 
        new_points = tf.squeeze(new_points, [2]) # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points, idx
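Usage sketch for one abstraction level (illustrative shapes; assumes the custom sampling/grouping ops and tf_util2 from this codebase are on the path):

import tensorflow as tf

xyz = tf.placeholder(tf.float32, shape=(16, 2048, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

# Sample 512 centroids, group 32 neighbours within radius 0.2 around each,
# run the per-point MLP [64, 64, 128], then max-pool every local region.
new_xyz, new_points, idx = pointnet_sa_module(
    xyz, None, npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training_pl, bn_decay=None, scope='sa1')
# new_xyz: (16, 512, 3); new_points: (16, 512, 128); idx: (16, 512, 32)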
Example #3
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True,ibn=False):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util2.conv2d(new_points1, num_out_channel, [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=bn, ibn=ibn,is_training=is_training,
                                         scope='conv_%d'%(i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
        return new_points1
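Usage sketch (illustrative shapes; assumes the three_nn/three_interpolate ops are available):

import tensorflow as tf

dense_xyz = tf.placeholder(tf.float32, shape=(16, 2048, 3))
sparse_xyz = tf.placeholder(tf.float32, shape=(16, 512, 3))
sparse_feats = tf.placeholder(tf.float32, shape=(16, 512, 128))
is_training_pl = tf.placeholder(tf.bool, shape=())

# Interpolate sparse features onto the dense points (inverse-distance
# weights over the 3 nearest neighbours), then refine with a small MLP.
dense_feats = pointnet_fp_module(dense_xyz, sparse_xyz, None, sparse_feats,
                                 [128, 128], is_training_pl, None, scope='fp1')
# dense_feats: (16, 2048, 128)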
Example #4
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           ibn=False,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in each local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output sizes for the MLP at each scale
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k mlp_list[k][-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.expand_dims(new_xyz, 2)
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util2.conv2d(grouped_points,
                                                 num_out_channel, [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 ibn=ibn,
                                                 is_training=is_training,
                                                 scope='conv%d_%d' % (i, j),
                                                 bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
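Usage sketch (illustrative shapes and radii):

import tensorflow as tf

xyz = tf.placeholder(tf.float32, shape=(16, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

# Three scales around the same 256 FPS centroids; per-scale features are
# max-pooled and concatenated, giving 64 + 128 + 128 = 320 channels.
new_xyz, new_points = pointnet_sa_module_msg(
    xyz, None, npoint=256,
    radius_list=[0.1, 0.2, 0.4], nsample_list=[16, 32, 64],
    mlp_list=[[32, 64], [64, 128], [64, 128]],
    is_training=is_training_pl, bn_decay=None, scope='sa_msg1')
# new_xyz: (16, 256, 3); new_points: (16, 256, 320)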
Example #5
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_rv=False, use_bn=False, use_ibn=False,
                  use_normal=False, bn_decay=None, up_ratio=4):

    with tf.variable_scope(scope,reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:,:,0:3]
        if use_normal:
            l0_points = point_cloud[:,:,3:]
        else:
            l0_points = None

        # Layer 1
        l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, npoint=num_point, radius_list=[0.05,0.1,0.15],
                                                               nsample_list=[32,32,32],
                                                               mlp_list =[[32,32,64],[32,32,64],[32,32,64]], is_training=is_training,
                                                               bn_decay=bn_decay, scope='layer1',
                                                               bn=use_bn,ibn = use_ibn, use_xyz=True)

        l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, npoint=num_point//2, radius_list=[0.1,0.2,0.3],
                                                               nsample_list=[32,32,32],
                                                               mlp_list=[[64,64,128],[64,64,128],[64,64,128]], is_training=is_training,
                                                               bn_decay=bn_decay,scope='layer2',
                                                               bn=use_bn,ibn = use_ibn, use_xyz=True)

        l3_xyz, l3_points = pointnet_sa_module_msg(l2_xyz, l2_points, npoint=num_point//4, radius_list=[0.2,0.3,0.4],
                                                               nsample_list=[32,32,32],
                                                               mlp_list=[[128,128,256],[128,128,256],[128,128,256]], is_training=is_training,
                                                               bn_decay=bn_decay,scope='layer3',
                                                               bn=use_bn, ibn = use_ibn, use_xyz=True)

        l4_xyz, l4_points = pointnet_sa_module_msg(l3_xyz, l3_points, npoint=num_point//8, radius_list=[0.3,0.4,0.5],
                                                               nsample_list=[32,32,32],
                                                               mlp_list=[[256,256,512],[256,256,512],[256,256,512]], is_training=is_training,
                                                               bn_decay=bn_decay,scope='layer4',
                                                               bn=use_bn, ibn = use_ibn, use_xyz=True)
        # # combine random variables into the network
        # if use_rv:
        #     rv = tf.tile(tf.random_normal([batch_size, 1, 128], mean=0.0, stddev=1.0), [1, 16, 1])
        #     l4_points = tf.concat((l4_points, rv), axis=-1)


        # Feature Propagation layers
        l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay,
                                       scope='fa_layer1',bn=use_bn,ibn = use_ibn)

        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay,
                                       scope='fa_layer2',bn=use_bn,ibn = use_ibn)

        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay,
                                       scope='fa_layer3',bn=use_bn,ibn = use_ibn)

        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_xyz, l1_points, [128,128,128], is_training, bn_decay,
                                          scope='fa_layer4', bn=use_bn, ibn=use_ibn)

        # concat_features = tf.concat((l0_xyz, l0_points), axis=2)
        # feat_num = concat_features.get_shape()[2].value

        ###FC layer
        l0_points = tf.expand_dims(l0_points,axis=2)
        net = tf_util2.conv2d(l0_points, 128*4, [1, 1], padding='VALID', bn=use_bn, is_training=is_training,
                              scope='fc1', bn_decay=bn_decay)
        net = tf.reshape(net, [batch_size, 4*num_point, 1, -1])

        coord = tf_util2.conv2d(net, 64, [1, 1], padding='VALID', bn=use_bn, is_training=is_training,
                                scope='fc2', bn_decay=bn_decay)

        coord = tf_util2.conv2d(coord, 3, [1, 1], padding='VALID', bn=use_bn, is_training=is_training,
                                scope='fc3', bn_decay=bn_decay, activation_fn=None)
        coord = tf.squeeze(coord, [2])
        # coord = tf.squeeze(coord, [2])  # B*(2N)*3

        # get the normal (computed here but not returned below)
        normal = tf_util2.conv2d(net, 64, [1, 1],
                                 padding='VALID', stride=[1, 1],
                                 bn=False, is_training=is_training,
                                 scope='norm_fc_layer1', bn_decay=bn_decay)

        normal = tf_util2.conv2d(normal, 3, [1, 1],
                                 padding='VALID', stride=[1, 1],
                                 bn=False, is_training=is_training,
                                 scope='norm_fc_layer2', bn_decay=bn_decay,
                                 activation_fn=None, weight_decay=0.0)  # B*(2N)*1*3
        normal = tf.squeeze(normal, [2])  # B*(2N)*3


    return coord, None, None
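The FC-layer block above realizes the 4x upsampling with a plain reshape: the conv widens the channels by up_ratio, and the reshape folds them back into the point axis. A toy sketch of just that step (shapes illustrative):

import tensorflow as tf

B, N, C, r = 2, 8, 16, 4                    # r = up_ratio
feat = tf.random_normal([B, N, 1, r * C])   # output of the widening conv
up = tf.reshape(feat, [B, r * N, 1, -1])    # (2, 32, 1, 16): each input point
                                            # becomes r consecutive output points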
Example #6
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_bn=False, use_ibn=False,
                  use_normal=False, bn_decay=None, up_ratio=4, num_addpoint=600, idx=None, is_crop=False):

    print "Crop flag is ",is_crop

    with tf.variable_scope(scope,reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:,:,0:3]
        if use_normal:
            l0_points = point_cloud[:,:,3:]
        else:
            l0_points = None
        # Layer 1
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point, radius=bradius*0.1,bn=use_bn,ibn = use_ibn,
                                                           nsample=12, mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer1')

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point//2, radius=bradius*0.2,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer2')

        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point//4, radius=bradius*0.4,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer3')

        l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point//8, radius=bradius*0.6,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer4')

        # Feature Propagation layers
        if (not is_training) or is_crop:
            l0_xyz = tf.gather_nd(l0_xyz, idx[:, :num_point // 2, :])
            l1_points = tf.gather_nd(l1_points, idx[:, :num_point // 2, :])


        up_l4_points = pointnet_fp_module(l0_xyz, l4_xyz, None, l4_points, [64], is_training, bn_decay,
                                       scope='fa_layer1',bn=use_bn,ibn = use_ibn)

        up_l3_points = pointnet_fp_module(l0_xyz, l3_xyz, None, l3_points, [64], is_training, bn_decay,
                                       scope='fa_layer2',bn=use_bn,ibn = use_ibn)

        up_l2_points = pointnet_fp_module(l0_xyz, l2_xyz, None, l2_points, [64], is_training, bn_decay,
                                       scope='fa_layer3',bn=use_bn,ibn = use_ibn)

        feat = tf.concat([up_l4_points, up_l3_points, up_l2_points, l1_points,l0_xyz], axis=-1)
        feat = tf.expand_dims(feat, axis=2)

        # branch 1: distance to the edge
        with tf.variable_scope('up_layer', reuse=reuse):
            up_feat_list = []
            for i in range(up_ratio):
                up_feat = tf_util2.conv2d(feat, 256, [1, 1],
                                          padding='VALID', stride=[1, 1],
                                          bn=False, is_training=is_training,
                                          scope='conv1_%d' % (i), bn_decay=bn_decay)

                up_feat = tf_util2.conv2d(up_feat, 128, [1, 1],
                                          padding='VALID', stride=[1, 1],
                                          bn=use_bn, is_training=is_training,
                                          scope='conv2_%d' % (i),
                                          bn_decay=bn_decay)
                up_feat_list.append(up_feat)
        up_feat = tf.concat(up_feat_list, axis=1)
        dist_feat = tf_util2.conv2d(up_feat, 64, [1, 1],
                                padding='VALID', stride=[1, 1],
                                bn=False, is_training=is_training,
                                scope='dist_fc1', bn_decay=bn_decay, weight_decay=0.0)
        dist = tf_util2.conv2d(dist_feat, 1, [1, 1],
                               padding='VALID', stride=[1, 1],
                               bn=False, is_training=is_training,
                               scope='dist_fc2', bn_decay=bn_decay,
                               activation_fn=None, weight_decay=0.0)
        dist = tf.squeeze(dist, axis=[2, 3])

        # branch 2: the newly generated point coordinates
        combined_feat = tf.concat((up_feat, dist_feat),axis=-1)
        coord_feat = tf_util2.conv2d(combined_feat, 64, [1, 1],
                               padding='VALID', stride=[1, 1],
                               bn=False, is_training=is_training,
                               scope='coord_fc1', bn_decay=bn_decay,weight_decay=0.0)
        r_coord = tf_util2.conv2d(coord_feat, 3, [1, 1],
                                padding='VALID', stride=[1, 1],
                                bn=False, is_training=is_training,
                                scope='coord_fc2', bn_decay=bn_decay,
                                activation_fn=None,weight_decay=0.0)
        coord = tf.squeeze(r_coord, [2])

        # prune the points according to probability(how to better prune it? as a guidance???)
        # poolsize = int(num_addpoint * 1.2)
        # val,idx1 = tf.nn.top_k(-dist,poolsize)
        # tmp_idx0 = tf.tile(tf.reshape(tf.range(batch_size),(batch_size,1)),(1,num_addpoint))
        # tmp_idx1 = tf.random_uniform((batch_size,num_addpoint),0,poolsize,tf.int32)
        # idx1 = tf.gather_nd(idx1,tf.stack([tmp_idx0,tmp_idx1],axis=-1))
        edge_dist, idx1 = tf.nn.top_k(-dist, num_addpoint)
        idx0 = tf.tile(tf.reshape(tf.range(batch_size),(batch_size,1)),(1,num_addpoint))
        idx = tf.stack([idx0,idx1],axis=-1)

    return dist, coord, idx, None
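A sketch of consuming the outputs (illustrative shapes; assumes this definition of get_gen_model is in scope). idx stacks batch and point indices into a (B, num_addpoint, 2) tensor built from the top-k of -dist, so the points predicted closest to an edge are recovered with tf.gather_nd; note the crop branch tests is_training as a Python bool at graph-build time:

import tensorflow as tf

point_cloud = tf.placeholder(tf.float32, shape=(4, 4096, 3))
dist, coord, idx, _ = get_gen_model(point_cloud, is_training=True,
                                    scope='generator', num_addpoint=600)
edge_points = tf.gather_nd(coord, idx)   # (4, 600, 3): candidates nearest an edge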
Example #7
def get_gen_model(point_cloud,
                  is_training,
                  scope,
                  bradius=1.0,
                  reuse=None,
                  use_rv=False,
                  use_bn=False,
                  use_ibn=False,
                  use_normal=False,
                  bn_decay=None,
                  up_ratio=4):

    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        pl_xyz = point_cloud[:, :, 0:3]
        input_pl = tf.expand_dims(pl_xyz, -1)

        net1 = tf_util2.conv2d(input_pl,
                               64, [1, 3],
                               padding='VALID',
                               stride=[1, 1],
                               bn=use_bn,
                               is_training=is_training,
                               scope='conv1',
                               bn_decay=bn_decay)
        net2 = tf_util2.conv2d(net1,
                               128, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=use_bn,
                               is_training=is_training,
                               scope='conv2',
                               bn_decay=bn_decay)

        net3 = tf_util2.conv2d(net2,
                               256, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=use_bn,
                               is_training=is_training,
                               scope='conv3',
                               bn_decay=bn_decay)
        net4 = tf_util2.conv2d(net3,
                               1024, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=use_bn,
                               is_training=is_training,
                               scope='conv4',
                               bn_decay=bn_decay)
        global_feat = tf_util.max_pool2d(net4, [num_point, 1],
                                         padding='VALID',
                                         scope='maxpool')

        global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
        original_xyz = tf.expand_dims(pl_xyz, 2)
        concat_feat = tf.concat(
            [original_xyz, net1, net2, net3, global_feat_expand], 3)

        feature_num = concat_feat.get_shape()[3].value
        net = tf_util2.conv2d(concat_feat,
                              feature_num * 4, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=use_bn,
                              is_training=is_training,
                              scope='conv5',
                              bn_decay=bn_decay)
        net = tf.squeeze(net, [2])
        net = tf.reshape(net, [batch_size, 4 * num_point, feature_num])
        net = tf.expand_dims(net, [2])

        net = tf_util2.conv2d(net,
                              1024, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=use_bn,
                              is_training=is_training,
                              scope='conv6',
                              bn_decay=bn_decay)
        net = tf_util2.conv2d(net,
                              512, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=use_bn,
                              is_training=is_training,
                              scope='conv7',
                              bn_decay=bn_decay)
        net = tf_util2.conv2d(net,
                              64, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=use_bn,
                              is_training=is_training,
                              scope='conv8',
                              bn_decay=bn_decay)
        net = tf_util2.conv2d(net,
                              3, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=use_bn,
                              is_training=is_training,
                              scope='conv9',
                              bn_decay=bn_decay,
                              activation_fn=None)
        net = tf.squeeze(net, [2])

    return net, None, None
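A sketch of driving this variant (illustrative shapes; unlike the hierarchical versions it needs no custom sampling ops, only tf_util/tf_util2):

import tensorflow as tf

point_cloud = tf.placeholder(tf.float32, shape=(8, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

dense_xyz, _, _ = get_gen_model(point_cloud, is_training_pl,
                                scope='generator', up_ratio=4)
# dense_xyz: (8, 4096, 3) -- up_ratio times the input points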
Example #8
    def completion(self, inputs, is_training):
        num_point = inputs.get_shape()[1].value
        l0_xyz = inputs[:,:,0:3]
        l0_points = None

        bradius = 1.0
        use_bn = False
        use_ibn = False
        bn_decay = 0.95
        up_ratio = 8

        self.grid_size = 2
        self.num_coarse = int(num_point * up_ratio / 4)

        with tf.variable_scope('encoder_0', reuse=tf.AUTO_REUSE):
            l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point,
                                                               radius=bradius * 0.05, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer1')

            l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point // 2,
                                                               radius=bradius * 0.1, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[64, 64, 128], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer2')

            l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point // 4,
                                                               radius=bradius * 0.2, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[128, 128, 256], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer3')

            l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point // 8,
                                                               radius=bradius * 0.3, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[256, 256, 512], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer4')

            l5_xyz, l5_points, l5_indices = pointnet_sa_module(l4_xyz, l4_points, npoint=num_point // 16,
                                                               radius=bradius * 0.4, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[512, 512, 1024], mlp2=None,
                                                               group_all=False,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer5')

            gl_xyz, gl_points, gl_indices = pointnet_sa_module(l5_xyz, l5_points, npoint=1,
                                                               radius=bradius * 0.3, bn=use_bn, ibn=use_ibn,
                                                               nsample=32, mlp=[512, 512, 1024], mlp2=None,
                                                               group_all=True,
                                                               is_training=is_training, bn_decay=bn_decay,
                                                               scope='layer6')

            gl_feature = tf.reduce_max(gl_points, axis=1)

            print('gl_feature', gl_feature)

            # Feature Propagation layers

            up_gl_points = pointnet_fp_module(l0_xyz, gl_xyz, None, gl_points, [64], is_training, bn_decay,
                                              scope='fa_layer0', bn=use_bn, ibn=use_ibn)

            up_l5_points = pointnet_fp_module(l0_xyz, l5_xyz, None, l5_points, [64], is_training, bn_decay,
                                              scope='fa_layer1', bn=use_bn, ibn=use_ibn)

            up_l4_points = pointnet_fp_module(l0_xyz, l4_xyz, None, l4_points, [64], is_training, bn_decay,
                                              scope='fa_layer2', bn=use_bn, ibn=use_ibn)

            up_l3_points = pointnet_fp_module(l0_xyz, l3_xyz, None, l3_points, [64], is_training, bn_decay,
                                              scope='fa_layer3', bn=use_bn, ibn=use_ibn)

            up_l2_points = pointnet_fp_module(l0_xyz, l2_xyz, None, l2_points, [64], is_training, bn_decay,
                                              scope='fa_layer4', bn=use_bn, ibn=use_ibn)

            ###concat feature
        with tf.variable_scope('up_layer', reuse=tf.AUTO_REUSE):
            new_points_list = []
            for i in range(up_ratio):
                if i>3:
                    transform = input_transform_net(l0_xyz, is_training, bn_decay, K=3)
                    xyz_transformed = tf.matmul(l0_xyz, transform)

                    concat_feat = tf.concat([up_gl_points, up_gl_points-up_l5_points, up_gl_points-up_l4_points, up_gl_points-up_l3_points, up_gl_points-up_l2_points, up_gl_points-l1_points, xyz_transformed],
                                            axis=-1)
                    print('concat_feat1', concat_feat)
                else:
                    concat_feat = tf.concat([up_gl_points, up_l5_points, up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz],
                                            axis=-1)
                    print('concat_feat2', concat_feat)
                #concat_feat = tf.concat([up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz], axis=-1)
                concat_feat = tf.expand_dims(concat_feat, axis=2)
                concat_feat = tf_util2.conv2d(concat_feat, 256, [1, 1],
                                              padding='VALID', stride=[1, 1],
                                              bn=False, is_training=is_training,
                                              scope='fc_layer0_%d' % (i), bn_decay=bn_decay)

                new_points = tf_util2.conv2d(concat_feat, 128, [1, 1],
                                             padding='VALID', stride=[1, 1],
                                             bn=use_bn, is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay)
                new_points_list.append(new_points)
            net = tf.concat(new_points_list, axis=1)

            coord_feature = tf_util2.conv2d(net, 64, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer1', bn_decay=bn_decay)


            coord = tf_util2.conv2d(coord_feature, 3, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer2', bn_decay=bn_decay,
                                    activation_fn=None, weight_decay=0.0)  # B*(2N)*1*3

            coarse_highres = tf.squeeze(coord, [2])  # B*(2N)*3
            coord_feature = tf.squeeze(coord_feature, [2])
            fps_idx = farthest_point_sample(int(self.num_fine) // 2, coarse_highres)
            coord_feature = gather_point(coord_feature, fps_idx)
            coarse_fps = gather_point(coarse_highres, fps_idx)

            coord_feature = tf.expand_dims(coord_feature, 2)

            print('coord_feature', coord, coord_feature)

            score = tf_util2.conv2d(coord_feature, 16, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer3', bn_decay=bn_decay)

            score = tf_util2.conv2d(score, 8, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer4', bn_decay=bn_decay)

            score = tf_util2.conv2d(score, 1, [1, 1],
                                    padding='VALID', stride=[1, 1],
                                    bn=False, is_training=is_training,
                                    scope='fc_layer5', bn_decay=bn_decay)

            score = tf.nn.softplus(score)
            score = tf.squeeze(score, [2,3])

            _, idx = tf.math.top_k(score, self.num_coarse)

            coarse = gather_point(coarse_fps, idx)

            coord_feature = tf.squeeze(coord_feature, [2])
            coord_feature = gather_point(coord_feature, idx)

            print('coarse', coord_feature, coarse)


        with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
            grid = tf.meshgrid(tf.linspace(-0.05, 0.05, self.grid_size), tf.linspace(-0.05, 0.05, self.grid_size))
            print('grid:', grid)
            grid = tf.expand_dims(tf.reshape(tf.stack(grid, axis=2), [-1, 2]), 0)
            print('grid:', grid)
            grid_feat = tf.tile(grid, [coarse.shape[0], self.num_coarse, 1])
            print('grid_feat', grid_feat)

            point_feat = tf.tile(tf.expand_dims(tf.concat([coarse, coord_feature], axis=-1), 2), [1, 1, self.grid_size ** 2, 1])
            point_feat = tf.reshape(point_feat, [coarse.shape[0], self.num_fine, -1])
            print('point_feat', point_feat)

            global_feat = tf.tile(tf.expand_dims(gl_feature, 1), [1, self.num_fine, 1])

            #print('global_feat', global_feat)

            feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)
            print('feat:', feat)

            center = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
            center = tf.reshape(center, [-1, self.num_fine, 3])

            print('center', center)

            fine = mlp_conv(feat, [512, 512, 3]) + center
            print('fine:', fine)

        return coarse_highres, coarse, fine
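The folding stage expands each coarse point into grid_size**2 fine points by tiling a small 2D grid under it. A toy check of the grid construction used above (same values as the code: grid_size = 2, offsets in [-0.05, 0.05], batch of 1 assumed):

import tensorflow as tf

grid_size, num_coarse = 2, 256
g = tf.meshgrid(tf.linspace(-0.05, 0.05, grid_size),
                tf.linspace(-0.05, 0.05, grid_size))
grid = tf.expand_dims(tf.reshape(tf.stack(g, axis=2), [-1, 2]), 0)  # (1, 4, 2)
grid_feat = tf.tile(grid, [1, num_coarse, 1])  # (1, 1024, 2): 4 offsets per coarse point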
Example #9
def get_gen_model(point_cloud, is_training, scope, bradius=1.0, reuse=None, use_rv=False, use_bn=False, use_ibn=False,
                  use_normal=False, bn_decay=None, up_ratio=4):

    with tf.variable_scope(scope,reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:,:,0:3]
        if use_normal:
            l0_points = point_cloud[:,:,3:]
        else:
            l0_points = None

        # Layer 1
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=num_point, radius=0.05,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer1')

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=num_point//2, radius=0.1,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer2')

        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=num_point//4, radius=0.2,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer3')

        l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=num_point//8, radius=0.3,bn=use_bn,ibn = use_ibn,
                                                           nsample=32, mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                           is_training=is_training, bn_decay=bn_decay, scope='layer4')

        # # combine random variables into the network
        # if use_rv:
        #     rv = tf.tile(tf.random_normal([batch_size, 1, 128], mean=0.0, stddev=1.0), [1, 16, 1])
        #     l4_points = tf.concat((l4_points, rv), axis=-1)


        # Feature Propagation layers

        l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay,
                                       scope='fa_layer1',bn=use_bn,ibn = use_ibn)

        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay,
                                       scope='fa_layer2',bn=use_bn,ibn = use_ibn)

        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay,
                                       scope='fa_layer3',bn=use_bn,ibn = use_ibn)

        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_xyz, l1_points, [128,128,128], is_training, bn_decay,
                                          scope='fa_layer4', bn=use_bn, ibn=use_ibn)

        # concat_features = tf.concat((l0_xyz, l0_points), axis=2)
        feat_num = l0_points.get_shape()[2].value

        ###FC layer
        l0_points = tf.expand_dims(l0_points, axis=2)
        net = tf_util2.conv2d(l0_points, feat_num * 4, [1, 1], padding='VALID', bn=use_bn, is_training=is_training,
                              scope='fc1', bn_decay=bn_decay)
        net = tf.reshape(net, [batch_size, 4 * num_point, 1, -1])
        net = tf_util2.conv2d(net, 64, [1, 1], padding='VALID', bn=use_bn, is_training=is_training,
                              scope='fc2', bn_decay=bn_decay)
        net = tf_util2.conv2d(net, 3, [1, 1], padding='VALID', bn=use_bn, is_training=is_training,
                              scope='fc3', bn_decay=bn_decay, activation_fn=None)
        net = tf.squeeze(net, [2])

        # coord = tf.squeeze(coord, [2])  # B*(2N)*3


    return net, None, None
Example #10
def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
    """ Feature Transform Net, input is BxNx1xK
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util2.conv2d(inputs,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=False,
                          is_training=is_training,
                          scope='tconv1',
                          bn_decay=bn_decay)
    net = tf_util2.conv2d(net,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=False,
                          is_training=is_training,
                          scope='tconv2',
                          bn_decay=bn_decay)
    net = tf_util2.conv2d(net,
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=False,
                          is_training=is_training,
                          scope='tconv3',
                          bn_decay=bn_decay)
    net = tf.layers.max_pooling2d(net, [num_point, 1], [1, 1],
                                  padding='VALID',
                                  name='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util2.fully_connected(net,
                                   512,
                                   bn=False,
                                   is_training=is_training,
                                   scope='tfc1',
                                   bn_decay=bn_decay)
    net = tf_util2.fully_connected(net,
                                   256,
                                   bn=False,
                                   is_training=is_training,
                                   scope='tfc2',
                                   bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [K * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
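PointNet pairs this feature transform with an orthogonality regularizer so the KxK matrix stays close to a rotation; a sketch of that companion loss (the weighting and wiring into the total loss are assumptions, not part of this snippet):

import numpy as np
import tensorflow as tf

feats = tf.placeholder(tf.float32, shape=(32, 1024, 1, 64))
is_training_pl = tf.placeholder(tf.bool, shape=())

transform = feature_transform_net(feats, is_training_pl, K=64)   # B x 64 x 64
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
mat_diff -= tf.constant(np.eye(64), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)  # add to the training loss, e.g. weighted by 1e-3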
Example #11
def get_gen_model(point_cloud,
                  is_training,
                  scope,
                  bradius=1.0,
                  reuse=None,
                  use_rv=False,
                  use_bn=False,
                  use_ibn=False,
                  use_normal=False,
                  bn_decay=None,
                  up_ratio=4,
                  idx=None):

    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:, :, 0:3]
        if use_normal:
            l0_points = point_cloud[:, :, 3:]
        else:
            l0_points = None
        # Layer 1
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(
            l0_xyz,
            l0_points,
            npoint=num_point,
            radius=bradius * 0.05,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[32, 32, 64],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer1')

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=num_point // 2,
            radius=bradius * 0.1,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[64, 64, 128],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer2')

        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=num_point // 4,
            radius=bradius * 0.2,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer3')

        l4_xyz, l4_points, l4_indices = pointnet_sa_module(
            l3_xyz,
            l3_points,
            npoint=num_point // 8,
            radius=bradius * 0.3,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[256, 256, 512],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer4')

        # Feature Propagation layers
        up_l4_points = pointnet_fp_module(l0_xyz,
                                          l4_xyz,
                                          None,
                                          l4_points, [64],
                                          is_training,
                                          bn_decay,
                                          scope='fa_layer1',
                                          bn=use_bn,
                                          ibn=use_ibn)

        up_l3_points = pointnet_fp_module(l0_xyz,
                                          l3_xyz,
                                          None,
                                          l3_points, [64],
                                          is_training,
                                          bn_decay,
                                          scope='fa_layer2',
                                          bn=use_bn,
                                          ibn=use_ibn)

        up_l2_points = pointnet_fp_module(l0_xyz,
                                          l2_xyz,
                                          None,
                                          l2_points, [64],
                                          is_training,
                                          bn_decay,
                                          scope='fa_layer3',
                                          bn=use_bn,
                                          ibn=use_ibn)

        ###concat feature
        with tf.variable_scope('up_layer', reuse=reuse):
            new_points_list = []
            for i in range(up_ratio):
                concat_feat = tf.concat([
                    up_l4_points, up_l3_points, up_l2_points, l1_points, l0_xyz
                ],
                                        axis=-1)
                concat_feat = tf.expand_dims(concat_feat, axis=2)
                concat_feat = tf_util2.conv2d(concat_feat,
                                              256, [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=False,
                                              is_training=is_training,
                                              scope='fc_layer0_%d' % (i),
                                              bn_decay=bn_decay)

                new_points = tf_util2.conv2d(concat_feat,
                                             128, [1, 1],
                                             padding='VALID',
                                             stride=[1, 1],
                                             bn=use_bn,
                                             is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay)
                new_points_list.append(new_points)
            net = tf.concat(new_points_list, axis=1)

        #get the xyz
        coord = tf_util2.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=False,
                                is_training=is_training,
                                scope='fc_layer1',
                                bn_decay=bn_decay)

        coord = tf_util2.conv2d(coord,
                                3, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=False,
                                is_training=is_training,
                                scope='fc_layer2',
                                bn_decay=bn_decay,
                                activation_fn=None,
                                weight_decay=0.0)  # B*(2N)*1*3
        coord = tf.squeeze(coord, [2])  # B*(2N)*3

    return coord, None