Example #1
import numpy as np
import tensorflow as tf

# tf_util, sample_and_group, get_knn, cont_filter_interp, cont_filter_conv and
# channel_weight_all are helpers from the surrounding point-cloud codebase
# (PointNet++-style repo); the examples below assume they are importable.
def transformer(net_late, xyz, pos_enc, gb_points, npoint, num, dim, is_training, bn_decay):

  feature1 = net_late
  feature1 = tf.expand_dims(feature1, 2)
  last_dim = feature1.shape[-1]
  nsample = feature1.shape[-2]

  pos_enc = tf_util.conv2d(pos_enc, dim // 2, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='pos_mlp1_%d_%d' % (npoint, num), bn_decay=bn_decay)
  beta = tf_util.conv2d(pos_enc, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='beta%d_%d' % (npoint, num), bn_decay=bn_decay,
                       activation_fn=None, is_bias=False)


  phi = tf_util.conv2d(feature1, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='phi_%d_%d' % (npoint, num), activation_fn=None,
                       is_bias=False, bn_decay=bn_decay)

  print(xyz.shape, phi.shape, feature1.shape)
  _, phi, _, _ = sample_and_group(feature1.shape[1], 0, 16, xyz, tf.squeeze(phi, axis=2))

  b_phi = tf_util.conv2d(feature1, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='b_phi_%d_%d' % (npoint, num), activation_fn=None,
                       is_bias=False, bn_decay=bn_decay)
  _, b_phi, _, _ = sample_and_group(feature1.shape[1], 0, 16, xyz, tf.squeeze(b_phi, axis=2))


  alpha = tf_util.conv2d(feature1, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='alpha_%d_%d' % (npoint, num), activation_fn=None,
                       is_bias=False, bn_decay=bn_decay)


  last_1 = tf_util.conv2d(b_phi - phi + beta, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='last1_%d_%d' % (npoint, num), bn_decay=bn_decay)

  last_1 = tf_util.conv2d(last_1, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='last2_%d_%d' % (npoint, num), activation_fn=None,
                       is_bias=False, bn_decay=bn_decay)



  # Vector attention: per-channel softmax over the neighbour axis, then a
  # weighted sum of the (value + position-encoding) features.
  last_2 = beta + alpha
  last_1 = tf.nn.softmax(last_1, axis=2)
  edge_total = tf.multiply(last_1, last_2)
  edge_total = tf.reduce_sum(edge_total, -2, keep_dims=True)
  print("edge_total", edge_total.shape)

  return edge_total
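The function above implements a vector-attention aggregation in the spirit of Point Transformer: the (key - query + position encoding) term is mapped to per-channel attention logits, softmaxed over the neighbour axis, and used to pool the value branch. A minimal NumPy shape sketch of that final aggregation (random tensors stand in for the learned conv2d projections; B, N, K, C are illustrative):

import numpy as np

B, N, K, C = 2, 128, 16, 64                 # batch, points, neighbours, channels
attn_logits = np.random.randn(B, N, K, C)   # plays the role of last_1
values = np.random.randn(B, N, K, C)        # plays the role of last_2 (alpha + beta)

# per-channel softmax over the neighbour axis
e = np.exp(attn_logits - attn_logits.max(axis=2, keepdims=True))
weights = e / e.sum(axis=2, keepdims=True)

# weighted sum over neighbours, keeping the singleton axis like reduce_sum(..., keep_dims=True)
edge_total = (weights * values).sum(axis=2, keepdims=True)   # (B, N, 1, C)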
Example #2
def trasit_down(net, xyz, dim, num_points, num, is_training, bn_decay):
  new_xyz, net, grouped_xyz, gb_points = sample_and_group(num_points, 0, 8, xyz, net)
  net = tf_util.conv2d(net, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='transit_down_%d_%d' % (num_points, num), bn_decay=bn_decay)

  net = tf.reduce_max(net, -2)
 
  return new_xyz, net, grouped_xyz
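This transition-down step mirrors PointNet++ set abstraction: farthest-point sampling picks num_points centres, each centre gathers 8 neighbours, a shared 1x1 conv embeds them, and a max over the neighbour axis pools each group to one feature. A NumPy shape sketch of the pooling (illustrative sizes):

import numpy as np

grouped = np.random.randn(2, 512, 8, 64)   # (B, num_points, nsample, dim) after the conv
pooled = grouped.max(axis=-2)              # (B, num_points, dim), like tf.reduce_max(net, -2)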
Example #3
def transformer_block(net, xyz, npoint, num, dim, is_training, bn_decay):

  print(net.shape)
  output_dim = dim

  net2 = tf.expand_dims(net, -2)
  net2 = tf_util.conv2d(net2, dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='block1_%d_%d' % (npoint, num), activation_fn=None,
                       is_bias=False, bn_decay=bn_decay)
  net2 = tf.squeeze(net2, axis=-2)
  new_xyz, grouped_net2, grouped_xyz, gb_points = sample_and_group(npoint, 0, 16, xyz, net2)

  net2 = transformer(net2, xyz, grouped_xyz, gb_points, npoint, num, dim, is_training, bn_decay)
  net2 = tf_util.conv2d(net2, output_dim, [1, 1],
                       padding='VALID', stride=[1, 1],
                       bn=True, is_training=is_training,
                       scope='block2_%d_%d' % (npoint, num), activation_fn=None,
                       is_bias=False, bn_decay=bn_decay)

  net2 = tf.squeeze(net2, axis=2)


  return tf.concat([net, net2], -1)
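Note that the block returns the input features concatenated with the transformer branch, so the channel count grows by output_dim. A quick NumPy shape check (illustrative sizes):

import numpy as np

net = np.random.randn(2, 512, 64)     # incoming features (B, N, C_in)
net2 = np.random.randn(2, 512, 128)   # transformer branch (B, N, output_dim)
out = np.concatenate([net, net2], axis=-1)
print(out.shape)                      # (2, 512, 192) == C_in + output_dim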
Example #4
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz
    # npoints = [256, 64, 10]
    # npoints = [512, 128, 10]
    # npoints = [512, 128, 8]
    npoints = [512, 128, 32]
    ANCHOR = 27
    l1_xyz, l1_points, l1_nn, l1_offset = sample_and_group(npoint=npoints[0],
                                                           radius=0.2,
                                                           nsample=32,
                                                           xyz=l0_xyz,
                                                           points=None,
                                                           knn=False,
                                                           use_xyz=True)
    l2_xyz, l2_points, l2_nn, l2_offset = sample_and_group(npoint=npoints[1],
                                                           radius=0.4,
                                                           nsample=64,
                                                           xyz=l1_xyz,
                                                           points=None,
                                                           knn=False,
                                                           use_xyz=True)
    l3_xyz, l3_points, l3_nn, l3_offset = sample_and_group(npoint=npoints[2],
                                                           radius=0.8,
                                                           nsample=64,
                                                           xyz=l2_xyz,
                                                           points=None,
                                                           knn=False,
                                                           use_xyz=True)

    # # (1024, 3)   ->  (1024,  64)  fc-0
    # # (1024, 64)  ->  (1024,  64)  conv-1
    # # (1024, 64)  ->  (1024, 128)  fc-1
    # # (1024, 128) ->  (1024, 256)  conv-2
    # # (1024, 256) ->  (1024, 256)  fc-2
    # k_list = [10, 10, 10]
    # cube_list = [0.2, 0.2, 0.2]
    # channels = [64, 128, 256]
    # xyz_list = [l0_xyz, l0_xyz, l0_xyz, l0_xyz]

    # # (1024, 3)  -> (1024, 64)  fc-0
    # # (1024, 64) -> (512,  64)  conv-1
    # # (512,  64) -> (512, 256)  fc-1
    # # (512, 256) -> (10,  256)  conv-2
    # # (10, 1024) -> (10, 1024)  fc-2
    # k_list = [32, 64, 64]
    # cube_list = [0.1, 0.2, 0.4]
    # channels = [64, 256, 512]
    # xyz_list = [l0_xyz, l1_xyz, l2_xyz, l3_xyz]

    # # (1024, 3)  -> (1024, 64)  fc-0
    # # (1024, 64) -> (512,  64)  conv-1
    # # (512,  64) -> (512, 256)  fc-1
    # # (512, 256) -> (10,  256)  conv-2
    # # (10, 1024) -> (10, 1024)  fc-2
    # k_list = [32, 64, 128]
    # cube_list = [0.2, 0.4, 1.0]
    # channels = [64, 256, 512]
    # xyz_list = [l0_xyz, l1_xyz, l2_xyz, l3_xyz]

    # # (1024, 3)  -> (1024, 64)  fc-0
    # # (1024, 64) -> (512,  64)  conv-1
    # # (512,  64) -> (512, 256)  fc-1
    # # (512, 256) -> (10,  256)  conv-2
    # # (10, 1024) -> (10, 1024)  fc-2
    # k_list = [10, 10, 10]
    # cube_list = [0.1, 0.2, 0.4]
    # channels = [64, 256, 1024]
    # xyz_list = [l0_xyz, l1_xyz, l2_xyz, l3_xyz]

    # k_list = [10, 10, 10, 10, 10]
    # cube_list = [0.2, 0.2, 0.4, 0.4, 0.8]
    # channels = [64, 64, 256, 256, 1024]
    # xyz_list = [l0_xyz, l1_xyz, l1_xyz, l2_xyz, l2_xyz, l3_xyz]

    # # (1024,  3) -> (1024, 32)  fc-0
    # # (1024, 32) -> (1024, 32)  conv-1-1 K=32 l0 -> l0
    # # (1024, 32) -> (1024, 32)  fc-1-1
    # # (1024, 32) -> (512,  32)  conv-1-2 K=32 l0 -> l1
    # # (512,  32) -> (512, 128)  fc-1-2
    # # (512, 128) -> (512, 128)  conv-2-1 K=16 l1 -> l1
    # # (512, 128) -> (512, 128)  fc-2-1
    # # (512, 128) -> (128, 128)  conv-2-2 K=16 l1 -> l2
    # # (128, 128) -> (128, 512)  fc-2-2
    # # (128, 512) -> (128, 512)  conv-3-1 K=8  l2 -> l2
    # # (128, 512) -> (128, 512)  fc-3-1
    # # (128, 512) -> (8,   512)  conv-3-2 K=8  l2 -> l3
    # # (8,   512) -> (8,   512)  fc 3-2
    # k_list = [32, 32, 16, 16, 8, 8]
    # cube_list = [0.2, 0.2, 0.4, 0.4, 0.8, 0.8]
    # channels = [32, 32, 128, 128, 512, 512]
    # xyz_list = [l0_xyz, l0_xyz, l1_xyz, l1_xyz, l2_xyz, l2_xyz, l3_xyz]

    # (1024,  3) -> (1024, 32)  fc-0
    # (1024, 32) -> (1024, 32)  conv-1-1 K=32   l0 -> l0
    # (1024, 32) -> (1024, 32)  fc-1-1
    # (1024, 32) -> (512,  32)  conv-1-2 K=32   l0 -> l1
    # (512,  32) -> (512, 128)  fc-1-2
    # (512, 128) -> (512, 128)  conv-2-1 K=16   l1 -> l1
    # (512, 128) -> (512, 128)  fc-2-1
    # (512, 128) -> (128, 128)  conv-2-2 K=16   l1 -> l2
    # (128, 128) -> (128, 512)  fc-2-2
    # (128, 512) -> (128, 512)  conv-3-1 K=16   l2 -> l2
    # (128, 512) -> (128, 512)  fc-3-1
    # (128, 512) -> (1,   512)  conv-3-2 K=128  l2 -> l3
    # (1,   512) -> (1,   512)  fc 3-2
    # k_list = [32, 32, 16, 16, 16, 128]
    # cube_list = [0.2, 0.2, 0.4, 0.4, 0.8, 1.2]
    # channels = [32, 32, 128, 128, 512, 512]
    # xyz_list = [l0_xyz, l0_xyz, l1_xyz, l1_xyz, l2_xyz, l2_xyz, l3_xyz]

    k_list = [32, 32, 16, 16, 16, 16]
    cube_list = [0.2, 0.2, 0.4, 0.4, 0.8, 0.8]
    channels = [32, 32, 128, 128, 512, 512]
    xyz_list = [l0_xyz, l0_xyz, l1_xyz, l1_xyz, l2_xyz, l2_xyz, l3_xyz]

    # # (1024,  3) -> (1024, 32)  fc-0
    # # (1024, 32) -> (1024, 32)  conv-1-1 K=10 l0 -> l0
    # # (1024, 32) -> (1024, 32)  fc-1-1
    # # (1024, 32) -> (1024, 32)  conv-1-2 K=10 l0 -> l0
    # # (1024, 32) -> (1024, 32)  fc-1-2
    # # (1024, 32) -> (1024, 32)  conv-1-3 K=10 l0 -> l0
    # # (1024, 32) -> (1024, 32)  fc-1-3
    # # (1024, 32) -> (512,  32)  conv-1-4 K=10 l0 -> l1
    # # (512,  32) -> (512, 128)  fc-1-4
    # # (512, 128) -> (512, 128)  conv-2-1 K=10 l1 -> l1
    # # (512, 128) -> (512, 128)  fc-2-1
    # # (512, 128) -> (128, 128)  conv-2-2 K=10 l1 -> l2
    # # (128, 128) -> (128, 512)  fc-2-2
    # # (128, 512) -> (128, 512)  conv-3-1 K=10 l2 -> l2
    # # (128, 512) -> (128, 512)  fc-3-1
    # # (128, 512) -> (10,  512)  conv-3-2 K=10 l2 -> l3
    # # (10,  512) -> (10,  512)  fc 3-2
    # k_list = [10, 10, 10, 10, 10, 10, 10, 10]
    # cube_list = [0.2, 0.2, 0.2, 0.2, 0.4, 0.4, 0.8, 0.8]
    # channels = [32, 32, 32, 32, 128, 128, 512, 512]
    # xyz_list = [l0_xyz, l0_xyz, l0_xyz, l0_xyz, l1_xyz, l1_xyz, l2_xyz, l2_xyz, l3_xyz]

    nlayers = len(xyz_list) - 1
    if True:  # compute kNN offsets on the fly; the else branch reuses the sample_and_group outputs
        offset_list = [
            tf.py_func(get_knn, [_xyz, _xyz2, _k], [tf.float32, tf.int64])
            for _xyz, _xyz2, _k in zip(xyz_list[:-1], xyz_list[1:], k_list)
        ]
        nn_list = [_offset[1] for _offset in offset_list]
        offset_list = [_offset[0] for _offset in offset_list]
        offset00, _ = tf.py_func(get_knn,
                                 [xyz_list[0], xyz_list[0], k_list[0]],
                                 [tf.float32, tf.int64])

    else:
        offset_list = [l1_offset, l2_offset, l3_offset]
        nn_list = [l1_nn, l2_nn, l3_nn]

    nn_list = [tf.cast(nn, tf.int64) for nn in nn_list]
    alpha_list = [
        cont_filter_interp(_offset, _cube, ANCHOR)
        for (_offset, _cube) in zip(offset_list, cube_list)
    ]

    def _fc(points, w, bn=True):
        # Shared fully connected layer applied per point: flatten to
        # (batch*npoint, c_in), matmul with w, reshape back, optional BN.
        N = points.shape[0]
        in_channels = w.shape[0]
        out_channels = w.shape[1]
        points_ = tf.reshape(points, [-1, in_channels])
        points_ = tf.matmul(points_, w)
        points = tf.reshape(points_, [N, -1, out_channels])
        if bn:
            points = tf.contrib.layers.batch_norm(points,
                                                  center=True,
                                                  scale=True,
                                                  is_training=is_training,
                                                  decay=bn_decay,
                                                  updates_collections=None)
        return points

    def _conv_block(points, knn, alpha, c_in, c_out):
        # One layer: continuous-filter convolution over the kNN graph,
        # then a shared FC with batch norm and a ReLU.
        conv_init = tf.initializers.uniform_unit_scaling(1.0)
        conv_w = tf.get_variable('convw', [
            ANCHOR,
            c_in,
        ],
                                 initializer=conv_init,
                                 trainable=True)
        fc_w = tf.get_variable('fcw', [c_in, c_out],
                               initializer=tf.initializers.truncated_normal(
                                   0.0, 2.0 / np.sqrt(float(c_out))),
                               trainable=True)
        points = cont_filter_conv(points, knn, alpha, conv_w)
        points = _fc(points, fc_w, bn=True)
        points = tf.nn.relu(points)
        return points

    c_in = channels[0]
    c_init = 3
    # points = tf.ones_like(l0_xyz[:, :, :1])
    # points = tf.reduce_mean(offset00, [2])  # [B, M, K, D]
    # points = tf.reshape(points, [16, 1024, 3])
    points = l0_xyz
    fc0 = tf.get_variable('w0', [c_init, c_in],
                          initializer=tf.initializers.truncated_normal(
                              0.0, 2.0 / np.sqrt(float(c_in))))
    points = _fc(points, fc0, bn=False)
    print(points)
    for layer in range(nlayers):
        with tf.variable_scope('l{}'.format(layer)):
            print(layer, len(alpha_list))
            alpha = alpha_list[layer]
            nn = nn_list[layer]
            c_out = channels[layer]
            points = _conv_block(points, nn, alpha, c_in, c_out)
            c_in = c_out

    # Fully connected layers
    l3_points = tf.reduce_max(points, [1])  # [B, C]
    # l3_points = tf.reduce_mean(points, [1])  # [B, C]
    net = tf.reshape(l3_points, [batch_size, channels[-1]])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
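A minimal graph-construction sketch for the classifier above (a usage sketch, assuming a TensorFlow 1.x environment and the helpers this file imports; batch size and point count are illustrative):

with tf.Graph().as_default():
    point_cloud = tf.placeholder(tf.float32, shape=(16, 1024, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = get_model(point_cloud, is_training_pl, bn_decay=None)
    print(logits)   # Tensor of shape (16, 40): one logit per ModelNet40 class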
Example #5
def input_reweight_all_net(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    ''' Adapted from the PointNet++ Set Abstraction (SA) module.
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled by farthest point sampling
            radius: float32 -- search radius in the local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output sizes for the MLP on each point
            mlp2: list of int32 -- output sizes for the MLP on each region
            group_all: bool -- if True, group all points into one region,
                overriding the npoint, radius and nsample settings
            use_xyz: bool -- if True, concat XYZ with the local point features,
                otherwise use the point features alone
            use_nchw: bool -- if True, use the NCHW data format for conv2d,
                which is usually faster than NHWC
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(npoint, radius, nsample, xyz, points, knn, use_xyz)
            #new_points: (batch_size, npoint, nsample, 3+channel)

        ############################# input channel reweight ###################
        channel_weights = channel_weight_all(new_points, mlp, is_training, bn_decay, scope+'_input_reweight') # (batch_size, npoints, nsample, 3+channel)
        new_points = tf.multiply(new_points, channel_weights)
        ########################################################################   
    
        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2])
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='conv%d'%(i), bn_decay=bn_decay,
                                        data_format=data_format) # (batch_size, npoint, nsample, mlp[-1])
        if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1])

        # Pooling in Local Regions
        if pooling=='max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool') 
        elif pooling=='avg':
            new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
        elif pooling=='weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(exp_dists, axis=2, keep_dims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling=='max_and_avg':
            max_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
            avg_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        # [Optional] Further Processing 
        if mlp2 is not None:
            if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2])
            for i, num_out_channel in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                            padding='VALID', stride=[1,1],
                                            bn=bn, is_training=is_training,
                                            scope='conv_post_%d'%(i), bn_decay=bn_decay,
                                            data_format=data_format) 
            if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1])

        new_points = tf.squeeze(new_points, [2])  # (batch_size, npoint, mlp[-1] or mlp2[-1])
        return new_xyz, new_points, idx
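Hypothetical usage of the module above (a sketch; the shapes follow the docstring, and the argument values are illustrative):

xyz = tf.placeholder(tf.float32, (8, 1024, 3))
feats = tf.placeholder(tf.float32, (8, 1024, 6))
is_training_pl = tf.placeholder(tf.bool, shape=())
new_xyz, new_points, idx = input_reweight_all_net(
    xyz, feats, npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training_pl, bn_decay=None, scope='sa1')
# new_xyz: (8, 512, 3), new_points: (8, 512, 128), idx: (8, 512, 32)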