def get_transform(point_cloud, is_training, bn_decay=None, K=3):
    """ Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        assert K == 3
        weights = tf.get_variable('weights', [128, 3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
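A minimal usage sketch for get_transform, assuming TensorFlow 1.x and the tf_util helpers from this listing; the placeholder names and shapes are illustrative, not prescribed by the snippet:

import tensorflow as tf

points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))  # BxNx3 point cloud
is_training_pl = tf.placeholder(tf.bool, shape=())
with tf.variable_scope('transform_net1'):
    xform = get_transform(points_pl, is_training_pl, K=3)    # Bx3xK alignment matrix
aligned = tf.matmul(points_pl, xform)                        # aligned BxNx3 cloud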
def get_transform_K(inputs, is_training, bn_decay=None, K=3):
    """ Transform Net, input is BxNx1xK gray image
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util.conv2d(inputs, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv2', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    #transform = tf_util.fully_connected(net, K*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
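A matching sketch for get_transform_K on BxNx1xK features, reusing is_training_pl from the sketch above (K=128 mirrors its use in the part-segmentation model further down):

feats = tf.placeholder(tf.float32, shape=(32, 1024, 1, 128))      # BxNx1xK features
with tf.variable_scope('transform_net2'):
    feat_xform = get_transform_K(feats, is_training_pl, K=128)    # BxKxK matrix
aligned_feats = tf.matmul(tf.squeeze(feats, axis=2), feat_xform)  # BxNxK features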
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
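A hedged sketch of wiring this classifier to a standard softmax cross-entropy loss; it assumes input_transform_net/feature_transform_net are in scope and reuses the placeholders from the first sketch:

logits, end_points = get_model(points_pl, is_training_pl, bn_decay=None)
labels_pl = tf.placeholder(tf.int32, shape=(32,))
cls_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels_pl))
preds = tf.argmax(logits, axis=1)  # predicted class ids, shape (B,)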
Example #4
def caps_net(inputs, labels):
    inputs = tf.reshape(inputs, [-1, 28, 28, 1])

    conv1 = U.conv2d(inputs, 256, "conv1", filter_size=(3,3), stride=(1,1), pad="VALID")
    conv1_act = tf.nn.relu(conv1)  # [-1, 26, 26, 256] after a 3x3 VALID conv on 28x28
    conv1_act = tf.expand_dims(conv1_act, axis=-2) 
    primary_caps = capsule_conv(conv1_act, 32, 8, kernel_size=(9,9), strides=(2, 2))
    primary_caps = tf.reshape(primary_caps, [-1, primary_caps.shape[1].value*primary_caps.shape[2].value*32, 8])

    digitscaps = capsule(primary_caps, 10, 16)

    lengths = tf.sqrt(U.sum(tf.square(digitscaps),axis=2) + 1e-9)

    preds = tf.argmax(lengths, axis = -1)
    probs = tf.nn.softmax(lengths)

    masked_digitscaps = mask_scene(digitscaps, lengths)
    reconstruction_pred = reconstruct_fc(masked_digitscaps)

    r_loss = reconstruction_loss(tf.reshape(inputs, [-1, 784]), reconstruction_pred)
    m_loss = margin_loss(labels, lengths)
    loss = U.mean(m_loss + r_loss, axis=-1)
    opti = tf.train.AdamOptimizer()
    train = opti.minimize(loss)

    corr_pred = tf.equal(preds, tf.argmax(labels, 1))
    acc = U.mean(tf.cast(corr_pred, tf.float32))
    
    r_loss = U.mean(r_loss, axis=-1)
    m_loss = U.mean(m_loss, axis=-1)

    return train, acc, r_loss, m_loss, reconstruction_pred
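A usage sketch for caps_net on MNIST-style data, assuming flattened 28x28 inputs and one-hot labels over 10 digits; the placeholder names are hypothetical:

x_pl = tf.placeholder(tf.float32, shape=(None, 784))   # flattened images
y_pl = tf.placeholder(tf.float32, shape=(None, 10))    # one-hot labels
train_op, acc, r_loss, m_loss, recon = caps_net(x_pl, y_pl)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # sess.run([train_op, acc], feed_dict={x_pl: batch_x, y_pl: batch_y})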
Example #5
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm,[1,1,3])
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=bn, is_training=is_training,
                                         scope='conv_%d'%(i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
        return new_points1
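A hedged sketch of one FP step in a PointNet++ decoder, assuming the compiled three_nn/three_interpolate ops are importable; the level-1/level-2 shapes are illustrative:

l1_xyz = tf.placeholder(tf.float32, shape=(16, 1024, 3))
l2_xyz = tf.placeholder(tf.float32, shape=(16, 256, 3))     # sparser than l1_xyz
l1_points = tf.placeholder(tf.float32, shape=(16, 1024, 64))
l2_points = tf.placeholder(tf.float32, shape=(16, 256, 128))
l1_points_up = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
                                  [256, 256], is_training_pl, None, scope='fa_layer1')
# l1_points_up: (16, 1024, 256) features interpolated back onto the dense level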
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    ''' 
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
        padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
        is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
        is_training=is_training, bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2:  heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
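A sketch of calling the box-estimation head, assuming NUM_HEADING_BIN and NUM_SIZE_CLUSTER are defined as in Frustum PointNets and the object points are already in object coordinates:

obj_pts = tf.placeholder(tf.float32, shape=(32, 512, 3))  # (B, M, C) object points
one_hot = tf.placeholder(tf.float32, shape=(32, 3))       # predicted object type
box_out, end_points = get_3d_box_estimation_v1_net(
    obj_pts, one_hot, is_training_pl, bn_decay=None, end_points={})
# box_out: (B, 3 + NUM_HEADING_BIN*2 + NUM_SIZE_CLUSTER*4)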
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)
    
    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    
    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example #8
def get_center_regression_net(object_point_cloud, one_hot_vec,
                              is_training, bn_decay, end_points):
    ''' Regression network for center delta. a.k.a. T-Net.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in 3D mask coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        predicted_center: TF tensor in shape (B,3)
    ''' 
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
        padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
        is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
        is_training=is_training, bn_decay=bn_decay)
    predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
        scope='fc3-stage1')
    return predicted_center, end_points
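A matching sketch for the T-Net center regressor; in the Frustum PointNets pipeline the predicted delta re-centers the mask-coordinate points before box estimation (hedged, reusing the placeholders from the sketch above):

center_delta, end_points = get_center_regression_net(
    obj_pts, one_hot, is_training_pl, bn_decay=None, end_points=end_points)
obj_pts_centered = obj_pts - tf.expand_dims(center_delta, 1)  # (B, M, 3)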
Example #9
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii for the local regions
            nsample_list: list of int32 -- how many points in each local region, per radius
            mlp_list: list of list of int32 -- output sizes for the MLP on each point, per radius
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, sum_k mlp_list[k][-1]) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2])
            for j,num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                                                padding='VALID', stride=[1,1], bn=bn, is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
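A hedged sketch of one MSG abstraction level; the radii, sample counts and MLP widths below follow the PointNet++ ModelNet MSG config but are illustrative:

xyz_pl = tf.placeholder(tf.float32, shape=(16, 1024, 3))
new_xyz, new_points = pointnet_sa_module_msg(
    xyz_pl, None, npoint=512,
    radius_list=[0.1, 0.2, 0.4], nsample_list=[16, 32, 128],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training=is_training_pl, bn_decay=None, scope='sa_msg1')
# new_points: (16, 512, 64+128+128) concatenated multi-scale features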
Example #10
def get_model(point_cloud, is_training, bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 1
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud, 16, nn_idx, activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6, is_training=is_training, bn_decay=bn_decay,
                                                   layer='layer0', k=k, i=i)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat([tf.expand_dims(point_cloud, -2), neighbors_features], axis=-1)

    locals_max_transform = tf.reduce_max(tf.concat(local_features, axis=-1), axis=-2, keep_dims=True)


    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(neighbors_features, locals_max_transform, is_training, bn_decay, K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)


    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 4
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud_transformed, 16, nn_idx, activation=tf.nn.elu, in_dropout=0.6,
                                           coef_dropout=0.6, is_training=is_training, bn_decay=bn_decay,
                                           layer='layer1', k=k, i=i)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat([tf.expand_dims(point_cloud_transformed, -2), neighbors_features], axis=-1)

    net = tf_util.conv2d(neighbors_features, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='gapnet1', bn_decay=bn_decay)
    net1 = net

    locals_max = tf.reduce_max(tf.concat(local_features, axis=-1), axis=-2, keep_dims=True)


    net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='gapnet2', bn_decay=bn_decay)
    net2 = net


    net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='gapnet3', bn_decay=bn_decay)
    net3 = net


    net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='gapnet4', bn_decay=bn_decay)
    net4 = net


    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4, locals_max], axis=-1), 1024, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='agg', bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
def get_model(point_cloud, is_training, part_num, batch_size, \
 num_point, weight_decay, bn_decay=None):
    """ ConvNet baseline, input is BxNx3 gray image """
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        K = 3
        transform = get_transform(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    out1 = tf_util.conv2d(input_image,
                          64, [1, K],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='conv1',
                          bn_decay=bn_decay)
    out2 = tf_util.conv2d(out1,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='conv2',
                          bn_decay=bn_decay)
    out3 = tf_util.conv2d(out2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='conv3',
                          bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        K = 128
        transform = get_transform_K(out3, is_training, bn_decay, K)

    end_points['transform'] = transform

    squeezed_out3 = tf.reshape(out3, [batch_size, num_point, 128])
    net_transformed = tf.matmul(squeezed_out3, transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    out4 = tf_util.conv2d(net_transformed,
                          512, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='conv4',
                          bn_decay=bn_decay)
    out5 = tf_util.conv2d(out4,
                          2048, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='conv5',
                          bn_decay=bn_decay)

    concat = tf.concat(axis=3, values=[out1, out2, out3, out4, out5])

    net = tf_util.conv2d(concat,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv1',
                         weight_decay=weight_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.8,
                          is_training=is_training,
                          scope='seg/dp1')
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv2',
                         weight_decay=weight_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.8,
                          is_training=is_training,
                          scope='seg/dp2')
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv3',
                         weight_decay=weight_decay)
    net = tf_util.conv2d(net,
                         part_num, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         bn=False,
                         scope='seg/conv4',
                         weight_decay=weight_decay)

    net = tf.reshape(net, [batch_size, num_point, part_num])

    return net, end_points
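A hedged sketch of wiring this part-segmentation head to a per-point loss; part_num=50 matches the ShapeNet part labels, and the other values are illustrative:

seg_logits, end_points = get_model(points_pl, is_training_pl, part_num=50,
                                   batch_size=32, num_point=1024,
                                   weight_decay=0.0, bn_decay=None)
seg_labels = tf.placeholder(tf.int32, shape=(32, 1024))  # per-point part ids
seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=seg_logits, labels=seg_labels))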
def pointnet_sa_module(xyz,
                       points,
                       npoint,
                       radius,
                       nsample,
                       mlp,
                       mlp2,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       knn=False,
                       use_xyz=True,
                       use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- if True, group all points into one region,
                overriding the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, knn, use_xyz)

        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d' % (i),
                                        bn_decay=bn_decay,
                                        data_format=data_format)
        if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        # Pooling in Local Regions
        if pooling == 'max':
            new_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
        elif pooling == 'avg':
            new_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2,
                    keep_dims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max_and_avg':
            max_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
            avg_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        # [Optional] Further Processing
        if mlp2 is not None:
            if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
            for i, num_out_channel in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points,
                                            num_out_channel, [1, 1],
                                            padding='VALID',
                                            stride=[1, 1],
                                            bn=bn,
                                            is_training=is_training,
                                            scope='conv_post_%d' % (i),
                                            bn_decay=bn_decay,
                                            data_format=data_format)
            if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        new_points = tf.squeeze(new_points,
                                [2])  # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points, idx
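A single-scale SA level, hedged after the PointNet++ SSG classifier and reusing xyz_pl from the MSG sketch above (group_all=False for intermediate levels, True for the final global level):

l1_xyz, l1_points, l1_idx = pointnet_sa_module(
    xyz_pl, None, npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training_pl, bn_decay=None, scope='sa_layer1')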
Example #13
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 4, activation_fn=None, scope='fc3')

    return net, end_points
Example #14
0
def forward(point_cloud, is_training, bn_decay=None):
    """PointNetVLAD,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 3, 
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE = 64
    OUTPUT_DIM = 256
    point_cloud = tf.reshape(
        point_cloud,
        [batch_num_queries * num_pointclouds_per_query, num_points, 3])

    point_cloud_xyz = point_cloud

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud,
                                              is_training,
                                              bn_decay,
                                              K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    print('input_image:', input_image)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net,
                                                  is_training,
                                                  bn_decay,
                                                  K=64)
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    print('net:', net)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)

    output, weights = vlad_forward(point_cloud_xyz,
                                   net,
                                   max_samples=num_points,
                                   is_training=is_training)

    print(output)

    #normalize to have norm 1
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(
        output, [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output, weights
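A usage sketch for the PointNetVLAD forward pass, assuming the vlad_forward aggregation layer from the same repo; the query/batch sizes are illustrative:

queries = tf.placeholder(tf.float32, shape=(2, 18, 4096, 3))  # B x Q x N x 3
desc, vlad_weights = forward(queries, is_training_pl)
# desc: (2, 18, 256) L2-normalized global descriptors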
def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
    """ Feature Transform Net, input is BxNx1xK
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util.conv2d(inputs,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [K * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
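Given end_points['transform'] stored by the get_model graphs above, the KxK feature transform is usually regularized toward an orthogonal matrix, as in the PointNet paper; a hedged sketch of that loss term (np is numpy, as used elsewhere in this listing):

xform = end_points['transform']                        # (B, K, K)
K = xform.get_shape()[1].value
mat_diff = tf.matmul(xform, tf.transpose(xform, perm=[0, 2, 1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)  # add to the total loss with a small weight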
def get_model_DGCNN(name,
                    point_cloud,
                    is_training,
                    is_dist=False,
                    weight_decay=0.0001,
                    bn_decay=None,
                    k=20,
                    reuse=tf.AUTO_REUSE):
    '''DGCNN-based backbone network (PDE-net)'''

    with tf.variable_scope(name, reuse=reuse):

        num_point = point_cloud.get_shape()[1].value
        input_image = tf.expand_dims(point_cloud, -1)
        input_point_cloud = tf.expand_dims(point_cloud, -2)
        adj = tf_util.pairwise_distance(point_cloud[:, :, :3])
        nn_idx = tf_util.knn(adj, k=k)
        ###
        edge_feature1 = tf_util.get_edge_feature(input_image,
                                                 nn_idx=nn_idx,
                                                 k=k)
        net = tf_util.conv2d(edge_feature1,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             weight_decay=weight_decay,
                             scope='adj_conv1',
                             bn_decay=bn_decay,
                             is_dist=is_dist)
        net_1 = tf.reduce_max(net, axis=-2, keep_dims=True)

        edge_feature2 = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)
        net = tf_util.conv2d(edge_feature2,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             weight_decay=weight_decay,
                             scope='adj_conv3',
                             bn_decay=bn_decay,
                             is_dist=is_dist)
        net_2 = tf.reduce_max(net, axis=-2, keep_dims=True)

        edge_feature3 = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)
        net = tf_util.conv2d(edge_feature3,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             weight_decay=weight_decay,
                             scope='adj_conv5',
                             bn_decay=bn_decay,
                             is_dist=is_dist)
        net_3 = tf.reduce_max(net, axis=-2, keep_dims=True)

        net = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='adj_conv7',
                             bn_decay=bn_decay,
                             is_dist=is_dist)
        out_max = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')
        expand = tf.tile(out_max, [1, num_point, 1, 1])

        ##############
        net = tf.concat(
            axis=3, values=[expand, net_1, net_2, net_3, input_point_cloud])
        ############
        net = tf_util.conv2d(net,
                             512, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dir/conv1',
                             is_dist=is_dist)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.conv2d(net,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dir/conv2',
                             is_dist=is_dist)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp2')
        net = tf_util.conv2d(net,
                             3, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             activation_fn=None,
                             is_training=is_training,
                             scope='dir/conv3',
                             is_dist=is_dist)
        net = tf.squeeze(net, axis=2)
        return net
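A hedged sketch of invoking the PDE-net backbone; inputs may carry extra channels, since only the first three are used to build the knn graph:

pts = tf.placeholder(tf.float32, shape=(8, 2048, 3))
dirs = get_model_DGCNN('pde_net', pts, is_training_pl)  # (8, 2048, 3) per-point output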
Example #17
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    l0_points = point_cloud[:, :, 3:]
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32,
                                                       mlp=[32, 32, 64], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32,
                                                       mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32,
                                                       mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32,
                                                       mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points_sem = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256, 256], is_training, bn_decay,
                                       scope='sem_fa_layer1')
    l2_points_sem = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_sem, [256, 256], is_training, bn_decay,
                                       scope='sem_fa_layer2')
    l1_points_sem = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_sem, [256, 128], is_training, bn_decay,
                                       scope='sem_fa_layer3')
    l0_points_sem = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_sem, [128, 128, 128], is_training, bn_decay,
                                       scope='sem_fa_layer4')

    # FC layers
    net_sem = tf_util.conv1d(l0_points_sem, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_fc1',
                             bn_decay=bn_decay)
    net_sem_cache = tf_util.conv1d(net_sem, 128, 1, padding='VALID', bn=True, is_training=is_training,
                                   scope='sem_cache', bn_decay=bn_decay)
    net_ins_sem = net_sem_cache
    net_ins_sem = tf.stop_gradient(net_ins_sem)

    # ins

    net_ins = tf_util.conv2d(tf.expand_dims(point_cloud, -1), 64, [1, 9], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='ins_conv1', bn_decay=bn_decay)
    net_ins = tf_util.conv2d(net_ins, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='ins_conv2', bn_decay=bn_decay)
    net_ins = tf_util.conv2d(net_ins, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='ins_conv3', bn_decay=bn_decay)
    net_ins = tf_util.conv2d(net_ins, 128, [1, 1], padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training, scope='ins_conv4', bn_decay=bn_decay)
    net_ins = tf.squeeze(net_ins, axis=2)


    net_ins = tf.concat([net_ins_sem, net_ins], axis=2)
    net_ins = tf.expand_dims(net_ins, axis=2)
    net_ins = tf_util.conv2d(net_ins, 192, [1, 1], padding='VALID', stride=[1, 1], activation_fn=None,
                             is_training=is_training,
                             scope='net_ins1', bn_decay=bn_decay)
    net_ins = tf_util.conv2d(net_ins, 128, [1, 1], padding='VALID', stride=[1, 1], activation_fn=None,
                             is_training=is_training,
                             scope='net_ins2', bn_decay=bn_decay)
    net_ins = tf_util.conv2d(net_ins, 128, [1, 1], padding='VALID', stride=[1, 1], activation_fn=None,
                             is_training=is_training,
                             scope='net_ins3', bn_decay=bn_decay)
    net_ins = tf.squeeze(net_ins, axis=2)

    k = 30
    adj_matrix = tf_util.pairwise_distance_cosine(net_ins)
    nn_idx = tf_util.knn_thres(adj_matrix, k=k)
    nn_idx = tf.stop_gradient(nn_idx)
    net_sem = tf_util.get_local_feature(net_sem, nn_idx=nn_idx, k=k)  # [b, n, k, c]
    net_sem = tf.reduce_max(net_sem, axis=-2, keep_dims=False)

    net_sem = tf_util.dropout(net_sem, keep_prob=0.5, is_training=is_training, scope='sem_dp1')
    net_sem = tf_util.conv1d(net_sem, num_class, 1, padding='VALID', activation_fn=None, scope='sem_fc4')

    print("net_sem: " + str(tf.shape(net_sem)))
    print("net_ins: " + str(tf.shape(net_ins)))

    return net_sem, net_ins
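A hedged sketch of driving this joint semantic/instance model; the [1, 9] kernel in ins_conv1 implies 9 input channels (xyz plus extra features), and num_class=13 follows the S3DIS convention:

pts9 = tf.placeholder(tf.float32, shape=(8, 4096, 9))
net_sem, net_ins = get_model(pts9, is_training_pl, num_class=13)
# net_sem: (8, 4096, 13) per-point logits; net_ins: (8, 4096, 128) embeddings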
Example #18
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20
    #print(batch_size, num_point)

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

#  print(edge_feature.shape)
    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(
            edge_feature, is_training, bn_decay, K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(
        point_cloud_transformed, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn1', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn2', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn3', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn4', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='agg', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    # print(net)

    # Option 1: sum the shards' features (excluding additional padding shards)
    # print(filters)
    # net = tf.multiply(net, filters)  # remove additional padding shards
    net = tf.reduce_sum(net, 0, keep_dims=True)

    print("reduce_sum", net.shape)
    print(net)

    # Option 2: average the shards' features
    # net = tf.reduce_mean(net, 0, keep_dims=True)

    # Option 3: multiply by the transpose: B*1024 x 1024*B -> B*B, or 1024*B x B*1024 -> 1024*1024
    # net = tf.matmul(net, net, transpose_b=True)  # shape = B*B
    # net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1],
    #                      bn=True, is_training=is_training, scope='agg', bn_decay=bn_decay)
    net = skip_dense(net, 1024, 10, 0.1, is_training)
    print("skip_dense: ", net.shape)
    return net, end_points
Example #19
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)

    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image,
                         64, [1, 9],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
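A minimal usage sketch for this classifier, assuming TF 1.x and that tf_util from the PointNet codebase is importable; the batch size of 32 and 1024 points are illustrative, not from the original.

import tensorflow as tf

with tf.Graph().as_default():
    # BxNx9 input, matching the [1, 9] kernel of conv1
    points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 9))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = get_model(points_pl, is_training_pl)  # Bx40 logits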
Example #20
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

    k = 30

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 1
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud,
                                                   16,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer0',
                                                   k=k,
                                                   i=i,
                                                   is_dist=True)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud, -2), neighbors_features], axis=-1)

    locals_max_transform = tf.reduce_max(tf.concat(local_features, axis=-1),
                                         axis=-2,
                                         keep_dims=True)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(neighbors_features,
                                        locals_max_transform,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)

    point_cloud_transformed = tf.matmul(point_cloud, transform)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 4
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud_transformed,
                                                   16,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer1',
                                                   k=k,
                                                   i=i,
                                                   is_dist=True)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud_transformed, -2), neighbors_features],
        axis=-1)

    locals_max1 = tf.reduce_max(tf.concat(local_features, axis=-1),
                                axis=-2,
                                keep_dims=True)

    net = tf_util.conv2d(neighbors_features,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet1',
                         bn_decay=bn_decay,
                         is_dist=True)
    net1 = net

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet2',
                         bn_decay=bn_decay,
                         is_dist=True)
    net2 = net

    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet3',
                         bn_decay=bn_decay,
                         is_dist=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 4
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(net,
                                                   128,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer2',
                                                   k=k,
                                                   i=i,
                                                   is_dist=True)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud_transformed, -2), neighbors_features],
        axis=-1)

    locals_max2 = tf.reduce_max(tf.concat(local_features, axis=-1),
                                axis=-2,
                                keep_dims=True)

    net = tf_util.conv2d(neighbors_features,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet4',
                         bn_decay=bn_decay,
                         is_dist=True)
    net4 = net

    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet5',
                         bn_decay=bn_decay,
                         is_dist=True)
    net5 = net

    net = tf_util.conv2d(net,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet6',
                         bn_decay=bn_decay,
                         is_dist=True)
    net6 = net

    net = tf_util.conv2d(tf.concat([net3, net6, locals_max1, locals_max2],
                                   axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet8',
                         bn_decay=bn_decay,
                         is_dist=True)
    net8 = net

    out_max = tf_util.max_pool2d(net8, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          64, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net8])

    net9 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net9 = tf_util.dropout(net9,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net9 = tf_util.conv2d(net9,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net9 = tf_util.dropout(net9,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net9 = tf_util.conv2d(net9,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net9 = tf_util.conv2d(net9,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net9 = tf.reshape(net9, [batch_size, num_point, part_num])

    return net9
def conv_module(xyz, points, patch, mlp, conv, mlp2, is_training, bn_decay, scope, bn=True, use_xyz=True,
                use_nchw=False, mlp2ac=False, pooling='max', k=1, center=False, use_pooling=False):
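    ''' Grouped point-convolution module: gathers each point's patch via
        group_point, splits it into k groups of kernel_size points, max- or
        avg-pools across the k groups, then applies mlp (1x1 convs), a
        [1, kernel_size] kernel conv, and mlp2 (1x1 convs). With
        use_pooling=True, a max-pooled shortcut branch (projected by conv1d
        when channel counts differ) is added to the output, residual-style. '''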
    data_format = 'NCHW' if use_nchw else 'NHWC'
    convac = tf.nn.relu
    if mlp2ac:
        ac = tf.nn.relu
    else:
        ac = None
    with tf.variable_scope(scope) as sc:
        # npoint = tf.shape(patch)[1]
        npoint = patch.get_shape()[1].value
        # print(patch.get_shape())
        # kernel_size = patch.get_shape()[2].value/k
        batch_size = tf.shape(patch)[0]
        if points is None:
            points = xyz
        elif use_xyz:
            points = tf.concat([xyz, points], axis=2)
        # channel = tf.shape(points)[2]
        channel = points.get_shape()[2].value

        new_points = group_point(points, patch)  # (batch_size, npoint, k*kernel_size, channel)
        kernel_size = int(1.0 * new_points.get_shape()[2].value / k)

        new_points = tf.reshape(new_points, [-1, npoint, k, kernel_size, channel])
        # print(new_points.get_shape())
        # print(new_points.get_shape())
        if pooling == 'avg':
            new_points = tf.reduce_mean(new_points, axis=[2], name='avgpool')
        else:
            new_points = tf.reduce_max(new_points, axis=[2], name='maxpool')
        # print(new_points.get_shape())


        # Residual pooling branch (note: this reuses the name of the `pooling`
        # string argument, which has already been consumed above)
        if use_pooling:
            pooling = tf.reduce_max(new_points, axis=[2], name='pooling')
            if mlp2 is not None:
                endchannel = mlp2[-1]
            else:
                endchannel = conv[-1]
                convac = None
            if channel != endchannel:
                pooling = tf_util.conv1d(pooling, endchannel, 1, padding='VALID', activation_fn=None,
                                         scope='pooling_conv')

        if center:
            center_xyz = new_points[:, :, :, 0:3]
            center_xyz = tf.reduce_mean(center_xyz, axis=[2], name='center_avgpool')
            center_xyz = tf.tile(tf.reshape(center_xyz, [-1, npoint, 1, 3]), [1, 1, kernel_size, 1])
            new_points = tf.concat([new_points[:, :, :, 0:3] - center_xyz, new_points[:, :, :, 3:]], axis=-1)

        if use_nchw:
            new_points = tf.transpose(new_points, [0, 3, 1, 2])
        if mlp is not None:
            for i, num_out_channel in enumerate(mlp):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1],
                                            padding='VALID', stride=[1, 1],
                                            bn=bn, is_training=is_training,
                                            scope='conv_1_%d' % (i), bn_decay=bn_decay,
                                            data_format=data_format)

        new_points = tf_util.conv2d(new_points, num_output_channels=conv[0], kernel_size=[1, kernel_size],
                                    padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='ker_conv',
                                    bn_decay=bn_decay, data_format=data_format, activation_fn=convac)
        if mlp2 is not None:
            for i, num_out_channel in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1],
                                            padding='VALID', stride=[1, 1],
                                            bn=bn, is_training=is_training,
                                            scope='conv_2_%d' % (i), bn_decay=bn_decay,
                                            data_format=data_format, activation_fn=ac)
        if use_nchw:
            new_points = tf.transpose(new_points, [0, 2, 3, 1])

        new_points = tf.squeeze(new_points, [2])
        if use_pooling:
            return new_points + pooling
        else:
            return new_points
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 50, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC

    return net, end_points
def get_model(point_cloud, query_points, is_training, bn_decay=None):
    """ range regression PointNet, input is BxNx3(point_cloud) and Bx2(query_points), output Bx1 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    #
    query_points = tf.tile(
        tf.expand_dims(query_points, [1]),
        [1, num_point, 1])  # Now, query_points is with shape BxNx2
    query_points = tf.concat(
        [query_points, tf.zeros([batch_size, num_point, 1])],
        2)  # Now, query_points is with shape BxNx3
    # point_cloud_ab = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 2]) # alpha and beta, shape:BxNx2
    # point_cloud_range = tf.slice(point_cloud, [0, 0, 2], [-1, -1, 1]) # range, shape:BxNx1
    # shift_ab = point_cloud_ab - query_points
    shifted_pl = point_cloud - query_points
    #

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(shifted_pl, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(shifted_pl, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=False,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=False,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 1, activation_fn=None, scope='fc3')

    return net, end_points
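A minimal usage sketch for this range-regression network, assuming TF 1.x; the shapes are illustrative.

import tensorflow as tf

with tf.Graph().as_default():
    points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))  # BxNx3 cloud
    query_pl = tf.placeholder(tf.float32, shape=(32, 2))         # Bx2 queries
    is_training_pl = tf.placeholder(tf.bool, shape=())
    pred, end_points = get_model(points_pl, query_pl, is_training_pl)  # Bx1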
def get_model_RRFSegNet(name,
                        points,
                        is_training,
                        k=20,
                        is_dist=True,
                        weight_decay=0.0004,
                        bn_decay=None,
                        reuse=tf.AUTO_REUSE):
    ''' RRFSegNet-based Backbone Network (PDE-net) '''

    with tf.variable_scope(name, reuse=reuse):
        num_point = points.get_shape()[1].value
        Position = points[:, :, :3]
        adj = tf_util.pairwise_distance(Position)
        nn_idx = tf_util.knn(adj, k=k)
        ### layer_1
        relation_features1 = tf_util.get_relation_features(points,
                                                           nn_idx=nn_idx,
                                                           k=k)
        net_1 = relation_reasoning_layers('layer_1',
                                          relation_features1,
                                          is_training=is_training,
                                          bn_decay=bn_decay,
                                          nodes_list=[64, 64, 64],
                                          weight_decay=weight_decay,
                                          is_dist=is_dist)
        ### layer_2
        relation_features1 = tf_util.get_relation_features(net_1,
                                                           nn_idx=nn_idx,
                                                           k=k)
        net_2 = relation_reasoning_layers('layer_2',
                                          relation_features1,
                                          is_training=is_training,
                                          bn_decay=bn_decay,
                                          nodes_list=[128, 128, 128],
                                          weight_decay=weight_decay,
                                          is_dist=is_dist)

        ### generate global features
        global_net = tf_util.conv2d(tf.concat([net_1, net_2], axis=-1),
                                    1024, [1, 1],
                                    padding='VALID',
                                    stride=[1, 1],
                                    weight_decay=weight_decay,
                                    bn=True,
                                    is_training=is_training,
                                    scope='mpl_global',
                                    bn_decay=bn_decay,
                                    is_dist=is_dist)

        global_net = tf.reduce_max(global_net, axis=1, keep_dims=True)
        global_net = tf.tile(global_net, [1, num_point, 1, 1])

        ###
        concat = tf.concat(axis=3, values=[global_net, net_1, net_2])

        # CONV
        net = tf_util.conv2d(concat,
                             256, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dir/conv1',
                             weight_decay=weight_decay,
                             is_dist=is_dist,
                             bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.conv2d(net,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dir/conv2',
                             is_dist=is_dist)
        net = tf_util.conv2d(net,
                             3, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             activation_fn=None,
                             is_training=is_training,
                             scope='dir/conv3',
                             is_dist=is_dist)
        net = tf.squeeze(net, axis=2)

        return net
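A call sketch for this backbone, assuming TF 1.x; the batch size, point count, and channel count are illustrative.

import tensorflow as tf

with tf.Graph().as_default():
    # BxNxC point features; the first three channels must be XYZ positions
    points_pl = tf.placeholder(tf.float32, shape=(8, 4096, 6))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    directions = get_model_RRFSegNet('pde_net', points_pl,
                                     is_training=is_training_pl)  # BxNx3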
Example #25
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
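A classification loss one might pair with this model; a minimal sketch following the usual PointNet/DGCNN convention (softmax cross-entropy over the Bx40 logits), not taken from the original file.

import tensorflow as tf

def get_loss(pred, label):
    # pred: Bx40 logits, label: B integer class ids
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred,
                                                          labels=label)
    return tf.reduce_mean(loss)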
Example #26
def get_model_groupdata(group_data, mask, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx4, output Bx40 """
    batch_size = group_data.get_shape()[0].value  #32
    num_point = group_data.get_shape()[1].value  #1024

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net_edge_net(group_data,
                                                 mask,
                                                 is_training,
                                                 bn_decay,
                                                 K=4)

    group_data_transformed = tf.matmul(
        tf.reshape(group_data, [batch_size, -1, 4]), transform)
    group_data_transformed = tf.reshape(group_data_transformed,
                                        [batch_size, num_point, -1])  # B N K C
    #input_image = tf.expand_dims(group_data_transformed, -1)
    input_image = group_data_transformed
    with tf.variable_scope('edge_net1') as sc:
        net, kernel, _, _ = edge_net.edge_unit(input_image,
                                               mask,
                                               'max',
                                               config.neighbor_num,
                                               32,
                                               scope='conv1',
                                               bn=True,
                                               is_training=is_training,
                                               bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')

    return net, transform
Example #27
def get_model(point_cloud,
              is_training,
              bn_decay=None,
              block_size=2,
              wd=0.0,
              h_size=9):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)

    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         weight_decay=wd,
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         weight_decay=wd,
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         weight_decay=wd,
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         16 * h_size**2, [1, 1],
                         weight_decay=wd,
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='max_pool')

    net = out(net, batch_size, h_size)

    for i in range(block_size):
        net = block(net, i, is_training, bn_decay, wd)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  3,
                                  activation_fn=None,
                                  scope='fc2',
                                  weight_decay=wd)

    return net, end_points
Example #28
def get_model_ec(group_data, mask, is_training, bn_decay=None):
    # groupdata B N K*C
    batch_size = group_data.get_shape()[0].value
    num_point = group_data.get_shape()[1].value
    ec = econ.create_ec(group_data, mask)  # B N K ec_length
    ec_length = ec.get_shape()[3].value
    ec = tf.reshape(ec, [batch_size, num_point, -1])  # B N 9
    with tf.variable_scope('transform_net1_ec') as sc:
        transform = input_transform_net_edge_net(ec,
                                                 mask,
                                                 is_training,
                                                 bn_decay,
                                                 K=ec_length)

    ec_transformed = tf.matmul(tf.reshape(ec, [batch_size, -1, ec_length]),
                               transform)
    ec_transformed = tf.reshape(ec_transformed, [batch_size, num_point, -1])
    input_image = ec_transformed

    with tf.variable_scope('ec_net1') as sc:
        net, kernel, _, _ = edge_net.edge_unit(input_image,
                                               mask,
                                               'max',
                                               config.neighbor_num,
                                               32,
                                               scope='conv1',
                                               bn=True,
                                               is_training=is_training,
                                               bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')

    return net, transform
Example #29
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii in local regions
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points_concat: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points,
                                                num_out_channel, [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, j),
                                                bn_decay=bn_decay)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
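A call sketch for the MSG module, with multi-scale settings in the style of PointNet++; the specific radii, sample counts, and MLP widths here are illustrative assumptions, and l0_xyz/l0_points are whatever coordinates and features the surrounding network provides.

new_xyz, new_points = pointnet_sa_module_msg(
    l0_xyz, l0_points, npoint=512,
    radius_list=[0.1, 0.2, 0.4],
    nsample_list=[16, 32, 128],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training=is_training, bn_decay=bn_decay, scope='sa_msg_layer1')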
Example #31
def get_model(point_cloud,
              is_training,
              num_neighbors,
              farthest_distance,
              bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx9, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = point_cloud[:, :, 0:3]
    l0_points = point_cloud[:, :, 3:9]

    ####### Contextual representation

    new_xyz = l0_xyz  # (batch_size, npoint, 3)
    idx, pts_cnt = query_ball_point(farthest_distance, num_neighbors, l0_xyz,
                                    new_xyz)

    neighbor_xyz = group_point(l0_xyz, idx)
    neighbor_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                            [1, 1, num_neighbors, 1])

    neighbor_points = group_point(l0_points, idx)
    neighbor_representation = tf.concat([neighbor_xyz, neighbor_points],
                                        axis=-1)
    neighbor_representation = tf.reshape(neighbor_representation,
                                         (batch_size, num_point, -1))

    num_channel = neighbor_representation.get_shape()[2].value
    points = tf_util.conv1d(point_cloud,
                            num_channel,
                            1,
                            padding='VALID',
                            bn=True,
                            is_training=is_training,
                            scope='points_fc',
                            bn_decay=bn_decay)

    neighbor_representation_gp = gating_process(
        neighbor_representation,
        num_channel,
        padding='VALID',
        is_training=is_training,
        scope='neighbor_representation_gp',
        bn_decay=bn_decay)
    points_gp = gating_process(points,
                               num_channel,
                               padding='VALID',
                               is_training=is_training,
                               scope='points_gp',
                               bn_decay=bn_decay)

    l0_points_CR = tf.concat([
        neighbor_representation_gp * points,
        points_gp * neighbor_representation
    ],
                             axis=-1)

    ########## Positional Representation

    idx, pts_cnt = query_ball_point(0.06, 16, l0_xyz, l0_xyz)
    neighbor_xyz = group_point(l0_xyz, idx)
    # neighbor_xyz = self.gather_neighbour(xyz, neigh_idx)
    xyz_tile = tf.tile(tf.expand_dims(l0_xyz, axis=2),
                       [1, 1, tf.shape(idx)[-1], 1])
    relative_xyz = xyz_tile - neighbor_xyz
    # relative_xyz =neighbor_xyz

    relative_dis = tf.reduce_sum(tf.square(relative_xyz),
                                 axis=-1,
                                 keep_dims=True)
    encoded_position = tf.concat(
        [relative_dis, relative_xyz, xyz_tile, neighbor_xyz], axis=-1)
    encoded_position = tf_util.conv2d(encoded_position,
                                      num_channel * 2, [1, 1],
                                      padding='VALID',
                                      stride=[1, 1],
                                      bn=True,
                                      is_training=is_training,
                                      scope='conv011',
                                      bn_decay=bn_decay)

    encoded_neighbours = group_point(l0_points, idx)
    positional_representation = tf.concat(
        [encoded_neighbours, encoded_position], axis=-1)
    positional_representation = tf.reduce_mean(positional_representation,
                                               axis=[2],
                                               keep_dims=True,
                                               name='avgpool')
    points = tf_util.conv2d(positional_representation,
                            num_channel * 2, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            bn=True,
                            is_training=is_training,
                            scope='attp',
                            bn_decay=bn_decay)
    points = tf.squeeze(points, [2])
    l0_points = points + l0_points_CR

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module_withgab(
        l0_xyz,
        l0_points,
        npoint=1024,
        radius=0.1,
        nsample=32,
        mlp=[32, 32, 64],
        mlp2=[64, 64],
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer1',
        gab=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module_withgab(
        l1_xyz,
        l1_points,
        npoint=256,
        radius=0.2,
        nsample=32,
        mlp=[64, 64, 128],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module_withgab(
        l2_xyz,
        l2_points,
        npoint=64,
        radius=0.4,
        nsample=32,
        mlp=[128, 128, 256],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module_withgab(
        l3_xyz,
        l3_points,
        npoint=16,
        radius=0.8,
        nsample=32,
        mlp=[256, 256, 512],
        mlp2=None,
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz,
                                   l4_xyz,
                                   l3_points,
                                   l4_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz,
                                   l1_xyz,
                                   l0_points,
                                   l1_points, [128, 128, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)

    ########## Channel-wise Attention
    feat = net  # (B, N, 128) point features shared by the attention branches
    output_a = tf_util.conv2d(tf.expand_dims(feat, 1),
                              128, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv_output_a',
                              bn_decay=bn_decay)

    output_b = tf_util.conv2d(tf.expand_dims(feat, 1),
                              128, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv_output_b',
                              bn_decay=bn_decay)

    output_b = tf.transpose(output_b, [0, 1, 3, 2])

    output_a = tf.squeeze(output_a, [1])  # (B, N, C)
    output_b = tf.squeeze(output_b, [1])  # (B, C, N)

    energy = tf.matmul(output_b, output_a)  # (B, C, C) channel-affinity matrix

    D = tf.reduce_max(energy, -1)
    D = tf.expand_dims(D, -1)

    # softmax over (row max - energy): the channel-attention form used in DANet
    energy_new = tf.tile(D, multiples=[1, 1, energy.shape[2]]) - energy
    attention = tf.nn.softmax(energy_new, axis=-1)

    output_d = tf_util.conv2d(tf.expand_dims(feat, 1),
                              128, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv_output_d',
                              bn_decay=bn_decay)
    output_d = tf.squeeze(output_d, [1])
    output_CA = tf.matmul(output_d, attention)  # re-mix channels: (B, N, C) x (B, C, C)

    gamma2 = tf_util._variable_with_weight_decay('weightsgamma2m',
                                                 shape=[1],
                                                 use_xavier=True,
                                                 stddev=1e-3,
                                                 wd=0.0)
    output_CA = output_CA * gamma2 + feat  # learned residual blend of the attended features
    output = output_CA

    ########## Squeeze-and-Excitation
    ex1 = tf.reduce_mean(feat, axis=[1], keep_dims=True, name='avgpool1')  # (B, 1, 128)
    ex1 = tf_util.conv1d(ex1, 64, 1, padding='VALID', scope='ex1')   # squeeze: (B, 1, 64)
    ex1 = tf_util.conv1d(ex1, 128, 1, padding='VALID', scope='ex2')  # excite: (B, 1, 128)
    output2 = feat * ex1  # channel-wise rescaling, broadcast over all points

    # output=output+output2

    output = tf.concat([output, output2], axis=-1)

    end_points['feats'] = output
    net = tf_util.dropout(output,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         13,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')

    return net, end_points
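# Usage sketch (not part of the original example): the head above ends in a
# 13-way per-point classifier, which matches S3DIS-style scene segmentation.
# The call below assumes the get_model(point_cloud, is_training, bn_decay)
# signature used by the other examples in this file and a 9-channel input;
# both are assumptions.
import tensorflow as tf

BATCH_SIZE, NUM_POINT = 16, 4096  # assumed sizes
pointclouds_pl = tf.placeholder(tf.float32, (BATCH_SIZE, NUM_POINT, 9))
is_training_pl = tf.placeholder(tf.bool, shape=())
logits, end_points = get_model(pointclouds_pl, is_training_pl, bn_decay=None)
# logits: (BATCH_SIZE, NUM_POINT, 13) per-point class scores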
Example #32
def pointnet_sa_module(xyz,
                       points,
                       npoint,
                       radius,
                       nsample,
                       mlp,
                       mlp2,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       sort_k=None,
                       tnet_spec=None,
                       knn=False,
                       use_xyz=True):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one PC if set true, overriding
                the npoint, radius and nsample settings
            pooling: string -- one of 'max', 'avg', 'weighted_avg', 'min',
                'max_and_avg', 'sort'
            sort_k: int32 -- how many top responses per channel to keep when
                pooling == 'sort'
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    with tf.variable_scope(scope) as sc:
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, tnet_spec, knn, use_xyz)
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d' % (i),
                                        bn_decay=bn_decay)
        if pooling == 'avg':
            new_points = tf_util.avg_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='avgpool1')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg1'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2,
                    keep_dims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
        elif pooling == 'min':
            new_points = tf_util.max_pool2d(-1 * new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='minpool1')
        elif pooling == 'max_and_avg':
            max_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool1')
            avg_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool1')
            new_points = tf.concat([avg_points, max_points], axis=-1)
        elif pooling == 'sort':
            new_points = tf.transpose(tf.nn.top_k(
                (tf.transpose(new_points, perm=[0, 1, 3, 2])), sort_k)[0],
                                      perm=[0, 1, 3, 2])  # (?, 2048, 1, 64)

        print(new_points.get_shape())
        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv_post_%d' % (i),
                                        bn_decay=bn_decay)
        if pooling != 'sort':
            new_points = tf.squeeze(new_points,
                                    [2])  # (batch_size, npoints, mlp2[-1])
        print(new_points.get_shape())
        return new_xyz, new_points, idx
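# Usage sketch for the 'sort' pooling path (all shapes are assumptions):
# tf.nn.top_k keeps the sort_k strongest responses per channel and the final
# squeeze is skipped, so the output keeps a neighbourhood axis, unlike the
# other pooling modes.
import tensorflow as tf

xyz = tf.placeholder(tf.float32, (32, 2048, 3))
points = tf.placeholder(tf.float32, (32, 2048, 16))
is_training = tf.placeholder(tf.bool, shape=())
new_xyz, new_points, idx = pointnet_sa_module(
    xyz, points, npoint=512, radius=0.2, nsample=64,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training, bn_decay=None, scope='sa_sort',
    pooling='sort', sort_k=4)
# new_points: (32, 512, 4, 128) -- top-4 responses per channel, not squeezed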
def get_instance_seg_v1_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v1 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    net = tf.expand_dims(point_cloud, 2)

    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')

    global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)

    logits = tf_util.conv2d(net, 2, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    logits = tf.squeeze(logits, [2]) # BxNxC
    return logits, end_points
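# Call sketch (the shapes and the empty end_points dict are assumptions):
import tensorflow as tf

pc = tf.placeholder(tf.float32, (32, 1024, 4))  # frustum XYZ + intensity
one_hot = tf.placeholder(tf.float32, (32, 3))   # predicted object class
logits, end_points = get_instance_seg_v1_net(
    pc, one_hot, tf.placeholder(tf.bool, shape=()), None, {})
# logits: (32, 1024, 2) -- background/object score for every frustum point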
Example #34
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec, is_training,
                                 bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv-reg4',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool2')
    net = tf.squeeze(net, axis=[1, 2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net,
                                  512,
                                  scope='fc1',
                                  bn=True,
                                  is_training=is_training,
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  scope='fc2',
                                  bn=True,
                                  is_training=is_training,
                                  bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals,
    # the next NUM_SIZE_CLUSTER*4: size cluster scores and residuals
    # (one score plus a 3-D size residual per cluster)
    output = tf_util.fully_connected(net,
                                     3 + NUM_HEADING_BIN * 2 +
                                     NUM_SIZE_CLUSTER * 4,
                                     activation_fn=None,
                                     scope='fc3')
    return output, end_points
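# Decoding sketch: how the flat output vector splits back into its parts.
# The bin counts below are assumptions (12 heading bins and 8 size clusters
# are the usual Frustum-PointNets settings); any positive counts work.
import tensorflow as tf

NUM_HEADING_BIN, NUM_SIZE_CLUSTER = 12, 8  # assumed bin counts
output = tf.placeholder(tf.float32,
                        [None, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER * 4])
center = output[:, 0:3]                                   # (B, 3) box center
heading_scores = output[:, 3:3 + NUM_HEADING_BIN]         # (B, 12) bin logits
heading_residuals = output[:, 3 + NUM_HEADING_BIN:
                           3 + NUM_HEADING_BIN * 2]       # (B, 12) per-bin residuals
size_scores = output[:, 3 + NUM_HEADING_BIN * 2:
                     3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER]  # (B, 8) cluster logits
size_residuals = tf.reshape(
    output[:, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER:],
    [-1, NUM_SIZE_CLUSTER, 3])                            # (B, 8, 3) size residuals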
Example #35
def pointnet_sa_module_spider(xyz,
                              points,
                              npoint,
                              radius,
                              nsample,
                              mlp,
                              mlp2,
                              group_all,
                              is_training,
                              bn_decay,
                              scope,
                              pooling,
                              bn=True,
                              knn=False,
                              use_xyz=True,
                              use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one PC if set true, overriding
                the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, knn, use_xyz)

        #------------------------------------------------------------------------
        batch_size = grouped_xyz.get_shape()[0].value
        num_point = grouped_xyz.get_shape()[1].value
        K_knn = grouped_xyz.get_shape()[2].value
        in_channels = new_points.get_shape()[3].value
        shape = [1, 1, 1, 3]
        num_gau = 10  # number of Gaussian components built below (g_d1 .. g_d10)

        # Per-axis neighbour offsets, each [batch_size, num_point, K_knn, 1]
        X = tf.expand_dims(grouped_xyz[:, :, :, 0], -1)
        Y = tf.expand_dims(grouped_xyz[:, :, :, 1], -1)
        Z = tf.expand_dims(grouped_xyz[:, :, :, 2], -1)

        #var = grouped_xyz*grouped_xyz

        initializer = tf.contrib.layers.xavier_initializer()

        w_x = tf.tile(tf_util._variable_on_cpu('weight_x', shape, initializer),
                      [batch_size, num_point, K_knn, 1])
        w_y = tf.tile(tf_util._variable_on_cpu('weight_y', shape, initializer),
                      [batch_size, num_point, K_knn, 1])
        w_z = tf.tile(tf_util._variable_on_cpu('weight_z', shape, initializer),
                      [batch_size, num_point, K_knn, 1])
        w_xyz = tf.tile(
            tf_util._variable_on_cpu('weight_xyz', shape, initializer),
            [batch_size, num_point, K_knn, 1])

        w_xy = tf.tile(
            tf_util._variable_on_cpu('weight_xy', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_yz = tf.tile(
            tf_util._variable_on_cpu('weight_yz', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_xz = tf.tile(
            tf_util._variable_on_cpu('weight_xz', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases = tf.tile(
            tf_util._variable_on_cpu('biases', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])

        w_xx = tf.tile(
            tf_util._variable_on_cpu('weight_xx', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_yy = tf.tile(
            tf_util._variable_on_cpu('weight_yy', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_zz = tf.tile(
            tf_util._variable_on_cpu('weight_zz', shape, initializer),
            [batch_size, num_point, K_knn, 1])

        w_xxy = tf.tile(
            tf_util._variable_on_cpu('weight_xxy', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_xyy = tf.tile(
            tf_util._variable_on_cpu('weight_xyy', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_xxz = tf.tile(
            tf_util._variable_on_cpu('weight_xxz', shape, initializer),
            [batch_size, num_point, K_knn, 1])

        w_xzz = tf.tile(
            tf_util._variable_on_cpu('weight_xzz', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_yyz = tf.tile(
            tf_util._variable_on_cpu('weight_yyz', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_yzz = tf.tile(
            tf_util._variable_on_cpu('weight_yzz', shape, initializer),
            [batch_size, num_point, K_knn, 1])

        w_xxx = tf.tile(
            tf_util._variable_on_cpu('weight_xxx', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_yyy = tf.tile(
            tf_util._variable_on_cpu('weight_yyy', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        w_zzz = tf.tile(
            tf_util._variable_on_cpu('weight_zzz', shape, initializer),
            [batch_size, num_point, K_knn, 1])

        biases1 = tf.tile(
            tf_util._variable_on_cpu('biases1', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases2 = tf.tile(
            tf_util._variable_on_cpu('biases2', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases3 = tf.tile(
            tf_util._variable_on_cpu('biases3', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases4 = tf.tile(
            tf_util._variable_on_cpu('biases4', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases5 = tf.tile(
            tf_util._variable_on_cpu('biases5', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases6 = tf.tile(
            tf_util._variable_on_cpu('biases6', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases7 = tf.tile(
            tf_util._variable_on_cpu('biases7', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases8 = tf.tile(
            tf_util._variable_on_cpu('biases8', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases9 = tf.tile(
            tf_util._variable_on_cpu('biases9', shape,
                                     tf.constant_initializer(0.0)),
            [batch_size, num_point, K_knn, 1])
        biases10 = tf.tile(
            tf_util._variable_on_cpu('biases10', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases11 = tf.tile(
            tf_util._variable_on_cpu('biases11', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases12 = tf.tile(
            tf_util._variable_on_cpu('biases12', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases13 = tf.tile(
            tf_util._variable_on_cpu('biases13', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases14 = tf.tile(
            tf_util._variable_on_cpu('biases14', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases15 = tf.tile(
            tf_util._variable_on_cpu('biases15', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases16 = tf.tile(
            tf_util._variable_on_cpu('biases16', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases17 = tf.tile(
            tf_util._variable_on_cpu('biases17', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases18 = tf.tile(
            tf_util._variable_on_cpu('biases18', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases19 = tf.tile(
            tf_util._variable_on_cpu('biases19', shape, initializer),
            [batch_size, num_point, K_knn, 1])
        biases20 = tf.tile(
            tf_util._variable_on_cpu('biases20', shape, initializer),
            [batch_size, num_point, K_knn, 1])

        g1 = w_x * X + w_y * Y + w_z * Z + w_xyz * X * Y * Z + biases
        g2 = w_xy * X * Y + w_yz * Y * Z + w_xz * X * Z
        g3 = w_xx * X * X + w_yy * Y * Y + w_zz * Z * Z
        g4 = w_xxy * X * X * Y + w_xyy * X * Y * Y + w_xxz * X * X * Z
        g5 = w_xzz * X * Z * Z + w_yyz * Y * Y * Z + w_yzz * Y * Z * Z
        g6 = w_xxx * X * X * X + w_yyy * Y * Y * Y + w_zzz * Z * Z * Z
        g_d = g1 + g2 + g3 + g4 + g5 + g6
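        # g_d evaluates a full order-3 polynomial of the local offsets
        # (X, Y, Z): the tiled weights above carry one coefficient per
        # monomial up to degree three, giving three independent responses
        # per neighbour (each weight has shape [1, 1, 1, 3]). This is the
        # Taylor-polynomial filter family used by SpiderCNN-style convs.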

        # For the Paris-Lille-3D variant only the first-order term was kept:
        # g_d = g1

        g_d1 = tf.exp(-0.5 * (g_d - biases1) * (g_d - biases1) /
                      (biases11 * biases11))
        g_d2 = tf.exp(-0.5 * (g_d - biases2) * (g_d - biases2) /
                      (biases12 * biases12))
        g_d3 = tf.exp(-0.5 * (g_d - biases3) * (g_d - biases3) /
                      (biases13 * biases13))
        g_d4 = tf.exp(-0.5 * (g_d - biases4) * (g_d - biases4) /
                      (biases14 * biases14))
        g_d5 = tf.exp(-0.5 * (g_d - biases5) * (g_d - biases5) /
                      (biases15 * biases15))
        g_d6 = tf.exp(-0.5 * (g_d - biases6) * (g_d - biases6) /
                      (biases16 * biases16))
        g_d7 = tf.exp(-0.5 * (g_d - biases7) * (g_d - biases7) /
                      (biases17 * biases17))
        g_d8 = tf.exp(-0.5 * (g_d - biases8) * (g_d - biases8) /
                      (biases18 * biases18))
        g_d9 = tf.exp(-0.5 * (g_d - biases9) * (g_d - biases9) /
                      (biases19 * biases19))
        g_d10 = tf.exp(-0.5 * (g_d - biases10) * (g_d - biases10) /
                       (biases20 * biases20))
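        # Each g_di above is a Gaussian (RBF) response of the polynomial
        # value g_d, with learned means (biases1..biases10) and learned
        # widths (biases11..biases20); the ten responses are used further
        # down to gate the grouped point features before the MLP.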
        '''g_d1 = tf.exp((g_d-biases1))
        g_d2 = tf.exp((g_d-biases2))
        g_d3 = tf.exp((g_d-biases3))
        g_d4 = tf.exp((g_d-biases4))
        g_d5 = tf.exp((g_d-biases5))
        g_d6 = tf.exp((g_d-biases6))
        g_d7 = tf.exp((g_d-biases7))
        g_d8 = tf.exp((g_d-biases8))
        g_d9 = tf.exp((g_d-biases9))
        g_d10 = tf.exp((g_d-biases10))'''

        # Insert a channel axis: each g_di is now [batch_size, num_point, K_knn, 1, 3]
        g_d1 = tf.expand_dims(g_d1, 3)
        g_d2 = tf.expand_dims(g_d2, 3)
        g_d3 = tf.expand_dims(g_d3, 3)
        g_d4 = tf.expand_dims(g_d4, 3)
        g_d5 = tf.expand_dims(g_d5, 3)
        g_d6 = tf.expand_dims(g_d6, 3)
        g_d7 = tf.expand_dims(g_d7, 3)
        g_d8 = tf.expand_dims(g_d8, 3)
        g_d9 = tf.expand_dims(g_d9, 3)
        g_d10 = tf.expand_dims(g_d10, 3)
        # Broadcast over feature channels: [batch_size, num_point, K_knn, in_channels, 3]
        g_d1 = tf.tile(g_d1, [1, 1, 1, in_channels, 1])
        g_d2 = tf.tile(g_d2, [1, 1, 1, in_channels, 1])
        g_d3 = tf.tile(g_d3, [1, 1, 1, in_channels, 1])
        g_d4 = tf.tile(g_d4, [1, 1, 1, in_channels, 1])
        g_d5 = tf.tile(g_d5, [1, 1, 1, in_channels, 1])
        g_d6 = tf.tile(g_d6, [1, 1, 1, in_channels, 1])
        g_d7 = tf.tile(g_d7, [1, 1, 1, in_channels, 1])
        g_d8 = tf.tile(g_d8, [1, 1, 1, in_channels, 1])
        g_d9 = tf.tile(g_d9, [1, 1, 1, in_channels, 1])
        g_d10 = tf.tile(g_d10, [1, 1, 1, in_channels, 1])
        new_points = tf.expand_dims(new_points, -1)  # [..., in_channels, 1]
        new_points = (new_points * g_d1 + new_points * g_d2 + new_points * g_d3 +
                      new_points * g_d4 + new_points * g_d5 + new_points * g_d6 +
                      new_points * g_d7 + new_points * g_d8 + new_points * g_d9 +
                      new_points * g_d10)
        # Fold the three per-monomial responses into the channel axis:
        # [batch_size, num_point, K_knn, in_channels, 3] -> [..., in_channels * 3]
        new_points = tf.reshape(
            new_points, [batch_size, num_point, K_knn, in_channels * 3])

        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d' % (i),
                                        bn_decay=bn_decay,
                                        data_format=data_format)
        if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1])

        # Pooling in Local Regions
        if pooling == 'max':
            new_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
        elif pooling == 'avg':
            new_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                exp_dists = tf_util.conv2d(tf.transpose(
                    exp_dists, [0, 1, 3, 2]),
                                           K_knn, [1, 1],
                                           padding='VALID',
                                           bn=True,
                                           is_training=is_training,
                                           scope='weighted',
                                           bn_decay=bn_decay)
                exp_dists = tf.transpose(exp_dists, [0, 1, 3, 2])
                weights = exp_dists / (
                    tf.reduce_sum(exp_dists, axis=2, keep_dims=True) + 1e-8
                )  # (batch_size, npoint, nsample, 1)
                new_points1 = new_points
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
                avg_points_max = tf.reduce_max(new_points1,
                                               axis=[2],
                                               keep_dims=True,
                                               name='avgpool')
                new_points = tf.concat([new_points, avg_points_max], axis=-1)
        elif pooling == 'max_and_avg':
            max_points = tf.reduce_max(new_points,
                                       axis=[2],
                                       keep_dims=True,
                                       name='maxpool')
            avg_points = tf.reduce_mean(new_points,
                                        axis=[2],
                                        keep_dims=True,
                                        name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        new_points = tf.squeeze(new_points, [2])  # (batch_size, npoint, C)
        '''_, new_points1, _, _ = pointSIFT_group(radius, new_xyz, new_points, use_xyz=False)
        new_points1 = tf.concat([tf.tile(tf.expand_dims(new_points,2),[1,1,8,1]), new_points1-tf.tile(tf.expand_dims(new_points,2),[1,1,8,1])], axis=-1)
        
        # Point Feature Embedding
        if use_nchw: new_points1 = tf.transpose(new_points1, [0,3,1,2])
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='convl%d'%(i), bn_decay=bn_decay,
                                        data_format=data_format) 
        if use_nchw: new_points1 = tf.transpose(new_points1, [0,2,3,1])
        new_points1 = tf.reduce_max(new_points1, axis=[2], keep_dims=False, name='maxpool')'''
        #new_points = tf.concat([new_points, new_points1], axis=-1)

        return new_xyz, new_points, idx
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.compat.v1.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          64, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])

    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
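# Invocation sketch; the 16-category / 50-part numbers are the usual
# ShapeNet-Part settings and are assumptions here:
import tensorflow as tf

B, N = 16, 2048  # assumed batch size and point count
pc = tf.placeholder(tf.float32, (B, N, 3))
one_hot = tf.placeholder(tf.float32, (B, 16))  # object-category one-hot
seg_logits = get_model(pc, one_hot, tf.placeholder(tf.bool, shape=()),
                       cat_num=16, part_num=50, batch_size=B, num_point=N,
                       weight_decay=0.0, bn_decay=None)
# seg_logits: (B, N, 50) per-point part logits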
Example #37
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- group all points into one PC if set true, overriding
                the npoint, radius and nsample settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(npoint, radius, nsample, xyz, points, knn, use_xyz)

        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2])
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='conv%d'%(i), bn_decay=bn_decay,
                                        data_format=data_format) 
        if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1])

        # Pooling in Local Regions
        if pooling=='max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
        elif pooling=='avg':
            new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
        elif pooling=='weighted_avg':
            with tf.variable_scope('weighted_avg'):
                dists = tf.norm(grouped_xyz,axis=-1,ord=2,keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists/tf.reduce_sum(exp_dists,axis=2,keep_dims=True) # (batch_size, npoint, nsample, 1)
                new_points *= weights # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling=='max_and_avg':
            max_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
            avg_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        # [Optional] Further Processing 
        if mlp2 is not None:
            if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2])
            for i, num_out_channel in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                            padding='VALID', stride=[1,1],
                                            bn=bn, is_training=is_training,
                                            scope='conv_post_%d'%(i), bn_decay=bn_decay,
                                            data_format=data_format) 
            if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1])

        new_points = tf.squeeze(new_points, [2]) # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points, idx
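# Usage sketch mirroring how PointNet++ stacks these modules; all concrete
# numbers are placeholders:
import tensorflow as tf

l0_xyz = tf.placeholder(tf.float32, (8, 1024, 3))
is_training = tf.placeholder(tf.bool, shape=())
l1_xyz, l1_points, l1_idx = pointnet_sa_module(
    l0_xyz, None, npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training, bn_decay=None, scope='sa1')
l2_xyz, l2_points, l2_idx = pointnet_sa_module(
    l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64,
    mlp=[128, 128, 256], mlp2=None, group_all=False,
    is_training=is_training, bn_decay=None, scope='sa2')
# l1_points: (8, 512, 128); l2_points: (8, 128, 256)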
def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        assert (K == 3)
        weights = tf.get_variable('weights', [256, 3 * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [3 * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
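# Why the zero weights plus identity bias: at initialization the matmul
# contributes nothing, so every example starts from the identity transform
# and training only learns a deviation from it. A minimal numpy check of
# that property (the feature values are hypothetical):
import numpy as np

net = np.random.randn(4, 256).astype(np.float32)  # stand-in for the FC features
weights = np.zeros((256, 9), dtype=np.float32)    # as initialized above
biases = np.eye(3, dtype=np.float32).flatten()    # identity offset
transform = (net @ weights + biases).reshape(4, 3, 3)
assert np.allclose(transform, np.eye(3))          # identity at step 0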
Example #39
def get_center_regression_net(object_point_cloud,
                              one_hot_vec,
                              is_training,
                              bn_decay,
                              end_points,
                              bn=True,
                              int_layers=None,
                              fc_scale=None):
    ''' Regression network for the center delta, a.k.a. T-Net.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in 3D mask coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        predicted_center: TF tensor in shape (B,3)
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=bn,
                         is_training=is_training,
                         scope='conv-reg1-stage1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=bn,
                         is_training=is_training,
                         scope='conv-reg2-stage1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=bn,
                         is_training=is_training,
                         scope='conv-reg3-stage1',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1, 2])
    net = tf.concat([net, one_hot_vec], axis=1)

    ## Custom feature layer
    if int_layers is not None:
        if fc_scale is not None:
            int_layers = tf_util.fully_connected(int_layers,
                                                 fc_scale,
                                                 bn=True,
                                                 is_training=is_training,
                                                 scope='cent_int_scale',
                                                 bn_decay=bn_decay)
            int_layers = tf_util.dropout(int_layers,
                                         keep_prob=0.6,
                                         is_training=is_training,
                                         scope='seg_int_dp2')
            # int_layers = tf_util.fully_connected(int_layers, fc_scale, bn=True,
            #     is_training=is_training, scope='cent_int_scal_2e', bn_decay=bn_decay)
            # int_layers = tf_util.dropout(int_layers, keep_prob=0.5,
            #     is_training=is_training, scope='seg_int_dp2')

        net = tf.concat([net, int_layers], axis=1)

    net = tf_util.fully_connected(net,
                                  256,
                                  scope='fc1-stage1',
                                  bn=bn,
                                  is_training=is_training,
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  128,
                                  scope='fc2-stage1',
                                  bn=bn,
                                  is_training=is_training,
                                  bn_decay=bn_decay)
    predicted_center = tf_util.fully_connected(net,
                                               3,
                                               activation_fn=None,
                                               scope='fc3-stage1')
    return predicted_center, end_points
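# Downstream-use sketch (all shapes and names here are assumptions): the
# regressed center is a delta in mask coordinates, typically subtracted
# from the object cloud before the box-estimation stage.
import tensorflow as tf

object_pc = tf.placeholder(tf.float32, (32, 512, 3))  # XYZ-only cloud
one_hot = tf.placeholder(tf.float32, (32, 3))
center_delta, end_points = get_center_regression_net(
    object_pc, one_hot, tf.placeholder(tf.bool, shape=()), None, {})
object_pc_recentered = object_pc - tf.expand_dims(center_delta, 1)  # (B, M, 3) - (B, 1, 3)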
Example #40
def get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay,
                            end_points):
    ''' 3D instance segmentation PointNet v1 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    net = tf.expand_dims(point_cloud, 2)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    vis = net
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv3',
                                bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')

    global_feat = tf.concat(
        [global_feat,
         tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)],
        axis=3)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])

    net = tf_util.conv2d(concat_feat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9',
                         bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)

    logits = tf_util.conv2d(net,
                            2, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            activation_fn=None,
                            scope='conv10')
    logits = tf.squeeze(logits, [2])  # BxNxC
    end_points['vis'] = vis
    return logits, end_points
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
		batch_size, num_point, weight_decay, bn_decay=None):
    """ ConvNet baseline, input is BxNx3 gray image """
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        K = 3
        transform = get_transform(point_cloud, is_training, bn_decay, K=K)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    out1 = tf_util.conv2d(input_image, 64, [1,K], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    out2 = tf_util.conv2d(out1, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    out3 = tf_util.conv2d(out2, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)


    with tf.variable_scope('transform_net2') as sc:
        K = 128
        transform = get_transform_K(out3, is_training, bn_decay, K)

    end_points['transform'] = transform

    squeezed_out3 = tf.reshape(out3, [batch_size, num_point, 128])
    net_transformed = tf.matmul(squeezed_out3, transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    out4 = tf_util.conv2d(net_transformed, 512, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    out5 = tf_util.conv2d(out4, 2048, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    out_max = tf_util.max_pool2d(out5, [num_point,1], padding='VALID', scope='maxpool')

    # classification network
    net = tf.reshape(out_max, [batch_size, -1])
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='cla/dp1')
    net = tf_util.fully_connected(net, cat_num, activation_fn=None, scope='cla/fc3')

    # segmentation network
    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])

    expand = tf.tile(out_max, [1, num_point, 1, 1])
    concat = tf.concat(axis=3, values=[expand, out1, out2, out3, out4, out5])

    net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay)
    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp1')
    net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay)
    net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp2')
    net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                        bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay)
    net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None, 
                        bn=False, scope='seg/conv4', weight_decay=weight_decay)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net, net2, end_points
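
# Usage sketch: minimal TF 1.x graph construction for the classification +
# part-segmentation network above, assuming this file and tf_util are
# importable. The placeholder shapes and ShapeNet-style constants (16
# categories, 50 parts) are illustrative assumptions, and
# build_part_seg_graph_example is a hypothetical helper name.
def build_part_seg_graph_example(batch_size=8, num_point=2048,
                                 cat_num=16, part_num=50):
    points_pl = tf.placeholder(tf.float32, [batch_size, num_point, 3])
    label_pl = tf.placeholder(tf.float32, [batch_size, cat_num])  # one-hot category
    is_training_pl = tf.placeholder(tf.bool, shape=())
    # net: Bx(cat_num) classification logits; net2: BxNx(part_num) part logits.
    net, net2, end_points = get_model(points_pl, label_pl, is_training_pl,
                                      cat_num, part_num, batch_size, num_point,
                                      weight_decay=0.0, bn_decay=None)
    return net, net2, end_points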
def input_transform_net(edge_feature, is_training, bn_decay=None, K=3, is_dist=False):
  """ Input (XYZ) Transform Net, input is BxNx3 gray image
    Return:
      Transformation matrix of size 3xK """
  batch_size = edge_feature.get_shape()[0].value
  num_point = edge_feature.get_shape()[1].value

  print("(transform net) input", edge_feature.shape)
  print("(transform net) input to conv2d", edge_feature.shape)
  # input_image = tf.expand_dims(point_cloud, -1)
  net = tf_util.conv2d(edge_feature, 64, [1,1],
             padding='VALID', stride=[1,1],
             bn=True, is_training=is_training,
             scope='tconv1', bn_decay=bn_decay, is_dist=is_dist)
  print("(transform net) output of conv2d", net.shape)
  net = tf_util.conv2d(net, 128, [1,1],
             padding='VALID', stride=[1,1],
             bn=True, is_training=is_training,
             scope='tconv2', bn_decay=bn_decay, is_dist=is_dist)
  print("(transform net) output of conv2d", net.shape)
  
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  print("(transform net) output of reduce_max", net.shape)
  
  net = tf_util.conv2d(net, 1024, [1,1],
             padding='VALID', stride=[1,1],
             bn=True, is_training=is_training,
             scope='tconv3', bn_decay=bn_decay, is_dist=is_dist) 
  print("(transform net) output of conv2d", net.shape)

  net = tf_util.max_pool2d(net, [num_point,1],
               padding='VALID', scope='tmaxpool')
  print("(transform net) output of max", net.shape)

  net = tf.reshape(net, [batch_size, -1])
  print("(transform net) reshape output", net.shape)

  net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                  scope='tfc1', bn_decay=bn_decay,is_dist=is_dist)
  print("(transform net) fc1 output", net.shape)

  net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                  scope='tfc2', bn_decay=bn_decay,is_dist=is_dist)
  print("(transform net) fc2 output", net.shape)

  with tf.variable_scope('transform_XYZ') as sc:
    # assert(K==3)
    with tf.device('/cpu:0'):
      weights = tf.get_variable('weights', [256, K*K],
                    initializer=tf.constant_initializer(0.0),
                    dtype=tf.float32)
      biases = tf.get_variable('biases', [K*K],
                   initializer=tf.constant_initializer(0.0),
                   dtype=tf.float32)
    biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
    print("(transform net) matmul(net, weights):")
    print("(transform net) net:", net.shape)
    print("(transform net) weights:", weights.shape)

    transform = tf.matmul(net, weights)
    print("(transform net) transform", transform.shape)
    transform = tf.nn.bias_add(transform, biases)
    print("(transform net) transform", transform.shape)

  transform = tf.reshape(transform, [batch_size, K, K])
  return transform
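
# Usage sketch: one way to wire input_transform_net into a DGCNN-style
# pipeline. The BxNxkx6 edge-feature layout (point coordinates concatenated
# with neighbour offsets) is an assumption borrowed from the DGCNN convention,
# not something this file guarantees; align_points_example is a hypothetical
# helper name. Only the final matmul follows directly from how the KxK
# transform is meant to be used.
def align_points_example(batch_size=8, num_point=1024, k=20):
    points_pl = tf.placeholder(tf.float32, [batch_size, num_point, 3])
    edge_feat_pl = tf.placeholder(tf.float32, [batch_size, num_point, k, 6])
    is_training_pl = tf.placeholder(tf.bool, shape=())
    with tf.variable_scope('transform_net1'):
        transform = input_transform_net(edge_feat_pl, is_training_pl, K=3)
    # Align the raw points with the predicted 3x3 transform.
    return tf.matmul(points_pl, transform)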
def get_model(point_cloud, is_training, bn_decay=None):
    """ Autoencoder for point clouds.
    Input:
        point_cloud: TF tensor BxNx3
        is_training: boolean
        bn_decay: float between 0 and 1
    Output:
        net: TF tensor BxNx3, reconstructed point clouds
        end_points: dict
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    # assert(num_point==2048)
    point_dim = point_cloud.get_shape()[2].value
    end_points = {}

    input_image = tf.expand_dims(point_cloud, -1)

    # Encoder
    net = tf_util.conv2d(input_image,
                         64, [1, point_dim],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv3',
                                bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    end_points['pointwise'] = net
    # global_feat = tf_util.max_pool2d(net, [num_point,1],
    #                                  padding='VALID', scope='maxpool')
    global_feat = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(global_feat, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc00',
                                  bn_decay=bn_decay)
    embedding = tf.reshape(net, [batch_size, -1])
    end_points['embedding'] = embedding

    # FC Decoder
    net = tf_util.fully_connected(embedding,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  1024 * 3,
                                  activation_fn=None,
                                  scope='fc3')
    pc_fc = tf.reshape(net, (batch_size, -1, 3))

    # UPCONV Decoder
    net = tf.reshape(embedding, [batch_size, 1, 1, -1])
    print(net.shape)
    net = tf_util.conv2d_transpose(net,
                                   512,
                                   kernel_size=[2, 2],
                                   stride=[1, 1],
                                   padding='VALID',
                                   scope='upconv1',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    print(net.shape)
    net = tf_util.conv2d_transpose(net,
                                   256,
                                   kernel_size=[3, 3],
                                   stride=[1, 1],
                                   padding='VALID',
                                   scope='upconv2',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    print(net.shape)
    net = tf_util.conv2d_transpose(net,
                                   256,
                                   kernel_size=[4, 4],
                                   stride=[2, 2],
                                   padding='VALID',
                                   scope='upconv3',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    print(net.shape)
    net = tf_util.conv2d_transpose(net,
                                   128,
                                   kernel_size=[5, 5],
                                   stride=[3, 3],
                                   padding='VALID',
                                   scope='upconv4',
                                   bn=True,
                                   bn_decay=bn_decay,
                                   is_training=is_training)
    print(net.shape)
    net = tf_util.conv2d_transpose(net,
                                   3,
                                   kernel_size=[1, 1],
                                   stride=[1, 1],
                                   padding='VALID',
                                   scope='upconv5',
                                   activation_fn=None)
    end_points['xyzmap'] = net
    pc_upconv = tf.reshape(net, [batch_size, -1, 3])

    # Set union
    net = tf.concat(values=[pc_fc, pc_upconv], axis=1)

    return net, end_points
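
# Usage sketch: training-side wiring for the autoencoder above, with a naive
# O(N*M)-memory Chamfer-style loss written in plain TF. The original code
# bases typically use a compiled nn_distance op instead; chamfer_loss_example
# and build_autoencoder_example are hypothetical helper names.
def chamfer_loss_example(a, b):
    # a: BxNx3, b: BxMx3 -> scalar symmetric nearest-neighbour distance.
    d = tf.reduce_sum(tf.square(tf.expand_dims(a, 2) - tf.expand_dims(b, 1)),
                      axis=-1)  # BxNxM squared pairwise distances
    return (tf.reduce_mean(tf.reduce_min(d, axis=2)) +
            tf.reduce_mean(tf.reduce_min(d, axis=1)))

def build_autoencoder_example(batch_size=8, num_point=2048):
    points_pl = tf.placeholder(tf.float32, [batch_size, num_point, 3])
    is_training_pl = tf.placeholder(tf.bool, shape=())
    recon, end_points = get_model(points_pl, is_training_pl, bn_decay=None)
    loss = chamfer_loss_example(points_pl, recon)
    return recon, loss, end_points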
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx4, onehotvec is Bx3, output BxNx2 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    input_image = tf.expand_dims(point_cloud, -1)

    net = tf_util.conv2d(input_image, 64, [1,6],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
    print('Global Feat: ', global_feat)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    print(point_feat, global_feat_expand)
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)

    logits = tf_util.conv2d(net, 2, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    logits = tf.squeeze(logits, [2]) # BxNxC
    print(logits)

    print('-----------')
    #net = tf.concat(axis=3, values=[net, tf.expand_dims(tf.slice(point_cloud, [0,0,0], [-1,-1,3]), 2)])
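    # Foreground mask: a point is foreground when its channel-1 (object)
    # logit exceeds its channel-0 (background) logit.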
    mask = tf.slice(logits,[0,0,0],[-1,-1,1]) < tf.slice(logits,[0,0,1],[-1,-1,1])
    mask = tf.to_float(mask) # BxNx1
    mask_count = tf.tile(tf.reduce_sum(mask,axis=1,keep_dims=True), [1,1,3]) # Bx1x3
    print(mask)
    point_cloud_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3]) # BxNx3

    # ---- Subtract points mean ----
    mask_xyz_mean = tf.reduce_sum(tf.tile(mask, [1,1,3])*point_cloud_xyz, axis=1, keep_dims=True) # Bx1x3
    mask_xyz_mean = mask_xyz_mean/tf.maximum(mask_count,1) # Bx1x3
    point_cloud_xyz_stage1 = point_cloud_xyz - tf.tile(mask_xyz_mean, [1,num_point,1])
    print('Point cloud xyz stage1: ', point_cloud_xyz_stage1)

    # ---- Regress 1st stage center ----
    net = tf.expand_dims(point_cloud_xyz_stage1, 2)
    print(net)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
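    # Zero out background points so the max pool aggregates only predicted
    # foreground features.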
    mask_expand = tf.tile(tf.expand_dims(mask,-1), [1,1,1,256])
    masked_net = net*mask_expand
    print(masked_net)
    net = tf_util.max_pool2d(masked_net, [num_point,1], padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    print(net)
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True, is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True, is_training=is_training, bn_decay=bn_decay)
    stage1_center = tf_util.fully_connected(net, 3, activation_fn=None, scope='fc3-stage1')
    stage1_center = stage1_center + tf.squeeze(mask_xyz_mean, axis=1) # Bx3
    end_points['stage1_center'] = stage1_center

    # ---- Subtract stage1 center ----
    point_cloud_xyz_submean = point_cloud_xyz - tf.expand_dims(stage1_center, 1)
    print('Point cloud xyz submean: ', point_cloud_xyz_submean)

    net = tf.expand_dims(point_cloud_xyz_submean, 2)
    print(net)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg4', bn_decay=bn_decay)
    mask_expand = tf.tile(tf.expand_dims(mask,-1), [1,1,1,512])
    masked_net = net*mask_expand
    print(masked_net)
    net = tf_util.max_pool2d(masked_net, [num_point,1], padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1,2])
    print(net)
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True, is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True, is_training=is_training, bn_decay=bn_decay)

    # First 3 are cx,cy,cz, next NUM_HEADING_BIN*2 are for heading
    # next NUM_SIZE_CLUSTER*4 are for dimension
    output = tf_util.fully_connected(net, 3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    print(output)

    center = tf.slice(output, [0,0], [-1,3])
    center = center + stage1_center # Bx3
    end_points['center'] = center

    heading_scores = tf.slice(output, [0,3], [-1,NUM_HEADING_BIN])
    heading_residuals_normalized = tf.slice(output, [0,3+NUM_HEADING_BIN], [-1,NUM_HEADING_BIN])
    end_points['heading_scores'] = heading_scores # BxNUM_HEADING_BIN
    end_points['heading_residuals_normalized'] = heading_residuals_normalized # BxNUM_HEADING_BIN (should be -1 to 1)
    end_points['heading_residuals'] = heading_residuals_normalized * (np.pi/NUM_HEADING_BIN) # BxNUM_HEADING_BIN
    
    size_scores = tf.slice(output, [0,3+NUM_HEADING_BIN*2], [-1,NUM_SIZE_CLUSTER]) # BxNUM_SIZE_CLUSTER
    size_residuals_normalized = tf.slice(output, [0,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER], [-1,NUM_SIZE_CLUSTER*3])
    size_residuals_normalized = tf.reshape(size_residuals_normalized, [batch_size, NUM_SIZE_CLUSTER, 3]) # BxNUM_SIZE_CLUSTERx3
    end_points['size_scores'] = size_scores
    end_points['size_residuals_normalized'] = size_residuals_normalized
    end_points['size_residuals'] = size_residuals_normalized * tf.expand_dims(tf.constant(mean_size_arr, dtype=tf.float32), 0)

    return logits, end_points
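
# Usage sketch: graph construction for the combined segmentation + box
# estimation network above. NUM_HEADING_BIN, NUM_SIZE_CLUSTER and
# mean_size_arr are module-level constants in the original Frustum PointNets
# code and must already be defined; build_frustum_graph_example is a
# hypothetical helper name.
def build_frustum_graph_example(batch_size=8, num_point=1024):
    points_pl = tf.placeholder(tf.float32, [batch_size, num_point, 6])
    one_hot_pl = tf.placeholder(tf.float32, [batch_size, 3])  # object class one-hot
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = get_model(points_pl, one_hot_pl, is_training_pl)
    # logits: BxNx2 per-point foreground scores; end_points carries
    # center / heading / size regression outputs.
    return logits, end_points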